Merge branch 'main' into matyax/monaco-loki-from-hackathon

This commit is contained in:
Matias Chomicki 2022-09-30 17:38:03 +02:00
commit f7fd7d3c27
191 changed files with 8080 additions and 1546 deletions


@ -3337,11 +3337,9 @@ exports[`better eslint`] = {
[0, 0, 0, "Unexpected any. Specify a different type.", "1"]
],
"public/app/features/alerting/unified/components/receivers/form/ChannelOptions.tsx:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"],
[0, 0, 0, "Unexpected any. Specify a different type.", "1"],
[0, 0, 0, "Unexpected any. Specify a different type.", "2"],
[0, 0, 0, "Do not use any type assertions.", "3"],
[0, 0, 0, "Unexpected any. Specify a different type.", "4"]
[0, 0, 0, "Unexpected any. Specify a different type.", "0"],
[0, 0, 0, "Do not use any type assertions.", "1"],
[0, 0, 0, "Unexpected any. Specify a different type.", "2"]
],
"public/app/features/alerting/unified/components/receivers/form/ReceiverForm.tsx:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"],
@ -4528,9 +4526,6 @@ exports[`better eslint`] = {
[0, 0, 0, "Do not use any type assertions.", "2"],
[0, 0, 0, "Do not use any type assertions.", "3"]
],
"public/app/features/inspector/InspectDataTab.tsx:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"]
],
"public/app/features/inspector/InspectErrorTab.tsx:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"]
],
@ -4651,6 +4646,13 @@ exports[`better eslint`] = {
"public/app/features/logs/components/logParser.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"]
],
"public/app/features/logs/utils.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"],
[0, 0, 0, "Unexpected any. Specify a different type.", "1"],
[0, 0, 0, "Do not use any type assertions.", "2"],
[0, 0, 0, "Unexpected any. Specify a different type.", "3"],
[0, 0, 0, "Do not use any type assertions.", "4"]
],
"public/app/features/manage-dashboards/DashboardImportPage.tsx:5381": [
[0, 0, 0, "Unexpected any. Specify a different type.", "0"],
[0, 0, 0, "Unexpected any. Specify a different type.", "1"]


@ -113,7 +113,7 @@
}
],
"jsx-a11y/no-noninteractive-element-to-interactive-role": "off",
"jsx-a11y/no-noninteractive-tabindex": "off",
"jsx-a11y/no-noninteractive-tabindex": "error",
"jsx-a11y/no-redundant-roles": "error",
"jsx-a11y/no-static-element-interactions": "off",
"jsx-a11y/role-has-required-aria-props": "error",


@ -16,9 +16,8 @@ jobs:
steps:
- uses: actions/checkout@v3
- run: git clone --single-branch --no-tags --depth 1 -b master https://grafanabot:${{ secrets.GH_BOT_ACCESS_TOKEN }}@github.com/grafana/website-sync ./.github/actions/website-sync
- name: generate-packages-docs
uses: actions/setup-node@v3.4.0
id: generate-docs
- name: setup node
uses: actions/setup-node@v3.4.0
with:
node-version: '16'
- name: Get yarn cache directory path

.gitignore

@ -130,6 +130,7 @@ pkg/cmd/grafana-server/__debug_bin
/packages/**/.rpt2_cache
/packages/**/tsdoc-metadata.json
/packages/**/package.tgz
/packages/grafana-toolkit/sass
## CI places the packages in a different location
/npm-artifacts/*.tgz


@ -55,6 +55,8 @@ When to use which log level?
Use a contextual logger to include additional key/value pairs attached to `context.Context`, e.g. `traceID`, to allow correlating logs with traces and/or correlate logs with a common identifier.
You must [Enable tracing in Grafana](#2-enable-tracing-in-grafana) to get a `traceID`.
Example:
```go
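// Hedged sketch: assumes the FromContext helper available on Grafana's
// pkg/infra/log logger, which copies contextual key/value pairs (such as
// traceID) from ctx onto every line the logger writes.
var logger = log.New("my-feature")

func doSomething(ctx context.Context) {
	// ctxLogger carries the traceID attached to ctx, so this line can be
	// correlated with the matching trace.
	ctxLogger := logger.FromContext(ctx)
	ctxLogger.Debug("something interesting happened", "key", "value")
}
```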
@ -241,36 +243,38 @@ Be **careful** to not expose any sensitive information in span names, attribute
### How to collect, visualize and query traces (and correlate logs with traces) locally

#### 1. Start Jaeger

```bash
make devenv sources=jaeger
```

#### 2. Enable tracing in Grafana

To enable tracing in Grafana, you must set the address in your config.ini file.

opentelemetry tracing (recommended):

```ini
[tracing.opentelemetry.jaeger]
address = http://localhost:14268/api/traces
```

opentracing tracing (deprecated/not recommended):

```ini
[tracing.jaeger]
address = localhost:6831
```

#### 3. Search/browse collected logs and traces in Grafana Explore

You need provisioned gdev-jaeger and gdev-loki datasources, see [developer dashboard and data sources](https://github.com/grafana/grafana/tree/main/devenv#developer-dashboards-and-data-sources) for setup instructions.

Open Grafana Explore, select the gdev-loki datasource and use the query `{filename="/var/log/grafana/grafana.log"} | logfmt`.

You can then inspect any log message that includes a `traceID` and from there click on `gdev-jaeger` to split view and inspect the trace in question.

#### 4. Search/browse collected traces in Jaeger UI

You can open http://localhost:16686 to use the Jaeger UI for browsing and searching traces.


@ -9,7 +9,7 @@ weight: 105
# Explore Grafana Alerting
Whether you're starting or expanding your implementation of Grafana Alerting, learn more about the key concepts and available features that help you create, manage, and take action on your alerts and improve your team's ability to resolve issues quickly.
Learn about the key concepts and features that help you create, manage, and take action on your alerts and improve your team's ability to resolve issues quickly.
- [Data sources](https://grafana.com/docs/grafana/latest/alerting/fundamentals/data-source-alerting/)
- [Alert rules](https://grafana.com/docs/grafana/latest/alerting/fundamentals/alert-rules/)


@ -96,19 +96,17 @@ EOT
}
```
2. Enter text for your notification in the text field.
The `text` field supports [Go-style templating](https://pkg.go.dev/text/template). This enables you to manage your Grafana Alerting message templates directly in Terraform.
3. Run the command `terraform apply`.
4. Go to the Grafana UI and check the details of your contact point.
You cannot edit resources provisioned via Terraform from the UI. This ensures that your alerting stack always stays in sync with your code.
5. Click **Test** to verify that the contact point works correctly.
**Note:**
@ -172,17 +170,17 @@ contact_point = grafana_contact_point.my_slack_contact_point.name
}
2. In the mute_timings field, link a mute timing to your notification policy.
3. Run the command `terraform apply`.
4. Go to the Grafana UI and check the details of your notification policy.
**Note:**
You cannot edit resources provisioned from Terraform from the UI. This ensures that your alerting stack always stays in sync with your code.
5. Click **Test** to verify that the notification point is working correctly.
## Provision mute timings
@ -209,16 +207,16 @@ name = "My Mute Timing"
}
2. Run the command `terraform apply`.
3. Go to the Grafana UI and check the details of your mute timing.
4. Reference your newly created mute timing in a notification policy using the `mute_timings` field.
This will apply your mute timing to some or all of your notifications.
**Note:**
You cannot edit resources provisioned from Terraform from the UI. This ensures that your alerting stack always stays in sync with your code.
5. Click **Test** to verify that the mute timing is working correctly.
## Provision alert rules
@ -243,11 +241,11 @@ resource "grafana_folder" "rule_folder" {
}
```
2. Define an alert rule.
For more information on alert rules, refer to [how to create Grafana-managed alerts](https://grafana.com/blog/2022/08/01/grafana-alerting-video-how-to-create-alerts-in-grafana-9/).
3. Create a rule group containing one or more rules.
In this example, the `grafana_rule_group` resource group is used.
@ -314,7 +312,7 @@ EOT
}
```
4. Go to the Grafana UI and check your alert rule.
You can see whether or not the alert rule is firing. You can also see a visualization of each of the alert rule's query stages.


@ -59,5 +59,6 @@ publicDashboards = true
- Exemplars will be omitted from the panel.
- Annotations will not be displayed in public dashboards.
- Grafana Live and real-time event streams are not supported.
- Library panels are not currently supported, but support is planned for the future.
We are excited to share this enhancement with you and we'd love your feedback! Please check out the [GitHub](https://github.com/grafana/grafana/discussions/49253) discussion and join the conversation.


@ -15,16 +15,416 @@ title: Reporting API
This API allows you to interact programmatically with the [Reporting]({{< relref "../../dashboards/create-reports/" >}}) feature.
> The Reporting API is not stabilized yet; it is still in active development and may change without prior notice.
> Reporting is only available in Grafana Enterprise. Read more about [Grafana Enterprise]({{< relref "../../enterprise/" >}}).
> If you are running Grafana Enterprise, for some endpoints you'll need to have specific permissions. Refer to [Role-based access control permissions]({{< relref "../../administration/roles-and-permissions/access-control/custom-role-actions-scopes/" >}}) for more information.
## List all reports
`GET /api/reports`
#### Required permissions
See note in the [introduction]({{< ref "#reporting-api" >}}) for an explanation.
| Action | Scope |
| ------------ | --------------------------- |
| reports:read | reports:\*<br>reports:id:\* |
### Example request
```http
GET /api/reports HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
```
### Example response
```http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 1840
[
{
"id": 2,
"userId": 1,
"orgId": 1,
"name": "Report 2",
"recipients": "example-report@grafana.com",
"replyTo": "",
"message": "Hi, \nPlease find attached a PDF status report. If you have any questions, feel free to contact me!\nBest,",
"schedule": {
"startDate": "2022-10-02T00:00:00+02:00",
"endDate": null,
"frequency": "once",
"intervalFrequency": "",
"intervalAmount": 0,
"workdaysOnly": false,
"dayOfMonth": "2",
"timeZone": "Europe/Warsaw"
},
"options": {
"orientation": "landscape",
"layout": "grid",
},
"enableDashboardUrl": true,
"state": "scheduled",
"dashboards": [
{
"dashboard": {
"id": 463,
"uid": "7MeksYbmk",
"name": "Alerting with TestData"
},
"reportVariables": {
"namefilter": "TestData"
}
}
],
"formats": [
"pdf",
"csv"
],
"created": "2022-09-19T11:44:42+02:00",
"updated": "2022-09-19T11:44:42+02:00"
}
]
```
### Status Codes
- **200** OK
- **401** Authentication failed, refer to [Authentication API]({{< relref "auth/" >}}).
- **500** Unexpected error or server misconfiguration. Refer to server logs for more details.
## Get a report
`GET /api/reports/:id`
#### Required permissions
See note in the [introduction]({{< ref "#reporting-api" >}}) for an explanation.
| Action | Scope |
| ------------ | ---------------------------------------------------------- |
| reports:read | reports:\*<br>reports:id:\*<br>reports:id:1 (single report) |
### Example request
```http
GET /api/reports/2 HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
```
### Example response
```http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 940
{
"id": 2,
"userId": 1,
"orgId": 1,
"name": "Report 2",
"recipients": "example-report@grafana.com",
"replyTo": "",
"message": "Hi, \nPlease find attached a PDF status report. If you have any questions, feel free to contact me!\nBest,",
"schedule": {
"startDate": "2022-10-02T00:00:00+02:00",
"endDate": null,
"frequency": "once",
"intervalFrequency": "",
"intervalAmount": 0,
"workdaysOnly": false,
"dayOfMonth": "2",
"timeZone": "Europe/Warsaw"
},
"options": {
"orientation": "landscape",
"layout": "grid",
},
"enableDashboardUrl": true,
"state": "scheduled",
"dashboards": [
{
"dashboard": {
"id": 463,
"uid": "7MeksYbmk",
"name": "Alerting with TestData"
},
"timeRange": {
"from": "",
"to": ""
},
"reportVariables": {
"namefilter": "TestData"
}
}
],
"formats": [
"pdf",
"csv"
],
"created": "2022-09-12T11:44:42+02:00",
"updated": "2022-09-12T11:44:42+02:00"
}
```
### Status Codes
- **200** OK
- **400** Bad request (invalid report ID).
- **401** Authentication failed, refer to [Authentication API]({{< relref "auth/" >}}).
- **403** Forbidden (access denied to a report or a dashboard used in the report).
- **404** Not found (the report does not exist).
- **500** Unexpected error or server misconfiguration. Refer to server logs for more details.
## Create a report
`POST /api/reports`
#### Required permissions
See note in the [introduction]({{< ref "#reporting-api" >}}) for an explanation.
| Action | Scope |
| -------------- | ----- |
| reports:create | n/a |
### Example request
```http
POST /api/reports HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
{
"name": "Report 4",
"recipients": "texample-report@grafana.com",
"replyTo": "",
"message": "Hello, please, find the report attached",
"schedule": {
"startDate": "2022-10-02T10:00:00+02:00",
"endDate": "2022-11-02T20:00:00+02:00",
"frequency": "daily",
"intervalFrequency": "",
"intervalAmount": 0,
"workdaysOnly": true,
"timeZone": "Europe/Warsaw"
},
"options": {
"orientation": "landscape",
"layout": "grid"
},
"enableDashboardUrl": true,
"dashboards": [
{
"dashboard": {
"uid": "7MeksYbmk",
},
"timeRange": {
"from": "2022-08-08T15:00:00+02:00",
"to": "2022-09-02T17:00:00+02:00"
},
"reportVariables": {
"varibale1": "Value1"
}
}
],
"formats": [
"pdf",
"csv"
]
}
```
#### Config JSON Body Schema
| Field name | Data type | Description |
| ------------------ | --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| name | string | Name of the report that is used as an email subject. |
| recipients | string | Comma-separated list of emails to send the report to. |
| replyTo | string | Comma-separated list of emails used in a reply-to field of the report email. |
| message | string | Text message used for the body of the report email. |
| startDate | string | Report distribution starts from this date. |
| endDate | string | Report distribution ends on this date. |
| frequency | string | Specifies how often the report should be sent. Can be `once`, `hourly`, `daily`, `weekly`, `monthly`, `last` or `custom`.<br/><br/>`last` - schedules the report for the last day of the month.<br/><br/>`custom` - schedules the report to be sent on a custom interval.<br/>It requires `intervalFrequency` and `intervalAmount` to be specified: for example, every 2 weeks, where 2 is an `intervalAmount` and `weeks` is an `intervalFrequency`. See the sketch after this table. |
| intervalFrequency | string | The type of the `custom` interval: `hours`, `days`, `weeks`, `months`. |
| intervalAmount | number | `custom` interval amount. |
| workdaysOnly | bool | Send the report only on Monday-Friday. Applicable to `hourly` and `daily` types of schedule. |
| timeZone | string | Time zone used to schedule report execution. |
| orientation | string | Can be `portrait` or `landscape`. |
| layout | string | Can be `grid` or `simple`. |
| enableDashboardUrl | bool | Adds a dashboard URL to the bottom of the report email. |
| formats | []string | Specifies what kind of attachment to generate for the report - `csv`, `pdf`, `image`.<br/>`pdf` is the default one.<br/>`csv` attaches a CSV file for each table panel.<br/>`image` embeds an image of a dashboard into the email's body. |
| dashboards | []object | Dashboards to generate a report for.<br/> See "Report Dashboard Schema" section below. |
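For instance, this minimal sketch (illustrative values only, not taken from the documented examples) shows a `custom` schedule that sends a report every 2 weeks:

```ts
// Illustrative fragment of a report payload: a `custom` schedule sent every 2 weeks.
const schedule = {
  startDate: '2022-10-02T10:00:00+02:00',
  endDate: null,
  frequency: 'custom',
  intervalFrequency: 'weeks', // required when frequency is `custom`
  intervalAmount: 2, // "every 2 weeks"
  workdaysOnly: false,
  timeZone: 'Europe/Warsaw',
};
```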
#### Report Dashboard Schema
| Field name | Data type | Description |
| ------------------------------ | --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| dashboard.uid | string | Dashboard [UID](../dashboard#identifier-id-vs-unique-identifier-uid). |
| timeRange.from | string | Dashboard time range from. |
| timeRange.to | string | Dashboard time range to. |
| reportVariables.<variableName> | string | Key-value pairs containing the template variables for this report, in JSON format. If empty, the template variables from the report's dashboard will be used. |
### Example response
```http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 35
{
"id": 4,
"message": "Report created"
}
```
### Status Codes
- **200** OK
- **400** Bad request (invalid JSON, missing or invalid field values, etc.).
- **403** Forbidden (access denied to a report or a dashboard used in the report).
- **500** Unexpected error or server misconfiguration. Refer to server logs for more details.
## Update a report
`PUT /api/reports/:id`
#### Required permissions
See note in the [introduction]({{< ref "#reporting-api" >}}) for an explanation.
| Action | Scope |
| ------------- | --------------------------------------------------------- |
| reports:write | reports:\*<br>reports:id:\*<br>reports:id:1 (single report) |
### Example request
See [JSON body schema]({{< ref "#config-json-body-schema" >}}) for fields description.
```http
PUT /api/reports/4 HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
{
"name": "Updated Report",
"recipients": "example-report@grafana.com",
"replyTo": "",
"message": "Hello, please, find the report attached",
"schedule": {
"frequency": "hourly",
"timeZone": "Africa/Cairo",
"workdaysOnly": true,
"startDate": "2022-10-10T10:00:00+02:00",
"endDate": "2022-11-20T19:00:00+02:00"
},
"options": {
"orientation": "landscape",
"layout": "grid",
},
"enableDashboardUrl": true,
"state": "scheduled",
"dashboards": [
{
"dashboard": {
"id": 463,
"uid": "7MeksYbmk",
"name": "Alerting with TestData"
},
"timeRange": {
"from": "2022-08-08T15:00:00+02:00",
"to": "2022-09-02T17:00:00+02:00"
},
"reportVariables": {
"varibale1": "Value1"
}
}
],
"formats": [
"pdf",
"csv"
]
}
```
### Example response
```http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 28
{
"message": "Report updated"
}
```
### Status Codes
- **200** OK
- **400** Bad request (invalid JSON, missing or invalid field values, etc.).
- **401** Authentication failed, refer to [Authentication API]({{< relref "auth/" >}}).
- **403** Forbidden (access denied to a report or a dashboard used in the report).
- **404** Not found (the report does not exist).
- **500** Unexpected error or server misconfiguration. Refer to server logs for more details.
## Delete a report
`DELETE /api/reports/:id`
#### Required permissions
See note in the [introduction]({{< ref "#reporting-api" >}}) for an explanation.
| Action | Scope |
| -------------- | --------------------------------------------------------- |
| reports:delete | reports:\*<br>reports:id:\*<br>reports:id:1 (single report) |
### Example request
```http
DELETE /api/reports/6 HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
```
### Example response
```http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 39
{
"message": "Report config was removed"
}
```
### Status Codes
- **200** OK
- **400** Bad request (invalid report ID).
- **401** Authentication failed, refer to [Authentication API]({{< relref "auth/" >}}).
- **404** Not found (report with this ID does not exist).
- **500** Unexpected error or server misconfiguration. Refer to server logs for more details.
## Send a report
> Only available in Grafana Enterprise v7.0+.
> This API endpoint is experimental and may be deprecated in a future release. On deprecation, a migration strategy will be provided and the endpoint will remain functional until the next major release of Grafana.
`POST /api/reports/email`
Generate and send a report. This API waits for the report to be generated before returning. We recommend that you set the client's timeout to at least 60 seconds.
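As a minimal sketch (placeholder URL, API key, and report ID, not part of the documented examples), a client could enforce that timeout like this:

```ts
// Hedged sketch: POST to the send-report endpoint with a 60-second
// client-side timeout, as recommended above. All values are placeholders.
async function sendReport(): Promise<void> {
  const controller = new AbortController();
  const timeout = setTimeout(() => controller.abort(), 60_000);
  try {
    const res = await fetch('http://localhost:3000/api/reports/email', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: 'Bearer <api-key>',
      },
      body: JSON.stringify({ id: '4', useEmailsFromReport: true }),
      signal: controller.signal,
    });
    if (!res.ok) {
      throw new Error(`Sending report failed: ${res.status}`);
    }
  } finally {
    clearTimeout(timeout);
  }
}
```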
@ -51,13 +451,13 @@ Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
}
```
### JSON Body Schema
#### JSON Body Schema
| Field name | Data type | Description |
| ------------------- | --------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
| id | string | ID of the report to send. It is the same as in the URL when editing a report, not to be confused with the ID of the dashboard. Required. |
| emails | string | Comma-separated list of emails to send the report to. Overrides the emails from the report. Required if **useEmailsFromReport** is not present. |
| useEmailsFromReport | boolean | Send the report to the emails specified in the report. Required if **emails** is not present. |
| Field name | Data type | Description |
| ------------------- | --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| id | string | ID of the report to send. It is the same as in the URL when editing a report, not to be confused with the ID of the dashboard. Required. |
| emails | string | Comma-separated list of emails to send the report to. Overrides the emails from the report. Required if `useEmailsFromReport` is not present. |
| useEmailsFromReport | boolean | Send the report to the emails specified in the report. Required if `emails` is not present. |
### Example response
@ -71,11 +471,205 @@ Content-Length: 29
### Status Codes
| Code | Description |
| ---- | ----------------------------------------------------------------------------------- |
| 200 | Report was sent. |
| 400 | Bad request (invalid json, missing content-type, missing or invalid fields, etc.). |
| 401 | Authentication failed, refer to [Authentication API]({{< relref "auth/" >}}). |
| 403 | User is authenticated but is not authorized to generate the report. |
| 404 | Report not found. |
| 500 | Unexpected error or server misconfiguration. Refer to server logs for more details. |
- **200** Report was sent.
- **400** Bad request (invalid JSON, missing content-type, missing or invalid fields, etc.).
- **401** Authentication failed, refer to [Authentication API]({{< relref "auth/" >}}).
- **403** Forbidden (access denied to a report or a dashboard used in the report).
- **404** Report not found.
- **500** Unexpected error or server misconfiguration. Refer to server logs for more details.
## Get reports branding settings
`GET /api/reports/settings`
Returns reports branding settings that are global and used across all the reports.
#### Required permissions
See note in the [introduction]({{< ref "#reporting-api" >}}) for an explanation.
| Action | Scope |
| --------------------- | ----- |
| reports.settings:read | n/a |
### Example request
```http
GET /api/reports/settings HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
```
### Example response
```http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 181
{
"id": 1,
"userId": 1,
"orgId": 1,
"branding": {
"reportLogoUrl": "",
"emailLogoUrl": "",
"emailFooterMode": "sent-by",
"emailFooterText": "Grafana Labs",
"emailFooterLink": "https://grafana.com/"
}
}
```
### Status Codes
- **200** OK
- **401** Authentication failed, refer to [Authentication API]({{< relref "auth/" >}}).
- **500** Unexpected error or server misconfiguration. Refer to server logs for more details.
## Save reports branding settings
`POST /api/reports/settings`
Creates settings if they don't exist, otherwise updates them. These settings are global and used across all the reports.
#### Required permissions
See note in the [introduction]({{< ref "#reporting-api" >}}) for an explanation.
| Action | Scope |
| ---------------------- | ----- |
| reports.settings:write | n/a |
### Example request
```http
POST /api/reports/settings HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
{
"branding": {
"reportLogoUrl": "https://grafana.com/reportLogo.jpg",
"emailLogoUrl": "https://grafana.com/emailLogo.jpg",
"emailFooterMode": "sent-by",
"emailFooterText": "Grafana Labs",
"emailFooterLink": "https://grafana.com/"
}
}
```
#### JSON Body Schema
| Field name | Data type | Description |
| ------------------------ | --------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| branding.reportLogoUrl | string | URL of an image used as a logo on every page of the report. |
| branding.emailLogoUrl | string | URL of an image used as a logo in the email. |
| branding.emailFooterMode | string | Can be `sent-by` or `none`.<br/>`sent-by` adds a "Sent by `branding.emailFooterText`" footer link to the email. Requires specifying values in the `branding.emailFooterText` and `branding.emailFooterLink` fields.<br/>`none` suppresses adding a "Sent by" footer link to the email. |
| branding.emailFooterText | string | Text of the link added to the email "Sent by" footer. |
| branding.emailFooterLink | string | URL of the link added to the email "Sent by" footer. |
### Example response
```http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 35
{
"message": "Report settings saved"
}
```
### Status Codes
- **200** OK
- **400** Bad request (invalid JSON, missing or invalid field values, etc.).
- **401** Authentication failed, refer to [Authentication API]({{< relref "auth/" >}}).
- **500** Unexpected error or server misconfiguration. Refer to server logs for more details.
## Send a test email
`POST /api/reports/test-email`
Sends a test email with a report without persisting it in the database.
#### Required permissions
See note in the [introduction]({{< ref "#reporting-api" >}}) for an explanation.
| Action | Scope |
| ------------ | ----- |
| reports:send | n/a |
### Example request
See [JSON body schema]({{< ref "#config-json-body-schema" >}}) for fields description.
```http
POST /api/reports/test-email HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
{
"name": "Report 4",
"recipients": "example-report@grafana.com",
"replyTo": "",
"message": "Hello, please, find the report attached",
"schedule": {
"startDate": "2022-10-02T10:00:00+02:00",
"endDate": "2022-11-02T20:00:00+02:00",
"frequency": "daily",
"intervalFrequency": "",
"intervalAmount": 0,
"workdaysOnly": true,
"timeZone": "Europe/Warsaw"
},
"options": {
"orientation": "landscape",
"layout": "grid"
},
"enableDashboardUrl": true,
"dashboards": [
{
"dashboard": {
"uid": "7MeksYbmk",
},
"timeRange": {
"from": "2022-08-08T15:00:00+02:00",
"to": "2022-09-02T17:00:00+02:00"
},
"reportVariables": {
"varibale1": "Value1"
}
}
],
"formats": [
"pdf",
"csv"
]
}
```
### Example response
```http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 29
{
"message": "Test email sent"
}
```
### Status Codes
- **200** OK
- **400** Bad request (invalid JSON, missing or invalid field values, etc.).
- **401** Authentication failed, refer to [Authentication API]({{< relref "auth/" >}}).
- **403** Forbidden (access denied to a report or a dashboard used in the report).
- **500** Unexpected error or server misconfiguration. Refer to server logs for more details.


@ -17,6 +17,8 @@ This guide helps you identify the steps required to update a plugin from the Gra
- [Plugin migration guide](#plugin-migration-guide)
- [Introduction](#introduction)
- [Table of contents](#table-of-contents)
- [From version 9.1.x to 9.2.x](#from-version-91x-to-92x)
- [NavModelItem requires a valid icon name](#navmodelitem-requires-a-valid-icon-name)
- [From version 8.x to 9.x](#from-version-8x-to-9x)
- [9.0 breaking changes](#90-breaking-changes)
- [theme.visualization.getColorByName replaces getColorForTheme](#themevisualizationgetcolorbyname-replaces-getcolorfortheme)
@ -60,6 +62,32 @@ This guide helps you identify the steps required to update a plugin from the Gra
- [Migrate to data frames](#migrate-to-data-frames)
- [Troubleshoot plugin migration](#troubleshoot-plugin-migration)
## From version 9.1.x to 9.2.x
### NavModelItem requires a valid icon name
The typings of `NavModelItem` have been improved to only allow a valid `IconName` for the `icon` property. You can find the complete list of valid icons [here](https://github.com/grafana/grafana/blob/v9.2.0-beta1/packages/grafana-data/src/types/icon.ts). The icons in this list also work in older Grafana 9 versions.
Example:
```ts
// before
const model: NavModelItem = {
id: 'settings',
text: 'Settings',
icon: 'fa fa-cog',
url: `${baseUrl}/settings`,
};
// after
const model: NavModelItem = {
id: 'settings',
text: 'Settings',
icon: 'cog',
url: `${baseUrl}/settings`,
};
```
## From version 8.x to 9.x
### 9.0 breaking changes


@ -54,6 +54,7 @@ You can use the unit dropdown to also specify custom units, custom prefix or suf
To select a custom unit enter the unit and select the last `Custom: xxx` option in the dropdown.
- `suffix:<suffix>` for custom unit that should go after value.
- `prefix:<prefix>` for custom unit that should go before value.
- `time:<format>` for custom date-time formats, for example `time:YYYY-MM-DD`. See [formats](https://momentjs.com/docs/#/displaying/) for the format syntax and options.
- `si:<base scale><unit characters>` for custom SI units. For example: `si: mF`. This one is a bit more advanced as you can specify both a unit and the
source data scale. So if your source data is represented as milli (thousandths of) something, prefix the unit with that


@ -114,7 +114,7 @@ is_nan takes a number or a series and returns `1` for `NaN` values and `0` for o
##### is_null
is_nan takes a number or a series and returns `1` for `null` values and `0` for other values. For example `is_null($A)`.
is_null takes a number or a series and returns `1` for `null` values and `0` for other values. For example `is_null($A)`.
##### is_number


@ -14,6 +14,8 @@ const addDataSource = () => {
});
};
const finalQuery = 'rate({instance=~"instance1|instance2"} | logfmt | __error__=`` [$__interval]';
describe('Loki query builder', () => {
beforeEach(() => {
e2e.flows.login('admin', 'admin');
@ -37,8 +39,6 @@ describe('Loki query builder', () => {
req.reply({ status: 'success', data: [{ instance: 'instance1' }] });
});
const finalQuery = 'rate({instance=~"instance1|instance2"} | logfmt | __error__=`` [$__interval]';
// Go to Explore and choose Loki data source
e2e.pages.Explore.visit();
e2e.components.DataSourcePicker.container().should('be.visible').click();
@ -72,13 +72,21 @@ describe('Loki query builder', () => {
e2e().contains(MISSING_LABEL_FILTER_ERROR_MESSAGE).should('not.exist');
e2e().contains(finalQuery).should('be.visible');
// Switch to code editor and check if query was parsed
for (const word of finalQuery.split(' ')) {
e2e().contains(word).should('be.visible');
}
// Toggle raw query
e2e().contains('label', 'Raw query').click();
e2e().contains('Raw query').should('have.length', 1);
// Switch to explain mode and check if query is visible
// Change to code editor
e2e().contains('label', 'Code').click();
// We need to check each part separately because the final query is split into separate DOM elements, so e2e().contains(finalQuery).should('be.visible') does not detect the query.
e2e().contains('rate').should('be.visible');
e2e().contains('instance1|instance2').should('be.visible');
e2e().contains('logfmt').should('be.visible');
e2e().contains('__error__').should('be.visible');
e2e().contains('$__interval').should('be.visible');
// Checks the explain mode toggle
e2e().contains('label', 'Explain').click();
e2e().contains(finalQuery).should('be.visible');
e2e().contains('Fetch all log lines matching label filters.').should('be.visible');
});
});

go.mod

@ -55,7 +55,7 @@ require (
github.com/gorilla/websocket v1.5.0
github.com/gosimple/slug v1.12.0
github.com/grafana/cuetsy v0.1.1
github.com/grafana/grafana-aws-sdk v0.10.8
github.com/grafana/grafana-aws-sdk v0.11.0
github.com/grafana/grafana-azure-sdk-go v1.3.0
github.com/grafana/grafana-plugin-sdk-go v0.139.0
github.com/grafana/thema v0.0.0-20220817114012-ebeee841c104
@ -106,7 +106,7 @@ require (
go.opentelemetry.io/otel/trace v1.6.3
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d
golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d
golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect
golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
golang.org/x/time v0.0.0-20220609170525-579cf78fd858
@ -231,7 +231,7 @@ require (
go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.9.0
go.uber.org/goleak v1.1.12 // indirect
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect
golang.org/x/text v0.3.7
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
google.golang.org/appengine v1.6.7 // indirect
@ -254,6 +254,7 @@ require (
github.com/google/go-github/v45 v45.2.0
github.com/grafana/dskit v0.0.0-20211011144203-3a88ec0b675f
github.com/jmoiron/sqlx v1.3.5
github.com/matryer/is v1.4.0
github.com/urfave/cli v1.22.5
go.etcd.io/etcd/api/v3 v3.5.4
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0

go.sum

@ -1374,8 +1374,8 @@ github.com/grafana/dskit v0.0.0-20211011144203-3a88ec0b675f h1:FvvSVEbnGeM2bUivG
github.com/grafana/dskit v0.0.0-20211011144203-3a88ec0b675f/go.mod h1:uPG2nyK4CtgNDmWv7qyzYcdI+S90kHHRWvHnBtEMBXM=
github.com/grafana/go-mssqldb v0.0.0-20210326084033-d0ce3c521036 h1:GplhUk6Xes5JIhUUrggPcPBhOn+eT8+WsHiebvq7GgA=
github.com/grafana/go-mssqldb v0.0.0-20210326084033-d0ce3c521036/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/grafana/grafana-aws-sdk v0.10.8 h1:6MGlWlQD4E0aI+Vp4Cfgzsj9V3U7kSQ1wCye9D1NMoU=
github.com/grafana/grafana-aws-sdk v0.10.8/go.mod h1:5Iw3xY7iXJfNaYHrRHMXa/kaB2lWoyntg71PPLGvSs8=
github.com/grafana/grafana-aws-sdk v0.11.0 h1:ncPD/UN0wNcKq3kEU90RdvrnK/6R4VW2Lo5dPcGk9t0=
github.com/grafana/grafana-aws-sdk v0.11.0/go.mod h1:5Iw3xY7iXJfNaYHrRHMXa/kaB2lWoyntg71PPLGvSs8=
github.com/grafana/grafana-azure-sdk-go v1.3.0 h1:zboQpq/ljBjqHo/6UQNZAUwqGTtnEGRYSEnqIQvLuAo=
github.com/grafana/grafana-azure-sdk-go v1.3.0/go.mod h1:rgrnK9m6CgKlgx4rH3FFP/6dTdyRO6LYC2mVZov35yo=
github.com/grafana/grafana-google-sdk-go v0.0.0-20211104130251-b190293eaf58 h1:2ud7NNM7LrGPO4x0NFR8qLq68CqI4SmB7I2yRN2w9oE=
@ -2881,8 +2881,8 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -3097,8 +3097,8 @@ golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=


@ -173,6 +173,7 @@
"babel-loader": "8.2.5",
"babel-plugin-angularjs-annotate": "0.10.0",
"babel-plugin-macros": "3.1.0",
"blob-polyfill": "7.0.20220408",
"copy-webpack-plugin": "9.0.1",
"css-loader": "6.7.1",
"css-minimizer-webpack-plugin": "4.1.0",
@ -257,7 +258,7 @@
"@grafana/e2e-selectors": "workspace:*",
"@grafana/experimental": "^0.0.2-canary.36",
"@grafana/google-sdk": "0.0.3",
"@grafana/lezer-logql": "0.1.0",
"@grafana/lezer-logql": "0.1.1",
"@grafana/monaco-logql": "^0.0.6",
"@grafana/runtime": "workspace:*",
"@grafana/schema": "workspace:*",
@ -354,7 +355,7 @@
"rc-drawer": "4.4.3",
"rc-slider": "9.7.5",
"rc-time-picker": "3.7.3",
"rc-tree": "5.6.6",
"rc-tree": "5.7.0",
"re-resizable": "6.9.9",
"react": "17.0.2",
"react-awesome-query-builder": "5.3.1",
@ -408,7 +409,7 @@
"resolutions": {
"underscore": "1.13.4",
"@types/slate": "0.47.9",
"@rushstack/node-core-library": "3.52.0",
"@rushstack/node-core-library": "3.53.0",
"@rushstack/rig-package": "0.3.13",
"@rushstack/ts-command-line": "4.12.1",
"@storybook/react/webpack": "5.74.0",


@ -23,7 +23,8 @@
},
"files": [
"dist",
"CHANGELOG.md",
"./README.md",
"./CHANGELOG.md",
"LICENSE_APACHE2"
],
"scripts": {


@ -16,6 +16,7 @@ const LOGFMT_REGEXP = /(?:^|\s)([\w\(\)\[\]\{\}]+)=(""|(?:".*?[^\\]"|[^"\s]\S*))
*
* Example: `getLogLevel('WARN 1999-12-31 this is great') // LogLevel.warn`
*/
/** @deprecated will be removed in the next major version */
export function getLogLevel(line: string): LogLevel {
if (!line) {
return LogLevel.unknown;
@ -37,6 +38,7 @@ export function getLogLevel(line: string): LogLevel {
return level;
}
/** @deprecated will be removed in the next major version */
export function getLogLevelFromKey(key: string | number): LogLevel {
const level = (LogLevel as any)[key.toString().toLowerCase()];
if (level) {
@ -46,6 +48,7 @@ export function getLogLevelFromKey(key: string | number): LogLevel {
return LogLevel.unknown;
}
/** @deprecated will be removed in the next major version */
export function addLogLevelToSeries(series: DataFrame, lineIndex: number): DataFrame {
const levels = new ArrayVector<LogLevel>();
const lines = series.fields[lineIndex];
@ -68,6 +71,7 @@ export function addLogLevelToSeries(series: DataFrame, lineIndex: number): DataF
};
}
/** @deprecated will be removed in the next major version */
export const LogsParsers: { [name: string]: LogsParser } = {
JSON: {
buildMatcher: (label) => new RegExp(`(?:{|,)\\s*"${label}"\\s*:\\s*"?([\\d\\.]+|[^"]*)"?`),
@ -109,6 +113,7 @@ export const LogsParsers: { [name: string]: LogsParser } = {
},
};
/** @deprecated will be removed in the next major version */
export function calculateFieldStats(rows: LogRowModel[], extractor: RegExp): LogLabelStatsModel[] {
// Consider only rows that satisfy the matcher
const rowsWithField = rows.filter((row) => extractor.test(row.entry));
@ -124,6 +129,7 @@ export function calculateFieldStats(rows: LogRowModel[], extractor: RegExp): Log
return getSortedCounts(countsByValue, rowCount);
}
/** @deprecated will be removed in the next major version */
export function calculateLogsLabelStats(rows: LogRowModel[], label: string): LogLabelStatsModel[] {
// Consider only rows that have the given label
const rowsWithLabel = rows.filter((row) => row.labels[label] !== undefined);
@ -134,6 +140,7 @@ export function calculateLogsLabelStats(rows: LogRowModel[], label: string): Log
return getSortedCounts(countsByValue, rowCount);
}
/** @deprecated will be removed in the next major version */
export function calculateStats(values: unknown[]): LogLabelStatsModel[] {
const nonEmptyValues = values.filter((value) => value !== undefined && value !== null);
const countsByValue = countBy(nonEmptyValues);
@ -148,6 +155,7 @@ const getSortedCounts = (countsByValue: { [value: string]: number }, rowCount: n
.value();
};
/** @deprecated will be removed in the next major version */
export function getParser(line: string): LogsParser | undefined {
let parser;
try {
@ -163,6 +171,7 @@ export function getParser(line: string): LogsParser | undefined {
return parser;
}
/** @deprecated will be removed in the next major version */
export const sortInAscendingOrder = (a: LogRowModel, b: LogRowModel) => {
// compare milliseconds
if (a.timeEpochMs < b.timeEpochMs) {
@ -185,6 +194,7 @@ export const sortInAscendingOrder = (a: LogRowModel, b: LogRowModel) => {
return 0;
};
/** @deprecated will be removed in the next major version */
export const sortInDescendingOrder = (a: LogRowModel, b: LogRowModel) => {
// compare milliseconds
if (a.timeEpochMs > b.timeEpochMs) {
@ -207,15 +217,18 @@ export const sortInDescendingOrder = (a: LogRowModel, b: LogRowModel) => {
return 0;
};
/** @deprecated will be removed in the next major version */
export const sortLogsResult = (logsResult: LogsModel | null, sortOrder: LogsSortOrder): LogsModel => {
const rows = logsResult ? sortLogRows(logsResult.rows, sortOrder) : [];
return logsResult ? { ...logsResult, rows } : { hasUniqueLabels: false, rows };
};
/** @deprecated will be removed in the next major version */
export const sortLogRows = (logRows: LogRowModel[], sortOrder: LogsSortOrder) =>
sortOrder === LogsSortOrder.Ascending ? logRows.sort(sortInAscendingOrder) : logRows.sort(sortInDescendingOrder);
// Currently supports only error condition in Loki logs
/** @deprecated will be removed in the next major version */
export const checkLogsError = (logRow: LogRowModel): { hasError: boolean; errorMessage?: string } => {
if (logRow.labels.__error__) {
return {
@ -228,5 +241,6 @@ export const checkLogsError = (logRow: LogRowModel): { hasError: boolean; errorM
};
};
/** @deprecated will be removed in the next major version */
export const escapeUnescapedString = (string: string) =>
string.replace(/\\r\\n|\\n|\\t|\\r/g, (match: string) => (match.slice(1) === 't' ? '\t' : '\n'));


@ -26,7 +26,8 @@
},
"files": [
"dist",
"CHANGELOG.md",
"./README.md",
"./CHANGELOG.md",
"LICENSE_APACHE2"
],
"scripts": {


@ -30,7 +30,8 @@
"dist",
"cli.js",
"cypress.json",
"CHANGELOG.md",
"./README.md",
"./CHANGELOG.md",
"LICENSE_APACHE2"
],
"scripts": {


@ -24,7 +24,8 @@
},
"files": [
"dist",
"CHANGELOG.md",
"./README.md",
"./CHANGELOG.md",
"LICENSE_APACHE2"
],
"scripts": {


@ -23,7 +23,8 @@
},
"files": [
"dist",
"CHANGELOG.md",
"./README.md",
"./CHANGELOG.md",
"LICENSE_APACHE2"
],
"scripts": {


@ -74,3 +74,9 @@ export {
defaultFieldConfigSource,
defaultFieldConfig
} from './veneer/dashboard.types';
// Raw generated types from playlist entity type.
export type { Playlist } from './raw/playlist/x/playlist.gen';
// Raw generated default consts from playlist entity type.
export { defaultPlaylist } from './raw/playlist/x/playlist.gen';


@ -0,0 +1,69 @@
// This file is autogenerated. DO NOT EDIT.
//
// Generated by pkg/framework/coremodel/gen.go
//
// Derived from the Thema lineage declared in pkg/coremodel/playlist/coremodel.cue
//
// Run `make gen-cue` from repository root to regenerate.
export interface Playlist {
/**
* Unique playlist identifier for internal use, set by Grafana.
*/
id: number;
/**
* Interval sets the time between switching views in a playlist.
* FIXME: Is this based on a standardized format or what options are available? Can datemath be used?
*/
interval: string;
/**
* The ordered list of items that the playlist will iterate over.
*/
items?: Array<{
/**
* FIXME: The prefixDropper removes playlist from playlist_id, that doesn't work for us since it'll mean we'll have Id twice.
* ID of the playlist item for internal use by Grafana. Deprecated.
*/
id: number;
/**
* PlaylistID for the playlist containing the item. Deprecated.
*/
playlistid: number;
/**
* Type of the item.
*/
type: ('dashboard_by_uid' | 'dashboard_by_id' | 'dashboard_by_tag');
/**
* Value depends on type and describes the playlist item.
*
* - dashboard_by_id: The value is an internal numerical identifier set by Grafana. This
* is not portable as the numerical identifier is non-deterministic between different instances.
* Will be replaced by dashboard_by_uid in the future.
* - dashboard_by_tag: The value is a tag which is set on any number of dashboards. All
* dashboards behind the tag will be added to the playlist.
*/
value: string;
/**
* Title is the human-readable identifier for the playlist item.
*/
title: string;
/**
* Order is the position in the list for the item. Deprecated.
*/
order: number;
}>;
/**
* Name of the playlist.
*/
name: string;
/**
* Unique playlist identifier. Generated on creation, either by the
* creator of the playlist or by the application.
*/
uid: string;
}
export const defaultPlaylist: Partial<Playlist> = {
interval: '5m',
items: [],
};


@ -19,25 +19,26 @@
"grafana-toolkit": "./bin/grafana-toolkit.js"
},
"publishConfig": {
"bin": {
"grafana-toolkit": "./dist/bin/grafana-toolkit.js"
},
"access": "public"
},
"files": [
"dist",
"README.md",
"CHANGELOG.md"
"config",
"src",
"sass",
"./README.md",
"./CHANGELOG.md",
"LICENSE_APACHE2"
],
"scripts": {
"build": "grafana-toolkit toolkit:build",
"clean": "rimraf ./dist ./compiled ./package.tgz",
"precommit": "npm run lint & npm run typecheck",
"clean": "rimraf ./dist ./compiled ./sass ./package.tgz",
"prepack": "mv ./src ./src_bak && cp -r ./dist/src ./src",
"postpack": "rimraf ./src && mv ./src_bak ./src",
"typecheck": "tsc --noEmit"
},
"main": "src/index.ts",
"dependencies": {
"@babel/core": "^7.18.9",
"@babel/core": "7.18.9",
"@babel/plugin-proposal-class-properties": "7.18.6",
"@babel/plugin-proposal-nullish-coalescing-operator": "7.18.6",
"@babel/plugin-proposal-object-rest-spread": "7.18.9",
@ -46,11 +47,11 @@
"@babel/plugin-transform-react-constant-elements": "7.18.9",
"@babel/plugin-transform-runtime": "7.18.10",
"@babel/plugin-transform-typescript": "7.19.0",
"@babel/preset-env": "^7.18.9",
"@babel/preset-react": "^7.18.6",
"@babel/preset-typescript": "^7.18.6",
"@babel/preset-env": "7.18.9",
"@babel/preset-react": "7.18.6",
"@babel/preset-typescript": "7.18.6",
"@grafana/data": "9.3.0-pre",
"@grafana/eslint-config": "^4.0.0",
"@grafana/eslint-config": "5.0.0",
"@grafana/tsconfig": "^1.2.0-rc1",
"@grafana/ui": "9.3.0-pre",
"@jest/core": "27.5.1",


@ -24,8 +24,6 @@ const compile = () =>
const copyFiles = () => {
const files = [
'config/circleci/config.yml',
'bin/grafana-toolkit.js',
'src/config/prettier.plugin.config.json',
'src/config/prettier.plugin.rc.js',
'src/config/tsconfig.plugin.json',
@ -60,12 +58,16 @@ const copyFiles = () => {
const copySassFiles = () => {
const files = ['_variables.generated.scss', '_variables.dark.generated.scss', '_variables.light.generated.scss'];
const exportDir = `${cwd}/sass`;
return useSpinner(`Copy scss files ${files.join(', ')}`, async () => {
const sassDir = path.resolve(cwd, '../../public/sass/');
if (!fs.existsSync(exportDir)) {
fs.mkdirSync(exportDir);
}
const promises = files.map((file) => {
return new Promise<void>((resolve, reject) => {
const name = file.replace('.generated', '');
fs.copyFile(`${sassDir}/${file}`, `${distDir}/sass/${name}`, (err) => {
fs.copyFile(`${sassDir}/${file}`, `${exportDir}/${name}`, (err) => {
if (err) {
reject(err);
return;
@ -89,8 +91,6 @@ const toolkitBuildTaskRunner: TaskRunner<ToolkitBuildOptions> = async () => {
await clean();
await compile();
fs.mkdirSync('./dist/bin');
fs.mkdirSync('./dist/sass');
await copyFiles();
await copySassFiles();
};


@ -26,7 +26,8 @@
},
"files": [
"dist",
"CHANGELOG.md",
"./README.md",
"./CHANGELOG.md",
"LICENSE_APACHE2"
],
"scripts": {
@ -79,7 +80,7 @@
"rc-slider": "9.7.5",
"rc-time-picker": "^3.7.3",
"react-beautiful-dnd": "13.1.0",
"react-calendar": "3.7.0",
"react-calendar": "3.9.0",
"react-colorful": "5.5.1",
"react-custom-scrollbars-2": "4.5.0",
"react-dropzone": "14.2.2",


@ -1,4 +1,4 @@
import { css } from '@emotion/css';
import { css, cx } from '@emotion/css';
import React, { useEffect, useRef, useState } from 'react';
import { GrafanaTheme2 } from '@grafana/data';
@ -25,6 +25,8 @@ export interface ConfirmModalProps {
dismissText?: string;
/** Icon for the modal header */
icon?: IconName;
/** Additional styling for modal container */
modalClass?: string;
/** Text user needs to fill in before confirming */
confirmationText?: string;
/** Text for alternative button */
@ -46,6 +48,7 @@ export const ConfirmModal = ({
confirmationText,
dismissText = 'Cancel',
alternativeText,
modalClass,
icon = 'exclamation-triangle',
onConfirm,
onDismiss,
@ -66,7 +69,7 @@ export const ConfirmModal = ({
}, [isOpen]);
return (
<Modal className={styles.modal} title={title} icon={icon} isOpen={isOpen} onDismiss={onDismiss}>
<Modal className={cx(styles.modal, modalClass)} title={title} icon={icon} isOpen={isOpen} onDismiss={onDismiss}>
<div className={styles.modalText}>
{body}
{description ? <div className={styles.modalDescription}>{description}</div> : null}


@ -9,7 +9,7 @@ import { WithContextMenu } from '../ContextMenu/WithContextMenu';
import { MenuGroup, MenuItemsGroup } from '../Menu/MenuGroup';
import { MenuItem } from '../Menu/MenuItem';
interface DataLinksContextMenuProps {
export interface DataLinksContextMenuProps {
children: (props: DataLinksContextMenuApi) => JSX.Element;
links: () => LinkModel[];
style?: CSSProperties;


@ -63,7 +63,7 @@ export function RelativeTimeRangePicker(props: RelativeTimeRangePickerProps) {
};
const onOpen = useCallback(
(event: FormEvent<HTMLDivElement>) => {
(event: FormEvent<HTMLButtonElement>) => {
event.stopPropagation();
event.preventDefault();
setIsOpen(!isOpen);
@ -94,7 +94,7 @@ export function RelativeTimeRangePicker(props: RelativeTimeRangePickerProps) {
return (
<div className={styles.container}>
<div tabIndex={0} className={styles.pickerInput} onClick={onOpen}>
<button className={styles.pickerInput} onClick={onOpen}>
<span className={styles.clockIcon}>
<Icon name="clock-nine" />
</span>
@ -104,7 +104,7 @@ export function RelativeTimeRangePicker(props: RelativeTimeRangePickerProps) {
<span className={styles.caretIcon}>
<Icon name={isOpen ? 'angle-up' : 'angle-down'} size="lg" />
</span>
</div>
</button>
{isOpen && (
<ClickOutsideWrapper includeButtonPress={false} onClick={onClose}>
<div className={styles.content}>


@ -51,7 +51,7 @@ export const TimeRangeInput: FC<TimeRangeInputProps> = ({
const theme = useTheme2();
const styles = getStyles(theme, disabled);
const onOpen = (event: FormEvent<HTMLDivElement>) => {
const onOpen = (event: FormEvent<HTMLButtonElement>) => {
event.stopPropagation();
event.preventDefault();
if (disabled) {
@ -78,12 +78,7 @@ export const TimeRangeInput: FC<TimeRangeInputProps> = ({
return (
<div className={styles.container}>
<div
tabIndex={0}
className={styles.pickerInput}
aria-label={selectors.components.TimePicker.openButton}
onClick={onOpen}
>
<button className={styles.pickerInput} aria-label={selectors.components.TimePicker.openButton} onClick={onOpen}>
{isValidTimeRange(value) ? (
<TimePickerButtonLabel value={value} timeZone={timeZone} />
) : (
@ -98,7 +93,7 @@ export const TimeRangeInput: FC<TimeRangeInputProps> = ({
<Icon name={isOpen ? 'angle-up' : 'angle-down'} size="lg" />
</span>
)}
</div>
</button>
{isOpen && (
<ClickOutsideWrapper includeButtonPress={false} onClick={onClose}>
<TimePickerContent


@ -163,7 +163,14 @@ const getStyles = (theme: GrafanaTheme2) => {
.drawer-open .drawer-content-wrapper {
box-shadow: ${theme.shadows.z3};
}
z-index: ${theme.zIndex.dropdown};
${theme.breakpoints.down('sm')} {
.drawer-content-wrapper {
width: 100% !important;
}
}
`,
header: css`
background-color: ${theme.colors.background.canvas};


@ -168,7 +168,7 @@ const getStyles = (theme: GrafanaTheme2) => {
`,
pageIcon: css`
display: none;
${theme.breakpoints.up('md')} {
${theme.breakpoints.up('sm')} {
display: flex;
padding-right: ${theme.spacing(1)};
align-items: center;


@ -40,9 +40,13 @@ export const VizLayout: VizLayoutComponentType = ({ width, height, legend, child
if (!legend) {
return (
<div tabIndex={0} style={containerStyle} className={styles.viz}>
{children(width, height)}
</div>
<>
{/* tabIndex={0} is needed for keyboard accessibility in the plot area */}
{/* eslint-disable-next-line jsx-a11y/no-noninteractive-tabindex */}
<div tabIndex={0} style={containerStyle} className={styles.viz}>
{children(width, height)}
</div>
</>
);
}
@ -88,6 +92,8 @@ export const VizLayout: VizLayoutComponentType = ({ width, height, legend, child
return (
<div style={containerStyle}>
{/* tabIndex={0} is needed for keyboard accessibility in the plot area */}
{/* eslint-disable-next-line jsx-a11y/no-noninteractive-tabindex */}
<div tabIndex={0} className={styles.viz}>
{size && children(size.width, size.height)}
</div>


@ -156,7 +156,11 @@ export { MenuItem, type MenuItemProps } from './Menu/MenuItem';
export { WithContextMenu } from './ContextMenu/WithContextMenu';
export { DataLinksInlineEditor } from './DataLinks/DataLinksInlineEditor/DataLinksInlineEditor';
export { DataLinkInput } from './DataLinks/DataLinkInput';
export { DataLinksContextMenu } from './DataLinks/DataLinksContextMenu';
export {
DataLinksContextMenu,
type DataLinksContextMenuProps,
type DataLinksContextMenuApi,
} from './DataLinks/DataLinksContextMenu';
export { SeriesIcon } from './VizLegend/SeriesIcon';
export { InfoBox } from './InfoBox/InfoBox';
export { FeatureBadge, FeatureInfoBox } from './InfoBox/FeatureInfoBox';

View File

@ -6,6 +6,7 @@ import (
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/api/response"
cmplaylist "github.com/grafana/grafana/pkg/coremodel/playlist"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/playlist"
"github.com/grafana/grafana/pkg/web"
@ -83,13 +84,13 @@ func (hs *HTTPServer) GetPlaylist(c *models.ReqContext) response.Response {
playlistDTOs, _ := hs.LoadPlaylistItemDTOs(c.Req.Context(), uid, c.OrgID)
dto := &playlist.PlaylistDTO{
Id: p.Id,
UID: p.UID,
Name: p.Name,
Interval: p.Interval,
OrgId: p.OrgId,
Items: playlistDTOs,
OrgId: p.OrgId,
}
dto.Id = p.Id
dto.Uid = p.UID
dto.Name = p.Name
dto.Interval = p.Interval
dto.Items = &playlistDTOs
return response.JSON(http.StatusOK, dto)
}
@ -106,8 +107,8 @@ func (hs *HTTPServer) LoadPlaylistItemDTOs(ctx context.Context, uid string, orgI
for _, item := range playlistitems {
playlistDTOs = append(playlistDTOs, playlist.PlaylistItemDTO{
Id: item.Id,
PlaylistId: item.PlaylistId,
Type: item.Type,
Playlistid: item.PlaylistId,
Type: cmplaylist.PlaylistItemType(item.Type),
Value: item.Value,
Order: item.Order,
Title: item.Title,
@ -244,7 +245,7 @@ func (hs *HTTPServer) UpdatePlaylist(c *models.ReqContext) response.Response {
return response.Error(500, "Failed to save playlist", err)
}
p.Items = playlistDTOs
p.Items = &playlistDTOs
return response.JSON(http.StatusOK, p)
}

View File

@ -0,0 +1,22 @@
package main
import (
"github.com/grafana/grafana/pkg/build/config"
)
const ReleaseFolder = "release"
const EnterpriseSfx = "-enterprise"
const CacheSettings = "Cache-Control:public, max-age="
type PublishConfig struct {
config.Config
Edition config.Edition
ReleaseMode config.ReleaseMode
GrafanaAPIKey string
WhatsNewURL string
ReleaseNotesURL string
DryRun bool
TTL string
SimulateRelease bool
}

243
pkg/build/cmd/deb.go Normal file
View File

@ -0,0 +1,243 @@
package main
import (
"fmt"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/packaging"
"github.com/grafana/grafana/pkg/infra/fs"
"github.com/urfave/cli/v2"
)
func writeAptlyConf(dbDir, repoDir string) error {
aptlyConf := fmt.Sprintf(`{
"rootDir": "%s",
"downloadConcurrency": 4,
"downloadSpeedLimit": 0,
"architectures": [],
"dependencyFollowSuggests": false,
"dependencyFollowRecommends": false,
"dependencyFollowAllVariants": false,
"dependencyFollowSource": false,
"dependencyVerboseResolve": false,
"gpgDisableSign": false,
"gpgDisableVerify": false,
"gpgProvider": "gpg2",
"downloadSourcePackages": false,
"skipLegacyPool": true,
"ppaDistributorID": "ubuntu",
"ppaCodename": "",
"skipContentsPublishing": false,
"FileSystemPublishEndpoints": {
"repo": {
"rootDir": "%s",
"linkMethod": "copy"
}
},
"S3PublishEndpoints": {},
"SwiftPublishEndpoints": {}
}
`, dbDir, repoDir)
home, err := os.UserHomeDir()
if err != nil {
return err
}
return os.WriteFile(filepath.Join(home, ".aptly.conf"), []byte(aptlyConf), 0600)
}
// downloadDebs downloads Deb packages.
func downloadDebs(cfg PublishConfig, workDir string) error {
if cfg.Bucket == "" {
panic("cfg.Bucket has to be set")
}
if !strings.HasSuffix(workDir, string(filepath.Separator)) {
workDir += string(filepath.Separator)
}
var version string
if cfg.ReleaseMode.Mode == config.TagMode {
if cfg.ReleaseMode.IsBeta {
version = strings.ReplaceAll(cfg.Version, "-", "~")
} else {
version = cfg.Version
}
}
if version == "" {
panic(fmt.Sprintf("Unrecognized version mode %s", cfg.ReleaseMode.Mode))
}
var sfx string
switch cfg.Edition {
case config.EditionOSS:
case config.EditionEnterprise:
sfx = EnterpriseSfx
default:
return fmt.Errorf("unrecognized edition %q", cfg.Edition)
}
u := fmt.Sprintf("gs://%s/%s/%s/grafana%s_%s_*.deb*", cfg.Bucket,
strings.ToLower(string(cfg.Edition)), ReleaseFolder, sfx, version)
log.Printf("Downloading Deb packages %q...\n", u)
args := []string{
"-m",
"cp",
u,
workDir,
}
//nolint:gosec
cmd := exec.Command("gsutil", args...)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to download Deb packages %q: %w\n%s", u, err, output)
}
return nil
}
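// For illustration (assuming the default "grafana-downloads" bucket and
// lowercased edition values of "oss"/"enterprise"), the pattern above expands
// to e.g.:
//
//	gs://grafana-downloads/oss/release/grafana_9.1.0_*.deb*
//
// and for an enterprise beta v9.1.0-beta1:
//
//	gs://grafana-downloads/enterprise/release/grafana-enterprise_9.1.0~beta1_*.deb*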
// updateDebRepo updates the Debian repository with the new release.
func updateDebRepo(cfg PublishConfig, workDir string) error {
if cfg.ReleaseMode.Mode != config.TagMode {
panic(fmt.Sprintf("Unsupported version mode: %s", cfg.ReleaseMode.Mode))
}
if cfg.ReleaseMode.IsTest {
if cfg.Config.DebDBBucket == packaging.DefaultDebDBBucket {
return fmt.Errorf("in test-release mode, the default Deb DB bucket shouldn't be used")
}
if cfg.Config.DebRepoBucket == packaging.DefaultDebRepoBucket {
return fmt.Errorf("in test-release mode, the default Deb repo bucket shouldn't be used")
}
}
if err := downloadDebs(cfg, workDir); err != nil {
return err
}
repoName := "grafana"
if cfg.ReleaseMode.IsBeta {
repoName = "beta"
}
repoRoot := path.Join(os.TempDir(), "deb-repo")
defer func() {
if err := os.RemoveAll(repoRoot); err != nil {
log.Printf("Failed to remove temporary directory %q: %s\n", repoRoot, err.Error())
}
}()
dbDir := filepath.Join(repoRoot, "db")
repoDir := filepath.Join(repoRoot, "repo")
tmpDir := filepath.Join(repoRoot, "tmp")
for _, dpath := range []string{dbDir, repoDir, tmpDir} {
if err := os.MkdirAll(dpath, 0750); err != nil {
return err
}
}
if err := writeAptlyConf(dbDir, repoDir); err != nil {
return err
}
// Download the Debian repo database
u := fmt.Sprintf("gs://%s/%s", cfg.DebDBBucket, strings.ToLower(string(cfg.Edition)))
log.Printf("Downloading Debian repo database from %s...\n", u)
//nolint:gosec
cmd := exec.Command("gsutil", "-m", "rsync", "-r", "-d", u, dbDir)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to download Debian repo database: %w\n%s", err, output)
}
if err := addPkgsToRepo(cfg, workDir, tmpDir, repoName); err != nil {
return err
}
log.Println("Updating local Debian package repository...")
// Update published local repository. This assumes that there exists already a local, published repo.
for _, tp := range []string{"stable", "beta"} {
passArg := fmt.Sprintf("-passphrase-file=%s", cfg.GPGPassPath)
//nolint:gosec
cmd := exec.Command("aptly", "publish", "update", "-batch", passArg, "-force-overwrite", tp,
"filesystem:repo:grafana")
if output, err := cmd.CombinedOutput(); err != nil {
return cli.NewExitError(fmt.Sprintf("failed to update Debian %q repository: %s", tp, output), 1)
}
}
// Update database in GCS
u = fmt.Sprintf("gs://%s/%s", cfg.DebDBBucket, strings.ToLower(string(cfg.Edition)))
if cfg.DryRun {
log.Printf("Simulating upload of Debian repo database to GCS (%s)\n", u)
} else {
log.Printf("Uploading Debian repo database to GCS (%s)...\n", u)
//nolint:gosec
cmd = exec.Command("gsutil", "-m", "rsync", "-r", "-d", dbDir, u)
if output, err := cmd.CombinedOutput(); err != nil {
return cli.NewExitError(fmt.Sprintf("failed to upload Debian repo database to GCS: %s", output), 1)
}
}
// Update metadata and binaries in repository bucket
u = fmt.Sprintf("gs://%s/%s/deb", cfg.DebRepoBucket, strings.ToLower(string(cfg.Edition)))
grafDir := filepath.Join(repoDir, "grafana")
if cfg.DryRun {
log.Printf("Simulating upload of Debian repo resources to GCS (%s)\n", u)
} else {
log.Printf("Uploading Debian repo resources to GCS (%s)...\n", u)
//nolint:gosec
cmd = exec.Command("gsutil", "-m", "rsync", "-r", "-d", grafDir, u)
if output, err := cmd.CombinedOutput(); err != nil {
return cli.NewExitError(fmt.Sprintf("failed to upload Debian repo resources to GCS: %s", output), 1)
}
allRepoResources := fmt.Sprintf("%s/**/*", u)
log.Printf("Setting cache ttl for Debian repo resources on GCS (%s)...\n", allRepoResources)
//nolint:gosec
cmd = exec.Command("gsutil", "-m", "setmeta", "-h", CacheSettings+cfg.TTL, allRepoResources)
if output, err := cmd.CombinedOutput(); err != nil {
return cli.NewExitError(fmt.Sprintf("failed to set cache ttl for Debian repo resources on GCS: %s", output), 1)
}
}
return nil
}
func addPkgsToRepo(cfg PublishConfig, workDir, tmpDir, repoName string) error {
var sfx string
switch cfg.Edition {
case config.EditionOSS:
case config.EditionEnterprise:
sfx = EnterpriseSfx
default:
return fmt.Errorf("unsupported edition %q", cfg.Edition)
}
log.Printf("Adding packages to Debian %q repo...\n", repoName)
// TODO: Be more specific about filename pattern
debs, err := filepath.Glob(filepath.Join(workDir, fmt.Sprintf("grafana%s*.deb", sfx)))
if err != nil {
return err
}
for _, deb := range debs {
basename := filepath.Base(deb)
if strings.Contains(basename, "latest") {
continue
}
tgt := filepath.Join(tmpDir, basename)
if err := fs.CopyFile(deb, tgt); err != nil {
return err
}
}
// XXX: Adds too many packages in enterprise (Arve: What does this mean exactly?)
//nolint:gosec
cmd := exec.Command("aptly", "repo", "add", "-force-replace", repoName, tmpDir)
if output, err := cmd.CombinedOutput(); err != nil {
return cli.NewExitError(fmt.Sprintf("failed to add packages to local Debian repository: %s", output), 1)
}
return nil
}

View File

@ -37,4 +37,13 @@ var (
Name: "sign",
Usage: "Enable plug-in signing (you must set GRAFANA_API_KEY)",
}
dryRunFlag = cli.BoolFlag{
Name: "dry-run",
Usage: "Only simulate actions",
}
gcpKeyFlag = cli.StringFlag{
Name: "gcp-key",
Usage: "Google Cloud Platform key file",
Required: true,
}
)

View File

@ -6,6 +6,7 @@ import (
"strings"
"github.com/grafana/grafana/pkg/build/docker"
"github.com/grafana/grafana/pkg/build/packaging"
"github.com/urfave/cli/v2"
)
@ -169,6 +170,54 @@ func main() {
},
},
},
{
Name: "publish",
Usage: "Publish packages to Grafana com and repositories",
Subcommands: cli.Commands{
{
Name: "packages",
Usage: "publish Grafana packages",
ArgsUsage: "[version]",
Action: PublishPackages,
Flags: []cli.Flag{
&jobsFlag,
&editionFlag,
&buildIDFlag,
&dryRunFlag,
&gcpKeyFlag,
&cli.StringFlag{
Name: "packages-bucket",
Value: "grafana-downloads",
Usage: "Google Cloud Storage Debian database bucket",
},
&cli.StringFlag{
Name: "deb-db-bucket",
Value: packaging.DefaultDebDBBucket,
Usage: "Google Cloud Storage Debian database bucket",
},
&cli.StringFlag{
Name: "deb-repo-bucket",
Value: packaging.DefaultDebRepoBucket,
Usage: "Google Cloud Storage Debian repo bucket",
},
&cli.StringFlag{
Name: "rpm-repo-bucket",
Value: packaging.DefaultRPMRepoBucket,
Usage: "Google Cloud Storage RPM repo bucket",
},
&cli.StringFlag{
Name: "ttl",
Value: packaging.DefaultTTLSeconds,
Usage: "Cache time to live for uploaded packages",
},
&cli.BoolFlag{
Name: "simulate-release",
Usage: "Only simulate creating release at grafana.com",
},
},
},
},
},
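// A hypothetical invocation of the new sub-command (binary name and flag
// values are illustrative only; GRAFANA_COM_API_KEY must be set in the
// environment, and --gcp-key is required):
//
//	build publish packages --edition oss --dry-run --gcp-key /path/to/key.json \
//	  --deb-db-bucket my-test-deb-db --deb-repo-bucket my-test-deb-repo \
//	  --rpm-repo-bucket my-test-rpm-repo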
}
if err := app.Run(os.Args); err != nil {

View File

@ -0,0 +1,112 @@
package main
import (
"fmt"
"log"
"os"
"strings"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/gcloud"
"github.com/grafana/grafana/pkg/build/gpg"
"github.com/urfave/cli/v2"
)
// PublishPackages implements the sub-command "publish-packages".
func PublishPackages(c *cli.Context) error {
if err := gcloud.ActivateServiceAccount(); err != nil {
return fmt.Errorf("couldn't activate service account, err: %w", err)
}
metadata, err := GenerateMetadata(c)
if err != nil {
return err
}
releaseMode, err := metadata.GetReleaseMode()
if err != nil {
return err
}
dryRun := c.Bool("dry-run")
simulateRelease := c.Bool("simulate-release")
// Test release mode and dryRun imply simulateRelease
if releaseMode.IsTest || dryRun {
simulateRelease = true
}
grafanaAPIKey := strings.TrimSpace(os.Getenv("GRAFANA_COM_API_KEY"))
if grafanaAPIKey == "" {
return cli.NewExitError("the environment variable GRAFANA_COM_API_KEY must be set", 1)
}
edition := config.Edition(c.String("edition"))
// TODO: Verify config values
cfg := PublishConfig{
Config: config.Config{
Version: metadata.GrafanaVersion,
Bucket: c.String("packages-bucket"),
DebDBBucket: c.String("deb-db-bucket"),
DebRepoBucket: c.String("deb-repo-bucket"),
RPMRepoBucket: c.String("rpm-repo-bucket"),
},
Edition: edition,
ReleaseMode: releaseMode,
GrafanaAPIKey: grafanaAPIKey,
DryRun: dryRun,
TTL: c.String("ttl"),
SimulateRelease: simulateRelease,
}
if err := gpg.LoadGPGKeys(&cfg.Config); err != nil {
return err
}
defer gpg.RemoveGPGFiles(cfg.Config)
// Only update package manager repos for releases.
// In test release mode, the operator should configure different GCS buckets for the package repos,
// so it should be safe.
if cfg.ReleaseMode.Mode == config.TagMode {
workDir := os.TempDir()
defer func() {
if err := os.RemoveAll(workDir); err != nil {
log.Printf("Failed to remove temporary directory %q: %s\n", workDir, err.Error())
}
}()
if err := updatePkgRepos(cfg, workDir); err != nil {
return err
}
}
log.Println("Successfully published packages!")
return nil
}
// updatePkgRepos updates package manager repositories.
func updatePkgRepos(cfg PublishConfig, workDir string) error {
if err := gpg.Import(cfg.Config); err != nil {
return err
}
// If updating the Deb repo fails, still continue with the RPM repo, so we don't have to retry
// both by hand
debErr := updateDebRepo(cfg, workDir)
if debErr != nil {
log.Printf("Updating Deb repo failed: %s\n", debErr)
}
rpmErr := updateRPMRepo(cfg, workDir)
if rpmErr != nil {
log.Printf("Updating RPM repo failed: %s\n", rpmErr)
}
if debErr != nil {
return debErr
}
if rpmErr != nil {
return rpmErr
}
log.Println("Updated Deb and RPM repos successfully!")
return nil
}

365
pkg/build/cmd/rpm.go Normal file
View File

@ -0,0 +1,365 @@
package main
import (
"bytes"
"crypto"
"fmt"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"github.com/grafana/grafana/pkg/build/config"
"github.com/grafana/grafana/pkg/build/packaging"
"github.com/grafana/grafana/pkg/infra/fs"
"golang.org/x/crypto/openpgp"
"golang.org/x/crypto/openpgp/armor"
"golang.org/x/crypto/openpgp/packet"
)
// updateRPMRepo updates the RPM repository with the new release.
func updateRPMRepo(cfg PublishConfig, workDir string) error {
if cfg.ReleaseMode.Mode != config.TagMode {
panic(fmt.Sprintf("Unsupported version mode %s", cfg.ReleaseMode.Mode))
}
if cfg.ReleaseMode.IsTest && cfg.Config.RPMRepoBucket == packaging.DefaultRPMRepoBucket {
return fmt.Errorf("in test-release mode, the default RPM repo bucket shouldn't be used")
}
if err := downloadRPMs(cfg, workDir); err != nil {
return err
}
repoRoot := path.Join(os.TempDir(), "rpm-repo")
defer func() {
if err := os.RemoveAll(repoRoot); err != nil {
log.Printf("Failed to remove %q: %s\n", repoRoot, err.Error())
}
}()
repoName := "rpm"
if cfg.ReleaseMode.IsBeta {
repoName = "rpm-beta"
}
folderURI := fmt.Sprintf("gs://%s/%s/%s", cfg.RPMRepoBucket, strings.ToLower(string(cfg.Edition)), repoName)
// Download the RPM database
log.Printf("Downloading RPM database from GCS (%s)...\n", folderURI)
//nolint:gosec
cmd := exec.Command("gsutil", "-m", "rsync", "-r", "-d", folderURI, repoRoot)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to download RPM database from GCS: %w\n%s", err, output)
}
// Add the new release to the repo
var sfx string
switch cfg.Edition {
case config.EditionOSS:
case config.EditionEnterprise:
sfx = EnterpriseSfx
default:
return fmt.Errorf("unsupported edition %q", cfg.Edition)
}
allRPMs, err := filepath.Glob(filepath.Join(workDir, fmt.Sprintf("grafana%s-*.rpm", sfx)))
if err != nil {
return fmt.Errorf("failed to list RPMs in %q: %w", workDir, err)
}
rpms := []string{}
for _, rpm := range allRPMs {
if strings.Contains(rpm, "-latest") {
continue
}
rpms = append(rpms, rpm)
}
// XXX: What does the following comment mean?
// adds to many files for enterprise
for _, rpm := range rpms {
if err := fs.CopyFile(rpm, filepath.Join(repoRoot, filepath.Base(rpm))); err != nil {
return err
}
}
//nolint:gosec
cmd = exec.Command("createrepo", repoRoot)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to create repo at %q: %w\n%s", repoRoot, err, output)
}
if err := signRPMRepo(repoRoot, cfg); err != nil {
return err
}
// Update the repo in GCS
// Sync packages first to avoid cache misses
if cfg.DryRun {
log.Printf("Simulating upload of RPMs to GCS (%s)\n", folderURI)
} else {
log.Printf("Uploading RPMs to GCS (%s)...\n", folderURI)
args := []string{"-m", "cp"}
args = append(args, rpms...)
args = append(args, folderURI)
//nolint:gosec
cmd = exec.Command("gsutil", args...)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to upload RPMs to GCS: %w\n%s", err, output)
}
}
if cfg.DryRun {
log.Printf("Simulating upload of RPM repo metadata to GCS (%s)\n", folderURI)
} else {
log.Printf("Uploading RPM repo metadata to GCS (%s)...\n", folderURI)
//nolint:gosec
cmd = exec.Command("gsutil", "-m", "rsync", "-r", "-d", repoRoot, folderURI)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to upload RPM repo metadata to GCS: %w\n%s", err, output)
}
allRepoResources := fmt.Sprintf("%s/**/*", folderURI)
log.Printf("Setting cache ttl for RPM repo resources on GCS (%s)...\n", allRepoResources)
//nolint:gosec
cmd = exec.Command("gsutil", "-m", "setmeta", "-h", CacheSettings+cfg.TTL, allRepoResources)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to set cache ttl for RPM repo resources on GCS: %w\n%s", err, output)
}
}
return nil
}
// downloadRPMs downloads RPM packages.
func downloadRPMs(cfg PublishConfig, workDir string) error {
if !strings.HasSuffix(workDir, string(filepath.Separator)) {
workDir += string(filepath.Separator)
}
var version string
if cfg.ReleaseMode.Mode == config.TagMode {
if cfg.ReleaseMode.IsBeta {
version = strings.ReplaceAll(cfg.Version, "-", "~")
} else {
version = cfg.Version
}
}
if version == "" {
panic(fmt.Sprintf("Unrecognized version mode %s", cfg.ReleaseMode.Mode))
}
var sfx string
switch cfg.Edition {
case config.EditionOSS:
case config.EditionEnterprise:
sfx = EnterpriseSfx
default:
return fmt.Errorf("unrecognized edition %q", cfg.Edition)
}
u := fmt.Sprintf("gs://%s/%s/%s/grafana%s-%s-*.*.rpm*", cfg.Bucket,
strings.ToLower(string(cfg.Edition)), ReleaseFolder, sfx, version)
log.Printf("Downloading RPM packages %q...\n", u)
args := []string{
"-m",
"cp",
u,
workDir,
}
//nolint:gosec
cmd := exec.Command("gsutil", args...)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to download RPM packages %q: %w\n%s", u, err, output)
}
return nil
}
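// As with the Deb packages, the pattern above expands to e.g. (assuming the
// default bucket and an OSS tag build of 9.1.0):
//
//	gs://grafana-downloads/oss/release/grafana-9.1.0-*.*.rpm*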
func getPublicKey(cfg PublishConfig) (*packet.PublicKey, error) {
f, err := os.Open(cfg.GPGPublicKey)
if err != nil {
return nil, fmt.Errorf("failed to open %q: %w", cfg.GPGPublicKey, err)
}
defer func(f *os.File) {
err := f.Close()
if err != nil {
return
}
}(f)
block, err := armor.Decode(f)
if err != nil {
return nil, err
}
if block.Type != openpgp.PublicKeyType {
return nil, fmt.Errorf("invalid public key block type: %q", block.Type)
}
packetReader := packet.NewReader(block.Body)
pkt, err := packetReader.Next()
if err != nil {
return nil, err
}
key, ok := pkt.(*packet.PublicKey)
if !ok {
return nil, fmt.Errorf("got non-public key from packet reader: %T", pkt)
}
return key, nil
}
func getPrivateKey(cfg PublishConfig) (*packet.PrivateKey, error) {
f, err := os.Open(cfg.GPGPrivateKey)
if err != nil {
return nil, fmt.Errorf("failed to open %q: %w", cfg.GPGPrivateKey, err)
}
defer func(f *os.File) {
err := f.Close()
if err != nil {
return
}
}(f)
passphraseB, err := os.ReadFile(cfg.GPGPassPath)
if err != nil {
return nil, fmt.Errorf("failed to read %q: %w", cfg.GPGPrivateKey, err)
}
passphraseB = bytes.TrimSuffix(passphraseB, []byte("\n"))
block, err := armor.Decode(f)
if err != nil {
return nil, err
}
if block.Type != openpgp.PrivateKeyType {
return nil, fmt.Errorf("invalid private key block type: %q", block.Type)
}
packetReader := packet.NewReader(block.Body)
pkt, err := packetReader.Next()
if err != nil {
return nil, err
}
key, ok := pkt.(*packet.PrivateKey)
if !ok {
return nil, fmt.Errorf("got non-private key from packet reader: %T", pkt)
}
if err := key.Decrypt(passphraseB); err != nil {
return nil, fmt.Errorf("failed to decrypt private key: %w", err)
}
return key, nil
}
// signRPMRepo signs an RPM repository using PGP.
// The signature gets written to the file repodata/repomd.xml.asc.
func signRPMRepo(repoRoot string, cfg PublishConfig) error {
if cfg.GPGPublicKey == "" || cfg.GPGPrivateKey == "" {
return fmt.Errorf("private or public key is empty")
}
log.Printf("Signing RPM repo")
pubKey, err := getPublicKey(cfg)
if err != nil {
return err
}
privKey, err := getPrivateKey(cfg)
if err != nil {
return err
}
pcfg := packet.Config{
DefaultHash: crypto.SHA256,
DefaultCipher: packet.CipherAES256,
DefaultCompressionAlgo: packet.CompressionZLIB,
CompressionConfig: &packet.CompressionConfig{
Level: 9,
},
RSABits: 4096,
}
currentTime := pcfg.Now()
uid := packet.NewUserId("", "", "")
isPrimaryID := false
keyLifetimeSecs := uint32(86400 * 365)
signer := openpgp.Entity{
PrimaryKey: pubKey,
PrivateKey: privKey,
Identities: map[string]*openpgp.Identity{
uid.Id: {
Name: uid.Name,
UserId: uid,
SelfSignature: &packet.Signature{
CreationTime: currentTime,
SigType: packet.SigTypePositiveCert,
PubKeyAlgo: packet.PubKeyAlgoRSA,
Hash: pcfg.Hash(),
IsPrimaryId: &isPrimaryID,
FlagsValid: true,
FlagSign: true,
FlagCertify: true,
IssuerKeyId: &pubKey.KeyId,
},
},
},
Subkeys: []openpgp.Subkey{
{
PublicKey: pubKey,
PrivateKey: privKey,
Sig: &packet.Signature{
CreationTime: currentTime,
SigType: packet.SigTypeSubkeyBinding,
PubKeyAlgo: packet.PubKeyAlgoRSA,
Hash: pcfg.Hash(),
PreferredHash: []uint8{8}, // SHA-256
FlagsValid: true,
FlagEncryptStorage: true,
FlagEncryptCommunications: true,
IssuerKeyId: &pubKey.KeyId,
KeyLifetimeSecs: &keyLifetimeSecs,
},
},
},
}
// Ignore gosec G304 as this function is only used in the build process.
//nolint:gosec
freader, err := os.Open(filepath.Join(repoRoot, "repodata", "repomd.xml"))
if err != nil {
return err
}
defer func(freader *os.File) {
err := freader.Close()
if err != nil {
return
}
}(freader)
// Ignore gosec G304 as this function is only used in the build process.
//nolint:gosec
sigwriter, err := os.Create(filepath.Join(repoRoot, "repodata", "repomd.xml.asc"))
if err != nil {
return err
}
defer func(sigwriter *os.File) {
err := sigwriter.Close()
if err != nil {
return
}
}(sigwriter)
if err := openpgp.ArmoredDetachSignText(sigwriter, &signer, freader, nil); err != nil {
return fmt.Errorf("failed to write PGP signature: %w", err)
}
if err := sigwriter.Close(); err != nil {
return fmt.Errorf("failed to write PGP signature: %w", err)
}
return nil
}
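// The resulting detached, armored signature can be checked manually, e.g.:
//
//	gpg --verify repodata/repomd.xml.asc repodata/repomd.xml
//
// (assuming the signing public key has been imported into the local keyring).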

146
pkg/build/cmd/rpm_test.go Normal file
View File

@ -0,0 +1,146 @@
package main
import (
"os"
"path/filepath"
"testing"
"github.com/grafana/grafana/pkg/build/config"
"github.com/stretchr/testify/require"
)
const pubKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: OpenPGP.js v4.10.10
Comment: https://openpgpjs.org
xsBNBGM1b9wBCADZM49X7vwOS93KbgA6yhpwrYf8ZlzksGcDaYgp1IzvqHbs
xeU1mmBYVH/bSKRDG0tt3Qdky4Nvl4Oqd+g0e2ZGjmlEy9zUiPTTK/BtXT+5
s8oqih2NIAkyF91BNZABAgvh/vJdYImhYeUQBDqMJgqZ/Y/Ha31N7rSW+jUt
LHspbN0ztJYjuEd/bg2NKH7Gs/AyNvX9IQTC4k7iRRafx7q/PBCVtsk+NCwz
BEkL93xpAdcdYiMNrRP2eIHQjBmNZ/oUCkcDsLCBvcSq6P2lGpNnpPzVoTJf
v2qrWkVn5txJJsOkmBGpEDbECPunVilrWO6RPomP0yYkr6NE4XeCJ3QhABEB
AAHNGWR1bW15IDxkdW1teUBob3RtYWlsLmNvbT7CwI0EEAEIACAFAmM1b9wG
CwkHCAMCBBUICgIEFgIBAAIZAQIbAwIeAQAhCRAoJ1i5w6kkAxYhBCQv+iwt
IFn7vj9PLygnWLnDqSQDPxkH/0Ju2Cah+bOxl09uv2Ft2BVlQh0u+wJyRVgs
KxTxldAXFZwMrN4wK/GUoGWDiy2tzNtoVE6GpxWUj+LvSGFaVLNVjW+Le77I
BP/sl1wKHJbQhseKc7Mz5Zj3i0F1FPM+rLik7tNk6kiEBqYVyyXahyT98Hu1
1OKEV+8NiRG47iNgd/dpgEdVSS4DN/dL6m5q+CVy9YnlR+wXxF/2xcMmWBzR
V2cPVw0JzunpUV8lDDQ/n1sPw61D3oL1aH0bkn8aA8pEceKOVIYOaja7LkLX
uSlROlALA/M2fuubradW9I3FcrJNn+/xA52el2l/Hn/Syf9GQV/Ll/R+qKIo
Z57xWd7OwE0EYzVv3AEIAJl/PNYOF2prNKY58BfZ74XurDb9mNlZ1wsIqrOu
J/euzHEnzkCAjMUuXV7wcugjQlmpcZn6Y0QmQ2uX7SwPCMovDvngbXeAfbdd
6FUKecQ0sG54Plm8HSMNdjetdUVl7ACxjJO8Rdc/Asx7ua7gMm42CVfqMj4L
qN5foUBlaKJ1iGKUpQ+673UQWMYeOBuu9G8awbSzGaphN97CIX7xEMGzGeff
yHLHK+MsfX935uDgDwJQzxJKEugIJDMKgWOLgVz1jRCsJKHlywHTWpVuMiKY
Wnuq4tDNLBUQtaRL7uclG7Wejw/XNN0uD/zNHPgF5rmlYHVhrtDbBCP2XqTn
WU8AEQEAAcLAdgQYAQgACQUCYzVv3AIbDAAhCRAoJ1i5w6kkAxYhBCQv+iwt
IFn7vj9PLygnWLnDqSQDFqYH/AkdNaPUQlE7RQBigNRGOFBuqjhbLsV/rZf+
/4K6wDHojM606lgLm57T4NUXnk53VIF3KO8+v8N11mCtPb+zBngfvVU14COC
HEDNdOK19TlR+tH25cftfUiF+OJsgMQysErGuFEtwLE6TNzpQIcnw7SbjxMr
EGacF9xCBKexB6MlR3GwJ2LBUJm3Lq/fvqImztoTlKDsrpk4JOH5FfYG+G2f
1zU73fVsCCElX4qA/49rRQf0RNfhjRjmHULP8hSvCXUEhfiBggEgxof/vKlC
qauHC55luuIeabju8HaXTjpz019cq+3IUgewX/ky0PhQXEW9SoODKabPY2yS
yUbHFm4=
=OCSx
-----END PGP PUBLIC KEY BLOCK-----
`
const privKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
Version: OpenPGP.js v4.10.10
Comment: https://openpgpjs.org
xcMGBGM1b9wBCADZM49X7vwOS93KbgA6yhpwrYf8ZlzksGcDaYgp1IzvqHbs
xeU1mmBYVH/bSKRDG0tt3Qdky4Nvl4Oqd+g0e2ZGjmlEy9zUiPTTK/BtXT+5
s8oqih2NIAkyF91BNZABAgvh/vJdYImhYeUQBDqMJgqZ/Y/Ha31N7rSW+jUt
LHspbN0ztJYjuEd/bg2NKH7Gs/AyNvX9IQTC4k7iRRafx7q/PBCVtsk+NCwz
BEkL93xpAdcdYiMNrRP2eIHQjBmNZ/oUCkcDsLCBvcSq6P2lGpNnpPzVoTJf
v2qrWkVn5txJJsOkmBGpEDbECPunVilrWO6RPomP0yYkr6NE4XeCJ3QhABEB
AAH+CQMIuDEg1p2Y6zbg0EQ3JvsP7VQBGsuXg9khTjktoxhwici/d+rcIW7Q
SuKWJGqs83LTeeGmS+9etNtf3LqRdPnI7f0qbT47mAqvp2gn7Rvbrabk+5Jj
AQS/DDLlWNiWsPrMBMZ7TZpiQ+g7gnIZaV10taFupYJr69AjtED+NPu8LOvZ
2ItK9xBqOwl5mkNe7ps/uTT6jwYSWxeObp4ymnLDLONY3eHuaYP9QB/NSlw0
80Wo5qBPljlU8JdbEoLFU4gY6wkEbLa/DVbEVXSHfWVtr8jZbzHW39TBxpG2
Dxk52EVyu8Gf9XIQN2ZjDP3CzBGmlxJjLxLUD4GmRSPaDGK7LCN9ZztaXy3Y
WtF6RJfNzEoDdCaV0kkM3AskQDsQ+CWsDVsbbQyDtfncVG6cDzqmoDrBCSq1
Bsoz07k2hj9VP0aP2xU78qcpJWO2rmhAHy9W2NqjXSBJriy1JXrK5o2/lUUr
94R8NLvqeVbInUw/zovVctaujHIBhNKL9wn2T0LWrA2OEJUz0HWo6ZQSaNzl
Obtz0M8gCj/4sDYjRAiDk50FzOcZp8ijYQFVypQTVzHki5T/JfvBnMpo+4Uc
93QB1woyiZuJCIj7DpY3MkZ5fTDtgJPa+0k8r+lPnAmE6auGUaH7JRKhbBu0
8faDwaiSv3kD3EEDffoWX/axLLYta9jTDnitTXbf1jY03pdJeiU/ZX0BQTZi
pehZ/6yi/qXM/F8HDVEWriSLqVsMLrXXeFIojAc3fJ/QPpAZSx6E/Fe2xh8c
yURov5krU1zNJDwqC3SjHsHQ/UlLtamDDmmuXX+xb1CwIDd6WksGsCbe/LoN
TxViV4hOjIeh5TwRP5jQaqsVKCT8fzoDrRXy76taT+Zaaen+J6rC51HQwyEq
Qgf1e7WodzN3r10UV6/L/wNkfdWJgf5MzRlkdW1teSA8ZHVtbXlAaG90bWFp
bC5jb20+wsCNBBABCAAgBQJjNW/cBgsJBwgDAgQVCAoCBBYCAQACGQECGwMC
HgEAIQkQKCdYucOpJAMWIQQkL/osLSBZ+74/Ty8oJ1i5w6kkAz8ZB/9Cbtgm
ofmzsZdPbr9hbdgVZUIdLvsCckVYLCsU8ZXQFxWcDKzeMCvxlKBlg4strczb
aFROhqcVlI/i70hhWlSzVY1vi3u+yAT/7JdcChyW0IbHinOzM+WY94tBdRTz
Pqy4pO7TZOpIhAamFcsl2ock/fB7tdTihFfvDYkRuO4jYHf3aYBHVUkuAzf3
S+puavglcvWJ5UfsF8Rf9sXDJlgc0VdnD1cNCc7p6VFfJQw0P59bD8OtQ96C
9Wh9G5J/GgPKRHHijlSGDmo2uy5C17kpUTpQCwPzNn7rm62nVvSNxXKyTZ/v
8QOdnpdpfx5/0sn/RkFfy5f0fqiiKGee8Vnex8MGBGM1b9wBCACZfzzWDhdq
azSmOfAX2e+F7qw2/ZjZWdcLCKqzrif3rsxxJ85AgIzFLl1e8HLoI0JZqXGZ
+mNEJkNrl+0sDwjKLw754G13gH23XehVCnnENLBueD5ZvB0jDXY3rXVFZewA
sYyTvEXXPwLMe7mu4DJuNglX6jI+C6jeX6FAZWiidYhilKUPuu91EFjGHjgb
rvRvGsG0sxmqYTfewiF+8RDBsxnn38hyxyvjLH1/d+bg4A8CUM8SShLoCCQz
CoFji4Fc9Y0QrCSh5csB01qVbjIimFp7quLQzSwVELWkS+7nJRu1no8P1zTd
Lg/8zRz4Bea5pWB1Ya7Q2wQj9l6k51lPABEBAAH+CQMIwr3YSD15lYrgItoy
MDsrWqMMHJsSxusbQiK0KLgjFBuDuTolsu9zqQCHEm2dxChqT+yQ6AeeynRD
pDMVkHEvhShvGUhB6Bu5wClHj8+xFpyprShE/KbEuppNdfIRgWVYc7UX+TYz
6BymqhzKyIw2Q33ocrXgTRZ02HM7urKVvAhsJCEff0paByOzCspiv/TPRihi
7GAZY0wFLDPe9qr+07ExT2ndMDX8Xb1mlg8IeaSWUaNilm7M8oW3xnUBnXeD
XglTkObGeRVXAINim9uL4soT3lamN4QwgBus9WzFqOOCMk11fjatY8kY1zX9
epO27igGtMwTFl11XcQLlFyvlgPBeWtFam7RiDPa3VF0XubmBYZBmqWpccWs
xl0xHCtUK7Pd0O4kSqxsL9cB0MX9iR1yPkM8wA++Mp6pEfNcXUrGIdlie0H5
aCq8rguYG5VuFosSUatdCbpRVGBxGnhxHes0mNTPgwAoAVNYBWXH5iq5HxKy
i3Zy5V7ZKSyDrfg/0AajtDW5h3g+wglUI9UCdT4tNLFwYbhHqGH2xdBztYI0
iSJ7COLmo26smkA8UXxsrlw8PWPzpbhQOG06EbMjncJimJDMI1YDC6ag7M5l
OcG9uXZQ22ipAz5CSPtyL0/0WAp4yyn1VQRBK42n/y9ld+dMbuq6majazb15
6sEgHUKERcwGs0Ftfj5Zamwhm7ZoIe26XEqvcshpQpv1Q9hktluVeSbiVaBe
Nl8zUZHlo/0zUc5j7G5Up58t+ChSsyOFJGM7CGkKHHawBZYCs0EcpsdAPr3T
1C8A0Wt9POTETYM4pZFOoLds6VTolZZcxeBN5YPoN2kbwFpOgPJN09Zz8z8S
4psQRV4KQ92XDPZ/6q2BH5i2+F2ZwUsvCR4DwgzbVGZSRV6mM7lkjZSmnWfC
AE7DUl7XwsB2BBgBCAAJBQJjNW/cAhsMACEJECgnWLnDqSQDFiEEJC/6LC0g
Wfu+P08vKCdYucOpJAMWpgf8CR01o9RCUTtFAGKA1EY4UG6qOFsuxX+tl/7/
grrAMeiMzrTqWAubntPg1ReeTndUgXco7z6/w3XWYK09v7MGeB+9VTXgI4Ic
QM104rX1OVH60fblx+19SIX44myAxDKwSsa4US3AsTpM3OlAhyfDtJuPEysQ
ZpwX3EIEp7EHoyVHcbAnYsFQmbcur9++oibO2hOUoOyumTgk4fkV9gb4bZ/X
NTvd9WwIISVfioD/j2tFB/RE1+GNGOYdQs/yFK8JdQSF+IGCASDGh/+8qUKp
q4cLnmW64h5puO7wdpdOOnPTX1yr7chSB7Bf+TLQ+FBcRb1Kg4Mpps9jbJLJ
RscWbg==
=KJNy
-----END PGP PRIVATE KEY BLOCK-----
`
// Dummy GPG keys, used only for testing
// nolint:gosec
const passPhrase = `MkDgjkrgdGxt`
func TestSignRPMRepo(t *testing.T) {
repoDir := t.TempDir()
workDir := t.TempDir()
pubKeyPath := filepath.Join(workDir, "pub.key")
err := os.WriteFile(pubKeyPath, []byte(pubKey), 0600)
require.NoError(t, err)
privKeyPath := filepath.Join(workDir, "priv.key")
err = os.WriteFile(privKeyPath, []byte(privKey), 0600)
require.NoError(t, err)
passPhrasePath := filepath.Join(workDir, "passphrase")
err = os.WriteFile(passPhrasePath, []byte(passPhrase), 0600)
require.NoError(t, err)
err = os.Mkdir(filepath.Join(repoDir, "repodata"), 0700)
require.NoError(t, err)
err = os.WriteFile(filepath.Join(repoDir, "repodata", "repomd.xml"), []byte("<xml></xml>"), 0600)
require.NoError(t, err)
cfg := PublishConfig{
Config: config.Config{
GPGPrivateKey: privKeyPath,
GPGPublicKey: pubKeyPath,
GPGPassPath: passPhrasePath,
},
}
err = signRPMRepo(repoDir, cfg)
require.NoError(t, err)
}

View File

@ -208,9 +208,19 @@ var adminCommands = []*cli.Command{
},
{
Name: "generate-file",
Usage: "creates a conflict users file.. Safe to execute multiple times.",
Usage: "creates a conflict users file. Safe to execute multiple times.",
Action: runGenerateConflictUsersFile(),
},
{
Name: "validate-file",
Usage: "validates the conflict users file. Safe to execute multiple times.",
Action: runValidateConflictUsersFile(),
},
{
Name: "ingest-file",
Usage: "ingests the conflict users file",
Action: runIngestConflictUsersFile(),
},
},
},
},

View File

@ -1,8 +1,13 @@
package commands
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/fatih/color"
@ -10,19 +15,49 @@ import (
"github.com/grafana/grafana/pkg/cmd/grafana-cli/logger"
"github.com/grafana/grafana/pkg/cmd/grafana-cli/utils"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/services/sqlstore/db"
"github.com/grafana/grafana/pkg/services/sqlstore/migrations"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/services/user/userimpl"
"github.com/grafana/grafana/pkg/setting"
"github.com/urfave/cli/v2"
)
func getSqlStore(context *cli.Context) (*sqlstore.SQLStore, error) {
cmd := &utils.ContextCommandLine{Context: context}
cfg, err := initCfg(cmd)
cfg.Logger = nil
func initConflictCfg(cmd *utils.ContextCommandLine) (*setting.Cfg, error) {
configOptions := strings.Split(cmd.String("configOverrides"), " ")
configOptions = append(configOptions, cmd.Args().Slice()...)
cfg, err := setting.NewCfgFromArgs(setting.CommandLineArgs{
Config: cmd.ConfigFile(),
HomePath: cmd.HomePath(),
Args: append(configOptions, "cfg:log.level=error"), // trailing arguments have precedence over the options string
})
if err != nil {
return nil, err
}
return cfg, nil
}
func initializeConflictResolver(cmd *utils.ContextCommandLine, f Formatter, ctx *cli.Context) (*ConflictResolver, error) {
cfg, err := initConflictCfg(cmd)
if err != nil {
return nil, fmt.Errorf("%v: %w", "failed to load configuration", err)
}
s, err := getSqlStore(cfg)
if err != nil {
return nil, fmt.Errorf("%v: %w", "failed to get to sql", err)
}
conflicts, err := GetUsersWithConflictingEmailsOrLogins(ctx, s)
if err != nil {
return nil, fmt.Errorf("%v: %w", "failed to get users with conflicting logins", err)
}
resolver := ConflictResolver{Users: conflicts}
resolver.BuildConflictBlocks(conflicts, f)
return &resolver, nil
}
func getSqlStore(cfg *setting.Cfg) (*sqlstore.SQLStore, error) {
tracer, err := tracing.ProvideService(cfg)
if err != nil {
return nil, fmt.Errorf("%v: %w", "failed to initialize tracer service", err)
@ -33,28 +68,21 @@ func getSqlStore(context *cli.Context) (*sqlstore.SQLStore, error) {
func runListConflictUsers() func(context *cli.Context) error {
return func(context *cli.Context) error {
s, err := getSqlStore(context)
cmd := &utils.ContextCommandLine{Context: context}
whiteBold := color.New(color.FgWhite).Add(color.Bold)
r, err := initializeConflictResolver(cmd, whiteBold.Sprintf, context)
if err != nil {
return fmt.Errorf("%v: %w", "failed to get to sql", err)
return fmt.Errorf("%v: %w", "failed to initialize conflict resolver", err)
}
conflicts, err := GetUsersWithConflictingEmailsOrLogins(context, s)
if err != nil {
return fmt.Errorf("%v: %w", "failed to get users with conflicting logins", err)
}
if len(conflicts) < 1 {
if len(r.Users) < 1 {
logger.Info(color.GreenString("No Conflicting users found.\n\n"))
return nil
}
whiteBold := color.New(color.FgWhite).Add(color.Bold)
resolver := ConflictResolver{Users: conflicts}
resolver.BuildConflictBlocks(whiteBold.Sprintf)
logger.Infof("\n\nShowing Conflicts\n\n")
logger.Infof(resolver.ToStringPresentation())
logger.Infof("\n\nShowing conflicts\n\n")
logger.Infof("%s", r.ToStringPresentation())
logger.Infof("\n")
// TODO: remove line when finished
// this is only for debugging
if len(resolver.DiscardedBlocks) != 0 {
resolver.logDiscardedUsers()
if len(r.DiscardedBlocks) != 0 {
r.logDiscardedUsers()
}
return nil
}
@ -62,74 +90,328 @@ func runListConflictUsers() func(context *cli.Context) error {
func runGenerateConflictUsersFile() func(context *cli.Context) error {
return func(context *cli.Context) error {
s, err := getSqlStore(context)
cmd := &utils.ContextCommandLine{Context: context}
r, err := initializeConflictResolver(cmd, fmt.Sprintf, context)
if err != nil {
return fmt.Errorf("%v: %w", "failed to get to sql", err)
return fmt.Errorf("%v: %w", "failed to initialize conflict resolver", err)
}
conflicts, err := GetUsersWithConflictingEmailsOrLogins(context, s)
if err != nil {
return fmt.Errorf("%v: %w", "failed to get users with conflicting logins", err)
}
if len(conflicts) < 1 {
if len(r.Users) < 1 {
logger.Info(color.GreenString("No Conflicting users found.\n\n"))
return nil
}
resolver := ConflictResolver{Users: conflicts}
resolver.BuildConflictBlocks(fmt.Sprintf)
tmpFile, err := generateConflictUsersFile(&resolver)
tmpFile, err := generateConflictUsersFile(r)
if err != nil {
return fmt.Errorf("generating file return error: %w", err)
}
logger.Infof("\n\ngenerated file\n")
logger.Infof("%s\n\n", tmpFile.Name())
logger.Infof("once the file is edited and resolved conflicts, you can either validate or ingest the file\n\n")
if len(resolver.DiscardedBlocks) != 0 {
resolver.logDiscardedUsers()
if len(r.DiscardedBlocks) != 0 {
r.logDiscardedUsers()
}
return nil
}
}
func runValidateConflictUsersFile() func(context *cli.Context) error {
return func(context *cli.Context) error {
cmd := &utils.ContextCommandLine{Context: context}
r, err := initializeConflictResolver(cmd, fmt.Sprintf, context)
if err != nil {
return fmt.Errorf("%v: %w", "failed to initialize conflict resolver", err)
}
// read in the file to validate
arg := cmd.Args().First()
if arg == "" {
return errors.New("please specify a absolute path to file to read from")
}
b, err := os.ReadFile(filepath.Clean(arg))
if err != nil {
return fmt.Errorf("could not read file with error %e", err)
}
validErr := getValidConflictUsers(r, b)
if validErr != nil {
return fmt.Errorf("could not validate file with error %s", err)
}
logger.Info("File validation complete without errors.\n\n File can be used with ingesting command `ingest-file`.\n\n")
return nil
}
}
func runIngestConflictUsersFile() func(context *cli.Context) error {
return func(context *cli.Context) error {
cmd := &utils.ContextCommandLine{Context: context}
r, err := initializeConflictResolver(cmd, fmt.Sprintf, context)
if err != nil {
return fmt.Errorf("%v: %w", "failed to initialize conflict resolver", err)
}
// read in the file to ingest
arg := cmd.Args().First()
if arg == "" {
return errors.New("please specify a absolute path to file to read from")
}
b, err := os.ReadFile(filepath.Clean(arg))
if err != nil {
return fmt.Errorf("could not read file with error %e", err)
}
validErr := getValidConflictUsers(r, b)
if validErr != nil {
return fmt.Errorf("could not validate file with error %s", validErr)
}
// no need to rebuild the blocks here:
// getValidConflictUsers already rebuilds them from the file
if len(r.ValidUsers) == 0 {
return fmt.Errorf("no valid users found in the file")
}
r.showChanges()
if !confirm("\n\nWe encourage users to create a db backup before running this command. \n Proceed with operation?") {
return fmt.Errorf("user cancelled")
}
err = r.MergeConflictingUsers(context.Context)
if err != nil {
return fmt.Errorf("not able to merge with %e", err)
}
logger.Info("\n\nconflicts resolved.\n")
return nil
}
}
func getDocumentationForFile() string {
return `# Conflicts File
# This file is generated by the grafana-cli command ` + color.CyanString("grafana-cli admin user-manager conflicts generate-file") + `.
#
# Commands:
# +, keep <user> = keep user
# -, delete <user> = delete user
#
# The fields conflict_email and conflict_login
# indicate that we see a conflict in email and/or login with another user.
# Both these fields can be true.
#
# There needs to be exactly one picked user per conflict block.
#
# The lines can be re-ordered.
#
# If you want to defer resolving a specific conflict block,
# delete all lines regarding that conflict block.
#
`
}
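// For illustration, a resolved conflict block in the edited file could look
// like this (the ids and timestamps are made up):
//
//	conflict: hej@test.com
//	+ id: 1, email: hej@test.com, login: hej@test.com, last_seen_at: 2012-07-26T16:08:11Z, auth_module: , conflict_email: true, conflict_login: true
//	- id: 2, email: HEJ@TEST.COM, login: HEJ@TEST.COM, last_seen_at: 2012-07-26T16:08:11Z, auth_module: , conflict_email: true, conflict_login: true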
func generateConflictUsersFile(r *ConflictResolver) (*os.File, error) {
tmpFile, err := os.CreateTemp(os.TempDir(), "conflicting_user_*.diff")
if err != nil {
return nil, err
}
if _, err := tmpFile.Write([]byte(getDocumentationForFile())); err != nil {
return nil, err
}
if _, err := tmpFile.Write([]byte(r.ToStringPresentation())); err != nil {
return nil, err
}
return tmpFile, nil
}
func getValidConflictUsers(r *ConflictResolver, b []byte) error {
newConflicts := make(ConflictingUsers, 0)
// need to verify that id or email exists
previouslySeenIds := map[string]bool{}
previouslySeenEmails := map[string]bool{}
for _, users := range r.Blocks {
for _, u := range users {
previouslySeenIds[strings.ToLower(u.ID)] = true
previouslySeenEmails[strings.ToLower(u.Email)] = true
}
}
// tested in https://regex101.com/r/una3zC/1
diffPattern := `^[+-]`
// compile the regex once, since it is used in the loop below
matchingExpression, err := regexp.Compile(diffPattern)
if err != nil {
return fmt.Errorf("unable to compile regex %s: %w", diffPattern, err)
}
for _, row := range strings.Split(string(b), "\n") {
if row == "" {
// end of file
break
}
// if the row starts with a #, it is a comment
if row[0] == '#' {
// comment
continue
}
entryRow := matchingExpression.Match([]byte(row))
if !entryRow {
// block header row, e.g. "conflict: hej"
continue
}
newUser := &ConflictingUser{}
err := newUser.Marshal(row)
if err != nil {
return fmt.Errorf("could not parse the content of the file with error %e", err)
}
if !previouslySeenEmails[strings.ToLower(newUser.Email)] {
return fmt.Errorf("not valid email: %s, email not in previous conflicts seen", newUser.Email)
}
// valid entry
newConflicts = append(newConflicts, *newUser)
}
r.ValidUsers = newConflicts
r.BuildConflictBlocks(newConflicts, fmt.Sprintf)
return nil
}
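// Sketch of how the rows above are classified (illustrative):
//
//	"# a comment"         -> skipped, starts with '#'
//	"conflict: hej"       -> skipped, block header without a +/- prefix
//	"+ id: 1, email: ..." -> parsed as a user to keep
//	"- id: 2, email: ..." -> parsed as a user to delete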
func (r *ConflictResolver) MergeConflictingUsers(ctx context.Context) error {
for block, users := range r.Blocks {
if len(users) < 2 {
return fmt.Errorf("not enough users to perform merge, found %d for id %s, should be at least 2", len(users), block)
}
var intoUser user.User
var intoUserId int64
var fromUserIds []int64
// creating a session for each block of users
// we want to roll back in case something happens during update / delete
if err := r.Store.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
err := sess.Begin()
if err != nil {
return fmt.Errorf("could not open a db session: %w", err)
}
for _, u := range users {
if u.Direction == "+" {
id, err := strconv.ParseInt(u.ID, 10, 64)
if err != nil {
return fmt.Errorf("could not convert id in +")
}
intoUserId = id
} else if u.Direction == "-" {
id, err := strconv.ParseInt(u.ID, 10, 64)
if err != nil {
return fmt.Errorf("could not convert id in -")
}
fromUserIds = append(fromUserIds, id)
}
}
if _, err := sess.ID(intoUserId).Where(sqlstore.NotServiceAccountFilter(r.Store)).Get(&intoUser); err != nil {
return fmt.Errorf("could not find intoUser: %w", err)
}
for _, fromUserId := range fromUserIds {
var fromUser user.User
exists, err := sess.ID(fromUserId).Where(sqlstore.NotServiceAccountFilter(r.Store)).Get(&fromUser)
if err != nil {
return fmt.Errorf("could not find fromUser: %w", err)
}
if !exists {
fmt.Printf("user with id %d does not exist, skipping\n", fromUserId)
continue
}
// delete the user
delErr := r.Store.DeleteUserInSession(ctx, sess, &models.DeleteUserCommand{UserId: fromUserId})
if delErr != nil {
return fmt.Errorf("error during deletion of user: %w", delErr)
}
}
commitErr := sess.Commit()
if commitErr != nil {
return fmt.Errorf("could not commit operation for useridentification %s: %w", block, commitErr)
}
userStore := userimpl.ProvideStore(r.Store, setting.NewCfg())
updateMainCommand := &user.UpdateUserCommand{
UserID: intoUser.ID,
Login: strings.ToLower(intoUser.Login),
Email: strings.ToLower(intoUser.Email),
}
updateErr := userStore.Update(ctx, updateMainCommand)
if updateErr != nil {
return fmt.Errorf("could not update user: %w", updateErr)
}
return nil
}); err != nil {
return err
}
}
return nil
}
/*
hej@test.com+hej@test.com
all of the permissions, roles and ownership will be transferred to the user.
+ id: 1, email: hej@test.com, login: hej@test.com
these user(s) will be deleted and their permissions transferred.
- id: 2, email: HEJ@TEST.COM, login: HEJ@TEST.COM
- id: 3, email: hej@TEST.com, login: hej@TEST.com
*/
func (r *ConflictResolver) showChanges() {
if len(r.ValidUsers) == 0 {
fmt.Println("no changes will take place as we have no valid users.")
return
}
var b strings.Builder
for block, users := range r.Blocks {
if _, ok := r.DiscardedBlocks[block]; ok {
// skip block
continue
}
// looping since the users can come out of order (meaning the + and -)
var mainUser ConflictingUser
for _, u := range users {
if u.Direction == "+" {
mainUser = u
break
}
}
b.WriteString("Keep the following user.\n")
b.WriteString(fmt.Sprintf("%s\n", block))
b.WriteString(fmt.Sprintf("id: %s, email: %s, login: %s\n", mainUser.ID, mainUser.Email, mainUser.Login))
b.WriteString("\n\n")
b.WriteString("The following user(s) will be deleted.\n")
for _, user := range users {
if user.ID == mainUser.ID {
continue
}
// mergeable users
b.WriteString(fmt.Sprintf("id: %s, email: %s, login: %s\n", user.ID, user.Email, user.Login))
}
b.WriteString("\n\n")
}
logger.Info("\n\nChanges that will take place\n\n")
logger.Infof("%s", b.String())
}
// Formatter makes it possible for us to write to the terminal and to a file
// with different formats depending on the use case
type Formatter func(format string, a ...interface{}) string
func BoldFormatter(format string, a ...interface{}) string {
white := color.New(color.FgWhite)
whiteBold := white.Add(color.Bold)
return whiteBold.Sprintf(format, a...)
}
func shouldDiscardBlock(seenUsersInBlock map[string]string, block string, user ConflictingUser) bool {
// loop through users to see if we should skip this block
// we have some more tricky scenarios where we have more than two users that can have conflicts with each other
// we have made the approach to discard any users that we have seen
if _, ok := seenUsersInBlock[user.Id]; ok {
if _, ok := seenUsersInBlock[user.ID]; ok {
// we have seen the user in a different block than the current one
if seenUsersInBlock[user.Id] != block {
if seenUsersInBlock[user.ID] != block {
return true
}
}
seenUsersInBlock[user.Id] = block
seenUsersInBlock[user.ID] = block
return false
}
func (r *ConflictResolver) BuildConflictBlocks(f Formatter) {
// BuildConflictBlocks builds blocks of users where each block is a unique email/login
// NOTE: currently this function assumes that the users are in order of grouping already
func (r *ConflictResolver) BuildConflictBlocks(users ConflictingUsers, f Formatter) {
discardedBlocks := make(map[string]bool)
seenUsersToBlock := make(map[string]string)
blocks := make(map[string]ConflictingUsers)
for _, user := range r.Users {
for _, user := range users {
// conflict blocks are how we identify conflicts in the user base.
var conflictBlock string
if user.ConflictEmail != "" {
@ -165,7 +447,7 @@ func (r *ConflictResolver) BuildConflictBlocks(f Formatter) {
func contains(cu ConflictingUsers, target ConflictingUser) bool {
for _, u := range cu {
if u.Id == target.Id {
if u.ID == target.ID {
return true
}
}
@ -176,7 +458,7 @@ func (r *ConflictResolver) logDiscardedUsers() {
keys := make([]string, 0, len(r.DiscardedBlocks))
for block := range r.DiscardedBlocks {
for _, u := range r.Blocks[block] {
keys = append(keys, u.Id)
keys = append(keys, u.ID)
}
}
warn := color.YellowString("Note: We discarded some conflicts that have multiple conflicting types involved.")
@ -208,7 +490,7 @@ func (r *ConflictResolver) ToStringPresentation() string {
- id: 3, email: hej@TEST.com, login: hej@TEST.com
*/
startOfBlock := make(map[string]bool)
fileString := ""
var b strings.Builder
for block, users := range r.Blocks {
if _, ok := r.DiscardedBlocks[block]; ok {
// skip block
@ -216,76 +498,105 @@ func (r *ConflictResolver) ToStringPresentation() string {
}
for _, user := range users {
if !startOfBlock[block] {
fileString += fmt.Sprintf("%s\n", block)
b.WriteString(fmt.Sprintf("%s\n", block))
startOfBlock[block] = true
fileString += fmt.Sprintf("+ id: %s, email: %s, login: %s\n", user.Id, user.Email, user.Login)
b.WriteString(fmt.Sprintf("+ id: %s, email: %s, login: %s, last_seen_at: %s, auth_module: %s, conflict_email: %s, conflict_login: %s\n",
user.ID,
user.Email,
user.Login,
user.LastSeenAt,
user.AuthModule,
user.ConflictEmail,
user.ConflictLogin,
))
continue
}
// mergable users
fileString += fmt.Sprintf("- id: %s, email: %s, login: %s\n", user.Id, user.Email, user.Login)
// mergeable users
b.WriteString(fmt.Sprintf("- id: %s, email: %s, login: %s, last_seen_at: %s, auth_module: %s, conflict_email: %s, conflict_login: %s\n",
user.ID,
user.Email,
user.Login,
user.LastSeenAt,
user.AuthModule,
user.ConflictEmail,
user.ConflictLogin,
))
}
}
return fileString
return b.String()
}
type ConflictResolver struct {
Store *sqlstore.SQLStore
Config *setting.Cfg
Users ConflictingUsers
ValidUsers ConflictingUsers
Blocks map[string]ConflictingUsers
DiscardedBlocks map[string]bool
}
type ConflictingUser struct {
// IDENTIFIER
// TODO: should have conflict block in sql for performance and stability
Direction string `xorm:"direction"`
// FIXME: refactor change to correct type int64
Id string `xorm:"id"`
Email string `xorm:"email"`
Login string `xorm:"login"`
// FIXME: refactor change to correct type <>
LastSeenAt string `xorm:"last_seen_at"`
AuthModule string `xorm:"auth_module"`
// direction is the +/- which indicates if we should keep or delete the user;
// currently not really used for anything
Direction string `xorm:"direction"`
ID string `xorm:"id"`
Email string `xorm:"email"`
Login string `xorm:"login"`
LastSeenAt string `xorm:"last_seen_at"`
AuthModule string `xorm:"auth_module"`
ConflictEmail string `xorm:"conflict_email"`
ConflictLogin string `xorm:"conflict_login"`
}
// it is better to have a slice of values than a slice of pointers here,
// i.e. not `type ConflictingUsers []*ConflictingUser`
type ConflictingUsers []ConflictingUser
func (c *ConflictingUser) Marshal(filerow string) error {
// +/- id: 1, email: hej,
// example view of the file to ingest
// +/- id: 1, email: hej, auth_module: LDAP
trimmedSpaces := strings.ReplaceAll(filerow, " ", "")
if trimmedSpaces[0] == '+' {
c.Direction = "+"
} else if trimmedSpaces[0] == '-' {
c.Direction = "-"
} else {
return fmt.Errorf("unable to get which operation the user would receive")
return fmt.Errorf("unable to get which operation was chosen")
}
trimmed := strings.TrimLeft(trimmedSpaces, "+-")
values := strings.Split(trimmed, ",")
if len(values) != 5 {
// fmt errror
return fmt.Errorf("expected 5 values in entryrow")
if len(values) < 3 {
return fmt.Errorf("expected at least 3 values in entry row")
}
// expected fields
id := strings.Split(values[0], ":")
email := strings.Split(values[1], ":")
login := strings.Split(values[2], ":")
c.ID = id[1]
c.Email = email[1]
c.Login = login[1]
// trim the prefix instead of splitting on ':', since the timestamp itself contains colons, e.g. 2022-08-20:19:17:12
lastSeenAt := strings.TrimPrefix(values[3], "last_seen_at:")
authModule := strings.Split(values[4], ":")
// optional field
if len(authModule) < 2 {
c.AuthModule = ""
} else {
c.AuthModule = authModule[1]
}
// expected fields
c.Id = id[1]
c.Email = email[1]
c.Login = login[1]
c.LastSeenAt = lastSeenAt
// which conflict
conflictEmail := strings.Split(values[5], ":")
conflictLogin := strings.Split(values[6], ":")
if len(conflictEmail) < 2 {
c.ConflictEmail = ""
} else {
c.ConflictEmail = conflictEmail[1]
}
if len(conflictLogin) < 2 {
c.ConflictLogin = ""
} else {
c.ConflictLogin = conflictLogin[1]
}
return nil
}
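// Minimal usage sketch for Marshal (the row mirrors the generated file format;
// the values are made up):
//
//	var u ConflictingUser
//	row := "+ id: 4, email: test2, login: test2, last_seen_at: 2012-09-19T08:31:41Z, auth_module: , conflict_email: true, conflict_login: true"
//	if err := u.Marshal(row); err != nil {
//		// handle parse error
//	}
//	// u.Direction == "+", u.ID == "4", u.Email == "test2"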
@ -306,6 +617,7 @@ func GetUsersWithConflictingEmailsOrLogins(ctx *cli.Context, s *sqlstore.SQLStor
// sorts the users by their user identification and ids
func conflictingUserEntriesSQL(s *sqlstore.SQLStore) string {
userDialect := db.DB.GetDialect(s).Quote("user")
sqlQuery := `
SELECT DISTINCT
u1.id,
@ -314,12 +626,12 @@ func conflictingUserEntriesSQL(s *sqlstore.SQLStore) string {
u1.last_seen_at,
user_auth.auth_module,
( SELECT
'conflict_email'
'true'
FROM
` + userDialect + `
WHERE (LOWER(u1.email) = LOWER(u2.email)) AND(u1.email != u2.email)) AS conflict_email,
( SELECT
'conflict_login'
'true'
FROM
` + userDialect + `
WHERE (LOWER(u1.login) = LOWER(u2.login) AND(u1.login != u2.login))) AS conflict_login
@ -337,3 +649,21 @@ func notServiceAccount(ss *sqlstore.SQLStore) string {
return fmt.Sprintf("is_service_account = %s",
ss.Dialect.BooleanStr(false))
}
// confirm asks the user for a yes/no answer and
// returns the answer as a bool
func confirm(confirmPrompt string) bool {
var input string
logger.Infof("%s? [y|n]: ", confirmPrompt)
_, err := fmt.Scanln(&input)
if err != nil {
logger.Infof("could not parse input from user for confirmation")
return false
}
input = strings.ToLower(input)
if input == "y" || input == "yes" {
return true
}
return false
}

View File

@ -3,14 +3,250 @@ package commands
import (
"context"
"fmt"
"os"
"sort"
"testing"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/team/teamimpl"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/services/user"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
)
// "Skipping conflicting users test for mysql as it does make unique constraint case insensitive by default
const ignoredDatabase = "mysql"
func TestBuildConflictBlock(t *testing.T) {
type testBuildConflictBlock struct {
desc string
users []user.User
expectedBlock string
wantDiscardedBlock string
wantConflictUser *ConflictingUser
wantedNumberOfUsers int
}
testOrgID := 1
testCases := []testBuildConflictBlock{
{
desc: "should get one block with only 3 users",
users: []user.User{
{
Email: "ldap-editor",
Login: "ldap-editor",
OrgID: int64(testOrgID),
},
{
Email: "LDAP-EDITOR",
Login: "LDAP-EDITOR",
OrgID: int64(testOrgID),
},
{
Email: "overlapping conflict",
Login: "LDAP-editor",
OrgID: int64(testOrgID),
},
{
Email: "OVERLAPPING conflict",
Login: "no conflict",
OrgID: int64(testOrgID),
},
},
expectedBlock: "conflict: ldap-editor",
wantDiscardedBlock: "conflict: overlapping conflict",
wantedNumberOfUsers: 3,
},
{
desc: "should get conflict_email true and conflict_login empty string",
users: []user.User{
{
Email: "conflict@email",
Login: "login",
OrgID: int64(testOrgID),
},
{
Email: "conflict@EMAIL",
Login: "plainlogin",
OrgID: int64(testOrgID),
},
},
expectedBlock: "conflict: conflict@email",
wantedNumberOfUsers: 2,
wantConflictUser: &ConflictingUser{ConflictEmail: "true", ConflictLogin: ""},
},
{
desc: "should get conflict_email empty string and conflict_login true",
users: []user.User{
{
Email: "regular@email",
Login: "CONFLICTLOGIN",
OrgID: int64(testOrgID),
},
{
Email: "regular-no-conflict@email",
Login: "conflictlogin",
OrgID: int64(testOrgID),
},
},
expectedBlock: "conflict: conflictlogin",
wantedNumberOfUsers: 2,
wantConflictUser: &ConflictingUser{ConflictEmail: "", ConflictLogin: "true"},
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
// Restore after destructive operation
sqlStore := sqlstore.InitTestDB(t)
if sqlStore.GetDialect().DriverName() != ignoredDatabase {
for _, u := range tc.users {
cmd := user.CreateUserCommand{
Email: u.Email,
Name: u.Name,
Login: u.Login,
OrgID: int64(testOrgID),
}
_, err := sqlStore.CreateUser(context.Background(), cmd)
require.NoError(t, err)
}
m, err := GetUsersWithConflictingEmailsOrLogins(&cli.Context{Context: context.Background()}, sqlStore)
require.NoError(t, err)
r := ConflictResolver{Store: sqlStore}
r.BuildConflictBlocks(m, fmt.Sprintf)
require.Equal(t, tc.wantedNumberOfUsers, len(r.Blocks[tc.expectedBlock]))
if tc.wantDiscardedBlock != "" {
require.Equal(t, true, r.DiscardedBlocks[tc.wantDiscardedBlock])
}
if tc.wantConflictUser != nil {
for _, u := range m {
require.Equal(t, tc.wantConflictUser.ConflictEmail, u.ConflictEmail)
require.Equal(t, tc.wantConflictUser.ConflictLogin, u.ConflictLogin)
}
}
}
})
}
}
func TestBuildConflictBlockFromFileRepresentation(t *testing.T) {
type testBuildConflictBlock struct {
desc string
users []user.User
fileString string
expectedBlocks []string
expectedIdsInBlocks map[string][]string
}
testOrgID := 1
testCases := []testBuildConflictBlock{
{
desc: "should be able to parse the fileString containing the conflicts",
users: []user.User{
{
Email: "test",
Login: "test",
OrgID: int64(testOrgID),
},
{
Email: "TEST",
Login: "TEST",
OrgID: int64(testOrgID),
},
{
Email: "test2",
Login: "test2",
OrgID: int64(testOrgID),
},
{
Email: "TEST2",
Login: "TEST2",
OrgID: int64(testOrgID),
},
{
Email: "Test2",
Login: "Test2",
OrgID: int64(testOrgID),
},
},
fileString: `conflict: test
- id: 2, email: test, login: test, last_seen_at: 2012-09-19T08:31:20Z, auth_module: , conflict_email: true, conflict_login: true
+ id: 3, email: TEST, login: TEST, last_seen_at: 2012-09-19T08:31:29Z, auth_module: , conflict_email: true, conflict_login: true
conflict: test2
- id: 4, email: test2, login: test2, last_seen_at: 2012-09-19T08:31:41Z, auth_module: , conflict_email: true, conflict_login: true
+ id: 5, email: TEST2, login: TEST2, last_seen_at: 2012-09-19T08:31:51Z, auth_module: , conflict_email: true, conflict_login: true
- id: 6, email: Test2, login: Test2, last_seen_at: 2012-09-19T08:32:03Z, auth_module: , conflict_email: true, conflict_login: true`,
expectedBlocks: []string{"conflict: test", "conflict: test2"},
expectedIdsInBlocks: map[string][]string{"conflict: test": {"2", "3"}, "conflict: test2": {"4", "5", "6"}},
},
{
desc: "should be able to parse the fileString containing the conflicts 123",
users: []user.User{
{
Email: "saml-misi@example.org",
Login: "saml-misi",
OrgID: int64(testOrgID),
},
{
Email: "saml-misi@example",
Login: "saml-Misi",
OrgID: int64(testOrgID),
},
},
fileString: `conflict: saml-misi
+ id: 5, email: saml-misi@example.org, login: saml-misi, last_seen_at: 2022-09-22T12:00:49Z, auth_module: auth.saml, conflict_email: , conflict_login: true
- id: 15, email: saml-misi@example, login: saml-Misi, last_seen_at: 2012-09-26T11:31:32Z, auth_module: , conflict_email: , conflict_login: true`,
expectedBlocks: []string{"conflict: saml-misi"},
expectedIdsInBlocks: map[string][]string{"conflict: saml-misi": {"5", "15"}},
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
// Restore after destructive operation
sqlStore := sqlstore.InitTestDB(t)
if sqlStore.GetDialect().DriverName() != ignoredDatabase {
for _, u := range tc.users {
cmd := user.CreateUserCommand{
Email: u.Email,
Name: u.Name,
Login: u.Login,
OrgID: int64(testOrgID),
}
_, err := sqlStore.CreateUser(context.Background(), cmd)
require.NoError(t, err)
}
conflicts, err := GetUsersWithConflictingEmailsOrLogins(&cli.Context{Context: context.Background()}, sqlStore)
r := ConflictResolver{Users: conflicts, Store: sqlStore}
r.BuildConflictBlocks(conflicts, fmt.Sprintf)
require.NoError(t, err)
validErr := getValidConflictUsers(&r, []byte(tc.fileString))
require.NoError(t, validErr)
// test starts here
keys := make([]string, 0)
for k := range r.Blocks {
keys = append(keys, k)
}
sort.Strings(keys)
require.Equal(t, tc.expectedBlocks, keys)
// we want to validate the ids in the blocks
for _, block := range tc.expectedBlocks {
// checking for parsing of ids
conflictIds := []string{}
for _, u := range r.Blocks[block] {
conflictIds = append(conflictIds, u.ID)
}
require.Equal(t, tc.expectedIdsInBlocks[block], conflictIds)
}
}
})
}
}
func TestGetConflictingUsers(t *testing.T) {
type testListConflictingUsers struct {
desc string
@ -52,9 +288,6 @@ func TestGetConflictingUsers(t *testing.T) {
},
want: 2,
},
{
desc: "should be 5 conflicting users, each conflict gets 2 users",
users: []user.User{
@ -151,8 +384,7 @@ func TestGetConflictingUsers(t *testing.T) {
t.Run(tc.desc, func(t *testing.T) {
// Restore after destructive operation
sqlStore := sqlstore.InitTestDB(t)
// "Skipping conflicting users test for mysql as it does make unique constraint case insensitive by default
if sqlStore.GetDialect().DriverName() != "mysql" {
if sqlStore.GetDialect().DriverName() != ignoredDatabase {
for _, u := range tc.users {
cmd := user.CreateUserCommand{
Email: u.Email,
@ -175,82 +407,16 @@ func TestGetConflictingUsers(t *testing.T) {
}
}
func TestBuildConflictBlock(t *testing.T) {
type testBuildConflictBlock struct {
desc string
users []user.User
expectedBlock string
wantDiscardedBlock string
wantedNumberOfUsers int
}
testOrgID := 1
testCases := []testBuildConflictBlock{
{
desc: "should get one block with only 3 users",
users: []user.User{
{
Email: "ldap-editor",
Login: "ldap-editor",
OrgID: int64(testOrgID),
},
{
Email: "LDAP-EDITOR",
Login: "LDAP-EDITOR",
OrgID: int64(testOrgID),
},
{
Email: "overlapping conflict",
Login: "LDAP-editor",
OrgID: int64(testOrgID),
},
{
Email: "OVERLAPPING conflict",
Login: "no conflict",
OrgID: int64(testOrgID),
},
},
expectedBlock: "conflict: ldap-editor",
wantDiscardedBlock: "conflict: overlapping conflict",
wantedNumberOfUsers: 3,
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
// Restore after destructive operation
sqlStore := sqlstore.InitTestDB(t)
// "Skipping conflicting users test for mysql as it does make unique constraint case insensitive by default
if sqlStore.GetDialect().DriverName() != "mysql" {
for _, u := range tc.users {
cmd := user.CreateUserCommand{
Email: u.Email,
Name: u.Name,
Login: u.Login,
OrgID: int64(testOrgID),
}
_, err := sqlStore.CreateUser(context.Background(), cmd)
require.NoError(t, err)
}
m, err := GetUsersWithConflictingEmailsOrLogins(&cli.Context{Context: context.Background()}, sqlStore)
require.NoError(t, err)
r := ConflictResolver{Users: m}
r.BuildConflictBlocks(fmt.Sprintf)
require.Equal(t, tc.wantedNumberOfUsers, len(r.Blocks[tc.expectedBlock]))
require.Equal(t, true, r.DiscardedBlocks[tc.wantDiscardedBlock])
}
})
}
}
func TestGenerateConflictingUsersFile(t *testing.T) {
type testGenerateConflictUsers struct {
desc string
users []user.User
expectedDiscardedBlock string
expectedBlocks []string
expectedEmailInBlocks map[string][]string
}
testOrgID := 1
testCases := []testGenerateConflictUsers{
{
desc: "should get conflicting users",
users: []user.User{
@ -290,10 +456,17 @@ func TestGenerateConflictingUsersFile(t *testing.T) {
OrgID: int64(testOrgID),
},
},
expectedBlocks: []string{"conflict: ldap-admin", "conflict: user_duplicate_test_login", "conflict: oauth-admin@example.org", "conflict: user2"},
expectedEmailInBlocks: map[string][]string{
"conflict: ldap-admin": {"ldap-admin", "xo"},
"conflict: user_duplicate_test_login": {"user1", "user2"},
"conflict: oauth-admin@example.org": {"oauth-admin@EXAMPLE.ORG", "oauth-admin@example.org"},
"conflict: user2": {"USER2", "user2"},
},
expectedDiscardedBlock: "conflict: user2",
},
{
desc: "should get one block with only 3 users",
desc: "should get only one block with 3 users",
users: []user.User{
{
Email: "ldap-editor",
@ -311,19 +484,15 @@ func TestGenerateConflictingUsersFile(t *testing.T) {
OrgID: int64(testOrgID),
},
},
expectedBlocks: []string{"conflict: ldap-editor"},
expectedEmailInBlocks: map[string][]string{"conflict: ldap-editor": {"ldap-editor", "LDAP-EDITOR", "No confli"}},
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
// Restore after destructive operation
sqlStore := sqlstore.InitTestDB(t)
// "Skipping conflicting users test for mysql as it does make unique constraint case insensitive by default
if sqlStore.GetDialect().DriverName() != "mysql" {
if sqlStore.GetDialect().DriverName() != ignoredDatabase {
for _, u := range tc.users {
cmd := user.CreateUserCommand{
Email: u.Email,
@ -336,49 +505,280 @@ func TestGenerateConflictingUsersFile(t *testing.T) {
}
m, err := GetUsersWithConflictingEmailsOrLogins(&cli.Context{Context: context.Background()}, sqlStore)
require.NoError(t, err)
r := ConflictResolver{Store: sqlStore}
r.BuildConflictBlocks(m, fmt.Sprintf)
if tc.expectedDiscardedBlock != "" {
require.Equal(t, true, r.DiscardedBlocks[tc.expectedDiscardedBlock])
}
// test starts here
keys := make([]string, 0)
for k := range r.Blocks {
keys = append(keys, k)
}
expectedBlocks := tc.expectedBlocks
sort.Strings(keys)
sort.Strings(expectedBlocks)
require.Equal(t, expectedBlocks, keys)
// we want to validate the ids in the blocks
for _, block := range tc.expectedBlocks {
// checking for parsing of ids
conflictEmails := []string{}
for _, u := range r.Blocks[block] {
conflictEmails = append(conflictEmails, u.Email)
}
expectedEmailsInBlock := tc.expectedEmailInBlocks[block]
sort.Strings(conflictEmails)
sort.Strings(expectedEmailsInBlock)
require.Equal(t, expectedEmailsInBlock, conflictEmails)
}
}
})
}
}
func TestRunValidateConflictUserFile(t *testing.T) {
t.Run("should validate file thats gets created", func(t *testing.T) {
// Restore after destructive operation
sqlStore := sqlstore.InitTestDB(t)
const testOrgID int64 = 1
if sqlStore.GetDialect().DriverName() != ignoredDatabase {
// add additional user with conflicting login where DOMAIN is upper case
dupUserLogincmd := user.CreateUserCommand{
Email: "userduplicatetest1@test.com",
Login: "user_duplicate_test_1_login",
OrgID: testOrgID,
}
_, err := sqlStore.CreateUser(context.Background(), dupUserLogincmd)
require.NoError(t, err)
dupUserEmailcmd := user.CreateUserCommand{
Email: "USERDUPLICATETEST1@TEST.COM",
Login: "USER_DUPLICATE_TEST_1_LOGIN",
OrgID: testOrgID,
}
_, err = sqlStore.CreateUser(context.Background(), dupUserEmailcmd)
require.NoError(t, err)
// get users
conflictUsers, err := GetUsersWithConflictingEmailsOrLogins(&cli.Context{Context: context.Background()}, sqlStore)
require.NoError(t, err)
r := ConflictResolver{Store: sqlStore}
r.BuildConflictBlocks(conflictUsers, fmt.Sprintf)
tmpFile, err := generateConflictUsersFile(&r)
require.NoError(t, err)
b, err := os.ReadFile(tmpFile.Name())
require.NoError(t, err)
validErr := getValidConflictUsers(&r, b)
require.NoError(t, validErr)
require.Equal(t, 2, len(r.ValidUsers))
}
})
}
func TestMergeUser(t *testing.T) {
t.Run("should be able to merge user", func(t *testing.T) {
// Restore after destructive operation
sqlStore := sqlstore.InitTestDB(t)
teamSvc := teamimpl.ProvideService(sqlStore, setting.NewCfg())
team1, err := teamSvc.CreateTeam("team1 name", "", 1)
require.Nil(t, err)
const testOrgID int64 = 1
if sqlStore.GetDialect().DriverName() != ignoredDatabase {
// add additional user with conflicting login where DOMAIN is upper case
// the order of adding the conflict matters
dupUserLogincmd := user.CreateUserCommand{
Email: "userduplicatetest1@test.com",
Name: "user name 1",
Login: "user_duplicate_test_1_login",
OrgID: testOrgID,
}
_, err := sqlStore.CreateUser(context.Background(), dupUserLogincmd)
require.NoError(t, err)
dupUserEmailcmd := user.CreateUserCommand{
Email: "USERDUPLICATETEST1@TEST.COM",
Name: "user name 1",
Login: "USER_DUPLICATE_TEST_1_LOGIN",
OrgID: testOrgID,
}
userWithUpperCase, err := sqlStore.CreateUser(context.Background(), dupUserEmailcmd)
require.NoError(t, err)
// this is the user we want to update to another team
err = teamSvc.AddTeamMember(userWithUpperCase.ID, testOrgID, team1.Id, false, 0)
require.NoError(t, err)
// get users
conflictUsers, err := GetUsersWithConflictingEmailsOrLogins(&cli.Context{Context: context.Background()}, sqlStore)
require.NoError(t, err)
r := ConflictResolver{Store: sqlStore}
r.BuildConflictBlocks(conflictUsers, fmt.Sprintf)
tmpFile, err := generateConflictUsersFile(&r)
require.NoError(t, err)
// validation to get newConflicts
// edited file
b, err := os.ReadFile(tmpFile.Name())
require.NoError(t, err)
validErr := getValidConflictUsers(&r, b)
require.NoError(t, validErr)
require.Equal(t, 2, len(r.ValidUsers))
// test starts here
err = r.MergeConflictingUsers(context.Background())
require.NoError(t, err)
// user with uppercase email should not exist
query := &models.GetUserByIdQuery{Id: userWithUpperCase.ID}
err = sqlStore.GetUserById(context.Background(), query)
require.ErrorIs(t, err, user.ErrUserNotFound)
}
})
}
func TestMergeUserFromNewFileInput(t *testing.T) {
t.Run("should be able to merge users after choosing a different user to keep", func(t *testing.T) {
// Restore after destructive operation
sqlStore := sqlstore.InitTestDB(t)
type testBuildConflictBlock struct {
desc string
users []user.User
fileString string
expectedBlocks []string
expectedIdsInBlocks map[string][]string
}
testOrgID := 1
m := make(map[string][]string)
conflict1 := "conflict: test"
conflict2 := "conflict: test2"
m[conflict1] = []string{"2", "3"}
m[conflict2] = []string{"4", "5", "6"}
testCases := []testBuildConflictBlock{
{
desc: "should be able to parse the fileString containing the conflicts",
users: []user.User{
{
Email: "TEST",
Login: "TEST",
OrgID: int64(testOrgID),
},
{
Email: "test",
Login: "test",
OrgID: int64(testOrgID),
},
{
Email: "test2",
Login: "test2",
OrgID: int64(testOrgID),
},
{
Email: "TEST2",
Login: "TEST2",
OrgID: int64(testOrgID),
},
{
Email: "Test2",
Login: "Test2",
OrgID: int64(testOrgID),
},
},
fileString: `conflict: test
- id: 1, email: test, login: test, last_seen_at: 2012-09-19T08:31:20Z, auth_module:, conflict_email: true, conflict_login: true
+ id: 2, email: TEST, login: TEST, last_seen_at: 2012-09-19T08:31:29Z, auth_module:, conflict_email: true, conflict_login: true
conflict: test2
- id: 3, email: test2, login: test2, last_seen_at: 2012-09-19T08:31:41Z, auth_module: , conflict_email: true, conflict_login: true
+ id: 4, email: TEST2, login: TEST2, last_seen_at: 2012-09-19T08:31:51Z, auth_module: , conflict_email: true, conflict_login: true
- id: 5, email: Test2, login: Test2, last_seen_at: 2012-09-19T08:32:03Z, auth_module: , conflict_email: true, conflict_login: true`,
expectedBlocks: []string{"conflict: test", "conflict: test2"},
expectedIdsInBlocks: m,
},
}
for _, tc := range testCases {
if sqlStore.GetDialect().DriverName() != ignoredDatabase {
for _, u := range tc.users {
cmd := user.CreateUserCommand{
Email: u.Email,
Name: u.Name,
Login: u.Login,
OrgID: int64(testOrgID),
}
_, err := sqlStore.CreateUser(context.Background(), cmd)
require.NoError(t, err)
}
// add additional user with conflicting login where DOMAIN is upper case
conflictUsers, err := GetUsersWithConflictingEmailsOrLogins(&cli.Context{Context: context.Background()}, sqlStore)
require.NoError(t, err)
r := ConflictResolver{Store: sqlStore}
r.BuildConflictBlocks(conflictUsers, fmt.Sprintf)
require.NoError(t, err)
// validation to get newConflicts
// edited file
// b, err := os.ReadFile(tmpFile.Name())
// mocked file input
b := tc.fileString
require.NoError(t, err)
validErr := getValidConflictUsers(&r, []byte(b))
require.NoError(t, validErr)
// test starts here
err = r.MergeConflictingUsers(context.Background())
require.NoError(t, err)
}
}
})
}
func TestMarshalConflictUser(t *testing.T) {
// TODO: add more testcases
testCases := []struct {
name string
inputRow string
expectedUser ConflictingUser
}{
{
name: "should be able to marshal expected input row",
inputRow: "+ id: 4, email: userduplicatetest1@test.com, login: userduplicatetest1, last_seen_at: 2012-07-26T16:08:11Z, auth_module: auth.saml, conflict_email: true, conflict_login: ",
expectedUser: ConflictingUser{
Direction: "+",
ID: "4",
Email: "userduplicatetest1@test.com",
Login: "userduplicatetest1",
LastSeenAt: "2012-07-26T16:08:11Z",
AuthModule: "auth.saml",
ConflictEmail: "true",
ConflictLogin: "",
},
},
{
name: "should be able to marshal expected input row",
inputRow: "+ id: 1, email: userduplicatetest1@test.com, login: user_duplicate_test_1_login, last_seen_at: 2012-07-26T16:08:11Z, auth_module: , conflict_email: , conflict_login: true",
expectedUser: ConflictingUser{
Direction: "+",
ID: "1",
Email: "userduplicatetest1@test.com",
Login: "user_duplicate_test_1_login",
LastSeenAt: "2012-07-26T16:08:11Z",
AuthModule: "",
ConflictEmail: "",
ConflictLogin: "true",
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
user := ConflictingUser{}
err := user.Marshal(tc.inputRow)
require.NoError(t, err)
require.Equal(t, tc.expectedUser.Direction, user.Direction)
require.Equal(t, tc.expectedUser.ID, user.ID)
require.Equal(t, tc.expectedUser.Email, user.Email)
require.Equal(t, tc.expectedUser.Login, user.Login)
require.Equal(t, tc.expectedUser.LastSeenAt, user.LastSeenAt)
require.Equal(t, tc.expectedUser.ConflictEmail, user.ConflictEmail)
require.Equal(t, tc.expectedUser.ConflictLogin, user.ConflictLogin)
})
}
}

View File

@ -17,10 +17,15 @@ import (
func TestPasswordMigrationCommand(t *testing.T) {
// setup datasources with password, basic_auth and none
store := sqlstore.InitTestDB(t)
err := store.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
passwordMigration(t, sess, store)
return nil
})
require.NoError(t, err)
}
func passwordMigration(t *testing.T, session *sqlstore.DBSession, sqlstore *sqlstore.SQLStore) {
ds := []*datasources.DataSource{
{Type: "influxdb", Name: "influxdb", Password: "foobar", Uid: "influx"},
{Type: "graphite", Name: "graphite", BasicAuthPassword: "foobar", Uid: "graphite"},

View File

@ -0,0 +1,263 @@
package codegen
import (
"bytes"
"go/format"
"go/parser"
"go/token"
"testing"
"github.com/matryer/is"
"golang.org/x/tools/go/ast/astutil"
)
func TestPrefixDropper(t *testing.T) {
tt := map[string]struct {
in, out string
skip bool
}{
"basic": {
in: `package foo
type Foo struct {
Id int64
Ref FooThing
}
type FooThing struct {
Id int64
}`,
out: `package foo
type Model struct {
Id int64
Ref Thing
}
type Thing struct {
Id int64
}
`,
},
"pointer": {
in: `package foo
type Foo struct {
Id int64
Ref *FooThing
}
type FooThing struct {
Id int64
}`,
out: `package foo
type Model struct {
Id int64
Ref *Thing
}
type Thing struct {
Id int64
}
`,
},
"sliceref": {
in: `package foo
type Foo struct {
Id int64
Ref []FooThing
PRef []*FooThing
SPRef *[]FooThing
}
type FooThing struct {
Id int64
}`,
out: `package foo
type Model struct {
Id int64
Ref []Thing
PRef []*Thing
SPRef *[]Thing
}
type Thing struct {
Id int64
}
`,
},
"mapref": {
in: `package foo
type Foo struct {
Id int64
KeyRef map[FooThing]string
ValRef map[string]FooThing
BothRef map[FooThing]FooThing
}
type FooThing struct {
Id int64
}`,
out: `package foo
type Model struct {
Id int64
KeyRef map[Thing]string
ValRef map[string]Thing
BothRef map[Thing]Thing
}
type Thing struct {
Id int64
}
`,
},
"pmapref": {
in: `package foo
type Foo struct {
Id int64
KeyRef map[*FooThing]string
ValRef map[string]*FooThing
BothRef map[*FooThing]*FooThing
PKeyRef *map[*FooThing]string
}
type FooThing struct {
Id int64
}`,
out: `package foo
type Model struct {
Id int64
KeyRef map[*Thing]string
ValRef map[string]*Thing
BothRef map[*Thing]*Thing
PKeyRef *map[*Thing]string
}
type Thing struct {
Id int64
}
`,
},
"ignore-fieldname": {
in: `package foo
type Foo struct {
Id int64
FooRef []string
}`,
out: `package foo
type Model struct {
Id int64
FooRef []string
}
`,
},
"const": {
in: `package foo
const one FooThing = "boop"
const (
two FooThing = "boop"
three FooThing = "boop"
)
type FooThing string
`,
out: `package foo
const one Thing = "boop"
const (
two Thing = "boop"
three Thing = "boop"
)
type Thing string
`,
},
"var": {
in: `package foo
var one FooThing = "boop"
var (
two FooThing = "boop"
three FooThing = "boop"
)
type FooThing string
`,
out: `package foo
var one Thing = "boop"
var (
two Thing = "boop"
three Thing = "boop"
)
type Thing string
`,
},
"varp": {
in: `package foo
var one *FooThing = "boop"
var (
two []FooThing = []FooThing{"boop"}
three map[FooThing]string = map[FooThing]string{ "beep": "boop" }
)
type FooThing string
`,
out: `package foo
var one *Thing = "boop"
var (
two []Thing = []Thing{"boop"}
three map[Thing]string = map[Thing]string{ "beep": "boop" }
)
type Thing string
`,
// Skip this one for now - there's currently no codegen that constructs instances
// of objects, only types, so we shouldn't encounter this case.
skip: true,
},
}
for name, it := range tt {
item := it
t.Run(name, func(t *testing.T) {
if item.skip {
t.Skip()
}
is := is.New(t)
fset := token.NewFileSet()
inf, err := parser.ParseFile(fset, "input.go", item.in, parser.ParseComments)
if err != nil {
t.Fatal(err)
}
drop := makePrefixDropper("Foo", "Model")
astutil.Apply(inf, drop, nil)
buf := new(bytes.Buffer)
err = format.Node(buf, fset, inf)
if err != nil {
t.Fatal(err)
}
is.Equal(item.out, buf.String())
})
}
}

View File

@ -21,6 +21,7 @@ import (
"github.com/grafana/grafana/pkg/cuectx"
"github.com/grafana/thema"
"github.com/grafana/thema/encoding/openapi"
"golang.org/x/tools/go/ast/astutil"
)
// CoremodelDeclaration contains the results of statically analyzing a Grafana
@ -279,30 +280,77 @@ type prefixDropper struct {
rxpsuff *regexp.Regexp
}
func makePrefixDropper(str, base string) astutil.ApplyFunc {
return (&prefixDropper{
str: str,
base: base,
rxpsuff: regexp.MustCompile(fmt.Sprintf(`%s([a-zA-Z_]*)`, str)),
rxp: regexp.MustCompile(fmt.Sprintf(`%s([\s.,;-])`, str)),
}).applyfunc
}
func depoint(e ast.Expr) ast.Expr {
if star, is := e.(*ast.StarExpr); is {
return star.X
}
return e
}
func (d prefixDropper) applyfunc(c *astutil.Cursor) bool {
n := c.Node()
// fmt.Printf("%T %s\n", c.Node(), ast.Print(nil, c.Node()))
switch x := n.(type) {
case *ast.ValueSpec:
// fmt.Printf("%T %s\n", c.Node(), ast.Print(nil, c.Node()))
d.handleExpr(x.Type)
for _, id := range x.Names {
d.do(id)
}
case *ast.TypeSpec:
// Always do typespecs
d.do(x.Name)
case *ast.Field:
// Don't rename struct fields. We just want to rename type declarations, and
// field value specifications that reference those types.
d.handleExpr(x.Type)
// return false
case *ast.CommentGroup:
for _, c := range x.List {
c.Text = d.rxp.ReplaceAllString(c.Text, d.base+"$1")
c.Text = d.rxpsuff.ReplaceAllString(c.Text, "$1")
}
}
return true
}
func (d prefixDropper) handleExpr(e ast.Expr) {
// Deref a StarExpr, if there is one
expr := depoint(e)
switch x := expr.(type) {
case *ast.Ident:
d.do(x)
case *ast.ArrayType:
if id, is := depoint(x.Elt).(*ast.Ident); is {
d.do(id)
}
case *ast.MapType:
if id, is := depoint(x.Key).(*ast.Ident); is {
d.do(id)
}
if id, is := depoint(x.Value).(*ast.Ident); is {
d.do(id)
}
}
}
func (d prefixDropper) do(n *ast.Ident) {
if n.Name != d.str {
n.Name = strings.TrimPrefix(n.Name, d.str)
} else {
n.Name = d.base
}
}
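// Illustrative usage sketch (not part of the change itself): the ApplyFunc
// returned by makePrefixDropper is meant to be handed to astutil.Apply, which
// is exactly what postprocessGoFile and TestPrefixDropper do. Here, "file" is
// a hypothetical *ast.File parsed from a generated source file.
//
//	drop := makePrefixDropper("Foo", "Model")
//	astutil.Apply(file, drop, nil)
//	// "Foo" is renamed to "Model"; other "Foo..." identifiers lose the prefix.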
// GenerateCoremodelRegistry produces Go files that define a registry with

View File

@ -3,7 +3,6 @@ package codegen
import (
"bytes"
"fmt"
"go/ast"
"go/format"
"go/parser"
"go/token"
@ -11,12 +10,13 @@ import (
"path/filepath"
"strings"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/imports"
)
type genGoFile struct {
path string
walker astutil.ApplyFunc
in []byte
}
@ -30,7 +30,7 @@ func postprocessGoFile(cfg genGoFile) ([]byte, error) {
}
if cfg.walker != nil {
astutil.Apply(gf, cfg.walker, nil)
err = format.Node(buf, fset, gf)
if err != nil {

View File

@ -0,0 +1,54 @@
package playlist
import (
"github.com/grafana/thema"
)
thema.#Lineage
name: "playlist"
seqs: [
{
schemas: [
{//0.0
// Unique playlist identifier for internal use, set by Grafana.
id: int64 @grafana(decisionNeeded)
// Unique playlist identifier. Generated on creation, either by the
// creator of the playlist or by the application.
uid: string
// Name of the playlist.
name: string
// Interval sets the time between switching views in a playlist.
// FIXME: Is this based on a standardized format or what options are available? Can datemath be used?
interval: string | *"5m"
// The ordered list of items that the playlist will iterate over.
items?: [...#PlaylistItem]
///////////////////////////////////////
// Definitions (referenced above) are declared below
#PlaylistItem: {
// FIXME: The prefixDropper removes playlist from playlist_id, that doesn't work for us since it'll mean we'll have Id twice.
// ID of the playlist item for internal use by Grafana. Deprecated.
id: int64 @grafana(decisionNeeded)
// PlaylistID for the playlist containing the item. Deprecated.
playlistid: int64 @grafana(decisionNeeded)
// Type of the item.
type: "dashboard_by_uid" | "dashboard_by_id" | "dashboard_by_tag"
// Value depends on type and describes the playlist item.
//
// - dashboard_by_id: The value is an internal numerical identifier set by Grafana. This
// is not portable as the numerical identifier is non-deterministic between different instances.
// Will be replaced by dashboard_by_uid in the future.
// - dashboard_by_tag: The value is a tag which is set on any number of dashboards. All
// dashboards behind the tag will be added to the playlist.
value: string
// Title is the human-readable identifier for the playlist item.
title: string @grafana(decisionNeeded)
// Order is the position in the list for the item. Deprecated.
order: int64 | *0 @grafana(decisionNeeded)
}
}
]
}
]

View File

@ -0,0 +1,147 @@
// This file is autogenerated. DO NOT EDIT.
//
// Generated by pkg/framework/coremodel/gen.go
//
// Derived from the Thema lineage declared in pkg/coremodel/playlist/coremodel.cue
//
// Run `make gen-cue` from repository root to regenerate.
package playlist
import (
"embed"
"path/filepath"
"github.com/grafana/grafana/pkg/cuectx"
"github.com/grafana/grafana/pkg/framework/coremodel"
"github.com/grafana/thema"
)
// Defines values for PlaylistItemType.
const (
PlaylistItemTypeDashboardById PlaylistItemType = "dashboard_by_id"
PlaylistItemTypeDashboardByTag PlaylistItemType = "dashboard_by_tag"
PlaylistItemTypeDashboardByUid PlaylistItemType = "dashboard_by_uid"
)
// Model is the Go representation of a playlist.
//
// THIS TYPE IS INTENDED FOR INTERNAL USE BY THE GRAFANA BACKEND, AND IS SUBJECT TO BREAKING CHANGES.
// Equivalent Go types at stable import paths are provided in https://github.com/grafana/grok.
type Model struct {
// Unique playlist identifier for internal use, set by Grafana.
Id int64 `json:"id"`
// Interval sets the time between switching views in a playlist.
// FIXME: Is this based on a standardized format or what options are available? Can datemath be used?
Interval string `json:"interval"`
// The ordered list of items that the playlist will iterate over.
Items *[]PlaylistItem `json:"items,omitempty"`
// Name of the playlist.
Name string `json:"name"`
// Unique playlist identifier. Generated on creation, either by the
// creator of the playlist or by the application.
Uid string `json:"uid"`
}
// PlaylistItem is the Go representation of a playlist.Item.
//
// THIS TYPE IS INTENDED FOR INTERNAL USE BY THE GRAFANA BACKEND, AND IS SUBJECT TO BREAKING CHANGES.
// Equivalent Go types at stable import paths are provided in https://github.com/grafana/grok.
type PlaylistItem struct {
// FIXME: The prefixDropper removes playlist from playlist_id, that doesn't work for us since it'll mean we'll have Id twice.
// ID of the playlist item for internal use by Grafana. Deprecated.
Id int64 `json:"id"`
// Order is the position in the list for the item. Deprecated.
Order int `json:"order"`
// ID for the playlist containing the item. Deprecated.
Playlistid int64 `json:"playlistid"`
// Title is the human-readable identifier for the playlist item.
Title string `json:"title"`
// Type of the item.
Type PlaylistItemType `json:"type"`
// Value depends on type and describes the playlist item.
//
// - dashboard_by_id: The value is an internal numerical identifier set by Grafana. This
// is not portable as the numerical identifier is non-deterministic between different instances.
// Will be replaced by dashboard_by_uid in the future.
// - dashboard_by_tag: The value is a tag which is set on any number of dashboards. All
// dashboards behind the tag will be added to the playlist.
Value string `json:"value"`
}
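// Illustrative example (an assumption, not generated output): a PlaylistItem
// selecting dashboards by tag would serialize roughly as
//
//	{"id": 0, "order": 1, "playlistid": 1, "title": "Prod dashboards", "type": "dashboard_by_tag", "value": "production"}
//
// where "production" is a hypothetical tag shared by the target dashboards.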
// Type of the item.
//
// THIS TYPE IS INTENDED FOR INTERNAL USE BY THE GRAFANA BACKEND, AND IS SUBJECT TO BREAKING CHANGES.
// Equivalent Go types at stable import paths are provided in https://github.com/grafana/grok.
type PlaylistItemType string
//go:embed coremodel.cue
var cueFS embed.FS
// The current version of the coremodel schema, as declared in coremodel.cue.
// This version determines what schema version is returned from [Coremodel.CurrentSchema],
// and which schema version is used for code generation within the grafana/grafana repository.
//
// The code generator ensures that this is always the latest Thema schema version.
var currentVersion = thema.SV(0, 0)
// Lineage returns the Thema lineage representing a Grafana playlist.
//
// The lineage is the canonical specification of the current playlist schema,
// all prior schema versions, and the mappings that allow migration between
// schema versions.
func Lineage(lib thema.Library, opts ...thema.BindOption) (thema.Lineage, error) {
return cuectx.LoadGrafanaInstancesWithThema(filepath.Join("pkg", "coremodel", "playlist"), cueFS, lib, opts...)
}
var _ thema.LineageFactory = Lineage
var _ coremodel.Interface = &Coremodel{}
// Coremodel contains the foundational schema declaration for playlists.
// It implements coremodel.Interface.
type Coremodel struct {
lin thema.Lineage
}
// Lineage returns the canonical playlist Lineage.
func (c *Coremodel) Lineage() thema.Lineage {
return c.lin
}
// CurrentSchema returns the current (latest) playlist Thema schema.
func (c *Coremodel) CurrentSchema() thema.Schema {
return thema.SchemaP(c.lin, currentVersion)
}
// GoType returns a pointer to an empty Go struct that corresponds to
// the current Thema schema.
func (c *Coremodel) GoType() interface{} {
return &Model{}
}
// New returns a new instance of the playlist coremodel.
//
// Note that this function does not cache, and initially loading a Thema lineage
// can be expensive. As such, the Grafana backend should prefer to access this
// coremodel through a registry (pkg/framework/coremodel/registry), which does cache.
func New(lib thema.Library) (*Coremodel, error) {
lin, err := Lineage(lib)
if err != nil {
return nil, err
}
return &Coremodel{
lin: lin,
}, nil
}
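// Illustrative usage sketch (an assumption about call sites): wiring that
// already holds a thema.Library can construct the coremodel directly and read
// its latest schema; lib is assumed to come from the cuectx wiring.
//
//	cm, err := playlist.New(lib)
//	if err != nil {
//		return err
//	}
//	sch := cm.CurrentSchema() // currently thema.SV(0, 0)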

View File

@ -10,6 +10,7 @@ import (
"fmt"
"github.com/grafana/grafana/pkg/coremodel/dashboard"
"github.com/grafana/grafana/pkg/coremodel/playlist"
"github.com/grafana/grafana/pkg/coremodel/pluginmeta"
"github.com/grafana/grafana/pkg/framework/coremodel"
"github.com/grafana/thema"
@ -27,12 +28,14 @@ import (
type Base struct {
all []coremodel.Interface
dashboard *dashboard.Coremodel
playlist *playlist.Coremodel
pluginmeta *pluginmeta.Coremodel
}
// type guards
var (
_ coremodel.Interface = &dashboard.Coremodel{}
_ coremodel.Interface = &playlist.Coremodel{}
_ coremodel.Interface = &pluginmeta.Coremodel{}
)
@ -42,6 +45,12 @@ func (b *Base) Dashboard() *dashboard.Coremodel {
return b.dashboard
}
// Playlist returns the playlist coremodel. The return value is guaranteed to
// implement coremodel.Interface.
func (b *Base) Playlist() *playlist.Coremodel {
return b.playlist
}
// Pluginmeta returns the pluginmeta coremodel. The return value is guaranteed to
// implement coremodel.Interface.
func (b *Base) Pluginmeta() *pluginmeta.Coremodel {
@ -58,6 +67,12 @@ func doProvideBase(lib thema.Library) *Base {
}
reg.all = append(reg.all, reg.dashboard)
reg.playlist, err = playlist.New(lib)
if err != nil {
panic(fmt.Sprintf("error while initializing playlist coremodel: %s", err))
}
reg.all = append(reg.all, reg.playlist)
reg.pluginmeta, err = pluginmeta.New(lib)
if err != nil {
panic(fmt.Sprintf("error while initializing pluginmeta coremodel: %s", err))

View File

@ -54,36 +54,38 @@ func (dc *databaseCache) internalRunGC() {
func (dc *databaseCache) Get(ctx context.Context, key string) (interface{}, error) {
cacheHit := CacheData{}
item := &cachedItem{}
err := dc.SQLStore.WithDbSession(ctx, func(session *sqlstore.DBSession) error {
exist, err := session.Where("cache_key= ?", key).Get(&cacheHit)
if err != nil {
return err
}
if !exist {
return ErrCacheItemNotFound
}
if cacheHit.Expires > 0 {
existedButExpired := getTime().Unix()-cacheHit.CreatedAt >= cacheHit.Expires
if existedButExpired {
err = dc.Delete(ctx, key) // ignore this error since we will return `ErrCacheItemNotFound` anyway
if err != nil {
dc.log.Debug("Deletion of expired key failed: %v", err)
}
return ErrCacheItemNotFound
}
}
if err = decodeGob(cacheHit.Data, item); err != nil {
return err
}
return nil
})
return item.Val, err
}
func (dc *databaseCache) Set(ctx context.Context, key string, value interface{}, expire time.Duration) error {
@ -93,34 +95,33 @@ func (dc *databaseCache) Set(ctx context.Context, key string, value interface{},
return err
}
return dc.SQLStore.WithDbSession(ctx, func(session *sqlstore.DBSession) error {
var expiresInSeconds int64
if expire != 0 {
expiresInSeconds = int64(expire) / int64(time.Second)
}
// attempt to insert the key
sql := `INSERT INTO cache_data (cache_key,data,created_at,expires) VALUES(?,?,?,?)`
_, err := session.Exec(sql, key, data, getTime().Unix(), expiresInSeconds)
if err != nil {
// attempt to update if a unique constraint violation or a deadlock (for MySQL) occurs;
// if the update fails, propagate the error,
// which eventually will result in a key that is not finally set,
// but since it's a cache, that does not do much harm
if dc.SQLStore.Dialect.IsUniqueConstraintViolation(err) || dc.SQLStore.Dialect.IsDeadlock(err) {
sql := `UPDATE cache_data SET data=?, created_at=?, expires=? WHERE cache_key=?`
_, err = session.Exec(sql, data, getTime().Unix(), expiresInSeconds, key)
if err != nil && dc.SQLStore.Dialect.IsDeadlock(err) {
// most probably somebody else is upserting the key
// so it is safe enough not to propagate this error
return nil
}
}
}
return err
})
}
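// Illustrative usage sketch (not part of the change): callers treat this as a
// plain key/value store; a zero expire means the entry never expires, and the
// stored value must be gob-encodable. The key and payload below are hypothetical.
//
//	if err := dc.Set(ctx, "session:abc", payload, time.Minute); err != nil {
//		// handle write failure
//	}
//	v, err := dc.Get(ctx, "session:abc") // ErrCacheItemNotFound once expired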
func (dc *databaseCache) Delete(ctx context.Context, key string) error {

View File

@ -8,8 +8,6 @@ import (
"time"
"github.com/go-kit/log/level"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/setting"
"go.etcd.io/etcd/api/v3/version"
jaegerpropagator "go.opentelemetry.io/contrib/propagators/jaeger"
"go.opentelemetry.io/otel"
@ -23,6 +21,9 @@ import (
tracesdk "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
trace "go.opentelemetry.io/otel/trace"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/setting"
)
const (
@ -34,21 +35,6 @@ const (
w3cPropagator string = "w3c"
)
type Opentelemetry struct {
enabled string
address string
@ -307,9 +293,7 @@ func (s OpentelemetrySpan) SetStatus(code codes.Code, description string) {
}
func (s OpentelemetrySpan) RecordError(err error, options ...trace.EventOption) {
s.span.RecordError(err, options...)
}
func (s OpentelemetrySpan) AddEvents(keys []string, values []EventValue) {

View File

@ -8,9 +8,6 @@ import (
"os"
"strings"
"github.com/grafana/grafana/pkg/cmd/grafana-cli/logger"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/setting"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
ol "github.com/opentracing/opentracing-go/log"
@ -20,6 +17,10 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
trace "go.opentelemetry.io/otel/trace"
"github.com/grafana/grafana/pkg/cmd/grafana-cli/logger"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/setting"
)
const (
@ -27,6 +28,56 @@ const (
envJaegerAgentPort = "JAEGER_AGENT_PORT"
)
// Tracer defines the service used to create new spans.
type Tracer interface {
// Run implements registry.BackgroundService.
Run(context.Context) error
// Start creates a new [Span] and places trace metadata on the
// [context.Context] passed to the method.
// Chose a low cardinality spanName and use [Span.SetAttributes]
// or [Span.AddEvents] for high cardinality data.
Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, Span)
// Inject adds identifying information for the span to the
// headers defined in [http.Header] map (this mutates http.Header).
//
// Implementation quirk: Where OpenTelemetry is used, the [Span] is
// picked up from [context.Context] and for OpenTracing the
// information passed as [Span] is preferred.
// Both the context and span must be derived from the same call to
// [Tracer.Start].
Inject(context.Context, http.Header, Span)
}
// Span defines a time range for an operation. This is equivalent to a
// single line in a flame graph.
type Span interface {
// End finalizes the Span and adds its end timestamp.
// Any further operations on the Span are not permitted after
// End has been called.
End()
// SetAttributes adds additional data to a span.
// SetAttributes repeats the key value pair with [string] and [any]
// used for OpenTracing and [attribute.KeyValue] used for
// OpenTelemetry.
SetAttributes(key string, value interface{}, kv attribute.KeyValue)
// SetName renames the span.
SetName(name string)
// SetStatus can be used to indicate whether the span was
// successfully or unsuccessfully executed.
//
// Only useful for OpenTelemetry.
SetStatus(code codes.Code, description string)
// RecordError adds an error to the span.
//
// Only useful for OpenTelemetry.
RecordError(err error, options ...trace.EventOption)
// AddEvents adds additional data with a temporal dimension to the
// span.
//
// Panics if the length of keys is shorter than the length of values.
AddEvents(keys []string, values []EventValue)
}
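// Illustrative usage sketch (an assumption, not part of this change): a
// typical caller starts a span, defers End, and records failures. The
// exampleService type and its work method are hypothetical.
//
//	func (s *exampleService) doSomething(ctx context.Context) error {
//		ctx, span := s.tracer.Start(ctx, "exampleService.doSomething")
//		defer span.End()
//		if err := s.work(ctx); err != nil {
//			span.RecordError(err)
//			span.SetStatus(codes.Error, "work failed")
//			return err
//		}
//		return nil
//	}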
func ProvideService(cfg *setting.Cfg) (Tracer, error) {
ts, ots, err := parseSettings(cfg)
if err != nil {
@ -239,7 +290,9 @@ func (s OpentracingSpan) SetName(name string) {
}
func (s OpentracingSpan) SetStatus(code codes.Code, description string) {
if code == codes.Error {
ext.Error.Set(s.span, true)
}
}
func (s OpentracingSpan) RecordError(err error, options ...trace.EventOption) {

View File

@ -51,6 +51,7 @@ func ProvideBackgroundServiceRegistry(
_ serviceaccounts.Service, _ *guardian.Provider,
_ *plugindashboardsservice.DashboardUpdater, _ *sanitizer.Provider,
_ *grpcserver.HealthService,
_ *grpcserver.ReflectionService,
) *BackgroundServiceRegistry {
return NewBackgroundServiceRegistry(
httpServer,

View File

@ -1,16 +1,18 @@
package server
import (
"github.com/grafana/grafana/pkg/services/grpcserver"
"github.com/grafana/grafana/pkg/services/notifications"
"github.com/grafana/grafana/pkg/services/sqlstore"
)
func ProvideTestEnv(server *Server, store *sqlstore.SQLStore, ns *notifications.NotificationServiceMock, grpcServer grpcserver.Provider) (*TestEnv, error) {
return &TestEnv{server, store, ns, grpcServer}, nil
}
type TestEnv struct {
Server *Server
SQLStore *sqlstore.SQLStore
NotificationService *notifications.NotificationServiceMock
GRPCServer grpcserver.Provider
}

View File

@ -344,6 +344,7 @@ var wireBasicSet = wire.NewSet(
orgimpl.ProvideService,
grpcserver.ProvideService,
grpcserver.ProvideHealthService,
grpcserver.ProvideReflectionService,
teamimpl.ProvideService,
tempuserimpl.ProvideService,
loginattemptimpl.ProvideService,

View File

@ -49,7 +49,9 @@ func (s *AccessControlStore) GetUserPermissions(ctx context.Context, query acces
params = append(params, a)
}
}
q += `
ORDER BY permission.scope
`
if err := sess.SQL(q, params...).Find(&result); err != nil {
return err
}

View File

@ -43,10 +43,10 @@ func benchmarkFilter(b *testing.B, numDs, numPermissions int) {
require.NoError(b, err)
var datasources []datasources.DataSource
err = store.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
return sess.SQL(baseSql+acFilter.Where, acFilter.Args...).Find(&datasources)
})
require.NoError(b, err)
require.Len(b, datasources, numPermissions)
}
}

View File

@ -168,40 +168,41 @@ func TestFilter_Datasources(t *testing.T) {
t.Run(tt.desc, func(t *testing.T) {
store := sqlstore.InitTestDB(t)
err := store.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
// seed 10 data sources
for i := 1; i <= 10; i++ {
dsStore := dsService.CreateStore(store, log.New("accesscontrol.test"))
err := dsStore.AddDataSource(context.Background(), &datasources.AddDataSourceCommand{Name: fmt.Sprintf("ds:%d", i), Uid: fmt.Sprintf("uid%d", i)})
require.NoError(t, err)
}
baseSql := `SELECT data_source.* FROM data_source WHERE`
acFilter, err := accesscontrol.Filter(
&user.SignedInUser{
OrgID: 1,
Permissions: map[int64]map[string][]string{1: tt.permissions},
},
tt.sqlID,
tt.prefix,
tt.actions...,
)
if !tt.expectErr {
require.NoError(t, err)
var datasources []datasources.DataSource
err = sess.SQL(baseSql+acFilter.Where, acFilter.Args...).Find(&datasources)
require.NoError(t, err)
assert.Len(t, datasources, len(tt.expectedDataSources))
for i, ds := range datasources {
assert.Equal(t, tt.expectedDataSources[i], ds.Name)
}
} else {
require.Error(t, err)
}
return nil
})
require.NoError(t, err)
})
}
}

View File

@ -58,7 +58,9 @@ func (ss *sqlStore) DeleteAlertNotification(ctx context.Context, cmd *models.Del
func (ss *sqlStore) DeleteAlertNotificationWithUid(ctx context.Context, cmd *models.DeleteAlertNotificationWithUidCommand) error {
existingNotification := &models.GetAlertNotificationsWithUidQuery{OrgId: cmd.OrgId, Uid: cmd.Uid}
if err := ss.db.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
return getAlertNotificationWithUidInternal(ctx, existingNotification, sess)
}); err != nil {
return err
}
@ -79,7 +81,9 @@ func (ss *sqlStore) DeleteAlertNotificationWithUid(ctx context.Context, cmd *mod
}
func (ss *sqlStore) GetAlertNotifications(ctx context.Context, query *models.GetAlertNotificationsQuery) error {
return ss.db.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
return getAlertNotificationInternal(ctx, query, sess)
})
}
func (ss *sqlStore) GetAlertNotificationUidWithId(ctx context.Context, query *models.GetAlertNotificationUidQuery) error {
@ -90,8 +94,9 @@ func (ss *sqlStore) GetAlertNotificationUidWithId(ctx context.Context, query *mo
return nil
}
if err := ss.db.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
return getAlertNotificationUidInternal(ctx, query, sess)
}); err != nil {
return err
}
@ -105,7 +110,9 @@ func newAlertNotificationUidCacheKey(orgID, notificationId int64) string {
}
func (ss *sqlStore) GetAlertNotificationsWithUid(ctx context.Context, query *models.GetAlertNotificationsWithUidQuery) error {
return ss.db.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
return getAlertNotificationWithUidInternal(ctx, query, sess)
})
}
func (ss *sqlStore) GetAllAlertNotifications(ctx context.Context, query *models.GetAllAlertNotificationsQuery) error {
@ -444,7 +451,9 @@ func (ss *sqlStore) UpdateAlertNotification(ctx context.Context, cmd *models.Upd
func (ss *sqlStore) UpdateAlertNotificationWithUid(ctx context.Context, cmd *models.UpdateAlertNotificationWithUidCommand) error {
getAlertNotificationWithUidQuery := &models.GetAlertNotificationsWithUidQuery{OrgId: cmd.OrgId, Uid: cmd.Uid}
if err := ss.db.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
return getAlertNotificationWithUidInternal(ctx, getAlertNotificationWithUidQuery, sess)
}); err != nil {
return err
}

View File

@ -132,57 +132,62 @@ func TestOldAnnotationsAreDeletedFirst(t *testing.T) {
Created: time.Now().AddDate(-10, 0, -10).UnixNano() / int64(time.Millisecond),
}
err := fakeSQL.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
_, err := sess.Insert(a)
require.NoError(t, err, "cannot insert annotation")
_, err = sess.Insert(a)
require.NoError(t, err, "cannot insert annotation")
a.AlertId = 20
_, err = sess.Insert(a)
require.NoError(t, err, "cannot insert annotation")
// run the clean up task to keep one annotation.
cfg := setting.NewCfg()
cfg.AnnotationCleanupJobBatchSize = 1
cleaner := &xormRepositoryImpl{cfg: cfg, log: log.New("test-logger"), db: fakeSQL}
_, err = cleaner.CleanAnnotations(context.Background(), setting.AnnotationCleanupSettings{MaxCount: 1}, alertAnnotationType)
require.NoError(t, err)
// assert that the last annotations were kept
countNew, err := sess.Where("alert_id = 20").Count(&annotations.Item{})
require.NoError(t, err)
require.Equal(t, int64(1), countNew, "the last annotations should be kept")
countOld, err := sess.Where("alert_id = 10").Count(&annotations.Item{})
require.NoError(t, err)
require.Equal(t, int64(0), countOld, "the two first annotations should have been deleted")
return nil
})
require.NoError(t, err)
}
func assertAnnotationCount(t *testing.T, fakeSQL *sqlstore.SQLStore, sql string, expectedCount int64) {
t.Helper()
err := fakeSQL.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
count, err := sess.Where(sql).Count(&annotations.Item{})
require.NoError(t, err)
require.Equal(t, expectedCount, count)
return nil
})
require.NoError(t, err)
}
func assertAnnotationTagCount(t *testing.T, fakeSQL *sqlstore.SQLStore, expectedCount int64) {
t.Helper()
err := fakeSQL.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
count, err := sess.SQL("select count(*) from annotation_tag").Count()
require.NoError(t, err)
require.Equal(t, expectedCount, count)
return nil
})
require.NoError(t, err)
}
func createTestAnnotations(t *testing.T, store *sqlstore.SQLStore, expectedCount int, oldAnnotations int) {
t.Helper()
cutoffDate := time.Now()
@ -216,16 +221,19 @@ func createTestAnnotations(t *testing.T, sqlstore *sqlstore.SQLStore, expectedCo
a.Created = cutoffDate.AddDate(-10, 0, -10).UnixNano() / int64(time.Millisecond)
}
err := store.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
_, err := sess.Insert(a)
require.NoError(t, err, "should be able to save annotation", err)
// mimic the SQL annotation Save logic by writing records to the annotation_tag table
// we need to ensure they get deleted when we clean up annotations
for tagID := range []int{1, 2} {
_, err = sess.Exec("INSERT INTO annotation_tag (annotation_id, tag_id) VALUES(?,?)", a.Id, tagID)
require.NoError(t, err, "should be able to save annotation tag ID", err)
}
return err
})
require.NoError(t, err)
}
}

View File

@ -566,40 +566,54 @@ type testContext struct {
}
func (c *testContext) getAuthTokenByID(id int64) (*userAuthToken, error) {
var res *userAuthToken
err := c.sqlstore.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
var t userAuthToken
found, err := sess.ID(id).Get(&t)
if err != nil || !found {
return err
}
res = &t
return nil
})
return res, err
}
func (c *testContext) markAuthTokenAsSeen(id int64) (bool, error) {
hasRowsAffected := false
err := c.sqlstore.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
res, err := sess.Exec("UPDATE user_auth_token SET auth_token_seen = ? WHERE id = ?", c.sqlstore.Dialect.BooleanStr(true), id)
if err != nil {
return err
}
rowsAffected, err := res.RowsAffected()
if err != nil {
return err
}
hasRowsAffected = rowsAffected == 1
return nil
})
return hasRowsAffected, err
}
func (c *testContext) updateRotatedAt(id, rotatedAt int64) (bool, error) {
hasRowsAffected := false
err := c.sqlstore.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
res, err := sess.Exec("UPDATE user_auth_token SET rotated_at = ? WHERE id = ?", rotatedAt, id)
if err != nil {
return err
}
rowsAffected, err := res.RowsAffected()
if err != nil {
return err
}
hasRowsAffected = rowsAffected == 1
return nil
})
return hasRowsAffected, err
}

View File

@ -6,6 +6,7 @@ import (
"testing"
"time"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/stretchr/testify/require"
)
@ -21,8 +22,12 @@ func TestUserAuthTokenCleanup(t *testing.T) {
insertToken := func(ctx *testContext, token string, prev string, createdAt, rotatedAt int64) {
ut := userAuthToken{AuthToken: token, PrevAuthToken: prev, CreatedAt: createdAt, RotatedAt: rotatedAt, UserAgent: "", ClientIp: ""}
err := ctx.sqlstore.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
_, err := sess.Insert(&ut)
require.Nil(t, err)
return nil
})
require.NoError(t, err)
}
now := time.Date(2018, 12, 13, 13, 45, 0, 0, time.UTC)

View File

@ -574,7 +574,7 @@ func GetAlertsByDashboardId2(dashboardId int64, sess *sqlstore.DBSession) ([]*mo
}
func (d *DashboardStore) updateAlerts(ctx context.Context, existingAlerts []*models.Alert, alerts []*models.Alert, log log.Logger) error {
return d.sqlStore.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
for _, alert := range alerts {
update := false
var alertToUpdate *models.Alert

View File

@ -0,0 +1,35 @@
package grpcserver
import (
"context"
"github.com/grafana/grafana/pkg/setting"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)
// ReflectionService implements the gRPC Server Reflection Protocol:
// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md
type ReflectionService struct {
cfg *setting.Cfg
reflectionServer *reflectionServer
}
type reflectionServer struct {
grpc_reflection_v1alpha.ServerReflectionServer
}
// AuthFuncOverride no auth for reflection service.
func (s *reflectionServer) AuthFuncOverride(ctx context.Context, _ string) (context.Context, error) {
return ctx, nil
}
func ProvideReflectionService(cfg *setting.Cfg, grpcServerProvider Provider) (*ReflectionService, error) {
re := &reflectionServer{reflection.NewServer(reflection.ServerOptions{Services: grpcServerProvider.GetServer()})}
grpc_reflection_v1alpha.RegisterServerReflectionServer(grpcServerProvider.GetServer(), re)
return &ReflectionService{
cfg: cfg,
reflectionServer: re,
}, nil
}
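// Illustrative note (an assumption, not part of the change): with reflection
// registered, a generic client such as grpcurl can discover the exposed
// services without local .proto files, e.g.
//
//	grpcurl -plaintext <grpc-address> list
//
// where <grpc-address> is the configured gRPC server address.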

View File

@ -16,18 +16,19 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/reflection"
)
type Provider interface {
registry.BackgroundService
GetServer() *grpc.Server
GetAddress() string
}
type GPRCServerService struct {
cfg *setting.Cfg
logger log.Logger
server *grpc.Server
address string
}
func ProvideService(cfg *setting.Cfg, apiKey apikey.Service, userService user.Service) (Provider, error) {
@ -51,9 +52,7 @@ func ProvideService(cfg *setting.Cfg, apiKey apikey.Service, userService user.Se
opts = append(opts, grpc.Creds(credentials.NewTLS(cfg.GRPCServerTLSConfig)))
}
s.server = grpc.NewServer(opts...)
return s, nil
}
@ -65,6 +64,8 @@ func (s *GPRCServerService) Run(ctx context.Context) error {
return fmt.Errorf("GRPC server: failed to listen: %w", err)
}
s.address = listener.Addr().String()
serveErr := make(chan error, 1)
go func() {
s.logger.Info("GRPC server: starting")
@ -96,3 +97,7 @@ func (s *GPRCServerService) IsDisabled() bool {
func (s *GPRCServerService) GetServer() *grpc.Server {
return s.server
}
func (s *GPRCServerService) GetAddress() string {
return s.address
}

View File

@ -85,7 +85,7 @@ func (s *ServiceImpl) processAppPlugin(plugin plugins.PluginDTO, c *models.ReqCo
SortWeight: navtree.WeightPlugin,
}
if topNavEnabled {
appLink.Url = s.cfg.AppSubURL + "/a/" + plugin.ID
} else {
appLink.Url = path.Join(s.cfg.AppSubURL, plugin.DefaultNavURL)

View File

@ -359,11 +359,11 @@ func (srv RulerSrv) updateAlertRulesInGroup(c *models.ReqContext, groupKey ngmod
logger.Debug("updating database with the authorized changes", "add", len(finalChanges.New), "update", len(finalChanges.New), "delete", len(finalChanges.Delete))
if len(finalChanges.Update) > 0 || len(finalChanges.New) > 0 {
updates := make([]ngmodels.UpdateRule, 0, len(finalChanges.Update))
inserts := make([]ngmodels.AlertRule, 0, len(finalChanges.New))
for _, update := range finalChanges.Update {
logger.Debug("updating rule", "rule_uid", update.New.UID, "diff", update.Diff.String())
updates = append(updates, ngmodels.UpdateRule{
Existing: update.Existing,
New: *update.New,
})

View File

@ -5,7 +5,6 @@ import (
"github.com/grafana/grafana/pkg/models"
ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/ngalert/store"
"github.com/grafana/grafana/pkg/services/user"
)
@ -19,7 +18,7 @@ type RuleStore interface {
// InsertAlertRules will insert all alert rules passed into the function
// and return the map of uuid to id.
InsertAlertRules(ctx context.Context, rule []ngmodels.AlertRule) (map[string]int64, error)
UpdateAlertRules(ctx context.Context, rule []store.UpdateRule) error
UpdateAlertRules(ctx context.Context, rule []ngmodels.UpdateRule) error
DeleteAlertRulesByUID(ctx context.Context, orgID int64, ruleUID ...string) error
// IncreaseVersionForAllRulesInNamespace increases the version for all rules in the specified namespace. It returns all rules that belong to the namespace

View File

@ -379,6 +379,11 @@ type ListOrgRuleGroupsQuery struct {
Result [][]string
}
type UpdateRule struct {
Existing *AlertRule
New AlertRule
}
// Condition contains backend expressions and queries and the RefID
// of the query or expression that will be evaluated.
type Condition struct {

View File

@ -143,14 +143,14 @@ func (service *AlertRuleService) UpdateRuleGroup(ctx context.Context, orgID int6
if err != nil {
return fmt.Errorf("failed to list alert rules: %w", err)
}
updateRules := make([]store.UpdateRule, 0, len(query.Result))
updateRules := make([]models.UpdateRule, 0, len(query.Result))
for _, rule := range query.Result {
if rule.IntervalSeconds == intervalSeconds {
continue
}
newRule := *rule
newRule.IntervalSeconds = intervalSeconds
updateRules = append(updateRules, store.UpdateRule{
updateRules = append(updateRules, models.UpdateRule{
Existing: rule,
New: newRule,
})
@ -216,7 +216,7 @@ func (service *AlertRuleService) ReplaceRuleGroup(ctx context.Context, orgID int
}
}
updates := make([]store.UpdateRule, 0, len(delta.Update))
updates := make([]models.UpdateRule, 0, len(delta.Update))
for _, update := range delta.Update {
// check that provenance is not changed in an invalid way
storedProvenance, err := service.provenanceStore.GetProvenance(ctx, update.New, orgID)
@ -226,7 +226,7 @@ func (service *AlertRuleService) ReplaceRuleGroup(ctx context.Context, orgID int
if storedProvenance != provenance && storedProvenance != models.ProvenanceNone {
return fmt.Errorf("cannot update with provided provenance '%s', needs '%s'", provenance, storedProvenance)
}
updates = append(updates, store.UpdateRule{
updates = append(updates, models.UpdateRule{
Existing: update.Existing,
New: *update.New,
})
@ -281,7 +281,7 @@ func (service *AlertRuleService) UpdateAlertRule(ctx context.Context, rule model
return models.AlertRule{}, err
}
err = service.xact.InTransaction(ctx, func(ctx context.Context) error {
err := service.ruleStore.UpdateAlertRules(ctx, []store.UpdateRule{
err := service.ruleStore.UpdateAlertRules(ctx, []models.UpdateRule{
{
Existing: &storedRule,
New: rule,

View File

@ -7,7 +7,6 @@ import (
"github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
"github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/ngalert/store"
"github.com/grafana/grafana/pkg/services/quota"
)
@ -40,7 +39,7 @@ type RuleStore interface {
ListAlertRules(ctx context.Context, query *models.ListAlertRulesQuery) error
GetRuleGroupInterval(ctx context.Context, orgID int64, namespaceUID string, ruleGroup string) (int64, error)
InsertAlertRules(ctx context.Context, rule []models.AlertRule) (map[string]int64, error)
UpdateAlertRules(ctx context.Context, rule []store.UpdateRule) error
UpdateAlertRules(ctx context.Context, rule []models.UpdateRule) error
DeleteAlertRulesByUID(ctx context.Context, orgID int64, ruleUID ...string) error
GetAlertRulesGroupByRuleUID(ctx context.Context, query *models.GetAlertRulesGroupByRuleUIDQuery) error
}

View File

@ -8,7 +8,6 @@ import (
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/guardian"
apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/services/sqlstore/searchstore"
@ -22,17 +21,6 @@ const AlertRuleMaxTitleLength = 190
// AlertRuleMaxRuleGroupNameLength is the maximum length of the alert rule group name
const AlertRuleMaxRuleGroupNameLength = 190
type UpdateRuleGroupCmd struct {
OrgID int64
NamespaceUID string
RuleGroupConfig apimodels.PostableRuleGroupConfig
}
type UpdateRule struct {
Existing *ngmodels.AlertRule
New ngmodels.AlertRule
}
var (
ErrAlertRuleGroupNotFound = errors.New("rulegroup not found")
ErrOptimisticLock = errors.New("version conflict while updating a record in the database with optimistic locking")
@ -185,7 +173,7 @@ func (st DBstore) InsertAlertRules(ctx context.Context, rules []ngmodels.AlertRu
}
// UpdateAlertRules is a handler for updating alert rules.
func (st DBstore) UpdateAlertRules(ctx context.Context, rules []UpdateRule) error {
func (st DBstore) UpdateAlertRules(ctx context.Context, rules []ngmodels.UpdateRule) error {
return st.SQLStore.WithTransactionalDbSession(ctx, func(sess *sqlstore.DBSession) error {
ruleVersions := make([]ngmodels.AlertRuleVersion, 0, len(rules))
for _, r := range rules {
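ErrOptimisticLock above is the failure mode UpdateAlertRules guards against when applying each Existing/New pair. A generic sketch of the version-check pattern, assuming illustrative column names and an AlertRule Version field; this is not the store's actual statement:

```go
import (
	ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
	"github.com/grafana/grafana/pkg/services/sqlstore"
)

// updateWithVersionCheck is a generic optimistic-locking sketch: the UPDATE
// is keyed on the version that was read earlier, so a concurrent writer
// leaves zero affected rows and the conflict surfaces as ErrOptimisticLock.
func updateWithVersionCheck(sess *sqlstore.DBSession, r ngmodels.UpdateRule) error {
	res, err := sess.Exec(
		`UPDATE alert_rule SET title = ?, version = version + 1
		 WHERE uid = ? AND org_id = ? AND version = ?`,
		r.New.Title, r.Existing.UID, r.Existing.OrgID, r.Existing.Version)
	if err != nil {
		return err
	}
	if n, _ := res.RowsAffected(); n == 0 {
		return ErrOptimisticLock
	}
	return nil
}
```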

View File

@ -52,7 +52,7 @@ func TestUpdateAlertRules(t *testing.T) {
rule := createRule(t)
newRule := models.CopyRule(rule)
newRule.Title = util.GenerateShortUID()
err := store.UpdateAlertRules(context.Background(), []UpdateRule{{
err := store.UpdateAlertRules(context.Background(), []models.UpdateRule{{
Existing: rule,
New: *newRule,
},
@ -77,7 +77,7 @@ func TestUpdateAlertRules(t *testing.T) {
newRule := models.CopyRule(rule)
newRule.Title = util.GenerateShortUID()
err := store.UpdateAlertRules(context.Background(), []UpdateRule{{
err := store.UpdateAlertRules(context.Background(), []models.UpdateRule{{
Existing: rule,
New: *newRule,
},

View File

@ -11,6 +11,7 @@ import (
"github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/ngalert/store"
"github.com/grafana/grafana/pkg/services/ngalert/tests"
"github.com/grafana/grafana/pkg/services/sqlstore"
)
func TestIntegrationSaveAndGetImage(t *testing.T) {
@ -168,30 +169,32 @@ func TestIntegrationDeleteExpiredImages(t *testing.T) {
image2 := models.Image{URL: "https://example.com/example.png"}
require.NoError(t, dbstore.SaveImage(ctx, &image2))
s := dbstore.SQLStore.NewSession(ctx)
t.Cleanup(s.Close)
// should return both images
var result1, result2 models.Image
ok, err := s.Where("token = ?", image1.Token).Get(&result1)
require.NoError(t, err)
assert.True(t, ok)
ok, err = s.Where("token = ?", image2.Token).Get(&result2)
require.NoError(t, err)
assert.True(t, ok)
// should delete expired image
image1.ExpiresAt = time.Now().Add(-time.Second)
require.NoError(t, dbstore.SaveImage(ctx, &image1))
n, err := dbstore.DeleteExpiredImages(ctx)
require.NoError(t, err)
assert.Equal(t, int64(1), n)
// should return just the second image
ok, err = s.Where("token = ?", image1.Token).Get(&result1)
require.NoError(t, err)
assert.False(t, ok)
ok, err = s.Where("token = ?", image2.Token).Get(&result2)
require.NoError(t, err)
assert.True(t, ok)
err := dbstore.SQLStore.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
// should return both images
var result1, result2 models.Image
ok, err := sess.Where("token = ?", image1.Token).Get(&result1)
require.NoError(t, err)
assert.True(t, ok)
ok, err = sess.Where("token = ?", image2.Token).Get(&result2)
require.NoError(t, err)
assert.True(t, ok)
// should delete expired image
image1.ExpiresAt = time.Now().Add(-time.Second)
require.NoError(t, dbstore.SaveImage(ctx, &image1))
n, err := dbstore.DeleteExpiredImages(ctx)
require.NoError(t, err)
assert.Equal(t, int64(1), n)
// should return just the second image
ok, err = sess.Where("token = ?", image1.Token).Get(&result1)
require.NoError(t, err)
assert.False(t, ok)
ok, err = sess.Where("token = ?", image2.Token).Get(&result2)
require.NoError(t, err)
assert.True(t, ok)
return nil
})
require.NoError(t, err)
}

View File

@ -320,7 +320,7 @@ func (f *FakeRuleStore) GetNamespaceByUID(_ context.Context, uid string, orgID i
return nil, fmt.Errorf("not found")
}
func (f *FakeRuleStore) UpdateAlertRules(_ context.Context, q []UpdateRule) error {
func (f *FakeRuleStore) UpdateAlertRules(_ context.Context, q []models.UpdateRule) error {
f.mtx.Lock()
defer f.mtx.Unlock()
f.RecordedOps = append(f.RecordedOps, q)

View File

@ -2,6 +2,8 @@ package playlist
import (
"errors"
"github.com/grafana/grafana/pkg/coremodel/playlist"
)
// Typed errors
@ -21,22 +23,11 @@ type Playlist struct {
}
type PlaylistDTO struct {
Id int64 `json:"id"`
UID string `json:"uid"`
Name string `json:"name"`
Interval string `json:"interval"`
OrgId int64 `json:"-"`
Items []PlaylistItemDTO `json:"items"`
playlist.Model
OrgId int64 `json:"-"`
}
type PlaylistItemDTO struct {
Id int64 `json:"id"`
PlaylistId int64 `json:"playlistid"`
Type string `json:"type"`
Title string `json:"title"`
Value string `json:"value"`
Order int `json:"order"`
}
type PlaylistItemDTO = playlist.PlaylistItem
type PlaylistItem struct {
Id int64 `db:"id"`
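The DTO now embeds the playlist coremodel instead of redeclaring each field; embedding promotes the coremodel's fields, so the JSON shape stays flat. A self-contained illustration of that pattern (the Model field set is inferred from the dto.Id/dto.Uid/dto.Name/dto.Interval assignments in the sqlstore diff below; it is a stand-in, not the generated type):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Model is a stand-in for the generated playlist coremodel.
type Model struct {
	Id       int64  `json:"id"`
	Uid      string `json:"uid"`
	Name     string `json:"name"`
	Interval string `json:"interval"`
}

// Embedding promotes the coremodel fields, so the DTO keeps its old
// top-level JSON shape while the schema lives in one place.
type PlaylistDTO struct {
	Model
	OrgId int64 `json:"-"`
}

func main() {
	dto := PlaylistDTO{Model: Model{Id: 1, Uid: "abc", Name: "rotation", Interval: "5m"}, OrgId: 2}
	out, _ := json.Marshal(dto)
	fmt.Println(string(out)) // {"id":1,"uid":"abc","name":"rotation","interval":"5m"}
}
```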

View File

@ -42,7 +42,7 @@ func (s *sqlxStore) Insert(ctx context.Context, cmd *playlist.CreatePlaylistComm
for _, item := range cmd.Items {
playlistItems = append(playlistItems, playlist.PlaylistItem{
PlaylistId: p.Id,
Type: item.Type,
Type: string(item.Type),
Value: item.Value,
Order: item.Order,
Title: item.Title,
@ -94,7 +94,7 @@ func (s *sqlxStore) Update(ctx context.Context, cmd *playlist.UpdatePlaylistComm
for index, item := range cmd.Items {
playlistItems = append(playlistItems, playlist.PlaylistItem{
PlaylistId: p.Id,
Type: item.Type,
Type: string(item.Type),
Value: item.Value,
Order: index + 1,
Title: item.Title,

View File

@ -38,7 +38,7 @@ func (s *sqlStore) Insert(ctx context.Context, cmd *playlist.CreatePlaylistComma
for _, item := range cmd.Items {
playlistItems = append(playlistItems, playlist.PlaylistItem{
PlaylistId: p.Id,
Type: item.Type,
Type: string(item.Type),
Value: item.Value,
Order: item.Order,
Title: item.Title,
@ -70,13 +70,12 @@ func (s *sqlStore) Update(ctx context.Context, cmd *playlist.UpdatePlaylistComma
p.Id = existingPlaylist.Id
dto = playlist.PlaylistDTO{
Id: p.Id,
UID: p.UID,
OrgId: p.OrgId,
Name: p.Name,
Interval: p.Interval,
OrgId: p.OrgId,
}
dto.Id = p.Id
dto.Uid = p.UID
dto.Name = p.Name
dto.Interval = p.Interval
_, err = sess.Where("id=?", p.Id).Cols("name", "interval").Update(&p)
if err != nil {
@ -95,7 +94,7 @@ func (s *sqlStore) Update(ctx context.Context, cmd *playlist.UpdatePlaylistComma
for index, item := range cmd.Items {
playlistItems = append(playlistItems, models.PlaylistItem{
PlaylistId: p.Id,
Type: item.Type,
Type: string(item.Type),
Value: item.Value,
Order: index + 1,
Title: item.Title,

View File

@ -96,16 +96,12 @@ func (pd *PublicDashboardServiceImpl) GetPublicDashboardConfig(ctx context.Conte
// SavePublicDashboardConfig is a helper method to persist the sharing config
// to the database. It handles validations for sharing config and persistence
func (pd *PublicDashboardServiceImpl) SavePublicDashboardConfig(ctx context.Context, u *user.SignedInUser, dto *SavePublicDashboardConfigDTO) (*PublicDashboard, error) {
// validate if the dashboard exists
dashboard, err := pd.GetDashboard(ctx, dto.DashboardUid)
if err != nil {
return nil, err
}
err = validation.ValidateSavePublicDashboard(dto, dashboard)
if err != nil {
return nil, err
}
// set default value for time settings
if dto.PublicDashboard.TimeSettings == nil {
dto.PublicDashboard.TimeSettings = &TimeSettings{}
@ -120,6 +116,10 @@ func (pd *PublicDashboardServiceImpl) SavePublicDashboardConfig(ctx context.Cont
// save changes
var pubdashUid string
if existingPubdash == nil {
err = validation.ValidateSavePublicDashboard(dto, dashboard)
if err != nil {
return nil, err
}
pubdashUid, err = pd.savePublicDashboardConfig(ctx, dto)
} else {
pubdashUid, err = pd.updatePublicDashboardConfig(ctx, dto)

View File

@ -126,7 +126,9 @@ func (ss *SecretsStoreImpl) ReEncryptDataKeys(
currProvider secrets.ProviderID,
) error {
keys := make([]*secrets.DataKey, 0)
if err := ss.sqlStore.NewSession(ctx).Table(dataKeysTable).Find(&keys); err != nil {
if err := ss.sqlStore.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
return sess.Table(dataKeysTable).Find(&keys)
}); err != nil {
return err
}

View File

@ -104,8 +104,10 @@ func (m *SecretsMigrator) RollBackSecrets(ctx context.Context) (bool, error) {
return false, nil
}
_, sqlErr := m.sqlStore.NewSession(ctx).Exec("DELETE FROM data_keys")
if sqlErr != nil {
if sqlErr := m.sqlStore.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
_, err := sess.Exec("DELETE FROM data_keys")
return err
}); sqlErr != nil {
logger.Warn("Error while cleaning up data keys table...", "error", sqlErr)
return false, nil
}

View File

@ -18,7 +18,9 @@ func (s simpleSecret) reencrypt(ctx context.Context, secretsSrv *manager.Secrets
Secret []byte
}
if err := sqlStore.NewSession(ctx).Table(s.tableName).Select(fmt.Sprintf("id, %s as secret", s.columnName)).Find(&rows); err != nil {
if err := sqlStore.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
return sess.Table(s.tableName).Select(fmt.Sprintf("id, %s as secret", s.columnName)).Find(&rows)
}); err != nil {
logger.Warn("Could not find any secret to re-encrypt", "table", s.tableName)
return false
}
@ -72,7 +74,9 @@ func (s b64Secret) reencrypt(ctx context.Context, secretsSrv *manager.SecretsSer
Secret string
}
if err := sqlStore.NewSession(ctx).Table(s.tableName).Select(fmt.Sprintf("id, %s as secret", s.columnName)).Find(&rows); err != nil {
if err := sqlStore.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
return sess.Table(s.tableName).Select(fmt.Sprintf("id, %s as secret", s.columnName)).Find(&rows)
}); err != nil {
logger.Warn("Could not find any secret to re-encrypt", "table", s.tableName)
return false
}
@ -140,7 +144,9 @@ func (s jsonSecret) reencrypt(ctx context.Context, secretsSrv *manager.SecretsSe
SecureJsonData map[string][]byte
}
if err := sqlStore.NewSession(ctx).Table(s.tableName).Cols("id", "secure_json_data").Find(&rows); err != nil {
if err := sqlStore.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
return sess.Table(s.tableName).Cols("id", "secure_json_data").Find(&rows)
}); err != nil {
logger.Warn("Could not find any secret to re-encrypt", "table", s.tableName)
return false
}
@ -199,7 +205,9 @@ func (s alertingSecret) reencrypt(ctx context.Context, secretsSrv *manager.Secre
}
selectSQL := "SELECT id, alertmanager_configuration FROM alert_configuration"
if err := sqlStore.NewSession(ctx).SQL(selectSQL).Find(&results); err != nil {
if err := sqlStore.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
return sess.SQL(selectSQL).Find(&results)
}); err != nil {
logger.Warn("Could not find any alert_configuration secret to re-encrypt")
return false
}

View File

@ -24,7 +24,9 @@ func (s simpleSecret) rollback(
Secret []byte
}
if err := sqlStore.NewSession(ctx).Table(s.tableName).Select(fmt.Sprintf("id, %s as secret", s.columnName)).Find(&rows); err != nil {
if err := sqlStore.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
return sess.Table(s.tableName).Select(fmt.Sprintf("id, %s as secret", s.columnName)).Find(&rows)
}); err != nil {
logger.Warn("Could not find any secret to roll back", "table", s.tableName)
return true
}
@ -82,7 +84,9 @@ func (s b64Secret) rollback(
Secret string
}
if err := sqlStore.NewSession(ctx).Table(s.tableName).Select(fmt.Sprintf("id, %s as secret", s.columnName)).Find(&rows); err != nil {
if err := sqlStore.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
return sess.Table(s.tableName).Select(fmt.Sprintf("id, %s as secret", s.columnName)).Find(&rows)
}); err != nil {
logger.Warn("Could not find any secret to roll back", "table", s.tableName)
return true
}
@ -154,7 +158,9 @@ func (s jsonSecret) rollback(
SecureJsonData map[string][]byte
}
if err := sqlStore.NewSession(ctx).Table(s.tableName).Cols("id", "secure_json_data").Find(&rows); err != nil {
if err := sqlStore.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
return sess.Table(s.tableName).Cols("id", "secure_json_data").Find(&rows)
}); err != nil {
logger.Warn("Could not find any secret to roll back", "table", s.tableName)
return true
}
@ -217,7 +223,9 @@ func (s alertingSecret) rollback(
}
selectSQL := "SELECT id, alertmanager_configuration FROM alert_configuration"
if err := sqlStore.NewSession(ctx).SQL(selectSQL).Find(&results); err != nil {
if err := sqlStore.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
return sess.SQL(selectSQL).Find(&results)
}); err != nil {
logger.Warn("Could not find any alert_configuration secret to roll back")
return true
}

View File

@ -20,14 +20,16 @@ type TestUser struct {
Role string
Login string
IsServiceAccount bool
OrgID int64
}
type TestApiKey struct {
Name string
Role org.RoleType
OrgId int64
Key string
IsExpired bool
Name string
Role org.RoleType
OrgId int64
Key string
IsExpired bool
ServiceAccountID *int64
}
func SetupUserServiceAccount(t *testing.T, sqlStore *sqlstore.SQLStore, testUser TestUser) *user.User {
@ -41,6 +43,7 @@ func SetupUserServiceAccount(t *testing.T, sqlStore *sqlstore.SQLStore, testUser
IsServiceAccount: testUser.IsServiceAccount,
DefaultOrgRole: role,
Name: testUser.Name,
OrgID: testUser.OrgID,
})
require.NoError(t, err)
return u1
@ -53,9 +56,10 @@ func SetupApiKey(t *testing.T, sqlStore *sqlstore.SQLStore, testKey TestApiKey)
}
addKeyCmd := &apikey.AddCommand{
Name: testKey.Name,
Role: role,
OrgId: testKey.OrgId,
Name: testKey.Name,
Role: role,
OrgId: testKey.OrgId,
ServiceAccountID: testKey.ServiceAccountID,
}
if testKey.Key != "" {

View File

@ -12,7 +12,7 @@ import (
type DB interface {
WithTransactionalDbSession(ctx context.Context, callback sqlstore.DBTransactionFunc) error
WithDbSession(ctx context.Context, callback sqlstore.DBTransactionFunc) error
NewSession(ctx context.Context) *sqlstore.DBSession
WithNewDbSession(ctx context.Context, callback sqlstore.DBTransactionFunc) error
GetDialect() migrator.Dialect
GetDBType() core.DbType
GetSqlxSession() *session.SessionDB

View File

@ -21,3 +21,7 @@ func (f *FakeDB) WithTransactionalDbSession(ctx context.Context, callback sqlsto
func (f *FakeDB) WithDbSession(ctx context.Context, callback sqlstore.DBTransactionFunc) error {
return f.ExpectedError
}
func (f *FakeDB) WithNewDbSession(ctx context.Context, callback sqlstore.DBTransactionFunc) error {
return f.ExpectedError
}

View File

@ -222,6 +222,10 @@ func (m *SQLStoreMock) WithDbSession(ctx context.Context, callback sqlstore.DBTr
return m.ExpectedError
}
func (m *SQLStoreMock) WithNewDbSession(ctx context.Context, callback sqlstore.DBTransactionFunc) error {
return m.ExpectedError
}
func (m *SQLStoreMock) GetOrgQuotaByTarget(ctx context.Context, query *models.GetOrgQuotaByTargetQuery) error {
return m.ExpectedError
}

View File

@ -14,7 +14,7 @@ func (ss *SQLStore) AddOrgUser(ctx context.Context, cmd *models.AddOrgUserComman
var usr user.User
session := sess.ID(cmd.UserId)
if !cmd.AllowAddingServiceAccount {
session = session.Where(notServiceAccountFilter(ss))
session = session.Where(NotServiceAccountFilter(ss))
}
if exists, err := session.Get(&usr); err != nil {

View File

@ -27,13 +27,6 @@ func (sess *DBSession) PublishAfterCommit(msg interface{}) {
sess.events = append(sess.events, msg)
}
// NewSession returns a new DBSession
func (ss *SQLStore) NewSession(ctx context.Context) *DBSession {
sess := &DBSession{Session: ss.engine.NewSession()}
sess.Session = sess.Session.Context(ctx)
return sess
}
func startSessionOrUseExisting(ctx context.Context, engine *xorm.Engine, beginTran bool) (*DBSession, bool, error) {
value := ctx.Value(ContextSessionKey{})
var sess *DBSession
@ -55,14 +48,24 @@ func startSessionOrUseExisting(ctx context.Context, engine *xorm.Engine, beginTr
}
newSess.Session = newSess.Session.Context(ctx)
return newSess, true, nil
}
// WithDbSession calls the callback with a session.
// WithDbSession calls the callback with the session stored in the context (if one exists).
// Otherwise it creates a new session that is closed upon completion.
// A session is stored in the context if sqlstore.InTransaction() has previously been called with the same context (and it has not been committed or rolled back yet).
func (ss *SQLStore) WithDbSession(ctx context.Context, callback DBTransactionFunc) error {
return withDbSession(ctx, ss.engine, callback)
}
// WithNewDbSession calls the callback with a new session that is closed upon completion.
func (ss *SQLStore) WithNewDbSession(ctx context.Context, callback DBTransactionFunc) error {
sess := &DBSession{Session: ss.engine.NewSession(), transactionOpen: false}
defer sess.Close()
return callback(sess)
}
func withDbSession(ctx context.Context, engine *xorm.Engine, callback DBTransactionFunc) error {
sess, isNew, err := startSessionOrUseExisting(ctx, engine, false)
if err != nil {
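With NewSession gone from the interface, callers pick one of the two helpers explicitly. A hedged usage sketch (helper name hypothetical; data_keys is the table used by the secrets store above):

```go
package example

import (
	"context"

	"github.com/grafana/grafana/pkg/services/sqlstore"
)

// countDataKeys is a hypothetical helper: WithDbSession reuses a transaction
// session already stored in ctx, while ss.WithNewDbSession(ctx, ...) would
// always open a fresh session outside any ongoing transaction.
func countDataKeys(ctx context.Context, ss *sqlstore.SQLStore) (int64, error) {
	var count int64
	err := ss.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
		n, err := sess.Table("data_keys").Count()
		count = n
		return err
	})
	return count, err
}
```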

View File

@ -30,8 +30,8 @@ type Store interface {
GetSignedInUser(ctx context.Context, query *models.GetSignedInUserQuery) error
UpdateUserPermissions(userID int64, isAdmin bool) error
SetUserHelpFlag(ctx context.Context, cmd *models.SetUserHelpFlagCommand) error
NewSession(ctx context.Context) *DBSession
WithDbSession(ctx context.Context, callback DBTransactionFunc) error
WithNewDbSession(ctx context.Context, callback DBTransactionFunc) error
GetOrgQuotaByTarget(ctx context.Context, query *models.GetOrgQuotaByTargetQuery) error
GetOrgQuotas(ctx context.Context, query *models.GetOrgQuotasQuery) error
UpdateOrgQuota(ctx context.Context, cmd *models.UpdateOrgQuotaCmd) error

View File

@ -20,6 +20,8 @@ func (ss *SQLStore) WithTransactionalDbSession(ctx context.Context, callback DBT
return inTransactionWithRetryCtx(ctx, ss.engine, ss.bus, callback, 0)
}
// InTransaction starts a transaction and calls fn within it.
// The session is stored in the context so that nested session calls can reuse it.
func (ss *SQLStore) InTransaction(ctx context.Context, fn func(ctx context.Context) error) error {
return ss.inTransactionWithRetry(ctx, fn, 0)
}
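Because InTransaction puts the session into the context, any WithDbSession call made with that same context joins the transaction instead of opening its own session. A sketch under that assumption (function name and the second table are illustrative; data_keys appears in the secrets diffs above):

```go
package example

import (
	"context"

	"github.com/grafana/grafana/pkg/services/sqlstore"
)

// wipeSecrets is a hypothetical helper: both DELETEs run in one transaction,
// because each WithDbSession picks up the session InTransaction stored in ctx.
func wipeSecrets(ctx context.Context, ss *sqlstore.SQLStore) error {
	return ss.InTransaction(ctx, func(ctx context.Context) error {
		if err := ss.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
			_, err := sess.Exec("DELETE FROM data_keys")
			return err
		}); err != nil {
			return err // rolls back everything in this transaction
		}
		// joins the same transaction via the session stored in ctx
		return ss.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
			_, err := sess.Exec("DELETE FROM secrets")
			return err
		})
	})
}
```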

View File

@ -169,7 +169,7 @@ func (ss *SQLStore) CreateUser(ctx context.Context, cmd user.CreateUserCommand)
return &user, createErr
}
func notServiceAccountFilter(ss *SQLStore) string {
func NotServiceAccountFilter(ss *SQLStore) string {
return fmt.Sprintf("%s.is_service_account = %s",
ss.Dialect.Quote("user"),
ss.Dialect.BooleanStr(false))
@ -180,7 +180,7 @@ func (ss *SQLStore) GetUserById(ctx context.Context, query *models.GetUserByIdQu
usr := new(user.User)
has, err := sess.ID(query.Id).
Where(notServiceAccountFilter(ss)).
Where(NotServiceAccountFilter(ss)).
Get(usr)
if err != nil {
@ -201,67 +201,6 @@ func (ss *SQLStore) GetUserById(ctx context.Context, query *models.GetUserByIdQu
})
}
func (ss *SQLStore) UpdateUser(ctx context.Context, cmd *models.UpdateUserCommand) error {
if ss.Cfg.CaseInsensitiveLogin {
cmd.Login = strings.ToLower(cmd.Login)
cmd.Email = strings.ToLower(cmd.Email)
}
return ss.WithTransactionalDbSession(ctx, func(sess *DBSession) error {
user := user.User{
Name: cmd.Name,
Email: cmd.Email,
Login: cmd.Login,
Theme: cmd.Theme,
Updated: TimeNow(),
}
if _, err := sess.ID(cmd.UserId).Where(notServiceAccountFilter(ss)).Update(&user); err != nil {
return err
}
if ss.Cfg.CaseInsensitiveLogin {
if err := ss.userCaseInsensitiveLoginConflict(ctx, sess, user.Login, user.Email); err != nil {
return err
}
}
sess.publishAfterCommit(&events.UserUpdated{
Timestamp: user.Created,
Id: user.ID,
Name: user.Name,
Login: user.Login,
Email: user.Email,
})
return nil
})
}
func (ss *SQLStore) ChangeUserPassword(ctx context.Context, cmd *models.ChangeUserPasswordCommand) error {
return ss.WithTransactionalDbSession(ctx, func(sess *DBSession) error {
user := user.User{
Password: cmd.NewPassword,
Updated: TimeNow(),
}
_, err := sess.ID(cmd.UserId).Where(notServiceAccountFilter(ss)).Update(&user)
return err
})
}
func (ss *SQLStore) UpdateUserLastSeenAt(ctx context.Context, cmd *models.UpdateUserLastSeenAtCommand) error {
return ss.WithTransactionalDbSession(ctx, func(sess *DBSession) error {
user := user.User{
ID: cmd.UserId,
LastSeenAt: TimeNow(),
}
_, err := sess.ID(cmd.UserId).Update(&user)
return err
})
}
func (ss *SQLStore) SetUsingOrg(ctx context.Context, cmd *models.SetUsingOrgCommand) error {
getOrgsForUserCmd := &models.GetUserOrgListQuery{UserId: cmd.UserId}
if err := ss.GetUserOrgList(ctx, getOrgsForUserCmd); err != nil {
@ -296,7 +235,7 @@ func setUsingOrgInTransaction(sess *DBSession, userID int64, orgID int64) error
func (ss *SQLStore) GetUserProfile(ctx context.Context, query *models.GetUserProfileQuery) error {
return ss.WithDbSession(ctx, func(sess *DBSession) error {
var usr user.User
has, err := sess.ID(query.UserId).Where(notServiceAccountFilter(ss)).Get(&usr)
has, err := sess.ID(query.UserId).Where(NotServiceAccountFilter(ss)).Get(&usr)
if err != nil {
return err
@ -349,7 +288,7 @@ func (ss *SQLStore) GetUserOrgList(ctx context.Context, query *models.GetUserOrg
sess.Join("INNER", "org", "org_user.org_id=org.id")
sess.Join("INNER", ss.Dialect.Quote("user"), fmt.Sprintf("org_user.user_id=%s.id", ss.Dialect.Quote("user")))
sess.Where("org_user.user_id=?", query.UserId)
sess.Where(notServiceAccountFilter(ss))
sess.Where(NotServiceAccountFilter(ss))
sess.Cols("org.name", "org_user.role", "org_user.org_id")
sess.OrderBy("org.name")
err := sess.Find(&query.Result)
@ -642,7 +581,7 @@ func (ss *SQLStore) DisableUser(ctx context.Context, cmd *models.DisableUserComm
usr := user.User{}
sess := dbSess.Table("user")
if has, err := sess.ID(cmd.UserId).Where(notServiceAccountFilter(ss)).Get(&usr); err != nil {
if has, err := sess.ID(cmd.UserId).Where(NotServiceAccountFilter(ss)).Get(&usr); err != nil {
return err
} else if !has {
return user.ErrUserNotFound
@ -672,7 +611,7 @@ func (ss *SQLStore) BatchDisableUsers(ctx context.Context, cmd *models.BatchDisa
disableParams = append(disableParams, v)
}
_, err := sess.Where(notServiceAccountFilter(ss)).Exec(disableParams...)
_, err := sess.Where(NotServiceAccountFilter(ss)).Exec(disableParams...)
return err
})
}
@ -683,10 +622,14 @@ func (ss *SQLStore) DeleteUser(ctx context.Context, cmd *models.DeleteUserComman
})
}
func (ss *SQLStore) DeleteUserInSession(ctx context.Context, sess *DBSession, cmd *models.DeleteUserCommand) error {
return deleteUserInTransaction(ss, sess, cmd)
}
func deleteUserInTransaction(ss *SQLStore, sess *DBSession, cmd *models.DeleteUserCommand) error {
// Check if user exists
usr := user.User{ID: cmd.UserId}
has, err := sess.Where(notServiceAccountFilter(ss)).Get(&usr)
has, err := sess.Where(NotServiceAccountFilter(ss)).Get(&usr)
if err != nil {
return err
}
@ -762,23 +705,20 @@ func UserDeletions() []string {
func (ss *SQLStore) UpdateUserPermissions(userID int64, isAdmin bool) error {
return ss.WithTransactionalDbSession(context.Background(), func(sess *DBSession) error {
var user user.User
if _, err := sess.ID(userID).Where(notServiceAccountFilter(ss)).Get(&user); err != nil {
if _, err := sess.ID(userID).Where(NotServiceAccountFilter(ss)).Get(&user); err != nil {
return err
}
user.IsAdmin = isAdmin
sess.UseBool("is_admin")
_, err := sess.ID(user.ID).Update(&user)
if err != nil {
return err
}
// validate that after update there is at least one server admin
if err := validateOneAdminLeft(sess); err != nil {
return err
}
return nil
})
}

View File

@ -12,73 +12,6 @@ import (
"github.com/stretchr/testify/require"
)
func TestIntegrationUserUpdate(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
ss := InitTestDB(t)
users := createFiveTestUsers(t, ss, func(i int) *user.CreateUserCommand {
return &user.CreateUserCommand{
Email: fmt.Sprint("USER", i, "@test.com"),
Name: fmt.Sprint("USER", i),
Login: fmt.Sprint("loginUSER", i),
IsDisabled: false,
}
})
ss.Cfg.CaseInsensitiveLogin = true
t.Run("Testing DB - update generates duplicate user", func(t *testing.T) {
err := ss.UpdateUser(context.Background(), &models.UpdateUserCommand{
Login: "loginuser2",
UserId: users[0].ID,
})
require.Error(t, err)
})
t.Run("Testing DB - update lowercases existing user", func(t *testing.T) {
err := ss.UpdateUser(context.Background(), &models.UpdateUserCommand{
Login: "loginUSER0",
Email: "USER0@test.com",
UserId: users[0].ID,
})
require.NoError(t, err)
query := models.GetUserByIdQuery{Id: users[0].ID}
err = ss.GetUserById(context.Background(), &query)
require.NoError(t, err)
require.Equal(t, "loginuser0", query.Result.Login)
require.Equal(t, "user0@test.com", query.Result.Email)
})
t.Run("Testing DB - no user info provided", func(t *testing.T) {
err := ss.UpdateUser(context.Background(), &models.UpdateUserCommand{
Login: "",
Email: "",
Name: "Change Name",
UserId: users[3].ID,
})
require.NoError(t, err)
query := models.GetUserByIdQuery{Id: users[3].ID}
err = ss.GetUserById(context.Background(), &query)
require.NoError(t, err)
// Changed
require.Equal(t, "Change Name", query.Result.Name)
// Unchanged
require.Equal(t, "loginUSER3", query.Result.Login)
require.Equal(t, "USER3@test.com", query.Result.Email)
})
ss.Cfg.CaseInsensitiveLogin = false
}
func TestIntegrationUserDataAccess(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")

View File

@ -0,0 +1,21 @@
#!/bin/bash
# To compile all protobuf files in this repository, run
# "mage protobuf" at the top-level.
set -eu
#DST_DIR=../genproto/entity
DST_DIR=./
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
cd "$DIR"
protoc -I ./ \
--go_out=${DST_DIR} \
--go-grpc_out=${DST_DIR} --go-grpc_opt=require_unimplemented_servers=false \
object.proto

View File

@ -1,64 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.21.5
// source: object.proto
package object
// Will be replaced with something from the SDK
type UserInfo struct {
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` // internal grafana ID
Login string `protobuf:"bytes,2,opt,name=login,proto3" json:"login,omitempty"` // string ID?
}
// The canonical object/document data -- this represents the raw bytes and storage level metadata
type RawObject struct {
// Unique ID
UID string `protobuf:"bytes,1,opt,name=UID,proto3" json:"UID,omitempty"`
// Identify the object kind. This kind will be used to apply a schema to the body and
// will trigger additional indexing behavior.
Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"`
// Time in epoch milliseconds that the object was modified
Modified int64 `protobuf:"varint,3,opt,name=modified,proto3" json:"modified,omitempty"`
// Who modified the object
ModifiedBy *UserInfo `protobuf:"bytes,4,opt,name=modified_by,json=modifiedBy,proto3" json:"modified_by,omitempty"`
// Content Length
Size int64 `protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"`
// MD5 digest of the body
ETag string `protobuf:"bytes,6,opt,name=ETag,proto3" json:"ETag,omitempty"`
// Raw bytes of the storage object. The kind will determine what is a valid payload
Body []byte `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
// The version will change when the object is saved. It is not necessarily sortable
//
// NOTE: currently managed by the dashboard+dashboard_version tables
Version string `protobuf:"bytes,8,opt,name=version,proto3" json:"version,omitempty"`
// optional "save" or "commit" message
//
// NOTE: currently managed by the dashboard_version table, and will be returned from a "history" command
Comment string `protobuf:"bytes,9,opt,name=comment,proto3" json:"comment,omitempty"`
// Location (path/repo/etc) that defines the canonical form
//
// NOTE: currently managed by the dashboard_provisioning table
SyncSrc string `protobuf:"bytes,10,opt,name=sync_src,json=syncSrc,proto3" json:"sync_src,omitempty"`
// Time in epoch milliseconds that the object was last synced with an external system (provisioning/git)
//
// NOTE: currently managed by the dashboard_provisioning table
SyncTime int64 `protobuf:"varint,11,opt,name=sync_time,json=syncTime,proto3" json:"sync_time,omitempty"`
}
// Searchable fields extracted from the object
type ObjectErrorInfo struct {
Code int64 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // TODO... registry somewhere... should be limited to most severe issues
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
Details string `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"`
}
type ExternalReference struct {
// datasource, panel
Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
// prometheus / heatmap
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
// Unique ID for this object
UID string `protobuf:"bytes,3,opt,name=UID,proto3" json:"UID,omitempty"`
}

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff