diff --git a/.betterer.results b/.betterer.results index f3a96284f7f..21156f60eab 100644 --- a/.betterer.results +++ b/.betterer.results @@ -1216,9 +1216,7 @@ exports[`better eslint`] = { [0, 0, 0, "Unexpected any. Specify a different type.", "1"], [0, 0, 0, "Unexpected any. Specify a different type.", "2"], [0, 0, 0, "Unexpected any. Specify a different type.", "3"], - [0, 0, 0, "No untranslated strings. Wrap text with ", "4"], - [0, 0, 0, "Styles should be written using objects.", "5"], - [0, 0, 0, "Styles should be written using objects.", "6"] + [0, 0, 0, "No untranslated strings. Wrap text with ", "4"] ], "public/app/core/components/TimeSeries/utils.ts:5381": [ [0, 0, 0, "Unexpected any. Specify a different type.", "0"], @@ -1450,8 +1448,7 @@ exports[`better eslint`] = { [0, 0, 0, "No untranslated strings. Wrap text with ", "4"], [0, 0, 0, "No untranslated strings. Wrap text with ", "5"], [0, 0, 0, "No untranslated strings. Wrap text with ", "6"], - [0, 0, 0, "No untranslated strings. Wrap text with ", "7"], - [0, 0, 0, "No untranslated strings. Wrap text with ", "8"] + [0, 0, 0, "No untranslated strings. Wrap text with ", "7"] ], "public/app/features/admin/UserListAdminPage.tsx:5381": [ [0, 0, 0, "No untranslated strings. Wrap text with ", "0"] @@ -2056,8 +2053,7 @@ exports[`better eslint`] = { [0, 0, 0, "No untranslated strings. Wrap text with ", "9"], [0, 0, 0, "No untranslated strings. Wrap text with ", "10"], [0, 0, 0, "No untranslated strings. Wrap text with ", "11"], - [0, 0, 0, "No untranslated strings. Wrap text with ", "12"], - [0, 0, 0, "No untranslated strings. Wrap text with ", "13"] + [0, 0, 0, "No untranslated strings. Wrap text with ", "12"] ], "public/app/features/alerting/unified/components/rule-editor/PreviewRule.tsx:5381": [ [0, 0, 0, "No untranslated strings. Wrap text with ", "0"], @@ -2760,7 +2756,8 @@ exports[`better eslint`] = { [0, 0, 0, "No untranslated strings. Wrap text with ", "10"] ], "public/app/features/dashboard-scene/inspect/HelpWizard/utils.ts:5381": [ - [0, 0, 0, "Do not use any type assertions.", "0"] + [0, 0, 0, "Do not use any type assertions.", "0"], + [0, 0, 0, "Do not use any type assertions.", "1"] ], "public/app/features/dashboard-scene/inspect/InspectDataTab.tsx:5381": [ [0, 0, 0, "No untranslated strings. Wrap text with ", "0"] @@ -2881,6 +2878,9 @@ exports[`better eslint`] = { "public/app/features/dashboard-scene/scene/PanelMenuBehavior.tsx:5381": [ [0, 0, 0, "Do not use any type assertions.", "0"] ], + "public/app/features/dashboard-scene/scene/RowRepeaterBehavior.ts:5381": [ + [0, 0, 0, "Do not use any type assertions.", "0"] + ], "public/app/features/dashboard-scene/scene/Scopes/ScopesInput.tsx:5381": [ [0, 0, 0, "Do not use any type assertions.", "0"] ], @@ -5833,10 +5833,6 @@ exports[`better eslint`] = { "public/app/features/variables/pickers/index.ts:5381": [ [0, 0, 0, "Do not re-export imported variable (\`./OptionsPicker/OptionsPicker\`)", "0"] ], - "public/app/features/variables/pickers/shared/VariableLink.tsx:5381": [ - [0, 0, 0, "Styles should be written using objects.", "0"], - [0, 0, 0, "Styles should be written using objects.", "1"] - ], "public/app/features/variables/pickers/shared/VariableOptions.tsx:5381": [ [0, 0, 0, "Use data-testid for E2E selectors instead of aria-label", "0"], [0, 0, 0, "No untranslated strings. 
Wrap text with ", "1"], diff --git a/.bra.toml b/.bra.toml index 3e9d9992661..b32010204b6 100644 --- a/.bra.toml +++ b/.bra.toml @@ -2,7 +2,7 @@ init_cmds = [ ["GO_BUILD_DEV=1", "make", "build-go"], ["make", "gen-jsonnet"], - ["./bin/grafana", "server", "-profile", "-profile-addr=127.0.0.1", "-profile-port=6000", "-packaging=dev", "cfg:app_mode=development"] + ["./bin/grafana", "server", "-profile", "-profile-addr=127.0.0.1", "-profile-port=6000", "-profile-block-rate=1", "-profile-mutex-rate=5", "-packaging=dev", "cfg:app_mode=development"] ] watch_all = true follow_symlinks = true @@ -18,5 +18,5 @@ build_delay = 1500 cmds = [ ["GO_BUILD_DEV=1", "make", "build-go"], ["make", "gen-jsonnet"], - ["./bin/grafana", "server", "-profile", "-profile-addr=127.0.0.1", "-profile-port=6000", "-packaging=dev", "cfg:app_mode=development"] + ["./bin/grafana", "server", "-profile", "-profile-addr=127.0.0.1", "-profile-port=6000", "-profile-block-rate=1", "-profile-mutex-rate=5", "-packaging=dev", "cfg:app_mode=development"] ] diff --git a/.drone.yml b/.drone.yml index 215a0170f95..eab7c0387ff 100644 --- a/.drone.yml +++ b/.drone.yml @@ -135,7 +135,7 @@ steps: image: node:20.9.0-alpine name: start-storybook - commands: - - npx wait-on@7.0.1 http://$HOST:$PORT + - npx wait-on@7.2.0 -t 1m http://$HOST:$PORT - yarn e2e:storybook depends_on: - start-storybook @@ -151,11 +151,8 @@ trigger: exclude: - docs/** - '*.md' - - pkg/** - - packaging/** - - go.sum - - go.mod - include: [] + include: + - packages/grafana-ui/** type: docker volumes: - host: @@ -197,7 +194,7 @@ steps: name: betterer-frontend - commands: - apk add --update curl jq bash - - is_fork=$(curl "https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" + - is_fork=$(curl --retry 5 "https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" | jq .head.repo.fork) - if [ "$is_fork" != false ]; then return 1; fi - git clone "https://$${GITHUB_TOKEN}@github.com/grafana/grafana-enterprise.git" @@ -261,7 +258,7 @@ services: [] steps: - commands: - apk add --update curl jq bash - - is_fork=$(curl "https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" + - is_fork=$(curl --retry 5 "https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" | jq .head.repo.fork) - if [ "$is_fork" != false ]; then return 1; fi - git clone "https://$${GITHUB_TOKEN}@github.com/grafana/grafana-enterprise.git" @@ -349,7 +346,7 @@ services: [] steps: - commands: - apk add --update curl jq bash - - is_fork=$(curl "https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" + - is_fork=$(curl --retry 5 "https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" | jq .head.repo.fork) - if [ "$is_fork" != false ]; then return 1; fi - git clone "https://$${GITHUB_TOKEN}@github.com/grafana/grafana-enterprise.git" @@ -469,7 +466,7 @@ steps: name: compile-build-cmd - commands: - apk add --update curl jq bash - - is_fork=$(curl "https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" + - is_fork=$(curl --retry 5 "https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" | jq .head.repo.fork) - if [ "$is_fork" != false ]; then return 1; fi - git clone "https://$${GITHUB_TOKEN}@github.com/grafana/grafana-enterprise.git" @@ -969,7 +966,7 @@ services: steps: - commands: - apk add --update curl jq bash - - is_fork=$(curl 
"https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" + - is_fork=$(curl --retry 5 "https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" | jq .head.repo.fork) - if [ "$is_fork" != false ]; then return 1; fi - git clone "https://$${GITHUB_TOKEN}@github.com/grafana/grafana-enterprise.git" @@ -1140,6 +1137,7 @@ steps: environment: AM_TENANT_ID: test AM_URL: http://mimir_backend:8080 + failure: ignore image: golang:1.22.4-alpine name: remote-alertmanager-integration-tests trigger: @@ -1309,7 +1307,7 @@ services: [] steps: - commands: - apk add --update curl jq bash - - is_fork=$(curl "https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" + - is_fork=$(curl --retry 5 "https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" | jq .head.repo.fork) - if [ "$is_fork" != false ]; then return 1; fi - git clone "https://$${GITHUB_TOKEN}@github.com/grafana/grafana-enterprise.git" @@ -1910,7 +1908,7 @@ steps: image: node:20.9.0-alpine name: start-storybook - commands: - - npx wait-on@7.0.1 http://$HOST:$PORT + - npx wait-on@7.2.0 -t 1m http://$HOST:$PORT - yarn e2e:storybook depends_on: - start-storybook @@ -2645,6 +2643,7 @@ steps: environment: AM_TENANT_ID: test AM_URL: http://mimir_backend:8080 + failure: ignore image: golang:1.22.4-alpine name: remote-alertmanager-integration-tests trigger: @@ -4537,6 +4536,7 @@ steps: environment: AM_TENANT_ID: test AM_URL: http://mimir_backend:8080 + failure: ignore image: golang:1.22.4-alpine name: remote-alertmanager-integration-tests trigger: @@ -5181,6 +5181,6 @@ kind: secret name: gcr_credentials --- kind: signature -hmac: 6c273dec437d3ae5ae9a42450c57956259a691ff0df7c161a57eaa683c867acd +hmac: 33b84712df805ae55115bdfedc6c40f71c75e7d6065656b49295b0f78f47bb9d ... 
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index e2a6687ec69..6c79f660966 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -448,6 +448,7 @@ playwright.config.ts @grafana/plugins-platform-frontend /public/app/features/transformers/timeSeriesTable/ @grafana/dataviz-squad @grafana/app-o11y-visualizations /public/app/features/users/ @grafana/access-squad /public/app/features/variables/ @grafana/dashboards-squad +/public/app/features/preferences/ @grafana/grafana-frontend-platform /public/app/plugins/panel/alertlist/ @grafana/alerting-frontend /public/app/plugins/panel/annolist/ @grafana/grafana-frontend-platform /public/app/plugins/panel/barchart/ @grafana/dataviz-squad diff --git a/.github/workflows/detect-breaking-changes-levitate.yml b/.github/workflows/detect-breaking-changes-levitate.yml index 37fa65cb789..778f29e694e 100644 --- a/.github/workflows/detect-breaking-changes-levitate.yml +++ b/.github/workflows/detect-breaking-changes-levitate.yml @@ -8,6 +8,7 @@ on: - 'packages/**' branches: - 'main' + workflow_dispatch: jobs: buildPR: diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml index 91eb76b62cc..a46a12134bf 100644 --- a/.github/workflows/github-release.yml +++ b/.github/workflows/github-release.yml @@ -1,27 +1,48 @@ name: Create or update GitHub release on: + workflow_call: + inputs: + version: + required: true + description: Needs to match, exactly, the name of a milestone (NO v prefix) + type: string + latest: + required: false + default: false + description: Mark this release as latest (`1`) or not (`0`, default) + type: string + dry_run: + required: false + default: false + type: boolean workflow_dispatch: inputs: version: required: true description: Needs to match, exactly, the name of a milestone (NO v prefix) + type: string latest: required: false description: Mark this release as latest (`1`) or not (`0`, default) + type: string + dry_run: + required: false + default: false + type: boolean + +permissions: + # contents: write allows the action(s) to create github releases + contents: write + jobs: main: runs-on: ubuntu-latest steps: - - name: "Generate token" - id: generate_token - uses: tibdex/github-app-token@b62528385c34dbc9f38e5f4225ac829252d1ea92 - with: - app_id: ${{ secrets.GRAFANA_DELIVERY_BOT_APP_ID }} - private_key: ${{ secrets.GRAFANA_DELIVERY_BOT_APP_PEM }} - name: Create GitHub release (manually invoked) uses: grafana/grafana-github-actions-go/github-release@main with: - token: ${{ steps.generate_token.outputs.token }} + token: ${{ secrets.GITHUB_TOKEN }} version: ${{ inputs.version }} metrics_api_key: ${{ secrets.GRAFANA_MISC_STATS_API_KEY }} latest: ${{ inputs.latest }} + dry_run: ${{ inputs.dry_run }} diff --git a/.github/workflows/release-comms.yml b/.github/workflows/release-comms.yml index a62d04a78c0..a6b9ba3d687 100644 --- a/.github/workflows/release-comms.yml +++ b/.github/workflows/release-comms.yml @@ -10,32 +10,69 @@ on: default: true version: required: true + latest: + type: bool + default: false pull_request: types: - - closed + - closed branches: - - 'main' - - 'v*.*.*' + - 'main' + - 'v*.*.*' jobs: - post_release: - name: Post-release comms + setup: + name: Setup and establish latest + outputs: + version: ${{ steps.output.outputs.version }} + dry_run: ${{ steps.output.outputs.dry_run }} + latest: ${{ steps.output.outputs.latest }} runs-on: ubuntu-latest steps: - - if: github.event_name == 'workflow_dispatch' - run: | - echo "VERSION=${{ inputs.version }}" >> $GITHUB_ENV - echo 
"DRY_RUN=${{ inputs.dry_run }}" >> $GITHUB_ENV - - if: github.event.pull_request.merged == true && startsWith(github.head_ref, 'release/') - run: | - echo "VERSION=$(echo ${{ github.head_ref }} | sed -e 's/release\///g')" >> $GITHUB_ENV - echo "DRY_RUN=false" >> $GITHUB_ENV - - run: | - echo "push-grafana-tag ${VERSION} (dry run: ${DRY_RUN})" - - run: | - echo "post changelog to forums for ${VERSION} (dry run: ${DRY_RUN})" - - run: | - echo "create github release for tag ${VERSION} (dry run: ${DRY_RUN})" - - run: | - echo "publish docs for ${VERSION} (dry run: ${DRY_RUN})" - - run: | - echo "announce on slack that ${VERSION} has been released (dry run: ${DRY_RUN})" + - if: ${{ github.event_name == 'workflow_dispatch' }} + run: | + echo setting up GITHUB_ENV for ${{ github.event_name }} + echo "VERSION=${{ inputs.version }}" >> $GITHUB_ENV + echo "DRY_RUN=${{ inputs.dry_run }}" >> $GITHUB_ENV + echo "LATEST=${{ inputs.latest }}" >> $GITHUB_ENV + - if: github.event.pull_request.merged == true && startsWith(github.head_ref, 'release/') + run: | + echo "VERSION=$(echo ${{ github.head_ref }} | sed -e 's/release\///g')" >> $GITHUB_ENV + echo "DRY_RUN=true" >> $GITHUB_ENV + echo "LATEST=${{ contains(github.event.pull_request.labels.*.name, 'release/latest') }}" >> $GITHUB_ENV + - id: output + run: | + echo "dry_run: $DRY_RUN" + echo "latest: $LATEST" + echo "version: $VERSION" + + echo "dry_run=$DRY_RUN" >> "$GITHUB_OUTPUT" + echo "latest=$LATEST" >> "$GITHUB_OUTPUT" + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + post_changelog_on_forum: + needs: setup + runs-on: ubuntu-latest + steps: + - run: | + echo post changelog to forums for ${{ needs.setup.outputs.version }} + echo dry run: ${{ needs.setup.outputs.dry_run }} + create_github_release: + # a github release requires a git tag + needs: [setup, mirror_tag] + uses: ./.github/workflows/github-release.yml + with: + version: ${{ needs.setup.outputs.version }} + dry_run: ${{ needs.setup.outputs.dry_run == 'true' }} + publish_docs: + needs: setup + runs-on: ubuntu-latest + steps: + - run: | + echo publish docs for ${{ needs.setup.outputs.version }} + echo dry run: ${{ needs.setup.outputs.dry_run }} + post_on_slack: + needs: setup + runs-on: ubuntu-latest + steps: + - run: | + echo announce on slack that ${{ needs.setup.outputs.version }} has been released + echo dry run: ${{ needs.setup.outputs.dry_run }} diff --git a/.github/workflows/release-pr.yml b/.github/workflows/release-pr.yml index 9b0db4be754..17a131c0f34 100644 --- a/.github/workflows/release-pr.yml +++ b/.github/workflows/release-pr.yml @@ -24,6 +24,14 @@ on: required: false default: false type: bool + latest: + required: false + default: false + type: bool + +permissions: + content: write + pull-requests: write jobs: create-prs: @@ -31,12 +39,6 @@ jobs: runs-on: ubuntu-latest if: github.repository == 'grafana/grafana' steps: - - name: Generate token - id: generate_token - uses: tibdex/github-app-token@b62528385c34dbc9f38e5f4225ac829252d1ea92 - with: - app_id: ${{ secrets.GRAFANA_DELIVERY_BOT_APP_ID }} - private_key: ${{ secrets.GRAFANA_DELIVERY_BOT_APP_PEM }} - name: Checkout Grafana uses: actions/checkout@v4 - name: Configure git user @@ -62,12 +64,24 @@ jobs: - name: Create PR without backports if: "${{ inputs.backport == '' }}" run: > - gh pr create --dry-run=${{ inputs.dry_run }} -B "${{ inputs.target }}" --title "Release: ${{ inputs.version }}" --body "These code changes must be merged after a release is complete" + gh pr create \ + $( (( ${{ inputs.latest }} == "true" )) && printf 
%s '-l "release/latest"') \ + --dry-run=${{ inputs.dry_run }} \ + -B "${{ inputs.target }}" \ + --title "Release: ${{ inputs.version }}" \ + --body "These code changes must be merged after a release is complete" env: - GH_TOKEN: ${{ steps.generate_token.outputs.token }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Create PR with backports if: "${{ inputs.backport != '' }}" run: > - gh pr create -l "backport ${{ inputs.backport }}" -l "product-approved" --dry-run=${{ inputs.dry_run }} -B "${{ inputs.target }}" --title "Release: ${{ inputs.version }}" --body "These code changes must be merged after a release is complete" + gh pr create \ + $( (( ${{ inputs.latest }} == "true" )) && printf %s '-l "release/latest"') \ + -l "backport ${{ inputs.backport }}" \ + -l "product-approved" \ + --dry-run=${{ inputs.dry_run }} \ + -B "${{ inputs.target }}" \ + --title "Release: ${{ inputs.version }}" \ + --body "These code changes must be merged after a release is complete" env: - GH_TOKEN: ${{ steps.generate_token.outputs.token }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/conf/defaults.ini b/conf/defaults.ini index 88ed4791098..3f7693ad008 100644 --- a/conf/defaults.ini +++ b/conf/defaults.ini @@ -312,6 +312,9 @@ application_insights_endpoint_url = # Controls if the UI contains any links to user feedback forms feedback_links_enabled = true +# Static context that is being added to analytics events +reporting_static_context = + #################################### Security ############################ [security] # disable creation of admin user on first start of grafana diff --git a/conf/sample.ini b/conf/sample.ini index c9885cda40f..a12b063078f 100644 --- a/conf/sample.ini +++ b/conf/sample.ini @@ -309,6 +309,9 @@ # Controls if the UI contains any links to user feedback forms ;feedback_links_enabled = true +# Static context that is being added to analytics events +;reporting_static_context = grafanaInstance=12, os=linux + #################################### Security #################################### [security] # disable creation of admin user on first start of grafana diff --git a/contribute/developer-guide.md b/contribute/developer-guide.md index bcdb979b790..804daa903e0 100644 --- a/contribute/developer-guide.md +++ b/contribute/developer-guide.md @@ -19,6 +19,7 @@ We recommend using [Homebrew](https://brew.sh/) for installing any missing depen brew install git brew install go brew install node@20 +brew install corepack corepack enable ``` diff --git a/docs/make-docs b/docs/make-docs index f531df2ebb1..170e361431a 100755 --- a/docs/make-docs +++ b/docs/make-docs @@ -6,6 +6,15 @@ # [Semantic versioning](https://semver.org/) is used to help the reader identify the significance of changes. # Changes are relevant to this script and the support docs.mk GNU Make interface. # +# ## 8.0.1 (2024-07-01) +# +# ### Fixed +# +# - Update log suppression to catch new format of website /docs/ homepage REF_NOT_FOUND warnings. +# +# These warnings are related to missing some pages during the build that are required for the /docs/ homepage. +# They were previously suppressed but the log format changed and without this change they reappear in the latest builds. 
+# # ## 8.0.0 (2024-05-28) # # ### Changed @@ -905,7 +914,7 @@ EOF -e '/Press Ctrl+C to stop/ d' \ -e '/make/ d' \ -e '/WARNING: The manual_mount source directory/ d' \ - -e '/docs\/_index.md .* not found/ d' + -e '/"docs\/_index.md" not found/d' fi ;; esac diff --git a/docs/sources/administration/data-source-management/teamlbac/configure-teamlbac-for-loki/index.md b/docs/sources/administration/data-source-management/teamlbac/configure-teamlbac-for-loki/index.md index 7a45e25805a..90926100c78 100644 --- a/docs/sources/administration/data-source-management/teamlbac/configure-teamlbac-for-loki/index.md +++ b/docs/sources/administration/data-source-management/teamlbac/configure-teamlbac-for-loki/index.md @@ -42,4 +42,4 @@ We recommend that you remove all permissions for roles and teams that are not re 1. Navigate to Data Source Permissions - Go to the permissions tab of the newly created Loki data source. Here, you'll find the Team LBAC rules section. -For more information on how to setup Team LBAC rules for a Loki data source, [Add Team LBAC rules]({{< relref "./../create-teamlbac-rules/" >}}). +For more information on how to setup Team LBAC rules for a Loki data source, refer to [Create Team LBAC rules for the Loki data source](https://grafana.com/docs/grafana//administration/data-source-management/teamlbac/create-teamlbac-rules/). diff --git a/docs/sources/administration/provisioning/index.md b/docs/sources/administration/provisioning/index.md index c76903a7461..8c5b82c7efe 100644 --- a/docs/sources/administration/provisioning/index.md +++ b/docs/sources/administration/provisioning/index.md @@ -269,9 +269,8 @@ The _HTTP\*_ tag denotes data sources that communicate using the HTTP protocol, #### Custom HTTP headers for data sources -Data sources managed by Grafanas provisioning can be configured to add HTTP headers to all requests -going to that data source. The header name is configured in the `jsonData` field and the header value should be -configured in `secureJsonData`. +Data sources managed with provisioning can be configured to add HTTP headers to all requests. +The header name is configured in the `jsonData` field and the header value is configured in `secureJsonData`. ```yaml apiVersion: 1 diff --git a/docs/sources/alerting/alerting-rules/create-grafana-managed-rule.md b/docs/sources/alerting/alerting-rules/create-grafana-managed-rule.md index c2b6cf84e48..9ab472e172a 100644 --- a/docs/sources/alerting/alerting-rules/create-grafana-managed-rule.md +++ b/docs/sources/alerting/alerting-rules/create-grafana-managed-rule.md @@ -269,6 +269,8 @@ You can also configure the alert instance state when its evaluation returns an e | Normal | Sets alert instance state to `Normal`. | | Keep Last State | Maintains the alert instance in its last state. Useful for mitigating temporary issues, refer to [Keep last state](ref:keep-last-state). | +When you configure the No data or Error behavior to `Alerting` or `Normal`, Grafana will attempt to keep a stable set of fields under notification `Values`. If your query returns no data or an error, Grafana re-uses the latest known set of fields in `Values`, but will use `-1` in place of the measured value. + ## Create alerts from panels Create alerts from any panel type. This means you can reuse the queries in the panel and create alerts based on them. 
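The provisioning hunk above (`docs/sources/administration/provisioning/index.md`) notes that a provisioned data source sets custom header names in `jsonData` and header values in `secureJsonData`, but the diff context cuts the file's own YAML example short. A minimal sketch of that convention, with a placeholder data source name, type, URL, and header pair, might look like this:

```yaml
# Sketch of a provisioned data source that sends a custom HTTP header with every
# request. The data source name, type, URL, and header values are placeholders;
# the httpHeaderName1/httpHeaderValue1 pairing is the convention the provisioning
# documentation describes.
apiVersion: 1

datasources:
  - name: Example-with-custom-header
    type: prometheus
    url: http://localhost:9090
    jsonData:
      httpHeaderName1: 'X-Custom-Header'
    secureJsonData:
      httpHeaderValue1: 'example-header-value'
```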
diff --git a/docs/sources/alerting/configure-notifications/manage-contact-points/_index.md b/docs/sources/alerting/configure-notifications/manage-contact-points/_index.md index 9df42401846..810d7549eab 100644 --- a/docs/sources/alerting/configure-notifications/manage-contact-points/_index.md +++ b/docs/sources/alerting/configure-notifications/manage-contact-points/_index.md @@ -31,6 +31,11 @@ refs: destination: /docs/grafana//alerting/configure-notifications/manage-contact-points/integrations/configure-email/ - pattern: /docs/grafana-cloud/ destination: /docs/grafana-cloud/alerting-and-irm/alerting/configure-notifications/manage-contact-points/integrations/configure-email/ + discord: + - pattern: /docs/grafana/ + destination: /docs/grafana//alerting/configure-notifications/manage-contact-points/integrations/configure-discord/ + - pattern: /docs/grafana-cloud/ + destination: /docs/grafana-cloud/alerting-and-irm/alerting/configure-notifications/manage-contact-points/integrations/configure-discord/ telegram: - pattern: /docs/grafana/ destination: /docs/grafana//alerting/configure-notifications/manage-contact-points/integrations/configure-telegram/ @@ -41,6 +46,11 @@ refs: destination: /docs/grafana//alerting/configure-notifications/manage-contact-points/integrations/webhook-notifier/ - pattern: /docs/grafana-cloud/ destination: /docs/grafana-cloud/alerting-and-irm/alerting/configure-notifications/manage-contact-points/integrations/webhook-notifier/ + opsgenie: + - pattern: /docs/grafana/ + destination: /docs/grafana//alerting/configure-notifications/manage-contact-points/integrations/configure-opsgenie/ + - pattern: /docs/grafana-cloud/ + destination: /docs/grafana-cloud/alerting-and-irm/alerting/configure-notifications/manage-contact-points/integrations/configure-opsgenie/ pagerduty: - pattern: /docs/grafana/ destination: /docs/grafana//alerting/configure-notifications/manage-contact-points/integrations/pager-duty/ @@ -56,6 +66,11 @@ refs: destination: /docs/grafana//alerting/configure-notifications/manage-contact-points/integrations/configure-slack/ - pattern: /docs/grafana-cloud/ destination: /docs/grafana-cloud/alerting-and-irm/alerting/configure-notifications/manage-contact-points/integrations/configure-slack/ + teams: + - pattern: /docs/grafana/ + destination: /docs/grafana//alerting/configure-notifications/manage-contact-points/integrations/configure-teams/ + - pattern: /docs/grafana-cloud/ + destination: /docs/grafana-cloud/alerting-and-irm/alerting/configure-notifications/manage-contact-points/integrations/configure-teams/ external-alertmanager: - pattern: /docs/grafana/ destination: /docs/grafana//alerting/set-up/configure-alertmanager/ @@ -144,14 +159,14 @@ The following table lists the contact point integrations supported by Grafana. 
| Alertmanager | `prometheus-alertmanager` | | Cisco Webex Teams | `webex` | | DingDing | `dingding` | -| Discord | `discord` | +| [Discord](ref:discord) | `discord` | | [Email](ref:email) | `email` | | Google Chat | `googlechat` | | [Grafana Oncall](ref:oncall) | `oncall` | | Kafka REST Proxy | `kafka` | | Line | `line` | -| Microsoft Teams | `teams` | -| Opsgenie | `opsgenie` | +| [Microsoft Teams](ref:teams) | `teams` | +| [Opsgenie](ref:opsgenie) | `opsgenie` | | [Pagerduty](ref:pagerduty) | `pagerduty` | | Pushover | `pushover` | | Sensu Go | `sensugo` | diff --git a/docs/sources/alerting/configure-notifications/manage-contact-points/integrations/configure-discord.md b/docs/sources/alerting/configure-notifications/manage-contact-points/integrations/configure-discord.md new file mode 100644 index 00000000000..de6a7f43de6 --- /dev/null +++ b/docs/sources/alerting/configure-notifications/manage-contact-points/integrations/configure-discord.md @@ -0,0 +1,57 @@ +--- +canonical: https://grafana.com/docs/grafana/latest/alerting/configure-notifications/manage-contact-points/integrations/configure-discord/ +description: Configure the Discord integration to receive notifications when your alerts are firing +keywords: + - grafana + - alerting + - Discord + - integration +labels: + products: + - cloud + - enterprise + - oss +menuTitle: Discord +title: Configure Discord for Alerting +weight: 300 +--- + +# Configure Discord for Alerting + +Use the Grafana Alerting - Discord integration to receive alert notifications in your Discord channels when your Grafana alert rules are triggered and resolved. + +## Before you begin + +Create a Webhook to enable Grafana to send alert notifications to Discord channels. +To create a Webhook in Discord, complete the following steps. + +1. Follow the steps in the [Intro to Webhooks guide](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks). +1. Copy the Webhook URL. + +## Procedure + +To create your Discord integration in Grafana Alerting, complete the following steps. + +1. Navigate to **Alerts & IRM** -> **Alerting** -> **Contact points**. +1. Click **+ Add contact point**. +1. Enter a contact point name. +1. From the Integration list, select **Discord**. +1. In the **Webhook URL** field, paste in your Webhook URL. +1. Click **Test** to check that your integration works. + + A test alert notification should be sent to the Discord channel that you associated with the Webhook. + +1. Click **Save contact point**. + +## Next steps + +The Discord contact point is ready to receive alert notifications. + +To add this contact point to your alert, complete the following steps. + +1. In Grafana, navigate to **Alerting** > **Alert rules**. +1. Edit or create a new alert rule. +1. Scroll down to the **Configure labels and notifications** section. +1. Under **Notifications** click **Select contact point**. +1. From the drop-down menu, select the previously created contact point. +1. Click **Save rule and exit**. 
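The new Discord page above configures the contact point through the UI using a webhook URL. For completeness, a rough sketch of the same contact point expressed in Grafana's alerting file-provisioning format is shown below; the contact point name, `uid`, and webhook URL are placeholders, and the settings keys should be verified against the alerting provisioning reference.

```yaml
# Rough sketch: a Discord contact point defined through alerting file provisioning
# instead of the UI. The name, uid, and webhook URL are placeholders.
apiVersion: 1

contactPoints:
  - orgId: 1
    name: discord-alerts
    receivers:
      - uid: discord-alerts-1
        type: discord
        settings:
          url: https://discord.com/api/webhooks/<webhook-id>/<webhook-token>
```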
diff --git a/docs/sources/alerting/configure-notifications/manage-contact-points/integrations/configure-opsgenie.md b/docs/sources/alerting/configure-notifications/manage-contact-points/integrations/configure-opsgenie.md new file mode 100644 index 00000000000..dfca507ddb9 --- /dev/null +++ b/docs/sources/alerting/configure-notifications/manage-contact-points/integrations/configure-opsgenie.md @@ -0,0 +1,62 @@ +--- +canonical: https://grafana.com/docs/grafana/latest/alerting/configure-notifications/manage-contact-points/integrations/configure-opsgenie/ +description: Configure the Opsgenie integration to receive notifications when your alerts are firing +keywords: + - grafana + - alerting + - Opsgenie + - integration +labels: + products: + - cloud + - enterprise + - oss +menuTitle: Opsgenie +title: Configure Opsgenie for Alerting +weight: 300 +--- + +# Configure Opsgenie for Alerting + +Use the Grafana Alerting - Opsgenie integration to receive alert notifications in your Opsgenie alert dashboard when your Grafana alert rules are triggered and resolved. + +## Before you begin + +Create an API key to enable Grafana to send alert notifications to Opsgenie alert dashboard. + +To create an API key in Opsgenie, complete the following steps. + +1. Follow the steps in the [API integration guide](https://support.atlassian.com/opsgenie/docs/create-a-default-api-integration/). + + Make sure you turn on the integration. + +1. Copy the API key. + +## Procedure + +To create your Opsgenie integration in Grafana Alerting, complete the following steps. + +1. Navigate to **Alerts & IRM** -> **Alerting** -> **Contact points**. +1. Click **+ Add contact point**. +1. Enter a contact point name. +1. From the **Integration** list, select **Opsgenie**. +1. In the **API key** field, paste in your API key. +1. In the **Alert API URL**, enter `https://api.opsgenie.com/v2/alerts`. +1. Click **Test** to check that your integration works. + + A test alert notification is sent to the Alerts page in Opsgenie. + +1. Click **Save contact point**. + +## Next steps + +The Opsgenie contact point is ready to receive alert notifications. + +To add this contact point to your alert rule, complete the following steps: + +1. In Grafana, navigate to **Alerting** > **Alert rules**. +1. Edit or create a new alert rule. +1. Scroll down to the **Configure labels and notifications** section. +1. Under **Notifications**, click **Select contact point**. +1. From the drop-down menu, select the previously created contact point. +1. Click **Save rule and exit**. diff --git a/docs/sources/alerting/configure-notifications/manage-contact-points/integrations/configure-telegram.md b/docs/sources/alerting/configure-notifications/manage-contact-points/integrations/configure-telegram.md index ca798085037..d7e47e08e79 100644 --- a/docs/sources/alerting/configure-notifications/manage-contact-points/integrations/configure-telegram.md +++ b/docs/sources/alerting/configure-notifications/manage-contact-points/integrations/configure-telegram.md @@ -29,7 +29,7 @@ For longer messages, we recommend using an alternative contact method. ### Telegram bot API token and chat ID -To integrate Grafana with Telegram, you need to obtain a Telegram **bot API token** and a **chat ID** (i.e., the ID of the Telegram chat where you want to receive the alert notifications). +To integrate Grafana with Telegram, you need to get a Telegram **bot API token** and a **chat ID** (the ID of the Telegram chat where you want to receive the alert notifications). 
To complete the integration, use the browser version of Telegram. ### Set up your Telegram bot @@ -49,29 +49,9 @@ Add the bot to a group chat by following the steps below. Once the bot is added 1. In the Telegram app, **open a group or start a new one**. 1. Search and **add the bot to the group**. -1. **Interact with the bot** by sending a dummy message that starts with "`/`". E.g. `/hola @bot_name`. +1. Copy the **chat ID** from the URL in your browser's address bar. It should look like this: `https://web.telegram.org/a/#-4266674385`. - {{< figure src="/media/blog/telegram-grafana-alerting/telegram-screenshot.png" alt="A screenshot that shows a message to a Telegram bot." >}} - -1. To obtain the **chat ID**, send an [HTTP request](https://core.telegram.org/bots/api#getupdates) to the bot. Copy the below URL and replace `{your_bot_api_token}` with your bot API token. - - ``` - https://api.telegram.org/bot{your_bot_api_token}/getUpdates - ``` - -1. **Paste the URL in your browser**. -1. If the request is successful, it will return a response in JSON format. - - ``` - ... - "chat": { - "id": -4065678900, - "title": "Tony and Hello world bot", - "type": "group", - ... - ``` - -1. Copy the value of the `“id”` that appears under `“chat”`. + The chat ID is the sequence of numbers that follows the `#` symbol. For example: `-4266674385`. ## Procedure diff --git a/docs/sources/alerting/fundamentals/alert-rules/annotation-label.md b/docs/sources/alerting/fundamentals/alert-rules/annotation-label.md index ca46da11579..59fa9ba0e22 100644 --- a/docs/sources/alerting/fundamentals/alert-rules/annotation-label.md +++ b/docs/sources/alerting/fundamentals/alert-rules/annotation-label.md @@ -104,7 +104,7 @@ Labels prefixed with `grafana_` are reserved by Grafana for special use. To stop {{}} -Two alert rules cannot produce alert instances with the same labels. If two alert rules have the same labels such as `foo=bar,bar=baz` and `foo=bar,bar=baz` then one of the generated alert instances will be discarded. +Two alert rules cannot produce alert instances with the same labels. If two alert rules have the same labels such as `foo=bar,bar=baz` and `foo=bar,bar=baz` then one of the generated alert instances is discarded. Ensure the label set for an alert does not have two or more labels with the same name. @@ -114,17 +114,19 @@ Ensure the label set for an alert does not have two or more labels with the same {{< collapse title="Label key format" >}} -Grafana's built-in Alertmanager supports both Unicode label keys and values. If you are using an external Prometheus Alertmanager, label keys must be compatible with their [data model](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). -This means that label keys must only contain **ASCII letters**, **numbers**, as well as **underscores** and match the regex `[a-zA-Z_][a-zA-Z0-9_]*`. -Any invalid characters will be removed or replaced by the Grafana alerting engine before being sent to the external Alertmanager according to the following rules: +Grafana has a built-in Alertmanager that supports both Unicode label keys and values. If you are using an external Prometheus Alertmanager, label keys must be compatible with their [data model](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels). +This means that label keys must only contain _ASCII letters_, _numbers_, and _underscores_. +Label keys must also be matched by the regular expression `[a-zA-Z_][a-zA-Z0-9_]*`. 
+Any invalid characters are removed or replaced by the Grafana alerting engine before being sent to the external Alertmanager according to the following rules: -- `Whitespace` will be removed. -- `ASCII characters` will be replaced with `_`. -- `All other characters` will be replaced with their lower-case hex representation. If this is the first character it will be prefixed with `_`. +- Whitespace is removed. +- ASCII characters are replaced with `_`. +- All other characters are replaced with their lower-case hex representation. + If this is the first character it's prefixed with `_`. Example: A label key/value pair `Alert! 🔔="🔥"` will become `Alert_0x1f514="🔥"`. -If multiple label keys are sanitized to the same value, the duplicates will have a short hash of the original label appended as a suffix. +If multiple label keys are sanitized to the same value, the duplicates have a short hash of the original label appended as a suffix. {{< /collapse >}} @@ -132,7 +134,7 @@ If multiple label keys are sanitized to the same value, the duplicates will have The purpose of annotations is to add additional information to alert instances, such as extra details for notification messages. -Grafana provides several optional annotations that you can edit for use in notification messages and within Grafana: +Grafana provides several optional annotations that you can edit for use in notification messages and within Grafana. - `summary`: A short summary of what the alert has detected and why. - `description`: A detailed description of what happened and what the alert does. diff --git a/docs/sources/dashboards/create-reports/index.md b/docs/sources/dashboards/create-reports/index.md index 8179c651dc8..b356482d3fc 100644 --- a/docs/sources/dashboards/create-reports/index.md +++ b/docs/sources/dashboards/create-reports/index.md @@ -76,9 +76,9 @@ refs: destination: /docs/grafana-cloud/visualizations/dashboards/use-dashboards/#set-dashboard-time-range send-report: - pattern: /docs/grafana/ - destination: /docs/grafana// + destination: /docs/grafana//developers/http_api/reporting/#send-a-report - pattern: /docs/grafana-cloud/ - destination: /docs/grafana// + destination: /docs/grafana-cloud/developer-resources/api-reference/http-api/reporting/#send-a-report smtp: - pattern: /docs/grafana/ destination: /docs/grafana//setup-grafana/configure-grafana/#smtp diff --git a/docs/sources/datasources/pyroscope/_index.md b/docs/sources/datasources/pyroscope/_index.md index 6395b7e99c2..072803da3b6 100644 --- a/docs/sources/datasources/pyroscope/_index.md +++ b/docs/sources/datasources/pyroscope/_index.md @@ -21,24 +21,24 @@ weight: 1150 refs: flame-graph: - pattern: /docs/grafana/ - destination: /docs/grafana//panels-visualizations/visualizations/flame-graph/ + destination: https://grafana.com/docs/grafana//panels-visualizations/visualizations/flame-graph/ - pattern: /docs/grafana-cloud/ - destination: /docs/grafana//panels-visualizations/visualizations/flame-graph/ + destination: https://grafana.com/docs/grafana-cloud/visualizations/panels-visualizations/visualizations/flame-graph/ configure-tempo-data-source: - pattern: /docs/grafana/ - destination: /docs/grafana//datasources/tempo/configure-tempo-data-source/ + destination: https://grafana.com/docs/grafana//datasources/tempo/configure-tempo-data-source/ - pattern: /docs/grafana-cloud/ - destination: docs/grafana-cloud/connect-externally-hosted/data-sources/tempo/configure-tempo-data-source/ + destination: 
https://grafana.com/docs/grafana-cloud/connect-externally-hosted/data-sources/tempo/configure-tempo-data-source/ explore: - pattern: /docs/grafana/ - destination: /docs/grafana//explore/ + destination: https://grafana.com/docs/grafana//explore/ - pattern: /docs/grafana-cloud/ - destination: /docs/grafana//explore/ + destination: https://grafana.com/docs/grafana//explore/ provisioning-data-sources: - pattern: /docs/grafana/ - destination: /docs/grafana//administration/provisioning/#datasources + destination: https://grafana.com/docs/grafana//administration/provisioning/#datasources - pattern: /docs/grafana-cloud/ - destination: /docs/grafana//administration/provisioning/#datasources + destination: https://grafana.com/docs/grafana//administration/provisioning/#datasources --- # Grafana Pyroscope data source @@ -73,9 +73,9 @@ For more information, refer to the [Traces to profile section](ref:configure-tem {{< youtube id="AG8VzfFMLxo" >}} -## Provision the Grafana Pyroscope data source +## Provision the Pyroscope data source -You can modify the Grafana configuration files to provision the Grafana Pyroscope data source. +You can modify the Grafana configuration files to provision the Pyroscope data source. To learn more, and to view the available provisioning settings, refer to [provisioning documentation](ref:provisioning-data-sources). Here is an example configuration: diff --git a/docs/sources/datasources/pyroscope/configure-pyroscope-data-source.md b/docs/sources/datasources/pyroscope/configure-pyroscope-data-source.md index 20f0796eb47..5d382cca447 100644 --- a/docs/sources/datasources/pyroscope/configure-pyroscope-data-source.md +++ b/docs/sources/datasources/pyroscope/configure-pyroscope-data-source.md @@ -42,16 +42,89 @@ To configure basic settings for the data source, complete the following steps: 1. Click **Connections** in the left-side menu. 1. Under Your connections, click **Data sources**. 1. Enter `Grafana Pyroscope` in the search bar. +1. Select **Add new data source**. 1. Click **Grafana Pyroscope** to display the **Settings** tab of the data source. +1. Set the data source's basic configuration options. +1. Select **Save & test**. -1. Set the data source's basic configuration options: +## Configuration options - | Name | Description | - | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | - | `Name` | A name to specify the data source in panels, queries, and Explore. | - | `Default` | The default data source will be pre-selected for new panels. | - | `URL` | The URL of the Grafana Pyroscope instance, for example, `http://localhost:4100`. | - | `Basic Auth` | Enable basic authentication to the data source. | - | `User` | User name for basic authentication. | - | `Password` | Password for basic authentication. | - | `Minimal step` | Used for queries returning timeseries data. The Pyroscope backend, similar to Prometheus, scrapes profiles at certain intervals. To prevent querying at smaller interval, use Minimal step same or higher than your Pyroscope scrape interval. This prevents returning too many data points to the frontend. | +You can configure several options for the Pyroscope data source, including the name, HTTP, authentication, querying, and private data source connect. 
+ +If you make any changes, select **Save & test** to preserve those changes. + +![Configuration options for the Pyroscope data source](/media/docs/grafana/data-sources/screenshot-pyroscope-data-source-config.png) + +### Name and default + +**Name** +: Enter a name to specify the data source in panels, queries, and Explore. + +**Default** +: The default data source is pre-selected for new panels. + +### HTTP + +The HTTP section is shown in number 1 in the screenshot. + +**URL** +: The URL of the Grafana Pyroscope instance, for example, `https://localhost:4100`. + +**Allowed cookies** +: The Grafana Proxy deletes forwarded cookies. Use this field to specify cookies by name that should be forwarded to the data source. + +**Timeout** +: HTTP request timeout in seconds. + +### Auth + +The Auth section is shown in number 2 in the screenshot. + +**Basic auth** +: Enable basic authentication to the data source. When activated, it provides **User** and **Password** fields. + +**With Credentials** +: Whether credentials, such as cookies or auth headers, should be sent with cross-site requests. + +**TLS Client Auth** +: Toggle on to use client authentication. When enabled, it adds the **Server name**, **Client cert**, and **Client key** fields. The client provides a certificate that is validated by the server to establish the client's trusted identity. The client key encrypts the data between client and server. These details are encrypted and stored in the Grafana database. + +**With CA Cert** +: Activate this option to verify self-signed TLS certificates. + +**Skip TLS Verify** +: When activated, it bypasses TLS certificate verification. + +**Forward OAuth Identity** +: When activated, the user’s upstream OAuth 2.0 identity is forwarded to the data source along with their access token. + +**Custom HTTP Headers** +: Select Add header to add Header and Value fields. + +**Header** +: Add a custom header. This allows custom headers to be passed based on the needs of your Pyroscope instance. + +**Value** +: The value of the header. + +### Querying + +The **Querying** section is shown in number 3 in the screenshot. + +**Minimum step** is used for queries returning time-series data. The default value is 15 seconds. + +Adjusting this option can help prevent gaps when you zoom in to profiling data. + +### Private data source connect + +The **Private data source connect** section is shown in number 4 in the screenshot. + +This feature is only available in Grafana Cloud. + +This option lets you query data that lives within a secured network without opening the network to inbound traffic from Grafana Cloud. + +Use the drop-down box to select a configured private data sources. + +Select **Manage private data source connect** to configure and manage any private data sources you have configured. + +For more information, refer to [Private data source connect](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/). 
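The options documented above (URL, authentication, and minimum step) can also be set through data source provisioning, which the companion `_index.md` hunk earlier in this diff mentions. A minimal provisioning sketch, with the data source name, URL, and minimum step as placeholder values, might look like this:

```yaml
# Minimal sketch of a provisioned Pyroscope data source using the options described
# above. The name, URL, and minStep value are placeholders.
apiVersion: 1

datasources:
  - name: Pyroscope
    type: grafana-pyroscope-datasource
    url: http://localhost:4100
    basicAuth: false
    jsonData:
      minStep: '15s'
```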
diff --git a/docs/sources/datasources/tempo/_index.md b/docs/sources/datasources/tempo/_index.md index 645168ba7cc..8b2123b5cd4 100644 --- a/docs/sources/datasources/tempo/_index.md +++ b/docs/sources/datasources/tempo/_index.md @@ -31,22 +31,22 @@ refs: - pattern: /docs/grafana/ destination: /docs/grafana//panels-visualizations/visualizations/node-graph/ - pattern: /docs/grafana-cloud/ - destination: /docs/grafana//panels-visualizations/visualizations/node-graph/ + destination: https://grafana.com/docs/grafana-cloud/visualizations/panels-visualizations/visualizations/node-graph/ configure-tempo-data-source: - pattern: /docs/grafana/ - destination: /docs/grafana//datasources/tempo/configure-tempo-data-source/#provision-the-data-source + destination: https://grafana.com/docs/grafana//datasources/tempo/configure-tempo-data-source/#provision-the-data-source - pattern: /docs/grafana-cloud/ - destination: docs/grafana-cloud/connect-externally-hosted/data-sources/tempo/configure-tempo-data-source/#provision-the-data-source + destination: https://grafana.com/docs/grafana-cloud/connect-externally-hosted/data-sources/tempo/configure-tempo-data-source/ exemplars: - pattern: /docs/grafana/ - destination: /docs/grafana//fundamentals/exemplars/ + destination: https://grafana.com/docs/grafana//fundamentals/exemplars/ - pattern: /docs/grafana-cloud/ - destination: /docs/grafana//fundamentals/exemplars/ + destination: https://grafana.com/docs/grafana//fundamentals/exemplars/ variable-syntax: - pattern: /docs/grafana/ destination: /docs/grafana//dashboards/variables/variable-syntax/ - pattern: /docs/grafana-cloud/ - destination: /docs/grafana//dashboards/variables/variable-syntax/ + destination: https://grafana.com/docs/grafana-cloud/visualizations/dashboards/variables/variable-syntax/ explore-trace-integration: - pattern: /docs/grafana/ destination: /docs/grafana//explore/trace-integration/ diff --git a/docs/sources/developers/plugins/plugin.schema.json b/docs/sources/developers/plugins/plugin.schema.json index 71e3b963214..1a3154e6b11 100644 --- a/docs/sources/developers/plugins/plugin.schema.json +++ b/docs/sources/developers/plugins/plugin.schema.json @@ -179,7 +179,7 @@ "items": { "type": "object", "description": "Plugin dependency. Used to display information about plugin dependencies in the Grafana UI.", - "required": ["id", "name", "type", "version"], + "required": ["id", "name", "type"], "properties": { "id": { "type": "string", @@ -193,7 +193,8 @@ "type": "string" }, "version": { - "type": "string" + "type": "string", + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)$" } } } diff --git a/docs/sources/panels-visualizations/query-transform-data/expression-queries/index.md b/docs/sources/panels-visualizations/query-transform-data/expression-queries/index.md index 80765e803bd..f4618e298a8 100644 --- a/docs/sources/panels-visualizations/query-transform-data/expression-queries/index.md +++ b/docs/sources/panels-visualizations/query-transform-data/expression-queries/index.md @@ -124,7 +124,7 @@ So if you have numbers with labels like `{host=web01}` in `$A` and another numbe - An item with no labels will join to anything. - If both `$A` and `$B` each contain only one item (one series, or one number), they will join. -- If labels are exact math they will join. +- If labels are exact match they will join. - If labels are a subset of the other, for example and item in `$A` is labeled `{host=A,dc=MIA}` and item in `$B` is labeled `{host=A}` they will join. 
- Currently, if within a variable such as `$A` there are different tag _keys_ for each item, the join behavior is undefined. diff --git a/docs/sources/setup-grafana/configure-grafana/configure-tracing/index.md b/docs/sources/setup-grafana/configure-grafana/configure-tracing/index.md index 50e0e6ea702..ef36abfc2cd 100644 --- a/docs/sources/setup-grafana/configure-grafana/configure-tracing/index.md +++ b/docs/sources/setup-grafana/configure-grafana/configure-tracing/index.md @@ -2,31 +2,48 @@ aliases: - ../../troubleshooting/diagnostics/ - ../enable-diagnostics/ -description: Learn how to configure tracing so that you can troubleshoot Grafana. +description: Learn how to configure profiling and tracing so that you can troubleshoot Grafana. +keywords: + - grafana + - troubleshooting + - documentation + - guide labels: products: - enterprise - oss -menuTitle: Configure tracing -title: Configure tracing to troubleshoot Grafana +menuTitle: Configure profiling and tracing +title: Configure profiling and tracing to troubleshoot Grafana weight: 200 --- -# Configure tracing to troubleshoot Grafana +# Configure profiling and tracing to troubleshoot Grafana You can set up the `grafana-server` process to enable certain diagnostics when it starts. This can be useful when investigating certain performance problems. It's _not_ recommended to have these enabled by default. -## Turn on profiling +## Turn on profiling and collect profiles The `grafana-server` can be started with the command-line option `-profile` to enable profiling, `-profile-addr` to override the default HTTP address (`localhost`), and -`-profile-port` to override the default HTTP port (`6060`) where the `pprof` debugging endpoints are available. For example: +`-profile-port` to override the default HTTP port (`6060`) where the `pprof` debugging endpoints are available. Further, [`-profile-block-rate`](https://pkg.go.dev/runtime#SetBlockProfileRate) controls the fraction of goroutine blocking events that are reported in the blocking profile, default `1` (i.e. track every event) for backward compatibility reasons, and [`-profile-mutex-rate`](https://pkg.go.dev/runtime#SetMutexProfileFraction) controls the fraction of mutex contention events that are reported in the mutex profile, default `0` (i.e. track no events). The higher the fraction (that is, the smaller this value), the more overhead it adds to normal operations. + +Running Grafana with profiling enabled, but without block and mutex profiling, should only add a small amount of overhead and is suitable for [continuous profiling](https://grafana.com/oss/pyroscope/). Adding a small fraction of block and mutex profiling, such as a rate value of 10 to 5 (10%-20% of events), should in general be fine. + +Enable profiling: ```bash ./grafana server -profile -profile-addr=0.0.0.0 -profile-port=8080 ``` -Note that `pprof` debugging endpoints are served on a different port than the Grafana HTTP server. +Enable profiling with block and mutex profiling enabled with a fraction of 20%: + +```bash +./grafana server -profile -profile-addr=0.0.0.0 -profile-port=8080 -profile-block-rate=5 -profile-mutex-rate=5 +``` + +Note that `pprof` debugging endpoints are served on a different port than the Grafana HTTP server. Check what debugging endpoints are available by browsing `http://<profile-addr>:<profile-port>/debug/pprof`. + +There are some additional [godeltaprof](https://github.com/grafana/pyroscope-go/tree/main/godeltaprof) endpoints available which are more suitable in a continuous profiling scenario.
These endpoints are `/debug/pprof/delta_heap`, `/debug/pprof/delta_block`, `/debug/pprof/delta_mutex`. You can configure or override profiling settings using environment variables: @@ -34,9 +51,41 @@ You can configure or override profiling settings using environment variables: ```bash export GF_DIAGNOSTICS_PROFILING_ENABLED=true export GF_DIAGNOSTICS_PROFILING_ADDR=0.0.0.0 export GF_DIAGNOSTICS_PROFILING_PORT=8080 +export GF_DIAGNOSTICS_PROFILING_BLOCK_RATE=5 +export GF_DIAGNOSTICS_PROFILING_MUTEX_RATE=5 ``` -Refer to [Go command pprof](https://golang.org/cmd/pprof/) for more information about how to collect and analyze profiling data. +In general, you use the [Go command pprof](https://golang.org/cmd/pprof/) to both collect and analyze profiling data. You can also use [curl](https://curl.se/) or similar to collect profiles, which can be convenient in environments where you don't have the Go/pprof command available. The following examples show how to use curl and pprof to collect and analyze memory and CPU profiles. + +**Analyzing high memory usage/memory leaks:** + +When experiencing high memory usage or potential memory leaks, it's useful to collect several heap profiles and compare them later during analysis. It's a good idea to wait some time, e.g. 30 seconds, between collecting each profile to allow memory consumption to increase. + +```bash +curl http://<profile-addr>:<profile-port>/debug/pprof/heap > heap1.pprof +sleep 30 +curl http://<profile-addr>:<profile-port>/debug/pprof/heap > heap2.pprof +``` + +You can then use the pprof tool to compare the two heap profiles: + +```bash +go tool pprof -http=localhost:8081 --base heap1.pprof heap2.pprof +``` + +**Analyzing high CPU usage:** + +When experiencing high CPU usage, it's suggested to collect CPU profiles over a period of time, e.g. 30 seconds. + +```bash +curl 'http://<profile-addr>:<profile-port>/debug/pprof/profile?seconds=30' > profile.pprof +``` + +You can then use the pprof tool to analyze the collected CPU profile: + +```bash +go tool pprof -http=localhost:8081 profile.pprof +``` ## Use tracing diff --git a/docs/sources/setup-grafana/configure-security/configure-authentication/saml-ui/index.md b/docs/sources/setup-grafana/configure-security/configure-authentication/saml-ui/index.md index 206e5e2104b..2dcfbc23eaf 100644 --- a/docs/sources/setup-grafana/configure-security/configure-authentication/saml-ui/index.md +++ b/docs/sources/setup-grafana/configure-security/configure-authentication/saml-ui/index.md @@ -44,23 +44,41 @@ To follow this guide, you need: - Grafana instance running Grafana version 10.0 or later with [Grafana Enterprise]({{< relref "../../../../introduction/grafana-enterprise" >}}) or [Grafana Cloud Pro or Advanced](/docs/grafana-cloud/) license. -## Steps +{{% admonition type="note" %}} +It is possible to set up Grafana with SAML authentication using Azure AD. However, if an Azure AD user belongs to more than 150 groups, a Graph API endpoint is shared instead. -Follow these steps to configure and enable SAML integration: +Grafana versions 11.1 and below do not support fetching the groups from the Graph API endpoint. As a result, users with more than 150 groups will not be able to retrieve their groups. Instead, it is recommended that you use OIDC/OAuth workflows. + +As of Grafana 11.2, the SAML integration offers a mechanism to retrieve user groups from the Graph API.
+ +Related links: + +- [Azure AD SAML limitations](https://learn.microsoft.com/en-us/entra/identity-platform/id-token-claims-reference#groups-overage-claim) +- [Set up SAML with Azure AD]({{< relref "../saml#set-up-saml-with-azure-ad" >}}) +- [Configure a Graph API application in Azure AD]({{< relref "../saml#configure-a-graph-api-application-in-azure-ad" >}}) + {{% /admonition %}} + +## Steps To Configure SAML Authentication + +Sign in to Grafana and navigate to **Administration > Authentication > Configure SAML**. + +### 1. General Settings Section -1. Sign in to Grafana and navigate to **Administration > Authentication > Configure SAML**. 1. Complete the **General settings** fields. For assistance, consult the following table for additional guidance about certain fields: -| Field | Description | -| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **Allow signup** | If enabled, you can create new users through the SAML login. If disabled, then only existing Grafana users can log in with SAML. | -| **Auto login** | If enabled, Grafana will attempt to automatically log in with SAML skipping the login screen. | -| **Single logout** | The SAML single logout feature enables users to log out from all applications associated with the current IdP session established using SAML SSO. For more information, refer to [SAML single logout documentation]]({{< relref "../saml#single-logout" >}}). | -| **Identity provider initiated login** | Enables users to log in to Grafana directly from the SAML IdP. For more information, refer to [IdP initiated login documentation]({{< relref "../saml#idp-initiated-single-sign-on-sso" >}}). | + | Field | Description | + | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | + | **Allow signup** | If enabled, you can create new users through the SAML login. If disabled, then only existing Grafana users can log in with SAML. | + | **Auto login** | If enabled, Grafana will attempt to automatically log in with SAML skipping the login screen. | + | **Single logout** | The SAML single logout feature enables users to log out from all applications associated with the current IdP session established using SAML SSO. For more information, refer to [SAML single logout documentation]]({{< relref "../saml#single-logout" >}}). | + | **Identity provider initiated login** | Enables users to log in to Grafana directly from the SAML IdP. For more information, refer to [IdP initiated login documentation]({{< relref "../saml#idp-initiated-single-sign-on-sso" >}}). | + +1. Click **Next: Key and certificate**. + +### 2. Key and Certificate Section -3. Click **Next: Key and certificate**. 1. Provide a certificate and a private key that will be used by the service provider (Grafana) and the SAML IdP. Use the [PKCS #8](https://en.wikipedia.org/wiki/PKCS_8) format to issue the private key. 
@@ -71,8 +89,24 @@ Follow these steps to configure and enable SAML integration: The SAML standard recommends using a digital signature for some types of messages, like authentication or logout requests to avoid [man-in-the-middle attacks](https://en.wikipedia.org/wiki/Man-in-the-middle_attack). -1. Click **Next: Connect Grafana with Identity Provider** and complete the section. +1. Click **Next: Connect Grafana with Identity Provider**. + +### 3. Connect Grafana with Identity Provider Section + +1. Configure IdP using Grafana Metadata + 1. Copy the **Metadata URL** and provide it to your SAML IdP to establish a connection between Grafana and the IdP. + - The metadata URL contains all the necessary information for the IdP to establish a connection with Grafana. + 1. Copy the **Assertion Consumer Service URL** and provide it to your SAML IdP. + - The Assertion Consumer Service URL is the endpoint where the IdP sends the SAML assertion after the user has been authenticated. + 1. If you want to use the **Single Logout** feature, copy the **Single Logout Service URL** and provide it to your SAML IdP. +1. Finish configuring Grafana using IdP data + 1. Provide IdP Metadata to Grafana. + - The metadata contains all the necessary information for Grafana to establish a connection with the IdP. + - This can be provided as a Base64-encoded value, a path to a file, or a URL. 1. Click **Next: User mapping**. + +### 4. User Mapping Section + 1. If you wish to [map user information from SAML assertions]({{< relref "../saml#assertion-mapping" >}}), complete the **Assertion attributes mappings** section. You also need to configure the **Groups attribute** field if you want to use team sync. Team sync automatically maps users to Grafana teams based on their SAML group membership. @@ -86,6 +120,12 @@ Follow these steps to configure and enable SAML integration: Role mapping will automatically update user's [basic role]({{< relref "../../../../administration/roles-and-permissions/access-control#basic-roles" >}}) based on their SAML roles every time the user logs in to Grafana. Learn more about [SAML role synchronization]({{< relref "../saml#configure-role-sync" >}}). +1. If you're setting up Grafana with Azure AD using the SAML protocol and want to fetch user groups from the Graph API, complete the **Azure AD Service Account Configuration** subsection. + 1. Set up a service account in Azure AD and provide the necessary details in the **Azure AD Service Account Configuration** section. + 1. Provide the **Client ID** of your Azure AD application. + 1. Provide the **Client Secret** of your Azure AD application; the **Client Secret** is used to request an access token from Azure AD. + 1. Provide the Azure AD request **Access Token URL**. + 1. Even if you don't have users with more than 150 groups, you can still force the use of the Graph API by enabling the **Force use Graph API** toggle. 1. If you have multiple organizations and want to automatically add users to organizations, complete the **Org mapping section**. First, you need to configure the **Org attribute** field to specify which SAML attribute should be used to retrieve SAML organization information. @@ -96,8 +136,12 @@ Follow these steps to configure and enable SAML integration: Learn more about [SAML organization mapping]({{< relref "../saml#configure-organization-mapping" >}}). 1. If you want to limit the access to Grafana based on user's SAML organization membership, fill in the **Allowed organizations** field. -1.
Click **Next: Test and enable** and then click **Save and enable**. - 1. If there are issues with your configuration, an error message will appear. Refer back to the previous steps to correct the issues and click on `Save and apply` on the top right corner once you are done. +1. Click **Next: Test and enable**. + +### 5. Test and Enable Section + +1. Click **Save and enable**. + - If there are issues with your configuration, an error message will appear. Refer back to the previous steps to correct the issues and click `Save and apply` in the top right corner once you are done. 1. If there are no configuration issues, SAML integration status will change to `Enabled`. Your SAML configuration is now enabled. 1. To disable SAML integration, click `Disable` in the top right corner. diff --git a/docs/sources/troubleshooting/_index.md b/docs/sources/troubleshooting/_index.md index df54aed9882..77582c18678 100644 --- a/docs/sources/troubleshooting/_index.md +++ b/docs/sources/troubleshooting/_index.md @@ -56,6 +56,10 @@ sudo yum install freetype* sudo yum install urw-fonts ``` +## Troubleshoot backend performance + +If you're experiencing backend performance problems, such as high memory or CPU usage, refer to [Configure profiling and tracing to troubleshoot Grafana]({{< relref "../setup-grafana/configure-grafana/configure-tracing/index.md" >}}). + ## More help Check out the [Grafana Community](https://community.grafana.com/) for more troubleshooting help (you must be logged in to post or comment). diff --git a/e2e/scenes/various-suite/frontend-sandbox-datasource.spec.ts b/e2e/scenes/various-suite/frontend-sandbox-datasource.spec.ts index 40ec3eace50..651ba170db9 100644 --- a/e2e/scenes/various-suite/frontend-sandbox-datasource.spec.ts +++ b/e2e/scenes/various-suite/frontend-sandbox-datasource.spec.ts @@ -87,6 +87,9 @@ describe.skip('Datasource sandbox', () => { e2e.components.DataSourcePicker.container().should('be.visible').click(); cy.contains(DATASOURCE_TYPED_NAME).scrollIntoView().should('be.visible').click(); + // make sure the datasource was correctly selected and rendered + e2e.components.Breadcrumbs.breadcrumb(DATASOURCE_TYPED_NAME).should('be.visible'); + cy.wait(300); // wait to prevent false positives because cypress checks too fast cy.get(`div[data-plugin-sandbox="${DATASOURCE_ID}"]`).should('not.exist'); }); @@ -95,6 +98,8 @@ describe.skip('Datasource sandbox', () => { e2e.pages.Explore.visit(); e2e.components.DataSourcePicker.container().should('be.visible').click(); cy.contains(DATASOURCE_TYPED_NAME).scrollIntoView().should('be.visible').click(); + // make sure the datasource was correctly selected and rendered + e2e.components.Breadcrumbs.breadcrumb(DATASOURCE_TYPED_NAME).should('be.visible'); const valueToType = 'test' + random(100); @@ -115,6 +120,8 @@ describe.skip('Datasource sandbox', () => { e2e.pages.Explore.visit(); e2e.components.DataSourcePicker.container().should('be.visible').click(); cy.contains(DATASOURCE_TYPED_NAME).scrollIntoView().should('be.visible').click(); + // make sure the datasource was correctly selected and rendered + e2e.components.Breadcrumbs.breadcrumb(DATASOURCE_TYPED_NAME).should('be.visible'); cy.get(`div[data-plugin-sandbox="${DATASOURCE_ID}"]`).should('exist'); }); @@ -123,6 +130,8 @@ describe.skip('Datasource sandbox', () => { e2e.pages.Explore.visit(); e2e.components.DataSourcePicker.container().should('be.visible').click(); cy.contains(DATASOURCE_TYPED_NAME).scrollIntoView().should('be.visible').click(); + // make sure the datasource
was correctly selected and rendered + e2e.components.Breadcrumbs.breadcrumb(DATASOURCE_TYPED_NAME).should('be.visible'); const valueToType = 'test' + random(100); diff --git a/e2e/various-suite/frontend-sandbox-datasource.spec.ts b/e2e/various-suite/frontend-sandbox-datasource.spec.ts index 2881e7dfe5d..bfc3fd3fdc3 100644 --- a/e2e/various-suite/frontend-sandbox-datasource.spec.ts +++ b/e2e/various-suite/frontend-sandbox-datasource.spec.ts @@ -85,6 +85,8 @@ describe('Datasource sandbox', () => { e2e.pages.Explore.visit(); e2e.components.DataSourcePicker.container().should('be.visible').click(); cy.contains(DATASOURCE_TYPED_NAME).scrollIntoView().should('be.visible').click(); + // make sure the datasource was correctly selected and rendered + e2e.components.Breadcrumbs.breadcrumb(DATASOURCE_TYPED_NAME).should('be.visible'); cy.wait(300); // wait to prevent false positives because cypress checks too fast cy.get(`div[data-plugin-sandbox="${DATASOURCE_ID}"]`).should('not.exist'); @@ -95,6 +97,9 @@ describe('Datasource sandbox', () => { e2e.components.DataSourcePicker.container().should('be.visible').click(); cy.contains(DATASOURCE_TYPED_NAME).scrollIntoView().should('be.visible').click(); + // make sure the datasource was correctly selected and rendered + e2e.components.Breadcrumbs.breadcrumb(DATASOURCE_TYPED_NAME).should('be.visible'); + const valueToType = 'test' + random(100); cy.get('[data-testid="sandbox-query-editor-query-input"]').should('not.be.disabled'); @@ -114,6 +119,8 @@ describe('Datasource sandbox', () => { e2e.pages.Explore.visit(); e2e.components.DataSourcePicker.container().should('be.visible').click(); cy.contains(DATASOURCE_TYPED_NAME).scrollIntoView().should('be.visible').click(); + // make sure the datasource was correctly selected and rendered + e2e.components.Breadcrumbs.breadcrumb(DATASOURCE_TYPED_NAME).should('be.visible'); cy.get(`div[data-plugin-sandbox="${DATASOURCE_ID}"]`).should('exist'); }); @@ -123,6 +130,9 @@ describe('Datasource sandbox', () => { e2e.components.DataSourcePicker.container().should('be.visible').click(); cy.contains(DATASOURCE_TYPED_NAME).scrollIntoView().should('be.visible').click(); + // make sure the datasource was correctly selected and rendered + e2e.components.Breadcrumbs.breadcrumb(DATASOURCE_TYPED_NAME).should('be.visible'); + const valueToType = 'test' + random(100); cy.get('[data-testid="sandbox-query-editor-query-input"]').should('not.be.disabled'); diff --git a/go.mod b/go.mod index a1a32c1d0ac..bdc799dc91d 100644 --- a/go.mod +++ b/go.mod @@ -84,17 +84,17 @@ require ( github.com/grafana/cuetsy v0.1.11 // @grafana/grafana-as-code github.com/grafana/dataplane/examples v0.0.1 // @grafana/observability-metrics github.com/grafana/dataplane/sdata v0.0.9 // @grafana/observability-metrics - github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb // @grafana/grafana-backend-group + github.com/grafana/dskit v0.0.0-20240311184239-73feada6c0d7 // @grafana/grafana-backend-group github.com/grafana/gofpdf v0.0.0-20231002120153-857cc45be447 // @grafana/sharing-squad - github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 // @grafana/grafana-operator-experience-squad + github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 // @grafana/grafana-operator-experience-squad github.com/grafana/grafana-aws-sdk v0.28.0 // @grafana/aws-datasources github.com/grafana/grafana-azure-sdk-go/v2 v2.0.4 // @grafana/partner-datasources github.com/grafana/grafana-cloud-migration-snapshot v1.0.0 // @grafana/grafana-operator-experience-squad 
github.com/grafana/grafana-google-sdk-go v0.1.0 // @grafana/partner-datasources github.com/grafana/grafana-openapi-client-go v0.0.0-20231213163343-bd475d63fb79 // @grafana/grafana-backend-group - github.com/grafana/grafana-plugin-sdk-go v0.235.0 // @grafana/plugins-platform-backend + github.com/grafana/grafana-plugin-sdk-go v0.237.0 // @grafana/plugins-platform-backend github.com/grafana/grafana/pkg/apimachinery v0.0.0-20240701190119-78f9c0fd4144 // @grafana/grafana-app-platform-squad - github.com/grafana/grafana/pkg/apiserver v0.0.0-20240701190119-78f9c0fd4144 // @grafana/grafana-app-platform-squad + github.com/grafana/grafana/pkg/apiserver v0.0.0-20240708134731-e9876749d440 // @grafana/grafana-app-platform-squad // This needs to be here for other projects that import grafana/grafana // For local development grafana/grafana will always use the local files // Check go.work file for details @@ -102,7 +102,7 @@ require ( github.com/grafana/otel-profiling-go v0.5.1 // @grafana/grafana-backend-group github.com/grafana/pyroscope-go/godeltaprof v0.1.7 // @grafana/observability-traces-and-profiling github.com/grafana/pyroscope/api v0.3.0 // @grafana/observability-traces-and-profiling - github.com/grafana/tempo v1.5.1-0.20230524121406-1dc1bfe7085b // @grafana/observability-traces-and-profiling + github.com/grafana/tempo v1.5.1-0.20240604192202-01f4bc8ac2d1 // @grafana/observability-traces-and-profiling github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 // @grafana/plugins-platform-backend github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // @grafana/grafana-backend-group github.com/hashicorp/go-hclog v1.6.3 // @grafana/plugins-platform-backend @@ -127,7 +127,7 @@ require ( github.com/mattn/go-sqlite3 v1.14.22 // @grafana/grafana-backend-group github.com/matttproud/golang_protobuf_extensions v1.0.4 // @grafana/alerting-backend github.com/microsoft/go-mssqldb v1.7.0 // @grafana/grafana-bi-squad - github.com/mitchellh/mapstructure v1.5.0 //@grafana/identity-access-team + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c //@grafana/identity-access-team github.com/modern-go/reflect2 v1.0.2 // @grafana/alerting-backend github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // @grafana/alerting-backend github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // @grafana/grafana-operator-experience-squad @@ -138,9 +138,9 @@ require ( github.com/openfga/openfga v1.5.4 // @grafana/identity-access-team github.com/patrickmn/go-cache v2.1.0+incompatible // @grafana/alerting-backend github.com/prometheus/alertmanager v0.27.0 // @grafana/alerting-backend - github.com/prometheus/client_golang v1.19.0 // @grafana/alerting-backend + github.com/prometheus/client_golang v1.19.1 // @grafana/alerting-backend github.com/prometheus/client_model v0.6.1 // @grafana/grafana-backend-group - github.com/prometheus/common v0.53.0 // @grafana/alerting-backend + github.com/prometheus/common v0.54.0 // @grafana/alerting-backend github.com/prometheus/prometheus v1.8.2-0.20221021121301-51a44e6657c3 // @grafana/alerting-backend github.com/redis/go-redis/v9 v9.1.0 // @grafana/alerting-backend github.com/robfig/cron/v3 v3.0.1 // @grafana/grafana-backend-group @@ -159,15 +159,15 @@ require ( github.com/yudai/gojsondiff v1.0.0 // @grafana/grafana-backend-group go.opentelemetry.io/collector/pdata v1.6.0 // @grafana/grafana-backend-group go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 // @grafana/plugins-platform-backend - 
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.51.0 // @grafana/grafana-operator-experience-squad - go.opentelemetry.io/contrib/propagators/jaeger v1.26.0 // @grafana/grafana-backend-group + go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.53.0 // @grafana/grafana-operator-experience-squad + go.opentelemetry.io/contrib/propagators/jaeger v1.28.0 // @grafana/grafana-backend-group go.opentelemetry.io/contrib/samplers/jaegerremote v0.20.0 // @grafana/grafana-backend-group - go.opentelemetry.io/otel v1.26.0 // @grafana/grafana-backend-group + go.opentelemetry.io/otel v1.28.0 // @grafana/grafana-backend-group go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // @grafana/grafana-backend-group - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 // @grafana/grafana-backend-group - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 // @grafana/grafana-backend-group - go.opentelemetry.io/otel/sdk v1.26.0 // @grafana/grafana-backend-group - go.opentelemetry.io/otel/trace v1.26.0 // @grafana/grafana-backend-group + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // @grafana/grafana-backend-group + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // @grafana/grafana-backend-group + go.opentelemetry.io/otel/sdk v1.28.0 // @grafana/grafana-backend-group + go.opentelemetry.io/otel/trace v1.28.0 // @grafana/grafana-backend-group go.uber.org/atomic v1.11.0 // @grafana/alerting-backend go.uber.org/goleak v1.3.0 // @grafana/grafana-search-and-storage gocloud.dev v0.25.0 // @grafana/grafana-app-platform-squad @@ -180,10 +180,10 @@ require ( golang.org/x/text v0.16.0 // @grafana/grafana-backend-group golang.org/x/time v0.5.0 // @grafana/grafana-backend-group golang.org/x/tools v0.22.0 // @grafana/grafana-as-code - gonum.org/v1/gonum v0.12.0 // @grafana/observability-metrics + gonum.org/v1/gonum v0.14.0 // @grafana/observability-metrics google.golang.org/api v0.176.0 // @grafana/grafana-backend-group google.golang.org/grpc v1.64.0 // @grafana/plugins-platform-backend - google.golang.org/protobuf v1.34.1 // @grafana/plugins-platform-backend + google.golang.org/protobuf v1.34.2 // @grafana/plugins-platform-backend gopkg.in/ini.v1 v1.67.0 // @grafana/alerting-backend gopkg.in/mail.v2 v2.3.1 // @grafana/grafana-backend-group gopkg.in/yaml.v3 v3.0.1 // @grafana/alerting-backend @@ -231,12 +231,18 @@ require ( github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 // indirect - github.com/apache/thrift v0.18.1 // indirect + github.com/apache/thrift v0.20.0 // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/aws/aws-sdk-go-v2 v1.30.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.13 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.13 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.2 // indirect + github.com/aws/smithy-go v1.20.3 // indirect github.com/axiomhq/hyperloglog v0.0.0-20191112132149-a4c4c47bc57f // indirect 
github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect @@ -271,13 +277,14 @@ require ( github.com/elazarl/goproxy v0.0.0-20231117061959-7cc037d33fb5 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/emicklei/proto v1.10.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect; @grafana/grafana-app-platform-squad + github.com/go-logr/logr v1.4.2 // indirect; @grafana/grafana-app-platform-squad github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/analysis v0.22.2 // indirect @@ -306,14 +313,18 @@ require ( github.com/grafana/grafana/pkg/storage/unified/resource v0.0.0-20240624122844-a89deaeb7365 // @grafana/grafana-search-and-storage github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db // indirect github.com/grafana/sqlds/v3 v3.2.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20191002090509-6af20e3a5340 // indirect; @grafana/plugins-platform-backend github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // @grafana/identity-access-team github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/go-retryablehttp v0.7.5 // indirect github.com/hashicorp/go-sockaddr v1.0.6 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/hashicorp/golang-lru v0.6.0 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/memberlist v0.5.0 // indirect github.com/hashicorp/yamux v0.1.1 // indirect github.com/igm/sockjs-go/v3 v3.0.2 // indirect @@ -321,6 +332,10 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/invopop/jsonschema v0.12.0 // indirect github.com/invopop/yaml v0.3.1 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect + github.com/jackc/pgx/v5 v5.5.5 // indirect + github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect @@ -334,17 +349,22 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/jszwedko/go-datemath v0.1.1-0.20230526204004-640a500621d6 // indirect + github.com/karlseguin/ccache/v3 v3.0.5 // indirect github.com/klauspost/asmfmt v1.3.2 // indirect github.com/klauspost/compress v1.17.8 // indirect github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect 
github.com/mattermost/xml-roundtrip-validator v0.1.0 // indirect github.com/mattetti/filebuffer v1.0.1 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-ieproxy v0.0.3 // indirect + github.com/mattn/go-ieproxy v0.0.11 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mfridman/interpolate v0.0.2 // indirect github.com/miekg/dns v1.1.59 // indirect github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect @@ -364,16 +384,20 @@ require ( github.com/mschoch/smat v0.2.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/natefinch/wrap v0.2.0 // indirect + github.com/ncruces/go-strftime v0.1.9 // indirect github.com/oapi-codegen/runtime v1.1.1 // indirect github.com/oklog/run v1.1.0 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pelletier/go-toml/v2 v2.1.1 // indirect github.com/perimeterx/marshmallow v1.1.5 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/pressly/goose/v3 v3.20.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/exporter-toolkit v0.11.0 // indirect github.com/prometheus/procfs v0.14.0 // indirect @@ -382,14 +406,24 @@ require ( github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/rs/cors v1.10.1 // @grafana/identity-access-team + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/segmentio/asm v1.2.0 // indirect + github.com/segmentio/encoding v0.3.6 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect + github.com/sethvargo/go-retry v0.2.4 // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/viper v1.18.2 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/stretchr/objx v0.5.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/unknwon/bra v0.0.0-20200517080246-1e3013ecaff8 // indirect @@ -406,25 +440,27 @@ require ( go.etcd.io/etcd/api/v3 v3.5.10 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect go.etcd.io/etcd/client/v3 v3.5.10 // indirect - go.mongodb.org/mongo-driver v1.14.0 // indirect + go.mongodb.org/mongo-driver v1.15.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect - go.opentelemetry.io/otel/metric v1.26.0 // indirect - go.opentelemetry.io/proto/otlp v1.2.0 
// indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // @grafana/identity-access-team golang.org/x/sys v0.21.0 // indirect golang.org/x/term v0.21.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect; @grafana/grafana-backend-group - google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/kms v0.29.2 // indirect + modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect modernc.org/libc v1.41.0 // indirect modernc.org/mathutil v1.6.0 // indirect modernc.org/memory v1.7.2 // indirect @@ -436,40 +472,6 @@ require ( sigs.k8s.io/yaml v1.4.0 // indirect; @grafana-app-platform-squad ) -require ( - github.com/antlr4-go/antlr/v4 v4.13.0 // indirect - github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect - github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-retryablehttp v0.7.5 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect - github.com/jackc/pgx/v5 v5.5.5 // indirect - github.com/jackc/puddle/v2 v2.2.1 // indirect - github.com/karlseguin/ccache/v3 v3.0.5 // indirect - github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect - github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect - github.com/magiconair/properties v1.8.7 // indirect - github.com/mfridman/interpolate v0.0.2 // indirect - github.com/natefinch/wrap v0.2.0 // indirect - github.com/ncruces/go-strftime v0.1.9 // indirect - github.com/pelletier/go-toml/v2 v2.1.1 // indirect - github.com/pressly/goose/v3 v3.20.0 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect - github.com/segmentio/encoding v0.3.6 // indirect - github.com/sethvargo/go-retry v0.2.4 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/viper v1.18.2 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - go.uber.org/mock v0.4.0 // indirect - modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect -) - // Use fork of crewjam/saml with fixes for some issues until changes get merged into upstream replace github.com/crewjam/saml => github.com/grafana/saml v0.4.15-0.20240523142256-cc370b98af7c diff --git a/go.sum b/go.sum index 74c8f8e568e..b960ce93ae3 100644 --- a/go.sum +++ b/go.sum @@ -1548,8 +1548,8 @@ 
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/apache/thrift v0.17.0/go.mod h1:OLxhMRJxomX+1I/KUw03qoV3mMz16BwaKI+d4fPBx7Q= -github.com/apache/thrift v0.18.1 h1:lNhK/1nqjbwbiOPDBPFJVKxgDEGSepKuTh6OLiXW8kg= -github.com/apache/thrift v0.18.1/go.mod h1:rdQn/dCcDKEWjjylUeueum4vQEjG2v8v2PqriUnbr+I= +github.com/apache/thrift v0.20.0 h1:631+KvYbsBZxmuJjYwhezVsrfc/TbqtZV4QcxOX1fOI= +github.com/apache/thrift v0.20.0/go.mod h1:hOk1BQqcp2OLzGsyVXdfMk7YFlMxK3aoEVhjD06QhB8= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= @@ -1586,22 +1586,26 @@ github.com/aws/aws-sdk-go v1.51.31 h1:4TM+sNc+Dzs7wY1sJ0+J8i60c6rkgnKP1pvPx8ghsS github.com/aws/aws-sdk-go v1.51.31/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2 v1.16.2 h1:fqlCk6Iy3bnCumtrLz9r3mJ/2gUT0pJ0wLFVIdWh+JA= github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= +github.com/aws/aws-sdk-go-v2 v1.30.1 h1:4y/5Dvfrhd1MxRDD77SrfsDaj8kUkkljU7XE83NPV+o= +github.com/aws/aws-sdk-go-v2 v1.30.1/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 h1:SdK4Ppk5IzLs64ZMvr6MrSficMtjY2oS0WOORXTlxwU= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM= -github.com/aws/aws-sdk-go-v2/config v1.15.3 h1:5AlQD0jhVXlGzwo+VORKiUuogkG7pQcLJNzIzK7eodw= github.com/aws/aws-sdk-go-v2/config v1.15.3/go.mod h1:9YL3v07Xc/ohTsxFXzan9ZpFpdTOFl4X65BAKYaz8jg= +github.com/aws/aws-sdk-go-v2/config v1.24.0 h1:4LEk29JO3w+y9dEo/5Tq5QTP7uIEw+KQrKiHOs4xlu4= +github.com/aws/aws-sdk-go-v2/config v1.24.0/go.mod h1:11nNDAuK86kOUHeuEQo8f3CkcV5xuUxvPwFjTZE/PnQ= github.com/aws/aws-sdk-go-v2/credentials v1.11.2 h1:RQQ5fzclAKJyY5TvF+fkjJEwzK4hnxQCLOu5JXzDmQo= github.com/aws/aws-sdk-go-v2/credentials v1.11.2/go.mod h1:j8YsY9TXTm31k4eFhspiQicfXPLZ0gYXA50i4gxPE8g= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3 h1:LWPg5zjHV9oz/myQr4wMs0gi4CjnDN/ILmyZUFYXZsU= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3/go.mod h1:uk1vhHHERfSVCUnqSqz8O48LBYDSC+k6brng09jcMOk= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.3 h1:ir7iEq78s4txFGgwcLqD6q9IIPzTQNRJXulJd9h/zQo= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.3/go.mod h1:0dHuD2HZZSiwfJSy1FO5bX1hQ1TxVV1QXXjpn3XUE44= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9 h1:onz/VaaxZ7Z4V+WIN9Txly9XLTmoOh1oJ8XcAC3pako= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9/go.mod h1:AnVH5pvai0pAF4lXRq0bmhbes1u9R8wTE+g+183bZNM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3 h1:9stUQR/u2KXU6HkFJYlqnZEjBnbgrVbG6I5HN09xZh0= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.13 h1:5SAoZ4jYpGH4721ZNoS1znQrhOfZinOhc4XuTXx/nVc= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.13/go.mod h1:+rdA6ZLpaSeM7tSg/B0IEDinCIBJGmW8rKDFkYpP04g= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 
v2.4.3/go.mod h1:ssOhaLpRlh88H3UmEcsBoVKq309quMvm3Ds8e9d4eJM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.13 h1:WIijqeaAO7TYFLbhsZmi2rgLEAtWOC1LhxCAVTJlSKw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.13/go.mod h1:i+kbfa76PQbWw/ULoWnp51EYVWH4ENln76fLQE3lXT8= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10 h1:by9P+oy3P/CwggN4ClnW2D4oL91QV7pBzBICi1chZvQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10/go.mod h1:8DcYQcz0+ZJaSxANlHIsbbi6S+zMwjwdDqwW3r9AzaE= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= @@ -1622,11 +1626,14 @@ github.com/aws/aws-sdk-go-v2/service/sqs v1.18.3/go.mod h1:skmQo0UPvsjsuYYSYMVmr github.com/aws/aws-sdk-go-v2/service/ssm v1.24.1/go.mod h1:NR/xoKjdbRJ+qx0pMR4mI+N/H1I1ynHwXnO6FowXJc0= github.com/aws/aws-sdk-go-v2/service/sso v1.11.3 h1:frW4ikGcxfAEDfmQqWgMLp+F1n4nRo9sF39OcIb5BkQ= github.com/aws/aws-sdk-go-v2/service/sso v1.11.3/go.mod h1:7UQ/e69kU7LDPtY40OyoHYgRmgfGM4mgsLYtcObdveU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.2 h1:ORnrOK0C4WmYV/uYt3koHEWBLYsRDwk2Np+eEoyV4Z0= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.2/go.mod h1:xyFHA4zGxgYkdD73VeezHt3vSKEG9EmFnGwoKlP00u4= github.com/aws/aws-sdk-go-v2/service/sts v1.16.3 h1:cJGRyzCSVwZC7zZZ1xbx9m32UnrKydRYhOvcD1NYP9Q= github.com/aws/aws-sdk-go-v2/service/sts v1.16.3/go.mod h1:bfBj0iVmsUyUg4weDB4NxktD9rDGeKSVWnjTnwbx9b8= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE= github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= +github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE= +github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/axiomhq/hyperloglog v0.0.0-20191112132149-a4c4c47bc57f h1:y06x6vGnFYfXUoVMbrcP1Uzpj4JG01eB5vRps9G8agM= github.com/axiomhq/hyperloglog v0.0.0-20191112132149-a4c4c47bc57f/go.mod h1:2stgcRjl6QmW+gU2h5E7BQXg4HU0gzxKWDuT5HviN9s= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= @@ -1985,8 +1992,9 @@ github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -2311,12 +2319,12 @@ github.com/grafana/dataplane/examples v0.0.1 h1:K9M5glueWyLoL4//H+EtTQq16lXuHLmO github.com/grafana/dataplane/examples v0.0.1/go.mod h1:h5YwY8s407/17XF5/dS8XrUtsTVV2RnuW8+m1Mp46mg= github.com/grafana/dataplane/sdata v0.0.9 h1:AGL1LZnCUG4MnQtnWpBPbQ8ZpptaZs14w6kE/MWfg7s= github.com/grafana/dataplane/sdata v0.0.9/go.mod h1:Jvs5ddpGmn6vcxT7tCTWAZ1mgi4sbcdFt9utQx5uMAU= -github.com/grafana/dskit 
v0.0.0-20240104111617-ea101a3b86eb h1:AWE6+kvtE18HP+lRWNUCyvymyrFSXs6TcS2vXIXGIuw= -github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb/go.mod h1:kkWM4WUV230bNG3urVRWPBnSJHs64y/0RmWjftnnn0c= +github.com/grafana/dskit v0.0.0-20240311184239-73feada6c0d7 h1:yd9yoNgEOtp8O0MbtqXoMVqr+ZbU4oZFE8a04z8WXFE= +github.com/grafana/dskit v0.0.0-20240311184239-73feada6c0d7/go.mod h1:RpTvZ9nkdXqyQro5DULQHJl9B6vwvEj95Dk6WIXqTLQ= github.com/grafana/gofpdf v0.0.0-20231002120153-857cc45be447 h1:jxJJ5z0GxqhWFbQUsys3BHG8jnmniJ2Q74tXAG1NaDo= github.com/grafana/gofpdf v0.0.0-20231002120153-857cc45be447/go.mod h1:IxsY6mns6Q5sAnWcrptrgUrSglTZJXH/kXr9nbpb/9I= -github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 h1:/of8Z8taCPftShATouOrBVy6GaTTjgQd/VfNiZp/VXQ= -github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wpvYcKfBcc5T4QnhdQjUhtUtB/1CY89lE= +github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= github.com/grafana/grafana-aws-sdk v0.28.0 h1:ShdA+msLPGJGWWS1SFUYnF+ch1G3gUOlAdGJi6h4sgU= github.com/grafana/grafana-aws-sdk v0.28.0/go.mod h1:ZSVPU7IIJSi5lEg+K3Js+EUpZLXxUaBdaQWH+As1ihI= github.com/grafana/grafana-azure-sdk-go/v2 v2.0.4 h1:z6amQ286IJSBctHf6c+ibJq/v0+TvmEjVkrdMNBd4uY= @@ -2328,12 +2336,12 @@ github.com/grafana/grafana-google-sdk-go v0.1.0/go.mod h1:Vo2TKWfDVmNTELBUM+3lkr github.com/grafana/grafana-openapi-client-go v0.0.0-20231213163343-bd475d63fb79 h1:r+mU5bGMzcXCRVAuOrTn54S80qbfVkvTdUJZfSfTNbs= github.com/grafana/grafana-openapi-client-go v0.0.0-20231213163343-bd475d63fb79/go.mod h1:wc6Hbh3K2TgCUSfBC/BOzabItujtHMESZeFk5ZhdxhQ= github.com/grafana/grafana-plugin-sdk-go v0.114.0/go.mod h1:D7x3ah+1d4phNXpbnOaxa/osSaZlwh9/ZUnGGzegRbk= -github.com/grafana/grafana-plugin-sdk-go v0.235.0 h1:UnZ/iBDvCkfDgwR94opi8trAWJXv4V8Qr1ocJKRRmqA= -github.com/grafana/grafana-plugin-sdk-go v0.235.0/go.mod h1:6n9LbrjGL3xAATntYVNcIi90G9BVHRJjzHKz5FXVfWw= +github.com/grafana/grafana-plugin-sdk-go v0.237.0 h1:sxif4tl9ocYSVyeCtGijWQbW2ygfEOFGKQTCQ/ZX99M= +github.com/grafana/grafana-plugin-sdk-go v0.237.0/go.mod h1:4TgwJYqX8N27PIwEpa2gBEQyWsUW4rIQYGsrkhcgOgY= github.com/grafana/grafana/pkg/apimachinery v0.0.0-20240701190119-78f9c0fd4144 h1:FA9896cLHomvfJcEgTAEhx8q5qGsV1dhuOfnEHM2b+U= github.com/grafana/grafana/pkg/apimachinery v0.0.0-20240701190119-78f9c0fd4144/go.mod h1:DkxMin+qOh1Fgkxfbt+CUfBqqsCQJMG9op8Os/irBPA= -github.com/grafana/grafana/pkg/apiserver v0.0.0-20240701190119-78f9c0fd4144 h1:L6oXfFiskSvg0B/47ueqgC0Hty4oWLQe8b0afK1KAsg= -github.com/grafana/grafana/pkg/apiserver v0.0.0-20240701190119-78f9c0fd4144/go.mod h1:KMzFZMO20l+xNC4iYT3NncdjRUX5G3Fw6TXVHKB9XXM= +github.com/grafana/grafana/pkg/apiserver v0.0.0-20240708134731-e9876749d440 h1:833vWSgndCcOXycwCq2Y98W8+W2ouuuhTL+Gf3BNKg8= +github.com/grafana/grafana/pkg/apiserver v0.0.0-20240708134731-e9876749d440/go.mod h1:qfZc7FEYBdKcxHUTtWtEAH+ArbMIkEQnbVPzr8giY3k= github.com/grafana/grafana/pkg/promlib v0.0.6 h1:FuRyHMIgVVXkLuJnCflNfk3gqJflmyiI+/ZuJ9MoAfY= github.com/grafana/grafana/pkg/promlib v0.0.6/go.mod h1:shFkrG1fQ/PPNRGhxAPNMLp0SAeG/jhqaLoG6n2191M= github.com/grafana/grafana/pkg/storage/unified/resource v0.0.0-20240624122844-a89deaeb7365 h1:XRHqYGxjN2+/4QHPoOtr7kYTL9p2P5UxTXfnbiaO/NI= @@ -2355,8 +2363,8 @@ github.com/grafana/saml v0.4.15-0.20240523142256-cc370b98af7c h1:SWmG1QLZ36Ay0ht github.com/grafana/saml 
v0.4.15-0.20240523142256-cc370b98af7c/go.mod h1:S4+611dxnKt8z/ulbvaJzcgSHsuhjVc1QHNTcr1R7Fw= github.com/grafana/sqlds/v3 v3.2.0 h1:WXuYEaFfiCvgm8kK2ixx44/zAEjFzCylA2+RF3GBqZA= github.com/grafana/sqlds/v3 v3.2.0/go.mod h1:kH0WuHUR3j0Q7IEymbm2JiaPckUhRCbqjV9ajaBAnmM= -github.com/grafana/tempo v1.5.1-0.20230524121406-1dc1bfe7085b h1:mDlkqgTEJuK7vjPG44f3ZMtId5AAYLWHvBVbiGqIOOQ= -github.com/grafana/tempo v1.5.1-0.20230524121406-1dc1bfe7085b/go.mod h1:UK7kTP5llPeRcGBOe5mm4QTNTd0k/mAqTVSOFdDH6AU= +github.com/grafana/tempo v1.5.1-0.20240604192202-01f4bc8ac2d1 h1:cSE1u4IUQ9EPcQErMZ9YVYayJTIGgH4g2E1Rp2WmGy0= +github.com/grafana/tempo v1.5.1-0.20240604192202-01f4bc8ac2d1/go.mod h1:ttAEYdYVYBNngPulKIHkmHvjXfLfX7jDWI74jzb8jh4= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= @@ -2445,8 +2453,9 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= @@ -2691,8 +2700,8 @@ github.com/linkedin/goavro/v2 v2.10.0 h1:eTBIRoInBM88gITGXYtUSqqxLTFXfOsJBiX8ZMW github.com/linkedin/goavro/v2 v2.10.0/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= github.com/linode/linodego v1.32.0 h1:OmZzB3iON6uu84VtLFf64uKmAQqJJarvmsVguroioPI= github.com/linode/linodego v1.32.0/go.mod h1:y8GDP9uLVH4jTB9qyrgw79qfKdYJmNCGUOJmfuiOcmI= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= +github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= @@ -2727,8 +2736,9 @@ github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.1/go.mod 
h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-ieproxy v0.0.3 h1:YkaHmK1CzE5C4O7A3hv3TCbfNDPSCf0RKZFX+VhBeYk= github.com/mattn/go-ieproxy v0.0.3/go.mod h1:6ZpRmhBaYuBX1U2za+9rC9iCGLsSp2tftelZne7CPko= +github.com/mattn/go-ieproxy v0.0.11 h1:MQ/5BuGSgDAHZOJe6YY80IF2UVCfGkwfo6AeD7HtHYo= +github.com/mattn/go-ieproxy v0.0.11/go.mod h1:/NsJd+kxZBmjMc5hrJCKMbP57B84rvq9BiDRbtO9AS0= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -2798,8 +2808,9 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -3013,8 +3024,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= +github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/pressly/goose/v3 v3.20.0 h1:uPJdOxF/Ipj7ABVNOAMJXSxwFXZGwMGHNqjC8e61VA0= github.com/pressly/goose/v3 v3.20.0/go.mod h1:BRfF2GcG4FTG12QfdBVy3q1yveaf4ckL9vWwEcIO3lA= @@ -3033,8 +3044,9 @@ github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQg github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_golang 
v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -3065,8 +3077,9 @@ github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= -github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= +github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= @@ -3152,8 +3165,8 @@ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= -github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= -github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shirou/gopsutil/v3 v3.24.2 h1:kcR0erMbLg5/3LcInpw0X/rrPSqq4CDPyI6A6ZRC18Y= +github.com/shirou/gopsutil/v3 v3.24.2/go.mod h1:tSg/594BcA+8UdQU2XcW803GWYgdtauFFPgJCJKZlVk= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= @@ -3336,8 +3349,8 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zclconf/go-cty v1.13.0 h1:It5dfKTTZHe9aeppbNOda3mN7Ag7sg6QkBNm6TkyFa0= github.com/zclconf/go-cty v1.13.0/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= @@ -3376,8 +3389,9 
@@ go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= -go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc= +go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -3400,16 +3414,16 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.4 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 h1:A3SayB3rNyt+1S6qpI9mHPkeHTZbD7XILEqWnYZb2l0= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0/go.mod h1:27iA5uvhuRNmalO+iEUdVn5ZMj2qy10Mm+XRIpRmyuU= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.51.0 h1:974XTyIwHI4nHa1+uSLxHtUnlJ2DiVtAJjk7fd07p/8= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.51.0/go.mod h1:ZvX/taFlN6TGaOOM6D42wrNwPKUV1nGO2FuUXkityBU= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.53.0 h1:IVtyPth4Rs5P8wIf0mP2KVKFNTJ4paX9qQ4Hkh5gFdc= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.53.0/go.mod h1:ImRBLMJv177/pwiLZ7tU7HDGNdBv7rS0HQ99eN/zBl8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= -go.opentelemetry.io/contrib/propagators/jaeger v1.26.0 h1:RH76Cl2pfOLLoCtxAPax9c7oYzuL1tiI7/ZPJEmEmOw= -go.opentelemetry.io/contrib/propagators/jaeger v1.26.0/go.mod h1:W/cylm0ZtJK1uxsuTqoYGYPnqpZ8CeVGgW7TwfXPsGw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/contrib/propagators/jaeger v1.28.0 h1:xQ3ktSVS128JWIaN1DiPGIjcH+GsvkibIAVRWFjS9eM= +go.opentelemetry.io/contrib/propagators/jaeger v1.28.0/go.mod h1:O9HIyI2kVBrFoEwQZ0IN6PHXykGoit4mZV2aEjkTRH4= go.opentelemetry.io/contrib/samplers/jaegerremote v0.20.0 h1:ja+d7Aea/9PgGxB63+E0jtRFpma717wubS0KFkZpmYw= 
go.opentelemetry.io/contrib/samplers/jaegerremote v0.20.0/go.mod h1:Yc1eg51SJy7xZdOTyg1xyFcwE+ghcWh3/0hKeLo6Wlo= go.opentelemetry.io/otel v1.17.0/go.mod h1:I2vmBGtFaODIVMBSTPVDlJSzBDNf93k60E6Ft0nyjo0= @@ -3419,16 +3433,16 @@ go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZV go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0= go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg= -go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= -go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0/go.mod h1:h95q0LBGh7hlAC08X2DhSeyIG02YQ0UyioTCVAqRPmc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.25.0/go.mod h1:8GlBGcDk8KKi7n+2S4BT/CPZQYH3erLu0/k64r1MYgo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 h1:Waw9Wfpo/IXzOI8bCB7DIk+0JZcqqsyn1JFnAc+iam8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0/go.mod h1:wnJIG4fOqyynOnnQF/eQb4/16VlX2EJAHhHgqIqWfAo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0/go.mod h1:e7ciERRhZaOZXVjx5MiL8TK5+Xv7G5Gv5PA2ZDEJdL8= go.opentelemetry.io/otel/metric v1.17.0/go.mod h1:h4skoxdZI17AxwITdmdZjjYJQH5nzijUUjm+wtPph5o= go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= @@ -3437,14 +3451,14 @@ go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xC go.opentelemetry.io/otel/metric v1.23.0/go.mod h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo= go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s= -go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= -go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= go.opentelemetry.io/otel/sdk v1.17.0/go.mod h1:U87sE0f5vQB7hwUoW98pW5Rz4ZDuCFBZFNUBlSgmDFQ= go.opentelemetry.io/otel/sdk v1.19.0/go.mod 
h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= go.opentelemetry.io/otel/sdk v1.25.0/go.mod h1:oFgzCM2zdsxKzz6zwpTZYLLQsFwc+K0daArPdIhuxkw= -go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= -go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= go.opentelemetry.io/otel/trace v1.17.0/go.mod h1:I/4vKTgFclIsXRVucpH25X0mpFSczM7aHeaz0ZBLWjY= go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= @@ -3452,15 +3466,15 @@ go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40 go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk= go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I= -go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= -go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -4131,8 +4145,9 @@ gonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= -gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= +gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= +gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod 
h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= @@ -4448,8 +4463,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240213162025-012b6fc9bca9/go. google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y= google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230807174057-1744710a1577/go.mod h1:NjCQG/D8JandXxM57PZbAJL1DCNL6EypA0vPPwfsc7c= google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405/go.mod h1:GRUCuLdzVqZte8+Dl/D4N25yLzcGqqWaYkeVOwulFqw= @@ -4494,8 +4509,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/genproto/googleapis/rpc v0.0.0-20240415141817-7cd4c1c1f9ec/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -4585,8 +4600,8 @@ google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod 
h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= diff --git a/go.work.sum b/go.work.sum index bdd0fc906f5..2b684f59699 100644 --- a/go.work.sum +++ b/go.work.sum @@ -27,9 +27,6 @@ cloud.google.com/go/channel v1.17.5 h1:/omiBnyFjm4S1ETHoOmJbL7LH7Ljcei4rYG6Sj3hc cloud.google.com/go/cloudbuild v1.15.1 h1:ZB6oOmJo+MTov9n629fiCrO9YZPOg25FZvQ7gIHu5ng= cloud.google.com/go/clouddms v1.7.4 h1:Sr0Zo5EAcPQiCBgHWICg3VGkcdS/LLP1d9SR7qQBM/s= cloud.google.com/go/cloudtasks v1.12.6 h1:EUt1hIZ9bLv8Iz9yWaCrqgMnIU+Tdh0yXM1MMVGhjfE= -cloud.google.com/go/cloudtasks v1.12.8/go.mod h1:aX8qWCtmVf4H4SDYUbeZth9C0n9dBj4dwiTYi4Or/P4= -cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= -cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU= cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls= cloud.google.com/go/contactcenterinsights v1.13.0 h1:6Vs/YnDG5STGjlWMEjN/xtmft7MrOTOnOZYUZtGTx0w= cloud.google.com/go/container v1.31.0 h1:MAaNH7VRNPWEhvqOypq2j+7ONJKrKzon4v9nS3nLZe0= @@ -144,10 +141,14 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNL github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= github.com/Azure/azure-service-bus-go v0.11.5 h1:EVMicXGNrSX+rHRCBgm/TRQ4VUZ1m3yAYM/AB2R/SOs= github.com/Azure/go-amqp v0.16.4 h1:/1oIXrq5zwXLHaoYDliJyiFjJSpJZMWGgtMX9e0/Z30= github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA= github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4= github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= @@ -161,6 +162,8 @@ github.com/CloudyKit/jet/v6 v6.2.0 h1:EpcZ6SR9n28BUGtNJSvlBqf90IpjeFr36Tizxhn/oM github.com/CloudyKit/jet/v6 v6.2.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4= github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= github.com/GoogleCloudPlatform/cloudsql-proxy v1.29.0 h1:YNu23BtH0PKF+fg3ykSorCp6jSTjcEtfnYLzbmcjVRA= +github.com/IBM/sarama v1.43.0 h1:YFFDn8mMI2QL0wOrG0J2sFoVIAFl7hS9JQi2YZsXtJc= +github.com/IBM/sarama 
v1.43.0/go.mod h1:zlE6HEbC/SMQ9mhEYaF7nNLYOUyrs0obySKCckWP9BM= github.com/Joker/jade v1.1.3 h1:Qbeh12Vq6BxURXT1qZBRHsDxeURB8ztcL6f3EXSGeHk= github.com/Joker/jade v1.1.3/go.mod h1:T+2WLyt7VH6Lp0TRxQrUYEs64nRc83wkMQrfeIQKduM= github.com/KimMachineGun/automemlimit v0.6.0 h1:p/BXkH+K40Hax+PuWWPQ478hPjsp9h1CPDhLlA3Z37E= @@ -177,6 +180,7 @@ github.com/RoaringBitmap/gocroaring v0.4.0 h1:5nufXUgWpBEUNEJXw7926YAA58ZAQRpWPr github.com/RoaringBitmap/real-roaring-datasets v0.0.0-20190726190000-eb7c87156f76 h1:ZYlhPbqQFU+AHfgtCdHGDTtRW1a8geZyiE8c6Q+Sl1s= github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06 h1:KkH3I3sJuOLP3TjA/dfr4NAY8bghDwnXiU7cTKxQqo0= github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06/go.mod h1:7erjKLwalezA0k99cWs5L11HWOAPNjdUZ6RxH1BXbbM= +github.com/Shopify/sarama v1.19.0 h1:9oksLxC6uxVPHPVYUmq6xhr1BOF/hHobWH2UzO67z1s= github.com/Shopify/sarama v1.38.1 h1:lqqPUPQZ7zPqYlWpTh+LQ9bhYNu2xJL6k1SJN4WVe2A= github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= @@ -191,7 +195,11 @@ github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVd github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kong v0.2.11 h1:RKeJXXWfg9N47RYfMm0+igkxBCTF4bzbneAxaqid0c4= github.com/alecthomas/kong v0.2.11/go.mod h1:kQOmtJgV+Lb4aj+I2LEn40cbtawdWJ9Y8QLq+lElKxE= +github.com/alecthomas/kong v0.8.0 h1:ryDCzutfIqJPnNn0omnrgHLbAggDQM2VWHikE1xqK7s= +github.com/alecthomas/kong v0.8.0/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U= github.com/alecthomas/participle/v2 v2.1.0 h1:z7dElHRrOEEq45F2TG5cbQihMtNTv8vwldytDj7Wrz4= +github.com/alecthomas/participle/v2 v2.1.1 h1:hrjKESvSqGHzRb4yW1ciisFJ4p3MGYih6icjJvbsmV8= +github.com/alecthomas/participle/v2 v2.1.1/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alexflint/go-arg v1.4.2 h1:lDWZAXxpAnZUq4qwb86p/3rIJJ2Li81EoMbTMujhVa0= @@ -215,6 +223,8 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a h1:pv34s756C4pEXnjgPfGYgdhg/ZdajGhyOvzx8k+23nw= github.com/aws/aws-lambda-go v1.13.3 h1:SuCy7H3NLyp+1Mrfp+m80jcbi9KYWAs9/BXwppwRDzY= +github.com/aws/aws-sdk-go-v2 v1.22.2 h1:lV0U8fnhAnPz8YcdmZVV60+tr6CakHzqA6P8T46ExJI= +github.com/aws/aws-sdk-go-v2 v1.22.2/go.mod h1:Kd0OJtkW3Q0M0lUWGszapWjEvrXDzRW+D21JNsroB+c= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1 h1:w/fPGB0t5rWwA43mux4e9ozFSH5zF1moQemlA131PWc= github.com/aws/aws-sdk-go-v2/service/kms v1.16.3 h1:nUP29LA4GZZPihNSo5ZcF4Rl73u+bN5IBRnrQA0jFK4= github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.15.4 h1:EmIEXOjAdXtxa2OGM1VAajZV/i06Q8qd4kBpJd9/p1k= @@ -229,6 +239,8 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn github.com/boombuler/barcode v1.0.1 h1:NDBbPmhS+EqABEs5Kg3n/5ZNjy73Pz7SIV+KCeqyXcs= github.com/bradleyjkemp/cupaloy/v2 v2.6.0 h1:knToPYa2xtfg42U3I6punFEjaGFKWQRXJwj0JTv4mTs= github.com/bradleyjkemp/cupaloy/v2 v2.6.0/go.mod h1:bm7JXdkRd4BHJk9HpwqAI8BoAY1lps46Enkdqw6aRX0= +github.com/brianvoe/gofakeit/v6 v6.25.0 
h1:ZpFjktOpLZUeF8q223o0rUuXtA+m5qW5srjvVi+JkXk= +github.com/brianvoe/gofakeit/v6 v6.25.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs= github.com/bufbuild/protovalidate-go v0.2.1 h1:pJr07sYhliyfj/STAM7hU4J3FKpVeLVKvOBmOTN8j+s= github.com/bufbuild/protovalidate-go v0.2.1/go.mod h1:e7XXDtlxj5vlEyAgsrxpzayp4cEMKCSSb8ZCkin+MVA= github.com/bwesterb/go-ristretto v1.2.3 h1:1w53tCkGhCQ5djbat3+MH0BAQ5Kfgbt56UZQ/JMzngw= @@ -242,6 +254,7 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo= github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= +github.com/chromedp/cdproto v0.0.0-20220208224320-6efb837e6bc2/go.mod h1:At5TxYYdxkbQL0TSefRjhLE3Q0lgvqKKMSFUglJ7i1U= github.com/chromedp/chromedp v0.9.2 h1:dKtNz4kApb06KuSXoTQIyUC2TrA0fhGDwNZf3bcgfKw= github.com/chromedp/sysutil v1.0.0 h1:+ZxhTpfpZlmchB58ih/LBHX52ky7w2VhQVKQMucy3Ic= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= @@ -306,15 +319,24 @@ github.com/drone/funcmap v0.0.0-20211123105308-29742f68a7d1/go.mod h1:Hph0/pT6Zx github.com/drone/signal v1.0.0 h1:NrnM2M/4yAuU/tXs6RP1a1ZfxnaHwYkd0kJurA1p6uI= github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= +github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 h1:8yY/I9ndfrgrXUbOGObLHKBR4Fl3nZXwM2c7OYTT8hM= github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/elastic/go-sysinfo v1.11.2 h1:mcm4OSYVMyws6+n2HIVMGkln5HOpo5Ie1ZmbbNn0jg4= github.com/elastic/go-sysinfo v1.11.2/go.mod h1:GKqR8bbMK/1ITnez9NIsIfXQr25aLhRJa7AfT8HpBFQ= github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= +github.com/elazarl/goproxy v0.0.0-20230731152917-f99041a5c027/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= +github.com/expr-lang/expr v1.16.2 h1:JvMnzUs3LeVHBvGFcXYmXo+Q6DPDmzrlcSBO6Wy3w4s= +github.com/expr-lang/expr v1.16.2/go.mod h1:uCkhfG+x7fcZ5A5sXHKuQ07jGZRl6J0FCAaf2k4PtVQ= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/flosch/pongo2/v4 v4.0.2 h1:gv+5Pe3vaSVmiJvh/BZa82b7/00YUGm0PIyVVLop0Hw= github.com/flosch/pongo2/v4 v4.0.2/go.mod h1:B5ObFANs/36VwxxlgKpdchIJHMvHB562PW+BWPhwZD8= github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= @@ -335,12 +357,19 @@ 
github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+ github.com/go-fonts/dejavu v0.1.0 h1:JSajPXURYqpr+Cu8U9bt8K+XcACIHWqWrvWCKyeFmVQ= github.com/go-fonts/latin-modern v0.2.0 h1:5/Tv1Ek/QCr20C6ZOz15vw3g7GELYL98KWr8Hgo+3vk= github.com/go-fonts/liberation v0.2.0 h1:jAkAWJP4S+OsrPLZM4/eC9iW7CtHy+HBXrEwZXWo5VM= +github.com/go-fonts/liberation v0.3.0 h1:3BI2iaE7R/s6uUUtzNCjo3QijJu3aS4wmrMgfSpYQ+8= +github.com/go-fonts/liberation v0.3.0/go.mod h1:jdJ+cqF+F4SUL2V+qxBth8fvBpBDS7yloUL5Fi8GTGY= github.com/go-fonts/stix v0.1.0 h1:UlZlgrvvmT/58o573ot7NFw0vZasZ5I6bcIft/oMdgg= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4 h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I= github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo= github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= +github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81 h1:6zl3BbBhdnMkpSj2YY30qV3gDcVBGtFgVsV3+/i+mKQ= +github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9 h1:NxXI5pTAtpEaU49bpLpQoDsu1zrteW/vxzTz8Cd2UAs= +github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9/go.mod h1:gWuR/CrFDDeVRFQwHPvsv9soJVB/iqymhuZQuJ3a9OM= +github.com/go-openapi/swag v0.22.8/go.mod h1:6QT22icPLEqAM/z/TChgb4WAveCHF92+2gF0CNjHpPI= github.com/go-pdf/fpdf v0.6.0 h1:MlgtGIfsdMEEQJr2le6b/HNr1ZlQwxyWr77r2aj2U/8= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= @@ -349,9 +378,13 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k= github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk= +github.com/goccmack/gocc v0.0.0-20230228185258-2292f9e40198 h1:FSii2UQeSLngl3jFoR4tUKZLprO7qUlh/TKKticc0BM= +github.com/goccmack/gocc v0.0.0-20230228185258-2292f9e40198/go.mod h1:DTh/Y2+NbnOVVoypCCQrovMPDKUGp4yZpSbWg5D0XIM= github.com/goccy/go-yaml v1.11.0 h1:n7Z+zx8S9f9KgzG6KtQKf+kwqXZlLNR2F6018Dgau54= github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4 h1:vF83LI8tAakwEwvWZtrIEx7pOySacl2TOxx6eXk4ePo= github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= @@ -379,23 +412,8 @@ github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/grafana/e2e v0.1.1-0.20221018202458-cffd2bb71c7b h1:Ha+kSIoTutf4ytlVw/SaEclDUloYx0+FXDKJWKhNbE4= github.com/grafana/e2e v0.1.1-0.20221018202458-cffd2bb71c7b/go.mod 
h1:3UsooRp7yW5/NJQBlXcTsAHOoykEhNUYXkQ3r6ehEEY= -github.com/grafana/grafana-aws-sdk v0.28.0 h1:ShdA+msLPGJGWWS1SFUYnF+ch1G3gUOlAdGJi6h4sgU= -github.com/grafana/grafana-aws-sdk v0.28.0/go.mod h1:ZSVPU7IIJSi5lEg+K3Js+EUpZLXxUaBdaQWH+As1ihI= -github.com/grafana/grafana-plugin-sdk-go v0.212.0/go.mod h1:qsI4ktDf0lig74u8SLPJf9zRdVxWV/W4Wi+Ox6gifgs= -github.com/grafana/grafana-plugin-sdk-go v0.215.0/go.mod h1:nBsh3jRItKQUXDF2BQkiQCPxqrsSQeb+7hiFyJTO1RE= -github.com/grafana/grafana-plugin-sdk-go v0.216.0/go.mod h1:FdvSvOliqpVLnytM7e89zCFyYPDE6VOn9SIjVQRvVxM= -github.com/grafana/grafana-plugin-sdk-go v0.227.1-0.20240426134450-5fe9f7b9dfd4 h1:GV9u4RplRyMlqDicJ0t+m1nVTL1SSfqHd38B/RGum+k= -github.com/grafana/grafana-plugin-sdk-go v0.227.1-0.20240426134450-5fe9f7b9dfd4/go.mod h1:UBDIuvdUGUI5fMDHDAl6yAVpFhfwl5ojMaw1N68775w= -github.com/grafana/grafana-plugin-sdk-go v0.227.1-0.20240430073540-ce4d126ae8b8 h1:pyWJN79uW8QHZiQRasHGLCEkXSr3k6HCjdr0J2jZ3rU= -github.com/grafana/grafana-plugin-sdk-go v0.227.1-0.20240430073540-ce4d126ae8b8/go.mod h1:u4K9vVN6eU86loO68977eTXGypC4brUCnk4sfDzutZU= -github.com/grafana/grafana-plugin-sdk-go v0.228.0/go.mod h1:u4K9vVN6eU86loO68977eTXGypC4brUCnk4sfDzutZU= -github.com/grafana/grafana-plugin-sdk-go v0.229.0/go.mod h1:6V6ikT4ryva8MrAp7Bdz5fTJx3/ztzKvpMJFfpzr4CI= -github.com/grafana/grafana-plugin-sdk-go v0.231.1-0.20240523124942-62dae9836284/go.mod h1:bNgmNmub1I7Mc8dzIncgNqHC5jTgSZPPHlZ3aG8HKJQ= -github.com/grafana/grafana-plugin-sdk-go v0.234.0/go.mod h1:FlXjmBESxaD6Hoi8ojWLkH007nyjtJM3XC8SpwzF/YE= -github.com/grafana/grafana/pkg/apimachinery v0.0.0-20240613114114-5e2f08de316d/go.mod h1:adT8O7k6ZSzUKjAC4WS6VfWlCE4G1VavPwSXVhvScCs= -github.com/grafana/grafana/pkg/promlib v0.0.3/go.mod h1:3El4NlsfALz8QQCbEGHGFvJUG+538QLMuALRhZ3pcoo= -github.com/grafana/grafana/pkg/storage/unified/resource v0.0.0-20240620152449-c88de7f4d073/go.mod h1:zOInHv2y6bsgm9bIMsCVDaz1XylqIVX9r4amH4iuWPE= -github.com/grafana/grafana/pkg/storage/unified/resource v0.0.0-20240620184539-d988f5c3b064/go.mod h1:zOInHv2y6bsgm9bIMsCVDaz1XylqIVX9r4amH4iuWPE= +github.com/grafana/e2e v0.1.1 h1:/b6xcv5BtoBnx8cZnCiey9DbjEc8z7gXHO5edoeRYxc= +github.com/grafana/e2e v0.1.1/go.mod h1:RpNLgae5VT+BUHvPE+/zSypmOXKwEu4t+tnEMS1ATaE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= @@ -416,6 +434,7 @@ github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSAS github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465 h1:KwWnWVWCNtNq/ewIX7HIKnELmEx2nDP42yskD/pi7QE= github.com/influxdata/influxdb v1.7.6 h1:8mQ7A/V+3noMGCt/P9pD09ISaiz9XvgCk303UYA3gcs= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab h1:HqW4xhhynfjrtEiiSGcQUd6vrK23iMam1FO8rI7mwig= +github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= github.com/iris-contrib/schema v0.0.6 h1:CPSBLyx2e91H2yJzPuhGuifVRnZBBJ3pCOMbOvPZaTw= github.com/iris-contrib/schema v0.0.6/go.mod h1:iYszG0IOsuIsfzjymw1kMzTL8YQcCWlm65f3wX8J5iA= github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= @@ -434,6 +453,8 @@ github.com/jackspirou/syscerts v0.0.0-20160531025014-b68f5469dff1 h1:9Xm8CKtMZIX github.com/jackspirou/syscerts 
v0.0.0-20160531025014-b68f5469dff1/go.mod h1:zuHl3Hh+e9P6gmBPvcqR1HjkaWHC/csgyskg6IaFKFo= github.com/jaegertracing/jaeger v1.41.0 h1:vVNky8dP46M2RjGaZ7qRENqylW+tBFay3h57N16Ip7M= github.com/jaegertracing/jaeger v1.41.0/go.mod h1:SIkAT75iVmA9U+mESGYuMH6UQv6V9Qy4qxo0lwfCQAc= +github.com/jaegertracing/jaeger v1.55.0 h1:IJHzKb2B9EYQyKlE7VSoKzNP3emHeqZWnWrKj+kYzzs= +github.com/jaegertracing/jaeger v1.55.0/go.mod h1:S884Mz8H+iGI8Ealq6sM9QzSOeU6P+nbFkYw7uww8CI= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jedib0t/go-pretty/v6 v6.2.4 h1:wdaj2KHD2W+mz8JgJ/Q6L/T5dB7kyqEFI16eLq7GEmk= github.com/jedib0t/go-pretty/v6 v6.2.4/go.mod h1:+nE9fyyHGil+PuISTCrp7avEdo6bqoMwqZnuiK2r2a0= @@ -474,6 +495,8 @@ github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46 h1:veS9QfglfvqAw2e+eeNT/SbGySq8ajECXJ9e4fPoLhY= github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= +github.com/knadh/koanf/v2 v2.1.0 h1:eh4QmHHBuU8BybfIJ8mB8K8gsGCD/AUQTdwGq/GzId8= +github.com/knadh/koanf/v2 v2.1.0/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= @@ -508,8 +531,12 @@ github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.52 h1:8XhG36F6oKQUDDSuz6dY3rioMzovKjW40W6ANuN0Dps= github.com/minio/minio-go/v7 v7.0.52/go.mod h1:IbbodHyjUAguneyucUaahv+VMNs/EOTV9du7A7/Z3HU= +github.com/minio/minio-go/v7 v7.0.63 h1:GbZ2oCvaUdgT5640WJOpyDhhDxvknAJU2/T3yurwcbQ= +github.com/minio/minio-go/v7 v7.0.63/go.mod h1:Q6X7Qjb7WMhvG65qKf4gUgA5XaiSox74kR1uAEjxRS4= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.1.5 h1:OxRIeJXpAMztws/XHlN2vu6imG5Dpq+j61AzAX5fLng= github.com/mitchellh/gox v0.4.0 h1:lfGJxY7ToLJQjHHwi0EX6uYBdK78egf954SQl13PQJc= github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY= @@ -519,6 +546,8 @@ github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5 h1:8Q0qkMVC/MmWkpI github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU= github.com/mostynb/go-grpc-compression v1.1.17 h1:N9t6taOJN3mNTTi0wDf4e3lp/G/ON1TP67Pn0vTUA9I= github.com/mostynb/go-grpc-compression v1.1.17/go.mod h1:FUSBr0QjKqQgoDG/e0yiqlR6aqyXC39+g/hFLDfSsEY= +github.com/mostynb/go-grpc-compression v1.2.2 h1:XaDbnRvt2+1vgr0b/l0qh4mJAfIxE0bKXtz2Znl3GGI= +github.com/mostynb/go-grpc-compression v1.2.2/go.mod h1:GOCr2KBxXcblCuczg3YdLQlcin1/NfyDA348ckuCH6w= github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8 h1:P48LjvUQpTReR3TQRbxSeSBsMXzfK0uol7eRcr7VBYQ= github.com/natessilva/dag v0.0.0-20180124060714-7194b8dcc5c4 h1:dnMxwus89s86tI8rcGVp2HwZzlz7c5o92VOy7dSckBQ= github.com/nats-io/jwt v1.2.2 
h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU= @@ -537,26 +566,64 @@ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/jaegerexporte github.com/open-telemetry/opentelemetry-collector-contrib/exporter/jaegerexporter v0.74.0/go.mod h1:bIeSj+SaZdP3CE9Xae+zurdQC6DXX0tPP6NAEVmgtt4= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.74.0 h1:MrVOfBTNBe4n/daZjV4yvHZRR0Jg/MOCl/mNwymHwDM= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.74.0/go.mod h1:v4H2ATSrKfOTbQnmjCxpvuOjrO/GUURAgey9RzrPsuQ= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.97.0 h1:8GH8y3Cq54Ey6He9tyhcVYLfG4TEs/7pp3s6934zNKA= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.97.0/go.mod h1:QBHXt+tHds39B4xGyBkbOx2TST+p8JLWBiXbKKAhNss= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.74.0 h1:8Kk5g5PKQBUV3idjJy1NWVLLReEzjnB8C1lFgQxZ0TI= github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.74.0/go.mod h1:UtVfxZGhPU2OvDh7H8o67VKWG9qHAHRNkhmZUWqCvME= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.97.0 h1:WGRSjhOYfW/NCseia4Av8s4q65vZmM66WJPw8ocjU1I= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.97.0/go.mod h1:3QaZO29iFAiW+wCjQ3pI7yFn+lcQB5VLfPjbyBx9ThA= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.97.0 h1:jPhm8t6pJcxgng2DaEhPZ3sY44KBoM0W32j6aI+hMgU= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.97.0/go.mod h1:PAJCWmb9gEZh1OhazzHrvUGQeu31Rzqm8OJQVfxlLwU= github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.74.0 h1:vU5ZebauzCuYNXFlQaWaYnOfjoOAnS+Sc8+oNWoHkbM= github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.74.0/go.mod h1:TEu3TnUv1TuyHtjllrUDQ/ImpyD+GrkDejZv4hxl3G8= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.97.0 h1:vX/BkTfd7/cvydXJ7FmUy5iSviQeNGAgTCoXcLu7/Ww= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.97.0/go.mod h1:yrlbvRlLeny1kFmj4Ac9BSqv/pOr2h7sOIvDE6OMCKk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0 h1:f3HVDcjUVUbOpKWiebD9v8+9YdDdNvzPyKh3IVb0ORY= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0/go.mod h1:110wLws4lB2Jpv58rK7YoaMIhIEmLlzw5/viC0XJhbM= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.97.0 h1:+EA1iNRxeOqtU+9/+4hu8Tz7LhW2xmhFPd3w+ntpxiE= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.97.0/go.mod h1:pHbdUUE+6ZWpvaHyyHQiytt7t6IPVCvacnDPmKWVgu0= github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.74.0 h1:COFBWXiWnhRs9x1oYJbDg5cyiNAozp8sycriD9+1/7E= github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.74.0/go.mod h1:cAKlYKU+/8mk6ETOnD+EAi5gpXZjDrGweAB9YTYrv/g= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.97.0 h1:sd7JEwe5e7VYel/6gFWTKFTsQz8PxzdpU8tWlL6Ymos= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.97.0/go.mod h1:2E6Bx0OjGlwbsdpakU6ZiMb2VeA2hIcssmFL9Ov251g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.97.0 h1:jZQsM7cMygRatb5ng1Hbstmfuzn6aqXDmhYpwtljZyg= 
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.97.0/go.mod h1:WONvd1Xj3VFOziyxExHxmhmEx70DxqJ0nIVQ3FXtdS0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0 h1:bVeo7BahYY4rWdaEuzJX2Tn20MbvYcEHXbFjV2IwnPQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0/go.mod h1:lj29zRdEZdvbQvZ6g7Pb+tSPw4dQE/jahe39fRgO+08= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.97.0 h1:MWki5BBYideAF82i3ladRxmed3nkkGHDUA1YC2p2mz8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.97.0/go.mod h1:Zsl3xSYy9QFqZ5lTPXqPHSuxz03kUaMw75kvvScE+94= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.97.0 h1:/aQbyj1DK8mK2lU+7VrzI/OPGISrIayAJyHlu67LI/8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.97.0/go.mod h1:4W8p7s7UtnLzAaFps6At8ELJE9eLc7y4C/xvQJ0t7Mg= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.74.0 h1:ww1pPXfAM0WHsymQnsN+s4B9DgwQC+GyoBq0t27JV/k= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.74.0/go.mod h1:OpEw7tyCg+iG1ywEgZ03qe5sP/8fhYdtWCMoqA8JCug= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.97.0 h1:5/19bkuB0GDo+vScgEnNy/TcXAkmyTu7BenfpaD2a/M= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.97.0/go.mod h1:CuGIeYSpb4Kmyq4Ez83gHbTjNQGxptjYcC4T5dWkPnM= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.74.0 h1:0Fh6OjlUB9HlnX90/gGiyyFvnmNBv6inj7bSaVqQ7UQ= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.74.0/go.mod h1:13ekplz1UmvK99Vz2VjSBWPYqoRBEax5LPmA1tFHnhA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.97.0 h1:vqkBZCBjZLuDpErk9bEWS02ZhwEDMJBtyNmK0eLwi7A= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.97.0/go.mod h1:Z65edZWOfvG3/l5F/X42jnW4yMST5UeuktvG5QMCvF8= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.74.0 h1:A5xoBaMHX1WzLfvlqK6NBXq4XIbuSVJIpec5r6PDE7U= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.74.0/go.mod h1:TJT7HkhFPrJic30Vk4seF/eRk8sa0VQ442Xq/qd+DLY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.97.0 h1:fgzi0d0NRJfSnFP0amVR1+tscNpYPnPs7ODKOzJzBq8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.97.0/go.mod h1:x9pkj9/5Ej/8hwi+uHMPM70hcYpJ2cPf6gLFwGZpOv4= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.97.0 h1:IfJ9EkykXHBYdwuvZd0qfFcCHAAOuTEaiIlTRw6R9g4= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.97.0/go.mod h1:uTs2ukYAG9tCkoUhW39J9bNQAqwpqHhE85eeRVm6zCM= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.74.0 h1:pWNSPCKD+V4rC+MnZj8uErEbcsYUpEqU3InNYyafAPY= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.74.0/go.mod h1:0lXcDf6LUbtDxZZO3zDbRzMuL7gL1Q0FPOR8/3IBwaQ= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.97.0 h1:vPQQQezde/dqG/UzWFVeWPUE5YUU3neybTWtF6dSSLM= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.97.0/go.mod h1:oC3rBafNReIB/XZDshYZjjFulwOF1Feu3ZmY7aYJ4P4= 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.74.0 h1:NWd9+rQTd6pELLf3copo7CEuNgKp90kgyhPozpwax2U= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.74.0/go.mod h1:anSbwGOousKpnNAVMNP5YieA4KOFuEzHkvya0vvtsaI= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.97.0 h1:S6b9R3SexIKjaP6wB7+v20oX/7abc9vOsiazMA+j/gM= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.97.0/go.mod h1:GZYAz9b6feR5cZOxqYZip7kiBfYc7gU6x0XL2VatINw= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.74.0 h1:Law7+BImq8DIBsdniSX8Iy2/GH5CRHpT1gsRaC9ZT8A= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.74.0/go.mod h1:uiW3V9EX8A5DOoxqDLuSh++ewHr+owtonCSiqMcpy3w= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.97.0 h1:dHEhNYReKFxRAmVb9cXnpxwSCGgccps/SP+J1c2DOng= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.97.0/go.mod h1:IpzPnQdTgkYETRB7YwGJuzcZkSeznv/ZpxUWpALQL9s= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.74.0 h1:2uysjsaqkf9STFeJN/M6i/sSYEN5pZJ94Qd2/Hg1pKE= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.74.0/go.mod h1:qoGuayD7cAtshnKosIQHd6dobcn6/sqgUn0v/Cg2UB8= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.97.0 h1:LEC8VOsV6BXyuROoCmZlr1m9yRNqtWtNFt5p62RpoG0= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.97.0/go.mod h1:guZo8ioeCrww44DypI+3CIWDDGodECkr/H+il26EzsI= github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= @@ -565,8 +632,12 @@ github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7l github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 h1:ZCnq+JUrvXcDVhX/xRolRBZifmabN1HcS1wrPSvxhrU= github.com/openzipkin/zipkin-go v0.4.1 h1:kNd/ST2yLLWhaWrkgchya40TJabe8Hioj9udfPcEO5A= github.com/openzipkin/zipkin-go v0.4.1/go.mod h1:qY0VqDSN1pOBN94dBc6w2GJlWLiovAyg7Qt6/I9HecM= +github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA= +github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde h1:x0TT0RDC7UhAVbbWWBzr41ElhJx5tXPWkIHA2HWPRuw= github.com/pact-foundation/pact-go v1.0.4 h1:OYkFijGHoZAYbOIb1LWXrwKQbMMRUv1oQ89blD2Mh2Q= +github.com/parquet-go/parquet-go v0.20.2-0.20240416173845-962b3c5827c3 h1:dHzXGq6rltLEdYkZcB20RzdL+P3hu8/+rvEjna++/nc= +github.com/parquet-go/parquet-go v0.20.2-0.20240416173845-962b3c5827c3/go.mod h1:wMYanjuaE900FTDTNY00JU+67Oqh9uO0pYWRNoPGctQ= github.com/paulmach/orb v0.10.0 h1:guVYVqzxHE/CQ1KpfGO077TR0ATHSNjp4s6XGLn3W9s= github.com/paulmach/orb v0.10.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= @@ -581,6 +652,7 @@ github.com/phpdave11/gofpdi v1.0.13 h1:o61duiW8M9sMlkVXWlvP92sZJtGKENvW3VExs6dZu github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= 
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= github.com/pkg/profile v1.2.1 h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE= +github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= @@ -590,8 +662,12 @@ github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4 github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= +github.com/prometheus/statsd_exporter v0.26.0 h1:SQl3M6suC6NWQYEzOvIv+EF6dAMYEqIuZy+o4H9F5Ig= +github.com/prometheus/statsd_exporter v0.26.0/go.mod h1:GXFLADOmBTVDrHc7b04nX8ooq3azG61pnECNqT7O5DM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/relvacode/iso8601 v1.4.0 h1:GsInVSEJfkYuirYFxa80nMLbH2aydgZpIf52gYZXUJs= +github.com/relvacode/iso8601 v1.4.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I= github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4 h1:BN/Nyn2nWMoqGRA7G7paDNDqTXE30mXGqzzybrfo05w= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= @@ -622,6 +698,8 @@ github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0b github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad h1:fiWzISvDn0Csy5H0iwgAuJGQTUpVfEMJJd4nRFXogbc= +github.com/stoewer/parquet-cli v0.0.7 h1:rhdZODIbyMS3twr4OM3am8BPPT5pbfMcHLH93whDM5o= +github.com/stoewer/parquet-cli v0.0.7/go.mod h1:bskxHdj8q3H1EmfuCqjViFoeO3NEvs5lzZAQvI8Nfjk= github.com/streadway/amqp v1.0.0 h1:kuuDrUJFZL1QYL9hUNuCxNObNzB0bV/ZG5jV3RWAQgo= github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e h1:mOtuXaRAbVZsxAHVdPR3IjfmN8T1h2iczJLynhLybf8= github.com/substrait-io/substrait-go v0.4.2 h1:buDnjsb3qAqTaNbOR7VKmNgXf4lYQxWEcnSGUWBtmN8= @@ -696,45 +774,137 @@ gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b h1:7gd+rd8P3bqcn/9 go.einride.tech/aip v0.66.0 h1:XfV+NQX6L7EOYK11yoHHFtndeaWh3KbD9/cN/6iWEt8= go.opentelemetry.io/collector v0.74.0 h1:0s2DKWczGj/pLTsXGb1P+Je7dyuGx9Is4/Dri1+cS7g= go.opentelemetry.io/collector v0.74.0/go.mod h1:7NjZAvkhQ6E+NLN4EAH2hw3Nssi+F14t7mV7lMNXCto= +go.opentelemetry.io/collector v0.97.0 h1:qyOju13byHIKEK/JehmTiGMj4pFLa4kDyrOCtTmjHU0= +go.opentelemetry.io/collector v0.97.0/go.mod h1:V6xquYAaO2VHVu4DBK28JYuikRdZajh7DH5Vl/Y8NiA= go.opentelemetry.io/collector/component v0.74.0 h1:W32ILPgbA5LO+m9Se61hbbtiLM6FYusNM36K5/CCOi0= go.opentelemetry.io/collector/component v0.74.0/go.mod h1:zHbWqbdmnHeIZAuO3s1Fo/kWPC2oKuolIhlPmL4bzyo= +go.opentelemetry.io/collector/component v0.97.0 h1:vanKhXl5nptN8igRH4PqVYHOILif653vaPIKv6LCZCI= +go.opentelemetry.io/collector/component v0.97.0/go.mod 
h1:F/m3HMlkb16RKI7wJjgbECK1IZkAcmB8bu7yD8XOkwM= +go.opentelemetry.io/collector/config/configauth v0.97.0 h1:38M2uUsBzgD7sdJPPXUsOq1BFr6X6P4A5VFg+MOcRNY= +go.opentelemetry.io/collector/config/configauth v0.97.0/go.mod h1:BkCDatBU7CXXStrRPE1b4woj2VLxaYEMg2WTkb50BlI= +go.opentelemetry.io/collector/config/configcompression v1.4.0 h1:qWRKdl49lBvPUr6UWmyf1pR4EOBHN+66pDeGtfQ1Mbk= +go.opentelemetry.io/collector/config/configcompression v1.4.0/go.mod h1:O0fOPCADyGwGLLIf5lf7N3960NsnIfxsm6dr/mIpL+M= +go.opentelemetry.io/collector/config/configgrpc v0.97.0 h1:Ukl1GPtzSko4Pu8KV5jedD8OjySL/C+QgrfRdaakfHk= +go.opentelemetry.io/collector/config/configgrpc v0.97.0/go.mod h1:i8OrrxynYldlcZ6wPOUKNoZmmbUCDp3CzryRT+2mN7c= +go.opentelemetry.io/collector/config/confighttp v0.97.0 h1:Tfw4DtK5x66uSoRdbZc9tQTNGWEo/urR8RAedBdYtNU= +go.opentelemetry.io/collector/config/confighttp v0.97.0/go.mod h1:wyg4yXvCsk1CsfPBWQ3+rZDThz44Q0d35/1lJBHj5VI= +go.opentelemetry.io/collector/config/confignet v0.97.0 h1:KJjv10/YVMslSSLVWW/IIjpLM3JiO3rWvw5dK/t1H7g= +go.opentelemetry.io/collector/config/confignet v0.97.0/go.mod h1:3naWoPss70RhDHhYjGACi7xh4NcVRvs9itzIRVWyu1k= +go.opentelemetry.io/collector/config/configopaque v1.4.0 h1:5KgD9oLN+N07HqDsLzUrU0mE2pC8cMhrCSC1Nf8CEO4= +go.opentelemetry.io/collector/config/configopaque v1.4.0/go.mod h1:7Qzo69x7i+FaNELeA9jmZtVvfnR5lE6JYa5YEOCJPFQ= +go.opentelemetry.io/collector/config/configretry v0.97.0 h1:k7VwQ5H0oBLm6Fgm0ltfDDbmQVsiqSIY9ojijF0hiR0= +go.opentelemetry.io/collector/config/configretry v0.97.0/go.mod h1:s7A6ZGxK8bxqidFzwbr2pITzbsB2qf+aeHEDQDcanV8= +go.opentelemetry.io/collector/config/configtelemetry v0.97.0 h1:JS/WxK09A9m39D5OqsAWaoRe4tG7ESMnzDNIbZ5bD6c= +go.opentelemetry.io/collector/config/configtelemetry v0.97.0/go.mod h1:YV5PaOdtnU1xRomPcYqoHmyCr48tnaAREeGO96EZw8o= +go.opentelemetry.io/collector/config/configtls v0.97.0 h1:wmXj/rKQUGMZzbHVCTyB+xUWImsGxnLqhivwjBE0FdI= +go.opentelemetry.io/collector/config/configtls v0.97.0/go.mod h1:ev/fMI6hm1WTSHHEAEoVjF3RZj0qf38E/XO5itFku7k= +go.opentelemetry.io/collector/config/internal v0.97.0 h1:vhTzCm2u6MUAxdWPprkOePR/Kd57v2uF11twpza1E7o= +go.opentelemetry.io/collector/config/internal v0.97.0/go.mod h1:RVGDn9OH/KHT878cclG497/n2qxe54+zW+u/SVsRLNw= go.opentelemetry.io/collector/confmap v0.74.0 h1:tl4fSHC/MXZiEvsZhDhd03TgzvArOe69Qn020sZsTfQ= go.opentelemetry.io/collector/confmap v0.74.0/go.mod h1:NvUhMS2v8rniLvDAnvGjYOt0qBohk6TIibb1NuyVB1Q= +go.opentelemetry.io/collector/confmap v0.97.0 h1:0CGSk7YW9rPc6jCwJteJzHzN96HRoHTfuqI7J/EmZsg= +go.opentelemetry.io/collector/confmap v0.97.0/go.mod h1:AnJmZcZoOLuykSXGiAf3shi11ZZk5ei4tZd9dDTTpWE= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.97.0 h1:Tw0+JlvA1Z5xpvHYqzYXsPdsCaq6+oGoqw7fCymh+lc= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.97.0/go.mod h1:gp3XWfC1OpmwHZsODRIpy4XZtrNy1RryJhvK7sdNgmk= +go.opentelemetry.io/collector/confmap/provider/envprovider v0.97.0 h1:2F3yl+Vr6nJ0sN9HoYeebY5+lJ8OJ4VqxCY16SsVcXs= +go.opentelemetry.io/collector/confmap/provider/envprovider v0.97.0/go.mod h1:GrHP/mOgzx8+fcTRmgB/IgH3lG80nv2bFW1v6oPggRM= +go.opentelemetry.io/collector/confmap/provider/fileprovider v0.97.0 h1:5SXsBAA/6Hv76+ndBY0wZRYGNES/55SKu6VhP4kriqM= +go.opentelemetry.io/collector/confmap/provider/fileprovider v0.97.0/go.mod h1:YAj2CNxE1go08GiAxYO2HSeNkWv8g7C7DHFpEECttI8= +go.opentelemetry.io/collector/confmap/provider/httpprovider v0.97.0 h1:Wd4XR3cOznED8sYM0Qy0NlAToxvpEG8OH9O89RKp+pg= +go.opentelemetry.io/collector/confmap/provider/httpprovider v0.97.0/go.mod 
h1:2LIGxKR6dJPP5kxkRSTIeWuJ7Mto1Mv456+MlG86RH8= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.97.0 h1:suZwewHxcwA3z0kE6p6tjYcPKlGOYWoIjy/58gBK/c0= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.97.0/go.mod h1:R+cJ8wWzaJll+WCTUOM769zIk1vOb7NQARG9xWNbgUA= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.97.0 h1:ntcR7AMHwFRROTMW1ifx0xVu+ltbPafS/1r/ssxe+hM= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.97.0/go.mod h1:0THo600LMD0RGl7loqyaHgd/47Icgb64QOmqaj0j5dU= +go.opentelemetry.io/collector/connector v0.95.0 h1:HBfGg92Eyw5jkneu7jh3yed7w/dzi7v6SZ+CyqpNK7E= +go.opentelemetry.io/collector/connector v0.95.0/go.mod h1:an/NwQl07kw28iiwmZzN2KUeF3Br6UeW8u9iFpAlCXc= go.opentelemetry.io/collector/consumer v0.74.0 h1:+kjT/ixG+4SVSHg7u9mQe0+LNDc6PuG8Wn2hoL/yGYk= go.opentelemetry.io/collector/consumer v0.74.0/go.mod h1:MuGqt8/OKVAOjrh5WHr1TR2qwHizy64ZP2uNSr+XpvI= +go.opentelemetry.io/collector/consumer v0.97.0 h1:S0BZQtJQxSHT156S8a5rLt3TeWYP8Rq+jn8QEyWQUYk= +go.opentelemetry.io/collector/consumer v0.97.0/go.mod h1:1D06LURiZ/1KA2OnuKNeSn9bvFmJ5ZWe6L8kLu0osSY= go.opentelemetry.io/collector/exporter v0.74.0 h1:VZxDuVz9kJM/Yten3xA/abJwLJNkxLThiao6E1ULW7c= go.opentelemetry.io/collector/exporter v0.74.0/go.mod h1:kw5YoorpKqEpZZ/a5ODSoYFK1mszzcKBNORd32S8Z7c= +go.opentelemetry.io/collector/exporter v0.97.0 h1:kw/fQrpkhTz0/3I/Z0maRj0S8Mi0NK50/WwFuWrRYPc= +go.opentelemetry.io/collector/exporter v0.97.0/go.mod h1:EJYc4biKWxq3kD4Xh4SUSFbZ2lMsxjzwiCozikEDMjk= go.opentelemetry.io/collector/exporter/otlpexporter v0.74.0 h1:YKvTeYcBrJwbcXNy65fJ/xytUSMurpYn/KkJD0x+DAY= go.opentelemetry.io/collector/exporter/otlpexporter v0.74.0/go.mod h1:cRbvsnpSxzySoTSnXbOGPQZu9KHlEyKkTeE21f9Q1p4= +go.opentelemetry.io/collector/exporter/otlpexporter v0.95.0 h1:3GGUHciA0EGqzjAMqw/z9n2PavZjX1vQbCJ0QdChKmo= +go.opentelemetry.io/collector/exporter/otlpexporter v0.95.0/go.mod h1:8iKJe0ueSSns5u8vQjr0jRGKF1HFNSNUuMl/d7cxAlk= +go.opentelemetry.io/collector/extension v0.97.0 h1:LpjZ4KQgnhLG/u3l69QgWkX8qMqeS8IFKWMoDtbPIeE= +go.opentelemetry.io/collector/extension v0.97.0/go.mod h1:jWNG0Npi7AxiqwCclToskDfCQuNKHYHlBPJNnIKHp84= +go.opentelemetry.io/collector/extension/auth v0.97.0 h1:2AYGxSbsi1KC2DOOFbAe7valrERb86m7TfRY85X8hSE= +go.opentelemetry.io/collector/extension/auth v0.97.0/go.mod h1:uElLYtzMPA48mu9baxGIH6lHpOn76NLe4mVHnmV+hEY= go.opentelemetry.io/collector/featuregate v1.5.0 h1:uK8qnYQKz1TMkK+FDTFsywg/EybW/gbnOUaPNUkRznM= +go.opentelemetry.io/collector/otelcol v0.95.0 h1:lEwogX+fvO17SUgXT7/+q1DUXos0SwsAiUkhd0944AU= +go.opentelemetry.io/collector/otelcol v0.95.0/go.mod h1:ETOYHHPSN4a43LvdUwajNSjwr30euS74rjfsrLCWh2A= +go.opentelemetry.io/collector/processor v0.97.0 h1:L3R5R7x56LH2inF3sv0ZOsFfulVo8yuIFhO/OgpkCU0= +go.opentelemetry.io/collector/processor v0.97.0/go.mod h1:OsxBAPQ2fDytAn+yWLdEQ1yjYfl/OIak1AfKGfI8ALs= go.opentelemetry.io/collector/receiver v0.74.0 h1:jlgBFa0iByvn8VuX27UxtqiPiZE8ejmU5lb1nSptWD8= go.opentelemetry.io/collector/receiver v0.74.0/go.mod h1:SQkyATvoZCJefNkI2jnrR63SOdrmDLYCnQqXJ7ACqn0= +go.opentelemetry.io/collector/receiver v0.97.0 h1:ozzE5MhIPtfnYA/UKB/NCcgxSmeLqdwErboi6B/IpLQ= +go.opentelemetry.io/collector/receiver v0.97.0/go.mod h1:1TCN9DRuB45+xKqlwv4BMQR6qXgaJeSSNezFTJhmDUo= go.opentelemetry.io/collector/receiver/otlpreceiver v0.74.0 h1:e/X/W0z2Jtpy3Yd3CXkmEm9vSpKq/P3pKUrEVMUFBRw= go.opentelemetry.io/collector/receiver/otlpreceiver v0.74.0/go.mod h1:9X9/RYFxJIaK0JLlRZ0PpmQSSlYpY+r4KsTOj2jWj14= +go.opentelemetry.io/collector/receiver/otlpreceiver 
v0.97.1-0.20240327165504-2b0decfcebeb h1:gaxwgzaHKnlx6dAjFexd4eiFD3l0ePwdueqdNystJDU= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.97.1-0.20240327165504-2b0decfcebeb/go.mod h1:JL1oxtysJT2TN80p/nC9vrWkENh/If7kmMRio7yF9WE= go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY= +go.opentelemetry.io/collector/service v0.95.0 h1:t6RUHV7ByFjkjPKGz5n6n4wIoXZLC8HDQLmnrmCYGhg= +go.opentelemetry.io/collector/service v0.95.0/go.mod h1:4yappQmDE5UZmLE9wwtj6IPM4W5KGLIYfObEAaejtQc= +go.opentelemetry.io/contrib/config v0.4.0 h1:Xb+ncYOqseLroMuBesGNRgVQolXcXOhMj7EhGwJCdHs= +go.opentelemetry.io/contrib/config v0.4.0/go.mod h1:drNk2xRqLWW4/amk6Uh1S+sDAJTc7bcEEN1GfJzj418= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.51.0/go.mod h1:ZvX/taFlN6TGaOOM6D42wrNwPKUV1nGO2FuUXkityBU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= go.opentelemetry.io/contrib/propagators/b3 v1.15.0 h1:bMaonPyFcAvZ4EVzkUNkfnUHP5Zi63CIDlA3dRsEg8Q= go.opentelemetry.io/contrib/propagators/b3 v1.15.0/go.mod h1:VjU0g2v6HSQ+NwfifambSLAeBgevjIcqmceaKWEzl0c= +go.opentelemetry.io/contrib/propagators/b3 v1.23.0 h1:aaIGWc5JdfRGpCafLRxMJbD65MfTa206AwSKkvGS0Hg= +go.opentelemetry.io/contrib/propagators/b3 v1.23.0/go.mod h1:Gyz7V7XghvwTq+mIhLFlTgcc03UDroOg8vezs4NLhwU= +go.opentelemetry.io/contrib/propagators/jaeger v1.26.0/go.mod h1:W/cylm0ZtJK1uxsuTqoYGYPnqpZ8CeVGgW7TwfXPsGw= +go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= go.opentelemetry.io/otel/bridge/opencensus v0.37.0 h1:ieH3gw7b1eg90ARsFAlAsX5LKVZgnCYfaDwRrK6xLHU= go.opentelemetry.io/otel/bridge/opencensus v0.37.0/go.mod h1:ddiK+1PE68l/Xk04BGTh9Y6WIcxcLrmcVxVlS0w5WZ0= +go.opentelemetry.io/otel/bridge/opencensus v1.26.0 h1:DZzxj9QjznMVoehskOJnFP2gsTCWtDTFBDvFhPAY7nc= +go.opentelemetry.io/otel/bridge/opencensus v1.26.0/go.mod h1:rJiX0KrF5m8Tm1XE8jLczpAv5zUaDcvhKecFG0ZoFG4= go.opentelemetry.io/otel/bridge/opentracing v1.10.0 h1:WzAVGovpC1s7KD5g4taU6BWYZP3QGSDVTlbRu9fIHw8= go.opentelemetry.io/otel/bridge/opentracing v1.10.0/go.mod h1:J7GLR/uxxqMAzZptsH0pjte3Ep4GacTCrbGBoDuHBqk= +go.opentelemetry.io/otel/bridge/opentracing v1.26.0 h1:Q/dHj0DOhfLMAs5u5ucAbC7gy66x9xxsZRLpHCJ4XhI= +go.opentelemetry.io/otel/bridge/opentracing v1.26.0/go.mod h1:HfypvOw/8rqu4lXDhwaxVK1ibBAi1lTMXBHV9rywOCw= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.23.1 h1:ZqRWZJGHXV/1yCcEEVJ6/Uz2JtM79DNS8OZYa3vVY/A= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.23.1/go.mod h1:D7ynngPWlGJrqyGSDOdscuv7uqttfCE3jcBvffDv9y4= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.23.1 h1:q/Nj5/2TZRIt6PderQ9oU0M00fzoe8UZuINGw6ETGTw= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.23.1/go.mod h1:DTE9yAu6r08jU3xa68GiSeI7oRcSEQ2RpKbbQGO+dWM= go.opentelemetry.io/otel/exporters/prometheus v0.37.0 h1:NQc0epfL0xItsmGgSXgfbH2C1fq2VLXkZoDFsfRNHpc= go.opentelemetry.io/otel/exporters/prometheus v0.37.0/go.mod h1:hB8qWjsStK36t50/R0V2ULFb4u95X/Q6zupXLgvjTh8= +go.opentelemetry.io/otel/exporters/prometheus v0.46.0 h1:I8WIFXR351FoLJYuloU4EgXbtNX2URfU/85pUPheIEQ= +go.opentelemetry.io/otel/exporters/prometheus v0.46.0/go.mod h1:ztwVUHe5DTR/1v7PeuGRnU5Bbd4QKYwApWmuutKsJSs= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.23.1 h1:C8r95vDR125t815KD+b1tI0Fbc1pFnwHTBxkbIZ6Szc= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.23.1/go.mod 
h1:Qr0qomr64jentMtOjWMbtYeJMSuMSlsPEjmnRA2sWZ4= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0 h1:s0PHtIkN+3xrbDOpt2M8OTG92cWqUESvzh2MxiR5xY8= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0/go.mod h1:hZlFbDbRt++MMPCCfSJfmhkGIWnX1h3XjkfxZUjLrIA= go.opentelemetry.io/otel/sdk/metric v0.39.0 h1:Kun8i1eYf48kHH83RucG93ffz0zGV1sh46FAScOTuDI= go.opentelemetry.io/otel/sdk/metric v0.39.0/go.mod h1:piDIRgjcK7u0HCL5pCA4e74qpK/jk3NiUoAHATVAmiI= +go.opentelemetry.io/otel/sdk/metric v1.26.0 h1:cWSks5tfriHPdWFnl+qpX3P681aAYqlZHcAyHw5aU9Y= +go.opentelemetry.io/otel/sdk/metric v1.26.0/go.mod h1:ClMFFknnThJCksebJwz7KIyEDHO+nTB6gK8obLy8RyE= +go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc= golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/image v0.0.0-20220302094943-723b81ca9867 h1:TcHcE0vrmgzNH1v3ppjcMGbhG5+9fMuvOmUYwNEF4q4= +golang.org/x/image v0.6.0 h1:bR8b5okrPI3g/gyZakLZHeWxAR8Dn5CyxXv1hLH5g/4= +golang.org/x/image v0.6.0/go.mod h1:MXLdDR43H7cDJq5GEGXEVeeNhPgi+YYEQ2pC1byI1x0= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457 h1:zf5N6UOrA487eEFacMePxjXAJctxKmyjKUsjA11Uzuk= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= +gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= gonum.org/v1/plot v0.10.1 h1:dnifSs43YJuNMDzB7v8wV64O4ABBHReuAVAoBxqBqS4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/genproto/googleapis/bytestream v0.0.0-20240325203815-454cdb8f5daa h1:wBkzraZsSqhj1M4L/nMrljUU6XasJkgHvUsq8oRGwF0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= +google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/cheggaaa/pb.v1 v1.0.25 h1:Ev7yu1/f6+d+b3pi5vPdRPc6nNtP1umSfcWiEfRqv6I= gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= @@ -748,6 +918,7 @@ gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= gopkg.in/telebot.v3 v3.2.1 h1:3I4LohaAyJBiivGmkfB+CiVu7QFOWkuZ4+KHgO/G3rs= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= 
@@ -756,6 +927,7 @@ k8s.io/component-base v0.0.0-20240417101527-62c04b35eff6/go.mod h1:l0ukbPS0lwFxO k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/kube-openapi v0.0.0-20240220201932-37d671a357a5/go.mod h1:Pa1PvrP7ACSkuX6I7KYomY6cmMA0Tx86waBhDUgoKPw= lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q= modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y= diff --git a/packages/grafana-data/src/events/index.ts b/packages/grafana-data/src/events/index.ts deleted file mode 100644 index 358756536ff..00000000000 --- a/packages/grafana-data/src/events/index.ts +++ /dev/null @@ -1,4 +0,0 @@ -export * from './eventFactory'; -export * from './types'; -export * from './EventBus'; -export * from './common'; diff --git a/packages/grafana-data/src/field/displayProcessor.test.ts b/packages/grafana-data/src/field/displayProcessor.test.ts index 94543fa2554..2cd13b48b58 100644 --- a/packages/grafana-data/src/field/displayProcessor.test.ts +++ b/packages/grafana-data/src/field/displayProcessor.test.ts @@ -1,5 +1,5 @@ import { systemDateFormats } from '../datetime/formats'; -import { createTheme } from '../themes'; +import { createTheme } from '../themes/createTheme'; import { FieldConfig, FieldType } from '../types/dataFrame'; import { DisplayProcessor, DisplayValue } from '../types/displayValue'; import { ThresholdsMode } from '../types/thresholds'; diff --git a/packages/grafana-data/src/field/fieldColor.test.ts b/packages/grafana-data/src/field/fieldColor.test.ts index bcf95f1fd79..6a1e0abf285 100644 --- a/packages/grafana-data/src/field/fieldColor.test.ts +++ b/packages/grafana-data/src/field/fieldColor.test.ts @@ -1,4 +1,4 @@ -import { createTheme } from '../themes'; +import { createTheme } from '../themes/createTheme'; import { Field, FieldType } from '../types/dataFrame'; import { FieldColorModeId } from '../types/fieldColor'; diff --git a/packages/grafana-data/src/field/fieldColor.ts b/packages/grafana-data/src/field/fieldColor.ts index c341b84d30e..93818c8a3af 100644 --- a/packages/grafana-data/src/field/fieldColor.ts +++ b/packages/grafana-data/src/field/fieldColor.ts @@ -2,7 +2,7 @@ import { interpolateRgbBasis } from 'd3-interpolate'; import stringHash from 'string-hash'; import tinycolor from 'tinycolor2'; -import { colorManipulator } from '../themes'; +import { getContrastRatio } from '../themes/colorManipulator'; import { GrafanaTheme2 } from '../themes/types'; import { reduceField } from '../transformations/fieldReducer'; import { Field } from '../types/dataFrame'; @@ -70,10 +70,8 @@ export const fieldColorModeRegistry = new Registry(() => { getColors: (theme: GrafanaTheme2) => { return theme.visualization.palette.filter( (color) => - colorManipulator.getContrastRatio( - theme.visualization.getColorByName(color), - theme.colors.background.primary - ) >= theme.colors.contrastThreshold + getContrastRatio(theme.visualization.getColorByName(color), theme.colors.background.primary) >= + theme.colors.contrastThreshold ); }, }), diff --git a/packages/grafana-data/src/field/fieldDisplay.test.ts b/packages/grafana-data/src/field/fieldDisplay.test.ts index 35bf73b9299..718c0e54430 100644 --- a/packages/grafana-data/src/field/fieldDisplay.test.ts +++ 
b/packages/grafana-data/src/field/fieldDisplay.test.ts @@ -1,7 +1,7 @@ import { merge } from 'lodash'; import { toDataFrame } from '../dataframe/processDataFrame'; -import { createTheme } from '../themes'; +import { createTheme } from '../themes/createTheme'; import { ReducerID } from '../transformations/fieldReducer'; import { FieldConfigPropertyItem } from '../types/fieldOverrides'; import { MappingType, SpecialValueMatch, ValueMapping } from '../types/valueMapping'; diff --git a/packages/grafana-data/src/field/fieldDisplay.ts b/packages/grafana-data/src/field/fieldDisplay.ts index 29a3ebc095f..50f32d6e25e 100644 --- a/packages/grafana-data/src/field/fieldDisplay.ts +++ b/packages/grafana-data/src/field/fieldDisplay.ts @@ -2,7 +2,7 @@ import { isEmpty } from 'lodash'; import { DataFrameView } from '../dataframe/DataFrameView'; import { getTimeField } from '../dataframe/processDataFrame'; -import { GrafanaTheme2 } from '../themes'; +import { GrafanaTheme2 } from '../themes/types'; import { reduceField, ReducerID } from '../transformations/fieldReducer'; import { getFieldMatcher } from '../transformations/matchers'; import { FieldMatcherID } from '../transformations/matchers/ids'; diff --git a/packages/grafana-data/src/field/fieldOverrides.test.ts b/packages/grafana-data/src/field/fieldOverrides.test.ts index c6bba4debf3..84329ca5e86 100644 --- a/packages/grafana-data/src/field/fieldOverrides.test.ts +++ b/packages/grafana-data/src/field/fieldOverrides.test.ts @@ -1,7 +1,7 @@ import { ArrayDataFrame } from '../dataframe/ArrayDataFrame'; import { createDataFrame, toDataFrame } from '../dataframe/processDataFrame'; import { relativeToTimeRange } from '../datetime/rangeutil'; -import { createTheme } from '../themes'; +import { createTheme } from '../themes/createTheme'; import { FieldMatcherID } from '../transformations/matchers/ids'; import { ScopedVars } from '../types/ScopedVars'; import { GrafanaConfig } from '../types/config'; diff --git a/packages/grafana-data/src/field/fieldOverrides.ts b/packages/grafana-data/src/field/fieldOverrides.ts index 47f5552a0ce..549599d81b4 100644 --- a/packages/grafana-data/src/field/fieldOverrides.ts +++ b/packages/grafana-data/src/field/fieldOverrides.ts @@ -7,8 +7,8 @@ import { VariableFormatID } from '@grafana/schema'; import { compareArrayValues, compareDataFrameStructures } from '../dataframe/frameComparisons'; import { guessFieldTypeForField } from '../dataframe/processDataFrame'; import { PanelPlugin } from '../panel/PanelPlugin'; -import { GrafanaTheme2 } from '../themes'; import { asHexString } from '../themes/colorManipulator'; +import { GrafanaTheme2 } from '../themes/types'; import { ReducerID, reduceField } from '../transformations/fieldReducer'; import { fieldMatchers } from '../transformations/matchers'; import { ScopedVars, DataContextScopedVar } from '../types/ScopedVars'; diff --git a/packages/grafana-data/src/field/getFieldDisplayValuesProxy.test.tsx b/packages/grafana-data/src/field/getFieldDisplayValuesProxy.test.tsx index ceab04f404b..d976ea8b703 100644 --- a/packages/grafana-data/src/field/getFieldDisplayValuesProxy.test.tsx +++ b/packages/grafana-data/src/field/getFieldDisplayValuesProxy.test.tsx @@ -1,5 +1,5 @@ import { createDataFrame, toDataFrame } from '../dataframe/processDataFrame'; -import { createTheme } from '../themes'; +import { createTheme } from '../themes/createTheme'; import { applyFieldOverrides } from './fieldOverrides'; import { getFieldDisplayValuesProxy } from './getFieldDisplayValuesProxy'; diff --git 
a/packages/grafana-data/src/field/scale.test.ts b/packages/grafana-data/src/field/scale.test.ts index ba941992765..141b73c3f87 100644 --- a/packages/grafana-data/src/field/scale.test.ts +++ b/packages/grafana-data/src/field/scale.test.ts @@ -1,4 +1,4 @@ -import { createTheme } from '../themes'; +import { createTheme } from '../themes/createTheme'; import { Field, FieldType } from '../types/dataFrame'; import { FieldColorModeId } from '../types/fieldColor'; import { ThresholdsMode } from '../types/thresholds'; diff --git a/packages/grafana-data/src/field/standardFieldConfigEditorRegistry.ts b/packages/grafana-data/src/field/standardFieldConfigEditorRegistry.ts index 2bc3ac995be..83b7f5050e3 100644 --- a/packages/grafana-data/src/field/standardFieldConfigEditorRegistry.ts +++ b/packages/grafana-data/src/field/standardFieldConfigEditorRegistry.ts @@ -1,6 +1,6 @@ import { ComponentType } from 'react'; -import { EventBus } from '../events'; +import { EventBus } from '../events/types'; import { DataFrame } from '../types/dataFrame'; import { VariableSuggestionsScope, VariableSuggestion } from '../types/dataLink'; import { InterpolateFunction } from '../types/panel'; diff --git a/packages/grafana-data/src/geo/layer.ts b/packages/grafana-data/src/geo/layer.ts index 7b21393ae19..0c4f4ac7799 100644 --- a/packages/grafana-data/src/geo/layer.ts +++ b/packages/grafana-data/src/geo/layer.ts @@ -4,9 +4,9 @@ import { ReactNode } from 'react'; import { MapLayerOptions, FrameGeometrySourceMode } from '@grafana/schema'; -import { EventBus } from '../events'; +import { EventBus } from '../events/types'; import { StandardEditorContext } from '../field/standardFieldConfigEditorRegistry'; -import { GrafanaTheme2 } from '../themes'; +import { GrafanaTheme2 } from '../themes/types'; import { PanelData } from '../types/panel'; import { PanelOptionsEditorBuilder } from '../utils/OptionsUIBuilders'; import { RegistryItemWithOptions } from '../utils/Registry'; diff --git a/packages/grafana-data/src/index.ts b/packages/grafana-data/src/index.ts index 97860df3a19..09c7d2af88d 100644 --- a/packages/grafana-data/src/index.ts +++ b/packages/grafana-data/src/index.ts @@ -4,12 +4,6 @@ * @packageDocumentation */ -export * from './text'; -export * from './events'; -export * from './themes'; -export * from './monaco'; -export * from './geo/layer'; -export * from './query'; export { amendTable, trimTable, type Table } from './table/amendTimeSeries'; // DataFrames @@ -38,7 +32,6 @@ export { type PartialDataFrame, createDataFrame, } from './dataframe/processDataFrame'; - export { type Dimension, type Dimensions, @@ -49,7 +42,6 @@ export { getAllValuesFromDimension, getDimensionByName, } from './dataframe/dimensions'; - export { anySeriesWithTimeField, hasTimeField, @@ -65,9 +57,7 @@ export { type StreamingFrameOptions, closestIdx, } from './dataframe/StreamingDataFrame'; - export { ArrayDataFrame, arrayToDataFrame } from './dataframe/ArrayDataFrame'; - export { type DataFrameJSON, type DataFrameData, @@ -79,9 +69,67 @@ export { dataFrameFromJSON, dataFrameToJSON, } from './dataframe/DataFrameJSON'; - export { compareDataFrameStructures, compareArrayValues, shallowCompare } from './dataframe/frameComparisons'; +// Query +export { getNextRefId } from './query/refId'; + +// Geo +export { + FrameGeometrySourceMode, + type FrameGeometrySource, + type MapLayerOptions, + type MapLayerHandler, + type MapLayerRegistryItem, +} from './geo/layer'; + +// Text +export { + escapeStringForRegex, + unEscapeStringFromRegex, + stringStartsAsRegEx, 
+ stringToJsRegex, + stringToMs, + toNumberString, + toIntegerOrUndefined, + toFloatOrUndefined, + toPascalCase, + escapeRegex, +} from './text/string'; +export { type TextMatch, findHighlightChunksInText, findMatchesInText, parseFlags } from './text/text'; +export { type RenderMarkdownOptions, renderMarkdown, renderTextPanelMarkdown } from './text/markdown'; +export { textUtil } from './text/sanitize'; + +// Events +export { eventFactory } from './events/eventFactory'; +export { + BusEventBase, + BusEventWithPayload, + type BusEvent, + type BusEventType, + type BusEventHandler, + type EventFilterOptions, + type EventBus, + type AppEvent, + type LegacyEmitter, + type LegacyEventHandler, + type EventBusExtended, +} from './events/types'; +export { EventBusSrv } from './events/EventBus'; +export { + type DataHoverPayload, + DataHoverEvent, + DataHoverClearEvent, + DataSelectEvent, + AnnotationChangeEvent, + type DashboardLoadedEventPayload, + DashboardLoadedEvent, + DataSourceUpdatedSuccessfully, + DataSourceTestSucceeded, + DataSourceTestFailed, + SetPanelAttentionEvent, +} from './events/common'; + // Field export { getFieldColorModeForField, @@ -106,7 +154,6 @@ export { getUniqueFieldName, } from './field/fieldState'; export { getScaleCalculator, getFieldConfigWithMinMax, getMinMaxAndDelta } from './field/scale'; - export { type ReduceDataOptions, VAR_SERIES_NAME, @@ -123,9 +170,7 @@ export { getDisplayValueAlignmentFactors, fixCellTemplateExpressions, } from './field/fieldDisplay'; - export { getDisplayProcessor, getRawDisplayProcessor } from './field/displayProcessor'; - export { type StandardEditorContext, type StandardEditorProps, @@ -133,7 +178,6 @@ export { standardFieldConfigEditorRegistry, standardEditorsRegistry, } from './field/standardFieldConfigEditorRegistry'; - export { identityOverrideProcessor, numberOverrideProcessor, @@ -173,7 +217,6 @@ export { convertOldAngularValueMappings, LegacyMappingType } from './utils/value export { containsSearchFilter, type SearchFilterOptions, getSearchFilterScopedVar } from './utils/variables'; export { renderLegendFormat } from './utils/legend'; export { matchPluginId } from './utils/matchPluginId'; - export { type RegistryItem, type RegistryItemWithOptions, Registry } from './utils/Registry'; export { getDataSourceRef, @@ -191,9 +234,7 @@ export { updateDatasourcePluginSecureJsonDataOption, updateDatasourcePluginResetOption, } from './utils/datasource'; - export { deprecationWarning } from './utils/deprecationWarning'; - export { CSVHeaderStyle, type CSVConfig, @@ -203,10 +244,9 @@ export { CSVReader, toCSV, } from './utils/csv'; - export { parseLabels, findCommonLabels, findUniqueLabels, matchAllLabels, formatLabels } from './utils/labels'; export { roundDecimals, guessDecimals } from './utils/numbers'; -export { objRemoveUndefined, isEmptyObject } from './utils/object'; +export { objRemoveUndefined, isEmptyObject, safeStringifyValue } from './utils/object'; export { classicColors } from './utils/namedColorsPalette'; export { getSeriesTimeStep, hasMsResolution } from './utils/series'; export { BinaryOperationID, type BinaryOperation, binaryOperators } from './utils/binaryOperators'; @@ -228,10 +268,8 @@ export { getValueMatcher, } from './transformations/matchers'; export { type FieldValueMatcherConfig } from './transformations/matchers/fieldValueMatcher'; - export { DataTransformerID } from './transformations/transformers/ids'; export { MatcherID, FieldMatcherID, FrameMatcherID, ValueMatcherID } from './transformations/matchers/ids'; - 
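A note on the index.ts rework in this hunk: the `export *` barrels are removed, and both the package entry point and internal call sites now name each symbol and the concrete module that defines it. A minimal sketch of the resulting import style, assuming code inside packages/grafana-data (the symbols shown are just examples taken from the hunks above, not an exhaustive list):

// Internal modules import from the defining file instead of the old
// barrels ('../themes', '../events', '../text', ...):
import { createTheme } from '../themes/createTheme';
import { getContrastRatio } from '../themes/colorManipulator';
import type { EventBus } from '../events/types';
import { stringToJsRegex } from '../text/string';

// External consumers are unaffected: '@grafana/data' still re-exports these
// names, now listed explicitly in index.ts rather than via `export *`.

Explicit re-exports keep the public surface unchanged while making the internal module graph flatter, which typically helps tree-shaking and avoids accidental circular imports.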
export { ReducerID, isReducerID, @@ -241,7 +279,6 @@ export { defaultCalcs, doStandardCalcs, } from './transformations/fieldReducer'; - export { transformDataFrame } from './transformations/transformDataFrame'; export { type TransformerRegistryItem, @@ -262,11 +299,30 @@ export { } from './transformations/transformers/joinDataFrames'; export * from './transformations/transformers/histogram'; export { ensureTimeField } from './transformations/transformers/convertFieldType'; - // Required for Sparklines util to work in @grafana/data, but ideally kept internal export { applyNullInsertThreshold } from './transformations/transformers/nulls/nullInsertThreshold'; export { nullToValue } from './transformations/transformers/nulls/nullToValue'; +// Monaco +export { type MonacoLanguageRegistryItem, monacoLanguageRegistry } from './monaco/languageRegistry'; + +// Theme +export { createTheme } from './themes/createTheme'; +export { getThemeById, getBuiltInThemes, type ThemeRegistryItem } from './themes/registry'; +export type { NewThemeOptions } from './themes/createTheme'; +export type { ThemeRichColor, GrafanaTheme2 } from './themes/types'; +export type { ThemeColors } from './themes/createColors'; +export type { ThemeBreakpoints, ThemeBreakpointsKey } from './themes/breakpoints'; +export type { ThemeShadows } from './themes/createShadows'; +export type { ThemeShape } from './themes/createShape'; +export type { ThemeTypography, ThemeTypographyVariant, ThemeTypographyVariantTypes } from './themes/createTypography'; +export type { ThemeTransitions } from './themes/createTransitions'; +export type { ThemeSpacing, ThemeSpacingTokens } from './themes/createSpacing'; +export type { ThemeZIndices } from './themes/zIndex'; +export type { ThemeVisualizationColors, ThemeVizColor, ThemeVizHue } from './themes/createVisualizationColors'; +export { colorManipulator } from './themes/colorManipulator'; +export { ThemeContext } from './themes/context'; + // ValueFormats export { type FormattedValue, diff --git a/packages/grafana-data/src/monaco/index.ts b/packages/grafana-data/src/monaco/index.ts deleted file mode 100644 index 4aed44e4d94..00000000000 --- a/packages/grafana-data/src/monaco/index.ts +++ /dev/null @@ -1 +0,0 @@ -export * from './languageRegistry'; diff --git a/packages/grafana-data/src/query/index.ts b/packages/grafana-data/src/query/index.ts deleted file mode 100644 index b01ae2451d5..00000000000 --- a/packages/grafana-data/src/query/index.ts +++ /dev/null @@ -1 +0,0 @@ -export * from './refId'; diff --git a/packages/grafana-data/src/query/refId.test.ts b/packages/grafana-data/src/query/refId.test.ts index c818f6156cc..77da49eec4b 100644 --- a/packages/grafana-data/src/query/refId.test.ts +++ b/packages/grafana-data/src/query/refId.test.ts @@ -1,6 +1,6 @@ import { DataQuery } from '@grafana/schema'; -import { getNextRefId } from '.'; +import { getNextRefId } from './refId'; export interface TestQuery extends DataQuery { name?: string; diff --git a/packages/grafana-data/src/table/amendTimeSeries.ts b/packages/grafana-data/src/table/amendTimeSeries.ts index 4eb35d0a6cf..b93fa116a32 100644 --- a/packages/grafana-data/src/table/amendTimeSeries.ts +++ b/packages/grafana-data/src/table/amendTimeSeries.ts @@ -1,4 +1,4 @@ -import { closestIdx } from '../'; +import { closestIdx } from '../dataframe/StreamingDataFrame'; export type Table = [times: number[], ...values: any[][]]; diff --git a/packages/grafana-data/src/text/index.ts b/packages/grafana-data/src/text/index.ts deleted file mode 100644 index 
f9ca55a5472..00000000000 --- a/packages/grafana-data/src/text/index.ts +++ /dev/null @@ -1,24 +0,0 @@ -export * from './string'; -export * from './markdown'; -export * from './text'; -import { - escapeHtml, - hasAnsiCodes, - sanitize, - sanitizeUrl, - sanitizeTextPanelContent, - sanitizeSVGContent, - sanitizeTrustedTypes, - sanitizeTrustedTypesRSS, -} from './sanitize'; - -export const textUtil = { - escapeHtml, - hasAnsiCodes, - sanitize, - sanitizeTextPanelContent, - sanitizeUrl, - sanitizeSVGContent, - sanitizeTrustedTypes, - sanitizeTrustedTypesRSS, -}; diff --git a/packages/grafana-data/src/text/sanitize.ts b/packages/grafana-data/src/text/sanitize.ts index b60d053deff..aa3e8a7e311 100644 --- a/packages/grafana-data/src/text/sanitize.ts +++ b/packages/grafana-data/src/text/sanitize.ts @@ -100,3 +100,14 @@ export function escapeHtml(str: string): string { .replace(/'/g, '&#39;') .replace(/"/g, '&quot;'); } + +export const textUtil = { + escapeHtml, + hasAnsiCodes, + sanitize, + sanitizeTextPanelContent, + sanitizeUrl, + sanitizeSVGContent, + sanitizeTrustedTypes, + sanitizeTrustedTypesRSS, +}; diff --git a/packages/grafana-data/src/themes/colorManipulator.ts b/packages/grafana-data/src/themes/colorManipulator.ts index dcd0ce481e2..9964e9a09bc 100644 --- a/packages/grafana-data/src/themes/colorManipulator.ts +++ b/packages/grafana-data/src/themes/colorManipulator.ts @@ -371,3 +371,20 @@ interface DecomposeColor { values: any; colorSpace?: string; } + +export const colorManipulator = { + clamp, + hexToRgb, + rgbToHex, + asHexString, + asRgbString, + hslToRgb, + decomposeColor, + recomposeColor, + getContrastRatio, + getLuminance, + emphasize, + alpha, + darken, + lighten, +}; diff --git a/packages/grafana-data/src/themes/index.ts b/packages/grafana-data/src/themes/index.ts deleted file mode 100644 index 519dc9083d8..00000000000 --- a/packages/grafana-data/src/themes/index.ts +++ /dev/null @@ -1,18 +0,0 @@ -export { createTheme } from './createTheme'; -export { getThemeById, getBuiltInThemes, type ThemeRegistryItem } from './registry'; -export type { NewThemeOptions } from './createTheme'; -export type { ThemeRichColor, GrafanaTheme2 } from './types'; -export type { ThemeColors } from './createColors'; -export type { ThemeBreakpoints, ThemeBreakpointsKey } from './breakpoints'; -export type { ThemeShadows } from './createShadows'; -export type { ThemeShape } from './createShape'; -export type { ThemeTypography, ThemeTypographyVariant, ThemeTypographyVariantTypes } from './createTypography'; -export type { ThemeTransitions } from './createTransitions'; -export type { ThemeSpacing, ThemeSpacingTokens } from './createSpacing'; -export type { ThemeZIndices } from './zIndex'; -export type { ThemeVisualizationColors, ThemeVizColor, ThemeVizHue } from './createVisualizationColors'; - -/** Exporting the module like this to be able to generate docs properly.
*/ -import * as colorManipulator from './colorManipulator'; -export { colorManipulator }; -export { ThemeContext } from './context'; diff --git a/packages/grafana-data/src/transformations/matchers/refIdMatcher.ts b/packages/grafana-data/src/transformations/matchers/refIdMatcher.ts index f8486a36d69..629600bacfa 100644 --- a/packages/grafana-data/src/transformations/matchers/refIdMatcher.ts +++ b/packages/grafana-data/src/transformations/matchers/refIdMatcher.ts @@ -1,4 +1,4 @@ -import { stringToJsRegex } from '../../text'; +import { stringToJsRegex } from '../../text/string'; import { DataFrame } from '../../types/dataFrame'; import { FrameMatcherInfo } from '../../types/transformations'; diff --git a/packages/grafana-data/src/transformations/transformers/histogram.ts b/packages/grafana-data/src/transformations/transformers/histogram.ts index a94cd182138..bc608996985 100644 --- a/packages/grafana-data/src/transformations/transformers/histogram.ts +++ b/packages/grafana-data/src/transformations/transformers/histogram.ts @@ -1,7 +1,8 @@ import { map } from 'rxjs/operators'; import { getDisplayProcessor } from '../../field/displayProcessor'; -import { createTheme, GrafanaTheme2 } from '../../themes'; +import { createTheme } from '../../themes/createTheme'; +import { GrafanaTheme2 } from '../../themes/types'; import { DataFrame, Field, FieldConfig, FieldType } from '../../types/dataFrame'; import { DataFrameType } from '../../types/dataFrameTypes'; import { DataTransformContext, SynchronousDataTransformerInfo } from '../../types/transformations'; diff --git a/packages/grafana-data/src/types/config.ts b/packages/grafana-data/src/types/config.ts index b8703f55aaf..f4eab1b74a5 100644 --- a/packages/grafana-data/src/types/config.ts +++ b/packages/grafana-data/src/types/config.ts @@ -1,6 +1,6 @@ import { SystemDateFormatSettings } from '../datetime/formats'; import { MapLayerOptions } from '../geo/layer'; -import { GrafanaTheme2 } from '../themes'; +import { GrafanaTheme2 } from '../themes/types'; import { DataSourceInstanceSettings } from './datasource'; import { FeatureToggles } from './featureToggles.gen'; @@ -232,6 +232,7 @@ export interface GrafanaConfig { cloudMigrationIsTarget?: boolean; listDashboardScopesEndpoint?: string; listScopesEndpoint?: string; + reportingStaticContext?: Record; // The namespace to use for kubernetes apiserver requests namespace: string; diff --git a/packages/grafana-data/src/types/datasource.ts b/packages/grafana-data/src/types/datasource.ts index d4970c3d750..524ef8b1c39 100644 --- a/packages/grafana-data/src/types/datasource.ts +++ b/packages/grafana-data/src/types/datasource.ts @@ -1,6 +1,8 @@ import { ComponentType } from 'react'; import { Observable } from 'rxjs'; +import { DataSourceRef } from '@grafana/schema'; + import { makeClassES5Compatible } from '../utils/makeClassES5Compatible'; import { ScopedVars } from './ScopedVars'; @@ -11,7 +13,7 @@ import { KeyValue, LoadingState, TableData, TimeSeries } from './data'; import { DataFrame, DataFrameDTO } from './dataFrame'; import { PanelData } from './panel'; import { GrafanaPlugin, PluginMeta } from './plugin'; -import { DataQuery, DataSourceRef } from './query'; +import { DataQuery } from './query'; import { Scope } from './scopes'; import { AdHocVariableFilter } from './templateVars'; import { RawTimeRange, TimeRange } from './time'; @@ -217,6 +219,11 @@ abstract class DataSourceApi< */ readonly uid: string; + /** + * Set in constructor + */ + readonly apiVersion?: string; + /** * min interval range */ @@ 
-229,6 +236,7 @@ abstract class DataSourceApi< this.meta = instanceSettings.meta; this.cachingConfig = instanceSettings.cachingConfig; this.uid = instanceSettings.uid; + this.apiVersion = instanceSettings.apiVersion; } /** @@ -322,7 +330,11 @@ abstract class DataSourceApi< /** Get an identifier object for this datasource instance */ getRef(): DataSourceRef { - return { type: this.type, uid: this.uid }; + const ref: DataSourceRef = { type: this.type, uid: this.uid }; + if (this.apiVersion) { + ref.apiVersion = this.apiVersion; + } + return ref; } /** @@ -647,6 +659,7 @@ export interface DataSourceSettings { const theme = createTheme(); diff --git a/packages/grafana-data/src/utils/object.ts b/packages/grafana-data/src/utils/object.ts index 0f40f8e636a..50d3ff25507 100644 --- a/packages/grafana-data/src/utils/object.ts +++ b/packages/grafana-data/src/utils/object.ts @@ -10,3 +10,22 @@ export const isEmptyObject = (value: unknown): value is Record => { return typeof value === 'object' && value !== null && Object.keys(value).length === 0; }; + +/** Stringifies an object that may contain circular references */ +export function safeStringifyValue(value: unknown) { + const getCircularReplacer = () => { + const seen = new WeakSet(); + return (_: string, value: object | null) => { + if (typeof value === 'object' && value !== null) { + if (seen.has(value)) { + return; + } + seen.add(value); + } + + return value; + }; + }; + + return JSON.stringify(value, getCircularReplacer()); +} diff --git a/packages/grafana-runtime/src/analytics/utils.ts b/packages/grafana-runtime/src/analytics/utils.ts index ac901f0879c..a5363420e74 100644 --- a/packages/grafana-runtime/src/analytics/utils.ts +++ b/packages/grafana-runtime/src/analytics/utils.ts @@ -44,6 +44,10 @@ export const reportPageview = () => { * @public */ export const reportInteraction = (interactionName: string, properties?: Record) => { + // get static reporting context and append it to properties + if (config.reportingStaticContext && config.reportingStaticContext instanceof Object) { + properties = { ...properties, ...config.reportingStaticContext }; + } getEchoSrv().addEvent({ type: EchoEventType.Interaction, payload: { diff --git a/packages/grafana-runtime/src/config.ts b/packages/grafana-runtime/src/config.ts index 82fb397c7ab..8d615b27d1a 100644 --- a/packages/grafana-runtime/src/config.ts +++ b/packages/grafana-runtime/src/config.ts @@ -177,6 +177,7 @@ export class GrafanaBootConfig implements GrafanaConfig { rootFolderUID: string | undefined; localFileSystemAvailable: boolean | undefined; cloudMigrationIsTarget: boolean | undefined; + reportingStaticContext?: Record; /** * Language used in Grafana's UI. 
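One behavioural consequence of the reportingStaticContext additions in the hunks above, sketched for illustration only (the context keys and the event name below are assumptions, not values from this patch):

import { config, reportInteraction } from '@grafana/runtime';

// Suppose the backend is configured with a static reporting context, so
// config.reportingStaticContext === { grafanaTeam: 'dashboards' }.

// Callers keep passing only their event-specific properties...
reportInteraction('example_button_clicked', { panels: 12 });

// ...and reportInteraction spreads config.reportingStaticContext into the
// properties before handing the event to Echo, so the emitted payload
// carries { panels: 12, grafanaTeam: 'dashboards' }.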
This is after the user's preference (or deteceted locale) is resolved to one of diff --git a/packages/grafana-schema/src/common/common.gen.ts b/packages/grafana-schema/src/common/common.gen.ts index ff3fce0def6..981c6442c73 100644 --- a/packages/grafana-schema/src/common/common.gen.ts +++ b/packages/grafana-schema/src/common/common.gen.ts @@ -861,6 +861,10 @@ export enum VariableFormatID { } export interface DataSourceRef { + /** + * Datasource API version + */ + apiVersion?: string; /** * The plugin type-id */ diff --git a/packages/grafana-schema/src/common/dataquery_gen.cue b/packages/grafana-schema/src/common/dataquery_gen.cue index bcf14f3d134..9e138a6affe 100644 --- a/packages/grafana-schema/src/common/dataquery_gen.cue +++ b/packages/grafana-schema/src/common/dataquery_gen.cue @@ -42,4 +42,6 @@ DataSourceRef: { type?: string // Specific datasource instance uid?: string + // Datasource API version + apiVersion?: string } @cuetsy(kind="interface") diff --git a/packages/grafana-schema/src/raw/composable/cloudwatch/dataquery/x/CloudWatchDataQuery_types.gen.ts b/packages/grafana-schema/src/raw/composable/cloudwatch/dataquery/x/CloudWatchDataQuery_types.gen.ts index dc54af845e0..f49b4453da4 100644 --- a/packages/grafana-schema/src/raw/composable/cloudwatch/dataquery/x/CloudWatchDataQuery_types.gen.ts +++ b/packages/grafana-schema/src/raw/composable/cloudwatch/dataquery/x/CloudWatchDataQuery_types.gen.ts @@ -86,7 +86,7 @@ export interface CloudWatchMetricsQuery extends common.DataQuery, MetricStat { */ metricEditorMode?: MetricEditorMode; /** - * Whether to use a metric search or metric query. Metric query is referred to as "Metrics Insights" in the AWS console. + * Whether to use a metric search or metric insights query */ metricQueryType?: MetricQueryType; /** @@ -94,11 +94,11 @@ export interface CloudWatchMetricsQuery extends common.DataQuery, MetricStat { */ queryMode?: CloudWatchQueryMode; /** - * When the metric query type is `metricQueryType` is set to `Query` and the `metricEditorMode` is set to `Builder`, this field is used to build up an object representation of a SQL query. + * When the metric query type is set to `Insights` and the `metricEditorMode` is set to `Builder`, this field is used to build up an object representation of a SQL query. */ sql?: SQLExpression; /** - * When the metric query type is `metricQueryType` is set to `Query`, this field is used to specify the query string. + * When the metric query type is set to `Insights`, this field is used to specify the query string. 
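Related to the schema hunks above: DataSourceRef gains an optional apiVersion (common.gen.ts and dataquery_gen.cue), and DataSourceApi.getRef() forwards it when the instance settings provide one. A small sketch of the resulting reference shape; the concrete type, uid, and version values are made up for illustration:

import { DataSourceRef } from '@grafana/schema';

// With instanceSettings.apiVersion set (e.g. 'v0alpha1'), getRef() now yields:
const ref: DataSourceRef = {
  type: 'prometheus',
  uid: 'abc123',
  apiVersion: 'v0alpha1',
};

// When apiVersion is undefined the field is simply omitted, so existing
// { type, uid } references and stored dashboards are unaffected.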
*/ sqlExpression?: string; } @@ -106,7 +106,7 @@ export interface CloudWatchMetricsQuery extends common.DataQuery, MetricStat { export type CloudWatchQueryMode = ('Metrics' | 'Logs' | 'Annotations'); export enum MetricQueryType { - Query = 1, + Insights = 1, Search = 0, } diff --git a/packages/grafana-ui/src/components/DataSourceSettings/DataSourceHttpSettings.tsx b/packages/grafana-ui/src/components/DataSourceSettings/DataSourceHttpSettings.tsx index ad412649e7e..6f1dc9e7d58 100644 --- a/packages/grafana-ui/src/components/DataSourceSettings/DataSourceHttpSettings.tsx +++ b/packages/grafana-ui/src/components/DataSourceSettings/DataSourceHttpSettings.tsx @@ -1,10 +1,11 @@ import { css, cx } from '@emotion/css'; import { useState, useCallback, useId } from 'react'; -import { GrafanaTheme2, SelectableValue } from '@grafana/data'; +import { SelectableValue } from '@grafana/data'; import { selectors } from '@grafana/e2e-selectors'; -import { useStyles2, useTheme2 } from '../../themes'; +import { useTheme2 } from '../../themes'; +import { Alert } from '../Alert/Alert'; import { FormField } from '../FormField/FormField'; import { InlineFormLabel } from '../FormLabel/FormLabel'; import { InlineField } from '../Forms/InlineField'; @@ -38,9 +39,8 @@ const DEFAULT_ACCESS_OPTION = { }; const HttpAccessHelp = () => { - const styles = useStyles2(getAccessStyles); return ( -
+

Access mode controls how requests to the data source will be handled. @@ -60,16 +60,10 @@ const HttpAccessHelp = () => { Resource Sharing (CORS) requirements. The URL needs to be accessible from the browser if you select this access mode.

-
+ ); }; -const getAccessStyles = (theme: GrafanaTheme2) => ({ - infoBox: css({ - marginTop: theme.spacing(3), - }), -}); - const LABEL_WIDTH = 26; export const DataSourceHttpSettings = (props: HttpSettingsProps) => { diff --git a/packages/grafana-ui/src/components/DateTimePickers/DateTimePicker/DateTimePicker.test.tsx b/packages/grafana-ui/src/components/DateTimePickers/DateTimePicker/DateTimePicker.test.tsx index 187366e6b47..0d83fc4a53d 100644 --- a/packages/grafana-ui/src/components/DateTimePickers/DateTimePicker/DateTimePicker.test.tsx +++ b/packages/grafana-ui/src/components/DateTimePickers/DateTimePicker/DateTimePicker.test.tsx @@ -14,7 +14,7 @@ afterAll(() => { return setTimeZoneResolver(() => defaultTimeZone); }); -const renderDatetimePicker = (props?: Props) => { +const renderDatetimePicker = (props?: Partial) => { const combinedProps = Object.assign( { date: dateTimeForTimeZone(getTimeZone(), '2021-05-05 12:00:00'), @@ -234,4 +234,20 @@ describe('Date time picker', () => { ).not.toBeInTheDocument(); } ); + + it('should be able to use a custom timeZone', async () => { + renderDatetimePicker({ + timeZone: 'America/New_York', + date: dateTimeForTimeZone(getTimeZone({ timeZone: 'utc' }), '2024-07-01 02:00:00'), + }); + + const dateTimeInput = screen.getByTestId(Components.DateTimePicker.input); + expect(dateTimeInput).toHaveDisplayValue('2024-06-30 22:00:00'); + + await userEvent.click(screen.getByRole('button', { name: 'Time picker' })); + // Check that calendar date is set correctly + expect(screen.getByRole('button', { name: `June 30, 2024` })).toHaveClass('react-calendar__tile--active'); + // Check that time is set correctly + expect(screen.getAllByRole('textbox')[1]).toHaveValue('22:00:00'); + }); }); diff --git a/packages/grafana-ui/src/components/DateTimePickers/DateTimePicker/DateTimePicker.tsx b/packages/grafana-ui/src/components/DateTimePickers/DateTimePicker/DateTimePicker.tsx index 9124d4636d0..1fdeb14114f 100644 --- a/packages/grafana-ui/src/components/DateTimePickers/DateTimePicker/DateTimePicker.tsx +++ b/packages/grafana-ui/src/components/DateTimePickers/DateTimePicker/DateTimePicker.tsx @@ -16,6 +16,7 @@ import { isDateTime, dateTimeForTimeZone, getTimeZone, + TimeZone, } from '@grafana/data'; import { Components } from '@grafana/e2e-selectors'; @@ -53,6 +54,8 @@ export interface Props { disabledSeconds?: () => number[]; /** Can input be cleared/have empty values */ clearable?: boolean; + /** Custom timezone for the date/time display */ + timeZone?: TimeZone; } export const DateTimePicker = ({ @@ -64,6 +67,7 @@ export const DateTimePicker = ({ disabledHours, disabledMinutes, disabledSeconds, + timeZone, showSeconds = true, clearable = false, }: Props) => { @@ -136,6 +140,7 @@ export const DateTimePicker = ({ ref={refs.setReference} showSeconds={showSeconds} clearable={clearable} + timeZone={timeZone} /> {isOpen ? ( isFullscreen ? 
( @@ -155,6 +160,7 @@ export const DateTimePicker = ({ disabledHours={disabledHours} disabledMinutes={disabledMinutes} disabledSeconds={disabledSeconds} + timeZone={timeZone} /> @@ -176,6 +182,7 @@ export const DateTimePicker = ({ disabledHours={disabledHours} disabledMinutes={disabledMinutes} disabledSeconds={disabledSeconds} + timeZone={timeZone} /> @@ -187,21 +194,14 @@ export const DateTimePicker = ({ ); }; -interface DateTimeCalendarProps { - date?: DateTime; +interface DateTimeCalendarProps extends Omit { onChange: (date: DateTime) => void; onClose: () => void; isFullscreen: boolean; - maxDate?: Date; - minDate?: Date; style?: React.CSSProperties; - showSeconds?: boolean; - disabledHours?: () => number[]; - disabledMinutes?: () => number[]; - disabledSeconds?: () => number[]; } -type InputProps = Pick & { +type InputProps = Pick & { isFullscreen: boolean; onOpen: (event: FormEvent) => void; }; @@ -212,21 +212,25 @@ type InputState = { }; const DateTimeInput = React.forwardRef( - ({ date, label, onChange, onOpen, showSeconds = true, clearable = false }, ref) => { + ({ date, label, onChange, onOpen, timeZone, showSeconds = true, clearable = false }, ref) => { const styles = useStyles2(getStyles); const format = showSeconds ? 'YYYY-MM-DD HH:mm:ss' : 'YYYY-MM-DD HH:mm'; const [internalDate, setInternalDate] = useState(() => { - return { value: date ? dateTimeFormat(date) : !clearable ? dateTimeFormat(dateTime()) : '', invalid: false }; + return { + value: date ? dateTimeFormat(date, { timeZone }) : !clearable ? dateTimeFormat(dateTime(), { timeZone }) : '', + invalid: false, + }; }); useEffect(() => { if (date) { + const formattedDate = dateTimeFormat(date, { format, timeZone }); setInternalDate({ - invalid: !isValid(dateTimeFormat(date, { format })), - value: isDateTime(date) ? dateTimeFormat(date, { format }) : date, + invalid: !isValid(formattedDate), + value: isDateTime(date) ? 
formattedDate : date, }); } - }, [date, format]); + }, [date, format, timeZone]); const onChangeDate = useCallback((event: FormEvent) => { const isInvalid = !isValid(event.currentTarget.value); @@ -238,10 +242,10 @@ const DateTimeInput = React.forwardRef( const onBlur = useCallback(() => { if (!internalDate.invalid && internalDate.value) { - const date = dateTimeForTimeZone(getTimeZone(), internalDate.value); + const date = dateTimeForTimeZone(getTimeZone({ timeZone }), internalDate.value); onChange(date); } - }, [internalDate, onChange]); + }, [internalDate, onChange, timeZone]); const clearInternalDate = useCallback(() => { setInternalDate({ value: '', invalid: false }); @@ -285,6 +289,7 @@ const DateTimeCalendar = React.forwardRef disabledHours, disabledMinutes, disabledSeconds, + timeZone, }, ref ) => { @@ -294,17 +299,17 @@ const DateTimeCalendar = React.forwardRef // need to keep these 2 separate in state since react-calendar doesn't support different timezones const [timeOfDayDateTime, setTimeOfDayDateTime] = useState(() => { if (date && date.isValid()) { - return dateTimeForTimeZone(getTimeZone(), date); + return dateTimeForTimeZone(getTimeZone({ timeZone }), date); } - return dateTimeForTimeZone(getTimeZone(), new Date()); + return dateTimeForTimeZone(getTimeZone({ timeZone }), new Date()); }); const [reactCalendarDate, setReactCalendarDate] = useState(() => { if (date && date.isValid()) { - return adjustDateForReactCalendar(date.toDate(), getTimeZone()); + return adjustDateForReactCalendar(date.toDate(), getTimeZone({ timeZone })); } - return new Date(); + return adjustDateForReactCalendar(new Date(), getTimeZone({ timeZone })); }); const onChangeDate = useCallback['onChange']>>((date) => { diff --git a/packages/grafana-ui/src/components/DateTimePickers/RelativeTimeRangePicker/utils.test.ts b/packages/grafana-ui/src/components/DateTimePickers/RelativeTimeRangePicker/utils.test.ts index fce263505e0..11868d66e04 100644 --- a/packages/grafana-ui/src/components/DateTimePickers/RelativeTimeRangePicker/utils.test.ts +++ b/packages/grafana-ui/src/components/DateTimePickers/RelativeTimeRangePicker/utils.test.ts @@ -40,6 +40,17 @@ describe('utils', () => { display: 'now-100m to now-5m', }); }); + + it('should be able to work with future values', () => { + const relativeTimeRange = { from: 600, to: -600 }; + const timeOption = mapRelativeTimeRangeToOption(relativeTimeRange); + + expect(timeOption).toEqual({ + from: 'now-10m', + to: 'now+10m', + display: 'now-10m to now+10m', + }); + }); }); describe('mapOptionToRelativeTimeRange', () => { @@ -56,6 +67,13 @@ describe('utils', () => { expect(relativeTimeRange).toEqual({ from: 86400, to: 43200 }); }); + + it('should map future dates', () => { + const timeOption = { from: 'now-10m', to: 'now+10m', display: 'asdfasdf' }; + const relativeTimeRange = mapOptionToRelativeTimeRange(timeOption); + + expect(relativeTimeRange).toEqual({ from: 600, to: -600 }); + }); }); describe('isRelativeFormat', () => { @@ -83,6 +101,10 @@ describe('utils', () => { expect(isRelativeFormat('now-53w')).toBe(true); }); + it('should consider now+10m as a relative format', () => { + expect(isRelativeFormat('now+10m')).toBe(true); + }); + it('should consider 123123123 as a relative format', () => { expect(isRelativeFormat('123123123')).toBe(false); }); @@ -99,6 +121,11 @@ describe('utils', () => { expect(result.isValid).toBe(true); }); + it('should consider now+10m as a valid relative format', () => { + const result = isRangeValid('now+10m'); + 
expect(result.isValid).toBe(true); + }); + it('should consider now-90000000d as an invalid relative format', () => { const result = isRangeValid('now-90000000d'); expect(result.isValid).toBe(false); diff --git a/packages/grafana-ui/src/components/DateTimePickers/RelativeTimeRangePicker/utils.ts b/packages/grafana-ui/src/components/DateTimePickers/RelativeTimeRangePicker/utils.ts index cc4d321d886..d1b248d60f2 100644 --- a/packages/grafana-ui/src/components/DateTimePickers/RelativeTimeRangePicker/utils.ts +++ b/packages/grafana-ui/src/components/DateTimePickers/RelativeTimeRangePicker/utils.ts @@ -1,6 +1,6 @@ import { RelativeTimeRange, TimeOption } from '@grafana/data'; -const regex = /^now$|^now\-(\d{1,10})([wdhms])$/; +const regex = /^now$|^now(\-|\+)(\d{1,10})([wdhms])$/; export const mapOptionToRelativeTimeRange = (option: TimeOption): RelativeTimeRange | undefined => { return { @@ -48,18 +48,19 @@ export const isRelativeFormat = (format: string): boolean => { const relativeToSeconds = (relative: string): number => { const match = regex.exec(relative); - if (!match || match.length !== 3) { + if (!match || match.length !== 4) { return 0; } - const [, value, unit] = match; + const [, sign, value, unit] = match; const parsed = parseInt(value, 10); if (isNaN(parsed)) { return 0; } - return parsed * units[unit]; + const seconds = parsed * units[unit]; + return sign === '+' ? seconds * -1 : seconds; }; const units: Record = { @@ -71,25 +72,41 @@ const units: Record = { }; const secondsToRelativeFormat = (seconds: number): string => { - if (seconds <= 0) { + if (seconds === 0) { return 'now'; } - if (seconds >= units.w && seconds % units.w === 0) { - return `now-${seconds / units.w}w`; + const absoluteSeconds = Math.abs(seconds); + if (seconds < 0) { + return `now+${formatDuration(absoluteSeconds)}`; } - if (seconds >= units.d && seconds % units.d === 0) { - return `now-${seconds / units.d}d`; - } - - if (seconds >= units.h && seconds % units.h === 0) { - return `now-${seconds / units.h}h`; - } - - if (seconds >= units.m && seconds % units.m === 0) { - return `now-${seconds / units.m}m`; - } - - return `now-${seconds}s`; + return `now-${formatDuration(absoluteSeconds)}`; }; + +/** + * Formats the given duration in seconds into a human-readable string representation. + * + * @param seconds - The duration in seconds. + * @returns The formatted duration string. 
+ */ +function formatDuration(seconds: number): string { + const units = [ + { unit: 'w', value: 7 * 24 * 60 * 60 }, + { unit: 'd', value: 24 * 60 * 60 }, + { unit: 'h', value: 60 * 60 }, + { unit: 'm', value: 60 }, + { unit: 's', value: 1 }, + ]; + + for (const { unit, value } of units) { + if (seconds % value === 0) { + const quotient = seconds / value; + return `${quotient}${unit}`; + } + } + + // If no perfect division, use the least significant unit + const leastSignificant = units[units.length - 1]; + return `${seconds}${leastSignificant.unit}`; +} diff --git a/packages/grafana-ui/src/components/Drawer/Drawer.tsx b/packages/grafana-ui/src/components/Drawer/Drawer.tsx index 9d5defacf03..b6487f06563 100644 --- a/packages/grafana-ui/src/components/Drawer/Drawer.tsx +++ b/packages/grafana-ui/src/components/Drawer/Drawer.tsx @@ -379,7 +379,7 @@ function getWrapperStyles(theme: GrafanaTheme2, size: 'sm' | 'md' | 'lg') { [theme.breakpoints.down('md')]: { width: `calc(100% - ${theme.spacing(2)}) !important`, - minWidth: 0, + minWidth: '0 !important', }, }); } diff --git a/packages/grafana-ui/src/components/Layout/Stack/Stack.story.tsx b/packages/grafana-ui/src/components/Layout/Stack/Stack.story.tsx index aace7d3f546..450cecb1eb0 100644 --- a/packages/grafana-ui/src/components/Layout/Stack/Stack.story.tsx +++ b/packages/grafana-ui/src/components/Layout/Stack/Stack.story.tsx @@ -80,13 +80,13 @@ export const AlignItemsExamples: StoryFn = () => { return (
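Stepping back to the RelativeTimeRangePicker utils changed above: a leading '+' in the now-relative format is parsed as a negative offset in RelativeTimeRange, and the new formatDuration helper picks the largest unit that divides the value evenly when converting back. A rough sketch of the round trip, reusing the sample values from the tests above:

import type { RelativeTimeRange, TimeOption } from '@grafana/data';

const option: TimeOption = { from: 'now-10m', to: 'now+10m', display: 'now-10m to now+10m' };

// mapOptionToRelativeTimeRange(option) => { from: 600, to: -600 }
// (future offsets are stored as negative seconds)
const range: RelativeTimeRange = { from: 600, to: -600 };

// mapRelativeTimeRangeToOption(range) runs each bound through
// secondsToRelativeFormat/formatDuration: 600 -> 'now-10m', -600 -> 'now+10m';
// a value with no clean unit, e.g. 90061, falls back to 'now-90061s'.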

Align items flex-start

- + {Array.from({ length: 5 }).map((_, i) => ( ))}

Align items flex-end

- + {Array.from({ length: 5 }).map((_, i) => ( ))} @@ -98,13 +98,13 @@ export const AlignItemsExamples: StoryFn = () => { ))}

Align items center

- + {Array.from({ length: 5 }).map((_, i) => ( ))}

Align items stretch

- + @@ -131,7 +131,7 @@ export const JustifyContentExamples: StoryFn = () => { {justifyContentOptions.map((justifyContent) => ( <>

Justify Content {justifyContent}

- + {Array.from({ length: 5 }).map((_, i) => ( ))} @@ -150,7 +150,7 @@ export const GapExamples: StoryFn = () => { {gapOptions.map((gap) => ( <>

Gap with spacingToken set to {gap}

- + {Array.from({ length: 5 }).map((_, i) => ( ))} @@ -188,7 +188,7 @@ export const DirectionExamples: StoryFn = () => { {directionOptions.map((direction) => ( <>

Direction {direction}

- + {Array.from({ length: 5 }).map((_, i) => ( ))} diff --git a/packages/grafana-ui/src/components/Layout/Stack/Stack.tsx b/packages/grafana-ui/src/components/Layout/Stack/Stack.tsx index 1f3279ae9c1..05d544739f5 100644 --- a/packages/grafana-ui/src/components/Layout/Stack/Stack.tsx +++ b/packages/grafana-ui/src/components/Layout/Stack/Stack.tsx @@ -69,7 +69,7 @@ const getStyles = ( flexDirection: val, })), getResponsiveStyle(theme, wrap, (val) => ({ - flexWrap: val, + flexWrap: typeof val === 'boolean' ? (val ? 'wrap' : 'nowrap') : val, })), getResponsiveStyle(theme, alignItems, (val) => ({ alignItems: val, diff --git a/packages/grafana-ui/src/components/Layout/types.ts b/packages/grafana-ui/src/components/Layout/types.ts index b31f68c97ab..f76d7738942 100644 --- a/packages/grafana-ui/src/components/Layout/types.ts +++ b/packages/grafana-ui/src/components/Layout/types.ts @@ -24,7 +24,7 @@ export type JustifyContent = | 'right'; export type Direction = 'row' | 'row-reverse' | 'column' | 'column-reverse'; -export type Wrap = 'nowrap' | 'wrap' | 'wrap-reverse'; +export type Wrap = boolean | 'nowrap' | 'wrap' | 'wrap-reverse'; type FlexGrow = number; type FlexShrink = number; diff --git a/packages/grafana-ui/src/components/Select/Select.story.tsx b/packages/grafana-ui/src/components/Select/Select.story.tsx index c56102ca64e..ac0a7277c40 100644 --- a/packages/grafana-ui/src/components/Select/Select.story.tsx +++ b/packages/grafana-ui/src/components/Select/Select.story.tsx @@ -31,6 +31,7 @@ const manyGroupedOptions = [ return { label: person, value: person }; }), }, + { label: 'Bar', value: '3' }, ]; const meta: Meta = { @@ -250,6 +251,7 @@ export const MultiSelectWithOptionGroups: StoryFn = (args) => { { label: 'Eagle', value: '13' }, ], }, + { label: 'Bar', value: '3' }, ]} value={value} onChange={(v) => { diff --git a/packages/grafana-ui/src/components/Select/SelectMenu.tsx b/packages/grafana-ui/src/components/Select/SelectMenu.tsx index c8a6aff1712..73fda63e4b6 100644 --- a/packages/grafana-ui/src/components/Select/SelectMenu.tsx +++ b/packages/grafana-ui/src/components/Select/SelectMenu.tsx @@ -81,13 +81,25 @@ export const VirtualizedSelectMenu = ({ // flatten the children to account for any categories // these will have array children that are the individual options - const flattenedChildren = children.flatMap((child) => { + const flattenedChildren = children.flatMap((child, index) => { if (hasArrayChildren(child)) { // need to remove the children from the category else they end up in the DOM twice const childWithoutChildren = React.cloneElement(child, { children: null, }); - return [childWithoutChildren, ...child.props.children]; + return [ + childWithoutChildren, + ...child.props.children.slice(0, -1), + // add a bottom divider to the last item in the category + React.cloneElement(child.props.children.at(-1), { + innerProps: { + style: { + borderBottom: `1px solid ${theme.colors.border.weak}`, + height: VIRTUAL_LIST_ITEM_HEIGHT, + }, + }, + }), + ]; } return [child]; }); diff --git a/packages/grafana-ui/src/components/Table/FilterList.tsx b/packages/grafana-ui/src/components/Table/FilterList.tsx index e4ca6d19df8..0867631fe93 100644 --- a/packages/grafana-ui/src/components/Table/FilterList.tsx +++ b/packages/grafana-ui/src/components/Table/FilterList.tsx @@ -1,7 +1,7 @@ import { css, cx } from '@emotion/css'; import { useCallback, useMemo } from 'react'; import * as React from 'react'; -import { FixedSizeList as List } from 'react-window'; +import { FixedSizeList as List, 
ListChildComponentProps } from 'react-window'; import { GrafanaTheme2, formattedValueToString, getValueFormat, SelectableValue } from '@grafana/data'; @@ -184,46 +184,59 @@ export const FilterList = ({ )} - {!items.length && } - {items.length && ( - - {({ index, style }) => { - const option = items[index]; - const { value, label } = option; - const isChecked = values.find((s) => s.value === value) !== undefined; - - return ( -
- -
- ); - }} -
- )} - {items.length && ( - -
-
- -
- + {items.length > 0 ? ( + <> + + {ItemRenderer} + + +
+
+ +
+ + + ) : ( + )} ); }; +interface ItemRendererProps extends ListChildComponentProps { + data: { + onCheckedChanged: (option: SelectableValue) => (event: React.FormEvent) => void; + items: SelectableValue[]; + values: SelectableValue[]; + className: string; + }; +} + +function ItemRenderer({ index, style, data: { onCheckedChanged, items, values, className } }: ItemRendererProps) { + const option = items[index]; + const { value, label } = option; + const isChecked = values.find((s) => s.value === value) !== undefined; + + return ( +
+ +
+ ); +} + const getStyles = (theme: GrafanaTheme2) => ({ filterList: css({ label: 'filterList', @@ -246,4 +259,7 @@ const getStyles = (theme: GrafanaTheme2) => ({ borderTop: `1px solid ${theme.colors.border.medium}`, padding: theme.spacing(0.5, 2), }), + noValuesLabel: css({ + paddingTop: theme.spacing(1), + }), }); diff --git a/packages/grafana-ui/src/components/Table/reducer.ts b/packages/grafana-ui/src/components/Table/reducer.ts index 1f450d4a8ba..10e52e351e5 100644 --- a/packages/grafana-ui/src/components/Table/reducer.ts +++ b/packages/grafana-ui/src/components/Table/reducer.ts @@ -15,12 +15,12 @@ export function useTableStateReducer({ onColumnResize, onSortByChange, data }: P switch (action.type) { case 'columnDoneResizing': if (onColumnResize) { - const info = (newState.columnResizing.headerIdWidths as any)[0]; - const columnIdString = info[0]; + const info = (newState.columnResizing?.headerIdWidths as any)?.[0]; + const columnIdString = info?.[0]; const fieldIndex = parseInt(columnIdString, 10); - const width = Math.round(newState.columnResizing.columnWidths[columnIdString]); + const width = Math.round(newState.columnResizing.columnWidths?.[columnIdString]); - const field = data.fields[fieldIndex]; + const field = data.fields?.[fieldIndex]; if (!field) { return newState; } diff --git a/packages/grafana-ui/src/themes/GlobalStyles/GlobalStyles.tsx b/packages/grafana-ui/src/themes/GlobalStyles/GlobalStyles.tsx index f4da62618af..1b8d77910dd 100644 --- a/packages/grafana-ui/src/themes/GlobalStyles/GlobalStyles.tsx +++ b/packages/grafana-ui/src/themes/GlobalStyles/GlobalStyles.tsx @@ -8,12 +8,14 @@ import { getCardStyles } from './card'; import { getCodeStyles } from './code'; import { getElementStyles } from './elements'; import { getExtraStyles } from './extra'; +import { getFilterTableStyles } from './filterTable'; import { getFontStyles } from './fonts'; import { getFormElementStyles } from './forms'; import { getJsonFormatterStyles } from './jsonFormatter'; import { getLegacySelectStyles } from './legacySelect'; import { getMarkdownStyles } from './markdownStyles'; import { getPageStyles } from './page'; +import { getQueryPartStyles } from './queryPart'; import { getRcTimePickerStyles } from './rcTimePicker'; import { getSkeletonStyles } from './skeletonStyles'; import { getSlateStyles } from './slate'; @@ -27,19 +29,21 @@ export function GlobalStyles() { td': { + paddingBottom: 0, + }, + + '.filter-table__avatar': { + width: '25px', + height: '25px', + borderRadius: '50%', + }, + + '&--hover': { + 'tbody tr:hover': { + background: theme.colors.emphasize(theme.colors.background.primary, 0.05), + }, + }, + }, + }); +} diff --git a/packages/grafana-ui/src/themes/GlobalStyles/queryPart.ts b/packages/grafana-ui/src/themes/GlobalStyles/queryPart.ts new file mode 100644 index 00000000000..982e3158328 --- /dev/null +++ b/packages/grafana-ui/src/themes/GlobalStyles/queryPart.ts @@ -0,0 +1,15 @@ +import { css } from '@emotion/react'; + +import { GrafanaTheme2 } from '@grafana/data'; + +export function getQueryPartStyles(theme: GrafanaTheme2) { + return css({ + '.query-part': { + backgroundColor: theme.colors.background.secondary, + + '&:hover': { + background: theme.colors.emphasize(theme.colors.background.secondary, 0.03), + }, + }, + }); +} diff --git a/pkg/api/annotations_test.go b/pkg/api/annotations_test.go index 977f3307ca1..12eea783e5d 100644 --- a/pkg/api/annotations_test.go +++ b/pkg/api/annotations_test.go @@ -15,6 +15,7 @@ import ( 
"github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" "github.com/grafana/grafana/pkg/services/annotations" "github.com/grafana/grafana/pkg/services/annotations/annotationstest" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/dashboards" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/folder" @@ -401,7 +402,7 @@ func TestAPI_Annotations(t *testing.T) { folderDB.On("GetFolderByID", mock.Anything, mock.Anything, mock.Anything).Return(&folder.Folder{UID: folderUID, ID: 1}, nil) hs.DashboardService = dashService hs.folderService = folderService - hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) hs.AccessControl.RegisterScopeAttributeResolver(AnnotationTypeScopeResolver(hs.annotationsRepo, hs.Features, dashService, folderService)) hs.AccessControl.RegisterScopeAttributeResolver(dashboards.NewDashboardIDScopeResolver(folderDB, dashService, folderService)) }) diff --git a/pkg/api/common_test.go b/pkg/api/common_test.go index 91efe95161a..c78f704d7b4 100644 --- a/pkg/api/common_test.go +++ b/pkg/api/common_test.go @@ -26,6 +26,7 @@ import ( "github.com/grafana/grafana/pkg/services/auth/authtest" "github.com/grafana/grafana/pkg/services/authn" "github.com/grafana/grafana/pkg/services/authn/authntest" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/contexthandler" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" dashver "github.com/grafana/grafana/pkg/services/dashboardversion" @@ -188,7 +189,6 @@ func getContextHandler(t *testing.T, cfg *setting.Cfg) *contexthandler.ContextHa return contexthandler.ProvideService( cfg, tracing.InitializeTracerForTest(), - featuremgmt.WithFeatures(), &authntest.FakeService{ExpectedIdentity: &authn.Identity{ID: authn.AnonymousNamespaceID, SessionToken: &usertoken.UserToken{}}}, ) } @@ -269,7 +269,7 @@ func setupSimpleHTTPServer(features featuremgmt.FeatureToggles) *HTTPServer { Cfg: cfg, Features: features, License: &licensing.OSSLicensingService{}, - AccessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), + AccessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), annotationsRepo: annotationstest.NewFakeAnnotationsRepo(), authInfoService: &authinfotest.FakeService{ ExpectedLabels: map[int64]string{int64(1): login.GetAuthProviderLabel(login.LDAPAuthModule)}, @@ -312,7 +312,7 @@ func SetupAPITestServer(t *testing.T, opts ...APITestServerOption) *webtest.Serv } if hs.AccessControl == nil { - hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) } hs.registerRoutes() diff --git a/pkg/api/dashboard_snapshot_test.go b/pkg/api/dashboard_snapshot_test.go index e005ae6a6a5..03ecb227b68 100644 --- a/pkg/api/dashboard_snapshot_test.go +++ b/pkg/api/dashboard_snapshot_test.go @@ -17,6 +17,7 @@ import ( "github.com/grafana/grafana/pkg/infra/db/dbtest" "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/dashboards" "github.com/grafana/grafana/pkg/services/dashboardsnapshots" "github.com/grafana/grafana/pkg/services/featuremgmt" @@ -39,7 
+40,7 @@ func TestHTTPServer_DeleteDashboardSnapshot(t *testing.T) { hs.DashboardService = svc - hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) guardian.InitAccessControlGuardian(hs.Cfg, hs.AccessControl, hs.DashboardService) }) } diff --git a/pkg/api/dashboard_test.go b/pkg/api/dashboard_test.go index e249765952b..9b0c771d185 100644 --- a/pkg/api/dashboard_test.go +++ b/pkg/api/dashboard_test.go @@ -30,6 +30,7 @@ import ( "github.com/grafana/grafana/pkg/services/accesscontrol/actest" accesscontrolmock "github.com/grafana/grafana/pkg/services/accesscontrol/mock" "github.com/grafana/grafana/pkg/services/annotations/annotationstest" + "github.com/grafana/grafana/pkg/services/authz/zanzana" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" "github.com/grafana/grafana/pkg/services/dashboards" "github.com/grafana/grafana/pkg/services/dashboards/database" @@ -130,7 +131,7 @@ func newTestLive(t *testing.T, store db.DB) *live.GrafanaLive { nil, &usagestats.UsageStatsMock{T: t}, nil, - features, acimpl.ProvideAccessControl(features), &dashboards.FakeDashboardService{}, annotationstest.NewFakeAnnotationsRepo(), nil) + features, acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()), &dashboards.FakeDashboardService{}, annotationstest.NewFakeAnnotationsRepo(), nil) require.NoError(t, err) return gLive } @@ -147,7 +148,7 @@ func TestHTTPServer_GetDashboard_AccessControl(t *testing.T) { hs.DashboardService = dashSvc hs.Cfg = setting.NewCfg() - hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) hs.starService = startest.NewStarServiceFake() hs.dashboardProvisioningService = mockDashboardProvisioningService{} @@ -266,7 +267,7 @@ func TestHTTPServer_DeleteDashboardByUID_AccessControl(t *testing.T) { hs.DashboardService = dashSvc hs.Cfg = setting.NewCfg() - hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) hs.starService = startest.NewStarServiceFake() hs.LibraryPanelService = &mockLibraryPanelService{} @@ -322,7 +323,7 @@ func TestHTTPServer_GetDashboardVersions_AccessControl(t *testing.T) { hs.DashboardService = dashSvc hs.Cfg = setting.NewCfg() - hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) hs.starService = startest.NewStarServiceFake() hs.dashboardVersionService = &dashvertest.FakeDashboardVersionService{ diff --git a/pkg/api/datasources_test.go b/pkg/api/datasources_test.go index a5e9bf72326..9ce98859bb2 100644 --- a/pkg/api/datasources_test.go +++ b/pkg/api/datasources_test.go @@ -19,6 +19,7 @@ import ( ac "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" "github.com/grafana/grafana/pkg/services/accesscontrol/actest" + "github.com/grafana/grafana/pkg/services/authz/zanzana" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" "github.com/grafana/grafana/pkg/services/datasources" "github.com/grafana/grafana/pkg/services/datasources/guardian" @@ -115,7 +116,7 @@ func TestAddDataSource_URLWithoutProtocol(t *testing.T) { expectedDatasource: &datasources.DataSource{}, }, Cfg: 
setting.NewCfg(), - AccessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), + AccessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), accesscontrolService: actest.FakeService{}, } @@ -332,7 +333,7 @@ func TestUpdateDataSource_URLWithoutProtocol(t *testing.T) { expectedDatasource: &datasources.DataSource{}, }, Cfg: setting.NewCfg(), - AccessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), + AccessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), accesscontrolService: actest.FakeService{}, } @@ -365,7 +366,7 @@ func TestUpdateDataSourceByID_DataSourceNameExists(t *testing.T) { }, }, Cfg: setting.NewCfg(), - AccessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), + AccessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), accesscontrolService: actest.FakeService{}, Live: newTestLive(t, nil), } diff --git a/pkg/api/dtos/frontend_settings.go b/pkg/api/dtos/frontend_settings.go index 2475d0b8892..a58b0ca00b1 100644 --- a/pkg/api/dtos/frontend_settings.go +++ b/pkg/api/dtos/frontend_settings.go @@ -231,6 +231,7 @@ type FrontendSettingsDTO struct { SupportBundlesEnabled bool `json:"supportBundlesEnabled"` SnapshotEnabled bool `json:"snapshotEnabled"` SecureSocksDSProxyEnabled bool `json:"secureSocksDSProxyEnabled"` + ReportingStaticContext map[string]string `json:"reportingStaticContext"` Azure FrontendSettingsAzureDTO `json:"azure"` diff --git a/pkg/api/folder.go b/pkg/api/folder.go index 109281b1990..d8709daf367 100644 --- a/pkg/api/folder.go +++ b/pkg/api/folder.go @@ -64,13 +64,7 @@ func (hs *HTTPServer) GetFolders(c *contextmodel.ReqContext) response.Response { } hits := make([]dtos.FolderSearchHit, 0) - requesterIsSvcAccount := c.SignedInUser.GetID().Namespace() == identity.NamespaceServiceAccount for _, f := range folders { - // only list k6 folders when requested by a service account - prevents showing k6 folders in the UI for users - if (f.UID == accesscontrol.K6FolderUID || f.ParentUID == accesscontrol.K6FolderUID) && !requesterIsSvcAccount { - continue - } - hits = append(hits, dtos.FolderSearchHit{ ID: f.ID, // nolint:staticcheck UID: f.UID, diff --git a/pkg/api/folder_bench_test.go b/pkg/api/folder_bench_test.go index 478a97eb2b9..9a31a50fc23 100644 --- a/pkg/api/folder_bench_test.go +++ b/pkg/api/folder_bench_test.go @@ -69,7 +69,7 @@ const ( ) type benchScenario struct { - db db.DB + db db.ReplDB // signedInUser is the user that is signed in to the server cfg *setting.Cfg signedInUser *user.SignedInUser @@ -202,7 +202,7 @@ func BenchmarkFolderListAndSearch(b *testing.B) { func setupDB(b testing.TB) benchScenario { b.Helper() - db, cfg := sqlstore.InitTestDB(b) + db, cfg := sqlstore.InitTestReplDB(b) IDs := map[int64]struct{}{} opts := sqlstore.NativeSettingsForDialect(db.GetDialect()) @@ -451,26 +451,26 @@ func setupServer(b testing.TB, sc benchScenario, features featuremgmt.FeatureTog quotaSrv := quotatest.New(false, nil) - dashStore, err := database.ProvideDashboardStore(sc.db, sc.cfg, features, tagimpl.ProvideService(sc.db), quotaSrv) + dashStore, err := database.ProvideDashboardStore(sc.db.DB(), sc.cfg, features, tagimpl.ProvideService(sc.db.DB()), quotaSrv) require.NoError(b, err) - folderStore := folderimpl.ProvideDashboardFolderStore(sc.db) + folderStore := folderimpl.ProvideDashboardFolderStore(sc.db.DB()) - ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) - folderServiceWithFlagOn := 
folderimpl.ProvideService(ac, bus.ProvideBus(tracing.InitializeTracerForTest()), dashStore, folderStore, sc.db, features, supportbundlestest.NewFakeBundleService(), nil) + ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) + folderServiceWithFlagOn := folderimpl.ProvideService(ac, bus.ProvideBus(tracing.InitializeTracerForTest()), dashStore, folderStore, sc.db.DB(), features, supportbundlestest.NewFakeBundleService(), nil) cfg := setting.NewCfg() actionSets := resourcepermissions.NewActionSetService() acSvc := acimpl.ProvideOSSService( sc.cfg, acdb.ProvideService(sc.db), actionSets, localcache.ProvideService(), - features, tracing.InitializeTracerForTest(), zanzana.NewNoopClient(), sc.db, + features, tracing.InitializeTracerForTest(), zanzana.NewNoopClient(), sc.db.DB(), ) folderPermissions, err := ossaccesscontrol.ProvideFolderPermissions( - cfg, features, routing.NewRouteRegister(), sc.db, ac, license, &dashboards.FakeDashboardStore{}, folderServiceWithFlagOn, acSvc, sc.teamSvc, sc.userSvc, actionSets) + cfg, features, routing.NewRouteRegister(), sc.db.DB(), ac, license, &dashboards.FakeDashboardStore{}, folderServiceWithFlagOn, acSvc, sc.teamSvc, sc.userSvc, actionSets) require.NoError(b, err) dashboardPermissions, err := ossaccesscontrol.ProvideDashboardPermissions( - cfg, features, routing.NewRouteRegister(), sc.db, ac, license, &dashboards.FakeDashboardStore{}, folderServiceWithFlagOn, acSvc, sc.teamSvc, sc.userSvc, actionSets) + cfg, features, routing.NewRouteRegister(), sc.db.DB(), ac, license, &dashboards.FakeDashboardStore{}, folderServiceWithFlagOn, acSvc, sc.teamSvc, sc.userSvc, actionSets) require.NoError(b, err) dashboardSvc, err := dashboardservice.ProvideDashboardServiceImpl( @@ -486,15 +486,15 @@ func setupServer(b testing.TB, sc benchScenario, features featuremgmt.FeatureTog hs := &HTTPServer{ CacheService: localcache.New(5*time.Minute, 10*time.Minute), Cfg: sc.cfg, - SQLStore: sc.db, + SQLStore: sc.db.DB(), Features: features, QuotaService: quotaSrv, - SearchService: search.ProvideService(sc.cfg, sc.db, starSvc, dashboardSvc), + SearchService: search.ProvideService(sc.cfg, sc.db.DB(), starSvc, dashboardSvc), folderService: folderServiceWithFlagOn, DashboardService: dashboardSvc, } - hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) guardian.InitAccessControlGuardian(hs.Cfg, hs.AccessControl, hs.DashboardService) m.Get("/api/folders", hs.GetFolders) diff --git a/pkg/api/frontendsettings.go b/pkg/api/frontendsettings.go index 49fdefb13c1..abb5a3783da 100644 --- a/pkg/api/frontendsettings.go +++ b/pkg/api/frontendsettings.go @@ -226,6 +226,7 @@ func (hs *HTTPServer) getFrontendSettings(c *contextmodel.ReqContext) (*dtos.Fro SharedWithMeFolderUID: folder.SharedWithMeFolderUID, RootFolderUID: accesscontrol.GeneralFolderUID, LocalFileSystemAvailable: hs.Cfg.LocalFileSystemAvailable, + ReportingStaticContext: hs.Cfg.ReportingStaticContext, BuildInfo: dtos.FrontendSettingsBuildInfoDTO{ HideVersion: hideVersion, @@ -412,14 +413,15 @@ func (hs *HTTPServer) getFSDataSources(c *contextmodel.ReqContext, availablePlug } dsDTO := plugins.DataSourceDTO{ - ID: ds.ID, - UID: ds.UID, - Type: ds.Type, - Name: ds.Name, - URL: url, - IsDefault: ds.IsDefault, - Access: string(ds.Access), - ReadOnly: ds.ReadOnly, + ID: ds.ID, + UID: ds.UID, + Type: ds.Type, + Name: ds.Name, + URL: url, + IsDefault: ds.IsDefault, + Access: 
string(ds.Access), + ReadOnly: ds.ReadOnly, + APIVersion: ds.APIVersion, } ap, exists := availablePlugins.Get(plugins.TypeDataSource, ds.Type) diff --git a/pkg/api/login.go b/pkg/api/login.go index 7123a3ba6de..415abb871aa 100644 --- a/pkg/api/login.go +++ b/pkg/api/login.go @@ -201,7 +201,7 @@ func (hs *HTTPServer) LoginAPIPing(c *contextmodel.ReqContext) response.Response } func (hs *HTTPServer) LoginPost(c *contextmodel.ReqContext) response.Response { - identity, err := hs.authnService.Login(c.Req.Context(), authn.ClientForm, &authn.Request{HTTPRequest: c.Req, Resp: c.Resp}) + identity, err := hs.authnService.Login(c.Req.Context(), authn.ClientForm, &authn.Request{HTTPRequest: c.Req}) if err != nil { tokenErr := &auth.CreateTokenErr{} if errors.As(err, &tokenErr) { diff --git a/pkg/api/login_oauth.go b/pkg/api/login_oauth.go index 256a225959e..61cfa7d99e1 100644 --- a/pkg/api/login_oauth.go +++ b/pkg/api/login_oauth.go @@ -28,7 +28,7 @@ func (hs *HTTPServer) OAuthLogin(reqCtx *contextmodel.ReqContext) { code := reqCtx.Query("code") - req := &authn.Request{HTTPRequest: reqCtx.Req, Resp: reqCtx.Resp} + req := &authn.Request{HTTPRequest: reqCtx.Req} if code == "" { redirect, err := hs.authnService.RedirectURL(reqCtx.Req.Context(), authn.ClientWithPrefix(name), req) if err != nil { diff --git a/pkg/api/pluginproxy/ds_proxy_test.go b/pkg/api/pluginproxy/ds_proxy_test.go index 03fa9f076cc..5ceee760033 100644 --- a/pkg/api/pluginproxy/ds_proxy_test.go +++ b/pkg/api/pluginproxy/ds_proxy_test.go @@ -32,6 +32,7 @@ import ( "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" "github.com/grafana/grafana/pkg/services/accesscontrol/actest" "github.com/grafana/grafana/pkg/services/authn" + "github.com/grafana/grafana/pkg/services/authz/zanzana" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" "github.com/grafana/grafana/pkg/services/datasources" datasourceservice "github.com/grafana/grafana/pkg/services/datasources/service" @@ -844,7 +845,7 @@ func getDatasourceProxiedRequest(t *testing.T, ctx *contextmodel.ReqContext, cfg secretsStore := secretskvs.NewSQLSecretsKVStore(sqlStore, secretsService, log.New("test.logger")) features := featuremgmt.WithFeatures() quotaService := quotatest.New(false, nil) - dsService, err := datasourceservice.ProvideService(nil, secretsService, secretsStore, cfg, features, acimpl.ProvideAccessControl(features), + dsService, err := datasourceservice.ProvideService(nil, secretsService, secretsStore, cfg, features, acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()), &actest.FakePermissionsService{}, quotaService, &pluginstore.FakePluginStore{}, &pluginfakes.FakePluginClient{}, plugincontext.ProvideBaseService(cfg, pluginconfig.NewFakePluginRequestConfigProvider())) require.NoError(t, err) @@ -966,7 +967,7 @@ func runDatasourceAuthTest(t *testing.T, secretsService secrets.Service, secrets var routes []*plugins.Route features := featuremgmt.WithFeatures() quotaService := quotatest.New(false, nil) - dsService, err := datasourceservice.ProvideService(nil, secretsService, secretsStore, cfg, features, acimpl.ProvideAccessControl(features), + dsService, err := datasourceservice.ProvideService(nil, secretsService, secretsStore, cfg, features, acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()), &actest.FakePermissionsService{}, quotaService, &pluginstore.FakePluginStore{}, &pluginfakes.FakePluginClient{}, plugincontext.ProvideBaseService(cfg, pluginconfig.NewFakePluginRequestConfigProvider())) require.NoError(t, err) @@ 
-1022,7 +1023,7 @@ func setupDSProxyTest(t *testing.T, ctx *contextmodel.ReqContext, ds *datasource secretsService := secretsmng.SetupTestService(t, fakes.NewFakeSecretsStore()) secretsStore := secretskvs.NewSQLSecretsKVStore(dbtest.NewFakeDB(), secretsService, log.NewNopLogger()) features := featuremgmt.WithFeatures() - dsService, err := datasourceservice.ProvideService(nil, secretsService, secretsStore, cfg, features, acimpl.ProvideAccessControl(features), + dsService, err := datasourceservice.ProvideService(nil, secretsService, secretsStore, cfg, features, acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()), &actest.FakePermissionsService{}, quotatest.New(false, nil), &pluginstore.FakePluginStore{}, &pluginfakes.FakePluginClient{}, plugincontext.ProvideBaseService(cfg, pluginconfig.NewFakePluginRequestConfigProvider())) require.NoError(t, err) diff --git a/pkg/api/pluginproxy/pluginproxy_test.go b/pkg/api/pluginproxy/pluginproxy_test.go index 7bd4634de0d..ab8961a42ea 100644 --- a/pkg/api/pluginproxy/pluginproxy_test.go +++ b/pkg/api/pluginproxy/pluginproxy_test.go @@ -16,6 +16,7 @@ import ( "github.com/grafana/grafana/pkg/plugins" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" "github.com/grafana/grafana/pkg/services/authn" + "github.com/grafana/grafana/pkg/services/authz/zanzana" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/org" @@ -265,7 +266,7 @@ func TestPluginProxy(t *testing.T) { SecureJSONData: map[string][]byte{}, } cfg := &setting.Cfg{} - proxy, err := NewPluginProxy(ps, routes, ctx, "", cfg, secretsService, tracing.InitializeTracerForTest(), &http.Transport{}, acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), featuremgmt.WithFeatures()) + proxy, err := NewPluginProxy(ps, routes, ctx, "", cfg, secretsService, tracing.InitializeTracerForTest(), &http.Transport{}, acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), featuremgmt.WithFeatures()) require.NoError(t, err) proxy.HandleRequest() @@ -421,7 +422,7 @@ func TestPluginProxyRoutes(t *testing.T) { SecureJSONData: map[string][]byte{}, } cfg := &setting.Cfg{} - proxy, err := NewPluginProxy(ps, testRoutes, ctx, tc.proxyPath, cfg, secretsService, tracing.InitializeTracerForTest(), &http.Transport{}, acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), featuremgmt.WithFeatures(tc.withFeatures...)) + proxy, err := NewPluginProxy(ps, testRoutes, ctx, tc.proxyPath, cfg, secretsService, tracing.InitializeTracerForTest(), &http.Transport{}, acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), featuremgmt.WithFeatures(tc.withFeatures...)) require.NoError(t, err) proxy.HandleRequest() @@ -536,7 +537,7 @@ func TestPluginProxyRoutesAccessControl(t *testing.T) { SecureJSONData: map[string][]byte{}, } cfg := &setting.Cfg{} - proxy, err := NewPluginProxy(ps, testRoutes, ctx, tc.proxyPath, cfg, secretsService, tracing.InitializeTracerForTest(), &http.Transport{}, acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), featuremgmt.WithFeatures(featuremgmt.FlagAccessControlOnCall)) + proxy, err := NewPluginProxy(ps, testRoutes, ctx, tc.proxyPath, cfg, secretsService, tracing.InitializeTracerForTest(), &http.Transport{}, acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), featuremgmt.WithFeatures(featuremgmt.FlagAccessControlOnCall)) require.NoError(t, err) proxy.HandleRequest() @@ -567,7 +568,7 @@ 
func getPluginProxiedRequest(t *testing.T, ps *pluginsettings.DTO, secretsServic ReqRole: org.RoleEditor, } } - proxy, err := NewPluginProxy(ps, []*plugins.Route{}, ctx, "", cfg, secretsService, tracing.InitializeTracerForTest(), &http.Transport{}, acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), featuremgmt.WithFeatures()) + proxy, err := NewPluginProxy(ps, []*plugins.Route{}, ctx, "", cfg, secretsService, tracing.InitializeTracerForTest(), &http.Transport{}, acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), featuremgmt.WithFeatures()) require.NoError(t, err) req, err := http.NewRequest(http.MethodGet, "/api/plugin-proxy/grafana-simple-app/api/v4/alerts", nil) diff --git a/pkg/api/plugins_test.go b/pkg/api/plugins_test.go index e47f1ec9bb6..1801f2b315a 100644 --- a/pkg/api/plugins_test.go +++ b/pkg/api/plugins_test.go @@ -34,6 +34,7 @@ import ( "github.com/grafana/grafana/pkg/services/accesscontrol/actest" "github.com/grafana/grafana/pkg/services/authn" "github.com/grafana/grafana/pkg/services/authn/authntest" + "github.com/grafana/grafana/pkg/services/authz/zanzana" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/org" @@ -746,7 +747,7 @@ func TestHTTPServer_hasPluginRequestedPermissions(t *testing.T) { } hs.log = logger hs.accesscontrolService = actest.FakeService{} - hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + hs.AccessControl = acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) expectedIdentity := &authn.Identity{ OrgID: tt.orgID, diff --git a/pkg/api/user_test.go b/pkg/api/user_test.go index dbaa0f0f848..5e1ec0d112c 100644 --- a/pkg/api/user_test.go +++ b/pkg/api/user_test.go @@ -28,6 +28,7 @@ import ( "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" acmock "github.com/grafana/grafana/pkg/services/accesscontrol/mock" "github.com/grafana/grafana/pkg/services/auth/idtest" + "github.com/grafana/grafana/pkg/services/authz/zanzana" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/login" @@ -60,7 +61,7 @@ func TestUserAPIEndpoint_userLoggedIn(t *testing.T) { hs := &HTTPServer{ Cfg: settings, SQLStore: sqlStore, - AccessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), + AccessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), } mockResult := user.SearchUserQueryResult{ diff --git a/pkg/apimachinery/go.mod b/pkg/apimachinery/go.mod index 332f06abf0a..58832902076 100644 --- a/pkg/apimachinery/go.mod +++ b/pkg/apimachinery/go.mod @@ -11,7 +11,7 @@ require ( require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/swag v0.23.0 // indirect @@ -28,7 +28,7 @@ require ( github.com/rogpeppe/go-internal v1.12.0 // indirect golang.org/x/net v0.26.0 // indirect golang.org/x/text v0.16.0 // indirect - google.golang.org/protobuf v1.34.1 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // 
indirect diff --git a/pkg/apimachinery/go.sum b/pkg/apimachinery/go.sum index 433d2916653..e859c76c7ac 100644 --- a/pkg/apimachinery/go.sum +++ b/pkg/apimachinery/go.sum @@ -1,6 +1,6 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= @@ -22,7 +22,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= diff --git a/pkg/apimachinery/identity/context.go b/pkg/apimachinery/identity/context.go index 94cd9ecee02..b402209497b 100644 --- a/pkg/apimachinery/identity/context.go +++ b/pkg/apimachinery/identity/context.go @@ -3,6 +3,7 @@ package identity import ( "context" "fmt" + "reflect" ) type ctxUserKey struct{} @@ -16,8 +17,12 @@ func WithRequester(ctx context.Context, usr Requester) context.Context { func GetRequester(ctx context.Context) (Requester, error) { // Set by appcontext.WithUser u, ok := ctx.Value(ctxUserKey{}).(Requester) - if ok && u != nil { + if ok && !checkNilRequester(u) { return u, nil } return nil, fmt.Errorf("a Requester was not found in the context") } + +func checkNilRequester(r Requester) bool { + return r == nil || (reflect.ValueOf(r).Kind() == reflect.Ptr && reflect.ValueOf(r).IsNil()) +} diff --git a/pkg/apimachinery/identity/requester.go b/pkg/apimachinery/identity/requester.go index 9791077fe5f..f65ec49e91e 100644 --- a/pkg/apimachinery/identity/requester.go +++ b/pkg/apimachinery/identity/requester.go @@ -43,6 +43,9 @@ type Requester interface { GetOrgName() string // GetAuthID returns external id for entity. GetAuthID() string + // GetAllowedKubernetesNamespace returns either "*" or the single namespace this requester has access to + // An empty value means the implementation has not specified a kubernetes namespace. + GetAllowedKubernetesNamespace() string // GetAuthenticatedBy returns the authentication method used to authenticate the entity. GetAuthenticatedBy() string // IsAuthenticatedBy returns true if entity was authenticated by any of supplied providers. 
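// Standalone illustration (not part of the change above) of the typed-nil pitfall that the
// checkNilRequester helper added to pkg/apimachinery/identity/context.go guards against: an
// interface that wraps a nil pointer compares as non-nil, so a plain `u != nil` check would
// hand callers a Requester whose method calls dereference nil. Type names below are stand-ins.
package main

import (
	"context"
	"fmt"
	"reflect"
)

type Requester interface{ GetLogin() string }

type staticRequester struct{ login string }

func (s *staticRequester) GetLogin() string { return s.login }

type ctxKey struct{}

// isNilRequester mirrors the reflect-based guard from the diff: nil interface,
// or non-nil interface wrapping a nil pointer, both count as "no requester".
func isNilRequester(r Requester) bool {
	return r == nil || (reflect.ValueOf(r).Kind() == reflect.Ptr && reflect.ValueOf(r).IsNil())
}

func main() {
	var typedNil *staticRequester // a nil *staticRequester
	ctx := context.WithValue(context.Background(), ctxKey{}, Requester(typedNil))

	u, ok := ctx.Value(ctxKey{}).(Requester)
	fmt.Println(ok, u == nil)      // true false: the interface value itself is NOT nil
	fmt.Println(isNilRequester(u)) // true: the reflect check catches the typed nil
}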
diff --git a/pkg/apimachinery/identity/static.go b/pkg/apimachinery/identity/static.go index 84df3966b80..a76420aa119 100644 --- a/pkg/apimachinery/identity/static.go +++ b/pkg/apimachinery/identity/static.go @@ -9,20 +9,21 @@ var _ Requester = &StaticRequester{} // This is mostly copied from: // https://github.com/grafana/grafana/blob/v11.0.0/pkg/services/user/identity.go#L16 type StaticRequester struct { - Namespace Namespace - UserID int64 - UserUID string - OrgID int64 - OrgName string - OrgRole RoleType - Login string - Name string - DisplayName string - Email string - EmailVerified bool - AuthID string - AuthenticatedBy string - IsGrafanaAdmin bool + Namespace Namespace + UserID int64 + UserUID string + OrgID int64 + OrgName string + OrgRole RoleType + Login string + Name string + DisplayName string + Email string + EmailVerified bool + AuthID string + AuthenticatedBy string + AllowedKubernetesNamespace string + IsGrafanaAdmin bool // Permissions grouped by orgID and actions Permissions map[int64]map[string][]string IDToken string @@ -123,6 +124,10 @@ func (u *StaticRequester) GetAuthID() string { return u.AuthID } +func (u *StaticRequester) GetAllowedKubernetesNamespace() string { + return u.AllowedKubernetesNamespace +} + func (u *StaticRequester) GetAuthenticatedBy() string { return u.AuthenticatedBy } diff --git a/pkg/apis/alerting_notifications/v0alpha1/register.go b/pkg/apis/alerting_notifications/v0alpha1/register.go index cbf36e4aab4..c773957155c 100644 --- a/pkg/apis/alerting_notifications/v0alpha1/register.go +++ b/pkg/apis/alerting_notifications/v0alpha1/register.go @@ -1,11 +1,16 @@ package v0alpha1 import ( + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/registry/generic" common "github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1" + scope "github.com/grafana/grafana/pkg/apis/scope/v0alpha1" ) func init() { @@ -20,7 +25,7 @@ const ( var ( TimeIntervalResourceInfo = common.NewResourceInfo(GROUP, VERSION, - "timeintervals", "timeinterval", "TimeIntervals", + "timeintervals", "timeinterval", "TimeInterval", func() runtime.Object { return &TimeInterval{} }, func() runtime.Object { return &TimeIntervalList{} }, ) @@ -51,9 +56,36 @@ func AddKnownTypesGroup(scheme *runtime.Scheme, g schema.GroupVersion) error { &ReceiverList{}, ) metav1.AddToGroupVersion(scheme, g) + + err := scheme.AddFieldLabelConversionFunc( + TimeIntervalResourceInfo.GroupVersionKind(), + func(label, value string) (string, string, error) { + fieldSet := SelectableTimeIntervalsFields(&TimeInterval{}) + for key := range fieldSet { + if label == key { + return label, value, nil + } + } + return "", "", fmt.Errorf("field label not supported for %s: %s", scope.ScopeNodeResourceInfo.GroupVersionKind(), label) + }, + ) + if err != nil { + return err + } + return nil } +func SelectableTimeIntervalsFields(obj *TimeInterval) fields.Set { + if obj == nil { + return nil + } + return generic.MergeFieldsSets(generic.ObjectMetaFieldsSet(&obj.ObjectMeta, false), fields.Set{ + "metadata.provenance": obj.GetProvenanceStatus(), + "spec.name": obj.Spec.Name, + }) +} + // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() diff --git a/pkg/apis/alerting_notifications/v0alpha1/types_ext.go 
b/pkg/apis/alerting_notifications/v0alpha1/types_ext.go new file mode 100644 index 00000000000..df9079bf675 --- /dev/null +++ b/pkg/apis/alerting_notifications/v0alpha1/types_ext.go @@ -0,0 +1,46 @@ +package v0alpha1 + +const ProvenanceStatusAnnotationKey = "grafana.com/provenance" +const ProvenanceStatusNone = "none" + +func (o *TimeInterval) GetProvenanceStatus() string { + if o == nil || o.Annotations == nil { + return "" + } + s, ok := o.Annotations[ProvenanceStatusAnnotationKey] + if !ok || s == "" { + return ProvenanceStatusNone + } + return s +} + +func (o *TimeInterval) SetProvenanceStatus(status string) { + if o.Annotations == nil { + o.Annotations = make(map[string]string, 1) + } + if status == "" { + status = ProvenanceStatusNone + } + o.Annotations[ProvenanceStatusAnnotationKey] = status +} + +func (o *Receiver) GetProvenanceStatus() string { + if o == nil || o.Annotations == nil { + return "" + } + s, ok := o.Annotations[ProvenanceStatusAnnotationKey] + if !ok || s == "" { + return ProvenanceStatusNone + } + return s +} + +func (o *Receiver) SetProvenanceStatus(status string) { + if o.Annotations == nil { + o.Annotations = make(map[string]string, 1) + } + if status == "" { + status = ProvenanceStatusNone + } + o.Annotations[ProvenanceStatusAnnotationKey] = status +} diff --git a/pkg/apiserver/endpoints/filters/requester.go b/pkg/apiserver/endpoints/filters/requester.go index d091246d024..fe09802b3c0 100644 --- a/pkg/apiserver/endpoints/filters/requester.go +++ b/pkg/apiserver/endpoints/filters/requester.go @@ -33,13 +33,16 @@ func WithRequester(handler http.Handler) http.Handler { slices.Contains(info.GetGroups(), user.SystemPrivilegedGroup) { orgId := int64(1) requester = &identity.StaticRequester{ - Namespace: identity.NamespaceServiceAccount, // system:apiserver - UserID: 1, - OrgID: orgId, - Name: info.GetName(), - Login: info.GetName(), - OrgRole: identity.RoleAdmin, - IsGrafanaAdmin: true, + Namespace: identity.NamespaceServiceAccount, // system:apiserver + UserID: 1, + OrgID: orgId, + Name: info.GetName(), + Login: info.GetName(), + OrgRole: identity.RoleAdmin, + + IsGrafanaAdmin: true, + AllowedKubernetesNamespace: "default", + Permissions: map[int64]map[string][]string{ orgId: { "*": {"*"}, // all resources, all scopes diff --git a/pkg/apiserver/go.mod b/pkg/apiserver/go.mod index 828015a3f06..e1186be8ac2 100644 --- a/pkg/apiserver/go.mod +++ b/pkg/apiserver/go.mod @@ -5,9 +5,9 @@ go 1.21.10 require ( github.com/bwmarrin/snowflake v0.3.0 github.com/grafana/grafana/pkg/apimachinery v0.0.0-20240701135906-559738ce6ae1 - github.com/prometheus/client_golang v1.19.0 + github.com/prometheus/client_golang v1.19.1 github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/otel/trace v1.26.0 + go.opentelemetry.io/otel/trace v1.28.0 k8s.io/apimachinery v0.29.3 k8s.io/apiserver v0.29.2 k8s.io/client-go v0.29.3 @@ -25,7 +25,7 @@ require ( github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect @@ -50,7 +50,7 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.1 // indirect - 
github.com/prometheus/common v0.53.0 // indirect + github.com/prometheus/common v0.54.0 // indirect github.com/prometheus/procfs v0.14.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -59,13 +59,13 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect go.etcd.io/etcd/client/v3 v3.5.10 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect - go.opentelemetry.io/otel v1.26.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 // indirect - go.opentelemetry.io/otel/metric v1.26.0 // indirect - go.opentelemetry.io/otel/sdk v1.26.0 // indirect - go.opentelemetry.io/proto/otlp v1.2.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/net v0.26.0 // indirect @@ -76,10 +76,10 @@ require ( golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.22.0 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect google.golang.org/grpc v1.64.0 // indirect - google.golang.org/protobuf v1.34.1 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/apiserver/go.sum b/pkg/apiserver/go.sum index 64739aae0aa..2f5d7253ff3 100644 --- a/pkg/apiserver/go.sum +++ b/pkg/apiserver/go.sum @@ -38,8 +38,7 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= @@ -128,16 +127,14 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= -github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.14.0 h1:Lw4VdGGoKEZilJsayHf0B+9YgLGREba2C6xr+Fdfq6s= github.com/prometheus/procfs v0.14.0/go.mod h1:XL+Iwz8k8ZabyZfMFHPiilCniixqQarAy5Mu67pHlNQ= @@ -184,22 +181,14 @@ go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 h1:A3SayB3rNyt+1S6qpI9mHPkeHTZbD7XILEqWnYZb2l0= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0/go.mod h1:27iA5uvhuRNmalO+iEUdVn5ZMj2qy10Mm+XRIpRmyuU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= -go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= -go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 h1:Waw9Wfpo/IXzOI8bCB7DIk+0JZcqqsyn1JFnAc+iam8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0/go.mod h1:wnJIG4fOqyynOnnQF/eQb4/16VlX2EJAHhHgqIqWfAo= -go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= -go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= -go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= -go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs= -go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= -go.opentelemetry.io/otel/trace 
v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -281,10 +270,8 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -293,8 +280,7 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/pkg/apiserver/rest/dualwriter.go b/pkg/apiserver/rest/dualwriter.go index c75b92b73f2..440a2f6cc8c 100644 --- a/pkg/apiserver/rest/dualwriter.go +++ b/pkg/apiserver/rest/dualwriter.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/prometheus/client_golang/prometheus" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/rest" @@ -216,6 +217,9 @@ var defaultConverter = runtime.UnstructuredConverter(runtime.DefaultUnstructured // Compare asserts on the equality of objects returned from both stores (object storage and legacy storage) func Compare(storageObj, legacyObj runtime.Object) bool { + if storageObj == nil || legacyObj == nil { + return storageObj == nil && legacyObj == nil + } return bytes.Equal(removeMeta(storageObj), removeMeta(legacyObj)) } @@ -226,10 +230,23 @@ func removeMeta(obj runtime.Object) []byte { return nil } // we don't want to compare meta fields - delete(unstObj, "meta") + delete(unstObj, "metadata") + jsonObj, err := json.Marshal(cpy) if err != nil { return nil } return jsonObj } + +func getName(o runtime.Object) string { + if o == nil { + return "" + } + accessor, err := meta.Accessor(o) + if err != nil { + klog.Error("failed to get object name: ", err) + return "" + } + return accessor.GetName() +} diff --git a/pkg/apiserver/rest/dualwriter_mode1.go b/pkg/apiserver/rest/dualwriter_mode1.go index 75aa3a10c43..4801b75374c 100644 --- a/pkg/apiserver/rest/dualwriter_mode1.go +++ b/pkg/apiserver/rest/dualwriter_mode1.go @@ -35,9 +35,9 @@ func (d *DualWriterMode1) Mode() DualWriterMode { // Create overrides the behavior of the generic DualWriter and writes only to LegacyStorage. func (d *DualWriterMode1) Create(ctx context.Context, original runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) { - log := d.Log.WithValues("kind", options.Kind) - ctx = klog.NewContext(ctx, log) var method = "create" + log := d.Log.WithValues("kind", options.Kind, "method", method) + ctx = klog.NewContext(ctx, log) startLegacy := time.Now() created, err := d.Legacy.Create(ctx, original, createValidation, options) @@ -50,7 +50,7 @@ func (d *DualWriterMode1) Create(ctx context.Context, original runtime.Object, c createdCopy := created.DeepCopyObject() - go func() { + go func(createdCopy runtime.Object) { ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("storage create timeout")) defer cancel() @@ -59,18 +59,26 @@ func (d *DualWriterMode1) Create(ctx context.Context, original runtime.Object, c } startStorage := time.Now() - _, errObjectSt := d.Storage.Create(ctx, createdCopy, createValidation, options) + storageObj, errObjectSt := d.Storage.Create(ctx, createdCopy, createValidation, options) d.recordStorageDuration(errObjectSt != nil, mode1Str, options.Kind, method, startStorage) - }() + if err != nil { + cancel() + } + areEqual := Compare(storageObj, createdCopy) + d.recordOutcome(mode1Str, getName(createdCopy), areEqual, method) + if !areEqual { + log.Info("object from legacy and storage are not equal") + } + }(createdCopy) return created, err } // Get overrides the behavior of the generic DualWriter and reads only from LegacyStorage. 
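// Rough sketch of the idea behind Compare/removeMeta in dualwriter.go above: drop the volatile
// "metadata" section (UIDs, resource versions and timestamps legitimately differ between the two
// stores) and compare the JSON of what remains. Helper names are illustrative, and the example
// assumes the k8s.io/api and k8s.io/apimachinery modules are available; it is not the real code.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// specEqual reports whether two objects are equal once metadata is ignored.
func specEqual(a, b runtime.Object) bool {
	return bytes.Equal(withoutMetadata(a), withoutMetadata(b))
}

func withoutMetadata(obj runtime.Object) []byte {
	if obj == nil {
		return nil
	}
	unst, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj.DeepCopyObject())
	if err != nil {
		return nil
	}
	delete(unst, "metadata") // the key is "metadata", not "meta" — the fix made in the diff
	out, err := json.Marshal(unst)
	if err != nil {
		return nil
	}
	return out
}

func main() {
	a := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "x", ResourceVersion: "1", UID: "aaa"},
		Data:       map[string]string{"k": "v"},
	}
	b := a.DeepCopy()
	b.ResourceVersion = "42" // metadata drift only
	b.UID = "bbb"
	fmt.Println(specEqual(a, b)) // true: only metadata differs
	b.Data["k"] = "other"
	fmt.Println(specEqual(a, b)) // false: the payload differs
}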
func (d *DualWriterMode1) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { - log := d.Log.WithValues("kind", options.Kind) - ctx = klog.NewContext(ctx, log) var method = "get" + log := d.Log.WithValues("kind", options.Kind, "method", method, "name", name) + ctx = klog.NewContext(ctx, log) startLegacy := time.Now() res, errLegacy := d.Legacy.Get(ctx, name, options) @@ -79,22 +87,32 @@ func (d *DualWriterMode1) Get(ctx context.Context, name string, options *metav1. } d.recordLegacyDuration(errLegacy != nil, mode1Str, options.Kind, method, startLegacy) - go func() { + go func(res runtime.Object) { startStorage := time.Now() ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("storage get timeout")) defer cancel() - _, err := d.Storage.Get(ctx, name, options) + storageObj, err := d.Storage.Get(ctx, name, options) d.recordStorageDuration(err != nil, mode1Str, options.Kind, method, startStorage) - }() + if err != nil { + log.Error(err, "unable to get object in storage") + cancel() + } + + areEqual := Compare(storageObj, res) + d.recordOutcome(mode1Str, name, areEqual, method) + if !areEqual { + log.WithValues("name", name).Info("object from legacy and storage are not equal") + } + }(res) return res, errLegacy } // List overrides the behavior of the generic DualWriter and reads only from LegacyStorage. func (d *DualWriterMode1) List(ctx context.Context, options *metainternalversion.ListOptions) (runtime.Object, error) { - log := d.Log.WithValues("kind", options.Kind, "resourceVersion", options.ResourceVersion, "kind", options.Kind) - ctx = klog.NewContext(ctx, log) var method = "list" + log := d.Log.WithValues("kind", options.Kind, "resourceVersion", options.ResourceVersion, "kind", options.Kind, "method", method) + ctx = klog.NewContext(ctx, log) startLegacy := time.Now() res, errLegacy := d.Legacy.List(ctx, options) @@ -103,21 +121,29 @@ func (d *DualWriterMode1) List(ctx context.Context, options *metainternalversion } d.recordLegacyDuration(errLegacy != nil, mode1Str, options.Kind, method, startLegacy) - go func() { + go func(res runtime.Object) { startStorage := time.Now() ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("storage list timeout")) defer cancel() - _, err := d.Storage.List(ctx, options) + storageObj, err := d.Storage.List(ctx, options) d.recordStorageDuration(err != nil, mode1Str, options.Kind, method, startStorage) - }() + if err != nil { + cancel() + } + areEqual := Compare(storageObj, res) + d.recordOutcome(mode1Str, getName(res), areEqual, method) + if !areEqual { + log.Info("object from legacy and storage are not equal") + } + }(res) return res, errLegacy } func (d *DualWriterMode1) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) { - log := d.Log.WithValues("name", name, "kind", options.Kind) - ctx = klog.NewContext(ctx, d.Log) var method = "delete" + log := d.Log.WithValues("name", name, "kind", options.Kind, "method", method, "name", name) + ctx = klog.NewContext(ctx, d.Log) startLegacy := time.Now() res, async, err := d.Legacy.Delete(ctx, name, deleteValidation, options) @@ -128,22 +154,30 @@ func (d *DualWriterMode1) Delete(ctx context.Context, name string, deleteValidat } d.recordLegacyDuration(false, mode1Str, name, method, startLegacy) - go func() { + go func(res runtime.Object) { startStorage := time.Now() ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("storage 
delete timeout")) defer cancel() - _, _, err := d.Storage.Delete(ctx, name, deleteValidation, options) + storageObj, _, err := d.Storage.Delete(ctx, name, deleteValidation, options) d.recordStorageDuration(err != nil, mode1Str, options.Kind, method, startStorage) - }() + if err != nil { + cancel() + } + areEqual := Compare(storageObj, res) + d.recordOutcome(mode1Str, name, areEqual, method) + if !areEqual { + log.Info("object from legacy and storage are not equal") + } + }(res) return res, async, err } // DeleteCollection overrides the behavior of the generic DualWriter and deletes only from LegacyStorage. func (d *DualWriterMode1) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) { - log := d.Log.WithValues("kind", options.Kind, "resourceVersion", listOptions.ResourceVersion) - ctx = klog.NewContext(ctx, log) var method = "delete-collection" + log := d.Log.WithValues("kind", options.Kind, "resourceVersion", listOptions.ResourceVersion, "method", method) + ctx = klog.NewContext(ctx, log) startLegacy := time.Now() res, err := d.Legacy.DeleteCollection(ctx, deleteValidation, options, listOptions) @@ -154,21 +188,29 @@ func (d *DualWriterMode1) DeleteCollection(ctx context.Context, deleteValidation } d.recordLegacyDuration(false, mode1Str, options.Kind, method, startLegacy) - go func() { + go func(res runtime.Object) { startStorage := time.Now() ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("storage deletecollection timeout")) defer cancel() - _, err := d.Storage.DeleteCollection(ctx, deleteValidation, options, listOptions) + storageObj, err := d.Storage.DeleteCollection(ctx, deleteValidation, options, listOptions) d.recordStorageDuration(err != nil, mode1Str, options.Kind, method, startStorage) - }() + if err != nil { + cancel() + } + areEqual := Compare(storageObj, res) + d.recordOutcome(mode1Str, getName(res), areEqual, method) + if !areEqual { + log.Info("object from legacy and storage are not equal") + } + }(res) return res, err } func (d *DualWriterMode1) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { - log := d.Log.WithValues("name", name, "kind", options.Kind) - ctx = klog.NewContext(ctx, log) var method = "update" + log := d.Log.WithValues("name", name, "kind", options.Kind, "method", method, "name", name) + ctx = klog.NewContext(ctx, log) startLegacy := time.Now() res, async, err := d.Legacy.Update(ctx, name, objInfo, createValidation, updateValidation, forceAllowCreate, options) @@ -179,7 +221,7 @@ func (d *DualWriterMode1) Update(ctx context.Context, name string, objInfo rest. } d.recordLegacyDuration(false, mode1Str, options.Kind, method, startLegacy) - go func() { + go func(res runtime.Object) { ctx, cancel := context.WithTimeoutCause(ctx, time.Second*10, errors.New("storage update timeout")) resCopy := res.DeepCopyObject() @@ -212,9 +254,17 @@ func (d *DualWriterMode1) Update(ctx context.Context, name string, objInfo rest. 
} startStorage := time.Now() defer cancel() - _, _, errObjectSt := d.Storage.Update(ctx, name, objInfo, createValidation, updateValidation, forceAllowCreate, options) + storageObj, _, errObjectSt := d.Storage.Update(ctx, name, objInfo, createValidation, updateValidation, forceAllowCreate, options) d.recordStorageDuration(errObjectSt != nil, mode1Str, options.Kind, method, startStorage) - }() + if err != nil { + cancel() + } + areEqual := Compare(storageObj, res) + d.recordOutcome(mode1Str, name, areEqual, method) + if !areEqual { + log.WithValues("name", name).Info("object from legacy and storage are not equal") + } + }(res) return res, async, err } diff --git a/pkg/apiserver/rest/dualwriter_mode2.go b/pkg/apiserver/rest/dualwriter_mode2.go index 954dd1d7668..df725b1c58e 100644 --- a/pkg/apiserver/rest/dualwriter_mode2.go +++ b/pkg/apiserver/rest/dualwriter_mode2.go @@ -37,9 +37,9 @@ func (d *DualWriterMode2) Mode() DualWriterMode { // Create overrides the behavior of the generic DualWriter and writes to LegacyStorage and Storage. func (d *DualWriterMode2) Create(ctx context.Context, original runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) { - log := d.Log.WithValues("kind", options.Kind) - ctx = klog.NewContext(ctx, log) var method = "create" + log := d.Log.WithValues("kind", options.Kind, "method", method) + ctx = klog.NewContext(ctx, log) startLegacy := time.Now() created, err := d.Legacy.Create(ctx, original, createValidation, options) @@ -59,47 +59,65 @@ func (d *DualWriterMode2) Create(ctx context.Context, original runtime.Object, c if err != nil { log.WithValues("name").Error(err, "unable to create object in storage") d.recordStorageDuration(true, mode2Str, options.Kind, method, startStorage) + return rsp, err } d.recordStorageDuration(false, mode2Str, options.Kind, method, startStorage) + + areEqual := Compare(rsp, created) + d.recordOutcome(mode2Str, getName(rsp), areEqual, method) + if !areEqual { + log.Info("object from legacy and storage are not equal") + } return rsp, err } // It retrieves an object from Storage if possible, and if not it falls back to LegacyStorage. 
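// Rough sketch of the mode-1 write pattern used above (not the real DualWriter): the legacy
// write stays authoritative and is returned immediately, while the unified-storage write runs
// in a goroutine that receives a deep copy of the object plus its own timeout, so the caller's
// result is never raced or mutated. All types and names here are stand-ins.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type object struct{ name string }

func (o *object) deepCopy() *object { c := *o; return &c }

type store interface {
	create(ctx context.Context, o *object) (*object, error)
}

func dualCreate(ctx context.Context, legacy, storage store, o *object) (*object, error) {
	created, err := legacy.create(ctx, o)
	if err != nil {
		return nil, err // mode 1: legacy storage is the source of truth
	}
	cp := created.deepCopy()
	go func(cp *object) {
		// Layer a deadline on top of the incoming context for the best-effort write.
		ctx, cancel := context.WithTimeoutCause(ctx, 10*time.Second, errors.New("storage create timeout"))
		defer cancel()
		if _, err := storage.create(ctx, cp); err != nil {
			fmt.Println("best-effort storage write failed:", err)
			return
		}
		fmt.Println("unified storage caught up for", cp.name)
	}(cp)
	return created, nil
}

type memStore struct{ delay time.Duration }

func (m memStore) create(_ context.Context, o *object) (*object, error) {
	time.Sleep(m.delay)
	return o, nil
}

func main() {
	out, err := dualCreate(context.Background(), memStore{}, memStore{delay: 50 * time.Millisecond}, &object{name: "demo"})
	fmt.Println("returned to caller:", out.name, err)
	time.Sleep(200 * time.Millisecond) // demo only: let the background write report
}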
func (d *DualWriterMode2) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { - log := d.Log.WithValues("name", name, "resourceVersion", options.ResourceVersion, "kind", options.Kind) - ctx = klog.NewContext(ctx, log) var method = "get" + log := d.Log.WithValues("name", name, "resourceVersion", options.ResourceVersion, "kind", options.Kind, "method", method) + ctx = klog.NewContext(ctx, log) startStorage := time.Now() - res, err := d.Storage.Get(ctx, name, options) + objStorage, err := d.Storage.Get(ctx, name, options) + d.recordStorageDuration(err != nil, mode2Str, options.Kind, method, startStorage) if err != nil { // if it errors because it's not found, we try to fetch it from the legacy storage - if apierrors.IsNotFound(err) { - d.recordStorageDuration(false, mode2Str, options.Kind, method, startStorage) - - log.Info("object not found in storage, fetching from legacy") - startLegacy := time.Now() - res, err = d.Legacy.Get(ctx, name, options) - if err != nil { - log.Error(err, "unable to fetch object from legacy") - d.recordLegacyDuration(true, mode2Str, options.Kind, method, startLegacy) - } - d.recordLegacyDuration(false, mode2Str, options.Kind, method, startLegacy) - return res, err + if !apierrors.IsNotFound(err) { + log.Error(err, "unable to fetch object from storage") + return objStorage, err } - d.recordStorageDuration(true, mode2Str, options.Kind, method, startStorage) - log.Error(err, "unable to fetch object from storage") - return res, err + log.Info("object not found in storage, fetching from legacy") } - return res, err + + startLegacy := time.Now() + objLegacy, err := d.Legacy.Get(ctx, name, options) + if err != nil { + log.Error(err, "unable to fetch object from legacy") + d.recordLegacyDuration(true, mode2Str, options.Kind, method, startLegacy) + return objLegacy, err + } + d.recordLegacyDuration(false, mode2Str, options.Kind, method, startLegacy) + + areEqual := Compare(objStorage, objLegacy) + d.recordOutcome(mode2Str, name, areEqual, method) + if !areEqual { + log.Info("object from legacy and storage are not equal") + } + + // if there is no object in storage, we return the object from legacy + if objStorage == nil { + d.recordReadLegacyCount(options.Kind, method) + return objLegacy, nil + } + return objStorage, err } // List overrides the behavior of the generic DualWriter. // It returns Storage entries if possible and falls back to LegacyStorage entries if not. func (d *DualWriterMode2) List(ctx context.Context, options *metainternalversion.ListOptions) (runtime.Object, error) { - log := d.Log.WithValues("kind", options.Kind, "resourceVersion", options.ResourceVersion, "kind", options.Kind) - ctx = klog.NewContext(ctx, log) var method = "list" + log := d.Log.WithValues("kind", options.Kind, "resourceVersion", options.ResourceVersion, "kind", options.Kind, "method", method) + ctx = klog.NewContext(ctx, log) startLegacy := time.Now() ll, err := d.Legacy.List(ctx, options) @@ -123,7 +141,6 @@ func (d *DualWriterMode2) List(ctx context.Context, options *metainternalversion return nil, err } - // TODO: why do we need this? 
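// Compact sketch of the mode-2 read order implemented in Get above: try the unified store
// first, treat NotFound as "fall back to legacy", and surface any other storage error rather
// than masking it with legacy data. The getter type is an illustrative stand-in for the real
// rest.Storage interfaces; only the apierrors helpers are actual Kubernetes API calls.
package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

type getter func(ctx context.Context, name string) (string, error)

func getWithFallback(ctx context.Context, storage, legacy getter, name string) (string, error) {
	obj, err := storage(ctx, name)
	if err == nil {
		return obj, nil
	}
	if !apierrors.IsNotFound(err) {
		return "", err // real storage failure: do not hide it behind legacy data
	}
	// Not found in the new store yet; the legacy store is still authoritative.
	return legacy(ctx, name)
}

func main() {
	gr := schema.GroupResource{Group: "example.grafana.app", Resource: "things"}
	storage := func(ctx context.Context, name string) (string, error) {
		return "", apierrors.NewNotFound(gr, name)
	}
	legacy := func(ctx context.Context, name string) (string, error) {
		return "legacy:" + name, nil
	}
	obj, err := getWithFallback(context.Background(), storage, legacy, "demo")
	fmt.Println(obj, err) // legacy:demo <nil>
}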
if optionsStorage.LabelSelector == nil { return ll, nil } @@ -148,22 +165,35 @@ func (d *DualWriterMode2) List(ctx context.Context, options *metainternalversion if err != nil { return nil, err } - if legacyIndex, ok := indexMap[accessor.GetName()]; ok { + name := accessor.GetName() + if legacyIndex, ok := indexMap[name]; ok { legacyList[legacyIndex] = obj + areEqual := Compare(obj, legacyList[legacyIndex]) + d.recordOutcome(mode2Str, name, areEqual, method) + if !areEqual { + log.WithValues("name", name).Info("object from legacy and storage are not equal") + } } } if err = meta.SetList(ll, legacyList); err != nil { return nil, err } + + // if the number of items in the legacy list and the storage list are the same, we can return the storage list + if len(storageList) == len(legacyList) { + return sl, nil + } + log.Info("lists from legacy and storage are not the same size") + d.recordReadLegacyCount(options.Kind, method) return ll, nil } // DeleteCollection overrides the behavior of the generic DualWriter and deletes from both LegacyStorage and Storage. func (d *DualWriterMode2) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) { - log := d.Log.WithValues("kind", options.Kind, "resourceVersion", listOptions.ResourceVersion) - ctx = klog.NewContext(ctx, log) var method = "delete-collection" + log := d.Log.WithValues("kind", options.Kind, "resourceVersion", listOptions.ResourceVersion, "method", method) + ctx = klog.NewContext(ctx, log) startLegacy := time.Now() deleted, err := d.Legacy.DeleteCollection(ctx, deleteValidation, options, listOptions) @@ -194,16 +224,23 @@ func (d *DualWriterMode2) DeleteCollection(ctx context.Context, deleteValidation if err != nil { log.WithValues("deleted", res).Error(err, "failed to delete collection successfully from Storage") d.recordStorageDuration(true, mode2Str, options.Kind, method, startStorage) + return res, err } d.recordStorageDuration(false, mode2Str, options.Kind, method, startStorage) + areEqual := Compare(res, deleted) + d.recordOutcome(mode2Str, getName(res), areEqual, method) + if !areEqual { + log.Info("object from legacy and storage are not equal") + } + return res, err } func (d *DualWriterMode2) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) { - log := d.Log.WithValues("name", name, "kind", options.Kind) - ctx = klog.NewContext(ctx, log) var method = "delete" + log := d.Log.WithValues("name", name, "kind", options.Kind, "method", method) + ctx = klog.NewContext(ctx, log) startLegacy := time.Now() deletedLS, async, err := d.Legacy.Delete(ctx, name, deleteValidation, options) @@ -223,14 +260,23 @@ func (d *DualWriterMode2) Delete(ctx context.Context, name string, deleteValidat log.WithValues("objectList", deletedS).Error(err, "could not delete from duplicate storage") d.recordStorageDuration(true, mode2Str, options.Kind, method, startStorage) } + return deletedS, async, err } d.recordStorageDuration(false, mode2Str, options.Kind, method, startStorage) - return deletedLS, async, err + + areEqual := Compare(deletedS, deletedLS) + d.recordOutcome(mode2Str, name, areEqual, method) + if !areEqual { + log.WithValues("name", name).Info("object from legacy and storage are not equal") + } + + return deletedS, async, err } // Update overrides the generic behavior of the Storage and writes first to the legacy storage and then to 
storage. func (d *DualWriterMode2) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { - log := d.Log.WithValues("name", name, "kind", options.Kind) + var method = "update" + log := d.Log.WithValues("name", name, "kind", options.Kind, "method", method) ctx = klog.NewContext(ctx, log) // get foundObj and (updated) object so they can be stored in legacy store @@ -276,8 +322,15 @@ func (d *DualWriterMode2) Update(ctx context.Context, name string, objInfo rest. res, created, err := d.Storage.Update(ctx, name, objInfo, createValidation, updateValidation, forceAllowCreate, options) if err != nil { log.WithValues("object", res).Error(err, "could not update in storage") + d.recordStorageDuration(true, mode2Str, options.Kind, "update", startStorage) + return res, created, err + } + + areEqual := Compare(res, obj) + d.recordOutcome(mode2Str, name, areEqual, method) + if !areEqual { + log.WithValues("name", name).Info("object from legacy and storage are not equal") } - d.recordStorageDuration(err != nil, mode2Str, options.Kind, "update", startStorage) return res, created, err } @@ -346,7 +399,6 @@ func enrichLegacyObject(originalObj, returnedObj runtime.Object) error { } accessorReturned.SetAnnotations(ac) - // otherwise, we propagate the original RV and UID accessorReturned.SetResourceVersion(accessorOriginal.GetResourceVersion()) accessorReturned.SetUID(accessorOriginal.GetUID()) return nil diff --git a/pkg/apiserver/rest/metrics.go b/pkg/apiserver/rest/metrics.go index 4e5db41e0e2..5e066daec50 100644 --- a/pkg/apiserver/rest/metrics.go +++ b/pkg/apiserver/rest/metrics.go @@ -9,9 +9,10 @@ import ( ) type dualWriterMetrics struct { - legacy *prometheus.HistogramVec - storage *prometheus.HistogramVec - outcome *prometheus.HistogramVec + legacy *prometheus.HistogramVec + storage *prometheus.HistogramVec + outcome *prometheus.HistogramVec + legacyReads *prometheus.CounterVec } // DualWriterStorageDuration is a metric summary for dual writer storage duration per mode @@ -38,6 +39,12 @@ var DualWriterOutcome = prometheus.NewHistogramVec(prometheus.HistogramOpts{ NativeHistogramBucketFactor: 1.1, }, []string{"mode", "name", "method"}) +var DualWriterReadLegacyCounts = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "dual_writer_read_legacy_count", + Help: "Histogram for the runtime of dual writer reads from legacy", + Namespace: "grafana", +}, []string{"kind", "method"}) + func (m *dualWriterMetrics) init(reg prometheus.Registerer) { log := klog.NewKlogr() m.legacy = DualWriterLegacyDuration @@ -61,7 +68,6 @@ func (m *dualWriterMetrics) recordStorageDuration(isError bool, mode string, nam m.storage.WithLabelValues(strconv.FormatBool(isError), mode, name, method).Observe(duration) } -// nolint:unused func (m *dualWriterMetrics) recordOutcome(mode string, name string, outcome bool, method string) { var observeValue float64 if outcome { @@ -69,3 +75,7 @@ func (m *dualWriterMetrics) recordOutcome(mode string, name string, outcome bool } m.outcome.WithLabelValues(mode, name, method).Observe(observeValue) } + +func (m *dualWriterMetrics) recordReadLegacyCount(kind string, method string) { + m.legacyReads.WithLabelValues(kind, method).Inc() +} diff --git a/pkg/apiserver/storage/file/file.go b/pkg/apiserver/storage/file/file.go index 8f4fafc97a7..8bd936a671a 100644 --- a/pkg/apiserver/storage/file/file.go +++ 
b/pkg/apiserver/storage/file/file.go @@ -27,6 +27,8 @@ import ( "k8s.io/apiserver/pkg/storage/storagebackend/factory" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" + + grafanaregistry "github.com/grafana/grafana/pkg/apiserver/registry/generic" ) const MaxUpdateAttempts = 30 @@ -37,13 +39,6 @@ var _ storage.Interface = (*Storage)(nil) // When we upgrade to 1.29 var errResourceVersionSetOnCreate = errors.New("resourceVersion should not be set on objects to be created") -type parsedKey struct { - group string - resource string - namespace string - name string -} - // Storage implements storage.Interface and storage resources as JSON files on disk. type Storage struct { root string @@ -291,14 +286,14 @@ func (s *Storage) Watch(ctx context.Context, key string, opts storage.ListOption return nil, apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) } - parsedkey, err := s.convertToParsedKey(key, p) + parsedkey, err := grafanaregistry.ParseKey(key) if err != nil { return nil, err } var namespace *string - if parsedkey.namespace != "" { - namespace = &parsedkey.namespace + if parsedkey.Namespace != "" { + namespace = &parsedkey.Namespace } if (opts.SendInitialEvents == nil && requestedRV == 0) || (opts.SendInitialEvents != nil && *opts.SendInitialEvents) { @@ -391,10 +386,15 @@ func (s *Storage) Get(ctx context.Context, key string, opts storage.GetOptions, // No RV generation locking in single item get since its read from the disk fpath := s.filePath(key) + rv, err := s.versioner.ParseResourceVersion(opts.ResourceVersion) + if err != nil { + return err + } + // Since it's a get, check if the dir exists and return early as needed dirname := filepath.Dir(fpath) if !exists(dirname) { - return apierrors.NewNotFound(s.gr, s.nameFromKey(key)) + return storage.NewKeyNotFoundError(key, int64(rv)) } obj, err := readFile(s.codec, fpath, func() runtime.Object { @@ -404,10 +404,6 @@ func (s *Storage) Get(ctx context.Context, key string, opts storage.GetOptions, if opts.IgnoreNotFound { return runtime.SetZeroValue(objPtr) } - rv, err := s.versioner.ParseResourceVersion(opts.ResourceVersion) - if err != nil { - return err - } return storage.NewKeyNotFoundError(key, int64(rv)) } @@ -603,10 +599,6 @@ func (s *Storage) GuaranteedUpdate( s.rvMutex.Lock() generatedRV := s.getNewResourceVersion() - if err != nil { - s.rvMutex.Unlock() - return err - } s.rvMutex.Unlock() if err := s.versioner.UpdateObject(updatedObj, generatedRV); err != nil { @@ -683,70 +675,6 @@ func (s *Storage) nameFromKey(key string) string { return strings.Replace(key, s.resourcePrefix+"/", "", 1) } -// While this is an inefficient way to differentiate the ambiguous keys, -// we only need it for initial namespace calculation in watch -// This helps us with watcher tests that don't always set up requestcontext correctly -func (s *Storage) convertToParsedKey(key string, p storage.SelectionPredicate) (*parsedKey, error) { - // NOTE: the following supports the watcher tests that run against v1/pods - // Other than that, there are ambiguities in the key format that only field selector - // when set to use metadata.name can be used to bring clarity in the 3-segment case - - // Cases handled below: - // namespace scoped: - // ///[]/[] - // ///[] - // - // cluster scoped: - // ///[] - // // - parts := strings.SplitN(key, "/", 5) - if len(parts) < 3 && s.gr.Group != "" { - return nil, fmt.Errorf("invalid key (expecting at least 2 parts): %s", key) - } - - if len(parts) < 2 && s.gr.Group == "" { - return nil, fmt.Errorf("invalid key 
(expecting at least 1 part): %s", key) - } - - // beware this empty "" as the first separated part for the rest of the parsing below - if parts[0] != "" { - return nil, fmt.Errorf("invalid key (expecting leading slash): %s", key) - } - - k := &parsedKey{} - - // for v1/pods that tests use, Group is empty - if len(parts) > 1 && s.gr.Group == "" { - k.resource = parts[1] - } - - if len(parts) > 2 { - // for v1/pods that tests use, Group is empty - if parts[1] == s.gr.Resource { - k.resource = parts[1] - if _, found := p.Field.RequiresExactMatch("metadata.name"); !found { - k.namespace = parts[2] - } - } else { - k.group = parts[1] - k.resource = parts[2] - } - } - - if len(parts) > 3 { - // for v1/pods that tests use, Group is empty - if parts[1] == s.gr.Resource { - k.name = parts[3] - } else { - if _, found := p.Field.RequiresExactMatch("metadata.name"); !found { - k.namespace = parts[3] - } - } - } - - return k, nil -} - func copyModifiedObjectToDestination(updatedObj runtime.Object, destination runtime.Object) error { u, err := conversion.EnforcePtr(updatedObj) if err != nil { diff --git a/pkg/apiserver/storage/file/restoptions.go b/pkg/apiserver/storage/file/restoptions.go index 52981cef7fe..69d0ca71d87 100644 --- a/pkg/apiserver/storage/file/restoptions.go +++ b/pkg/apiserver/storage/file/restoptions.go @@ -71,7 +71,7 @@ func (r *RESTOptionsGetter) GetRESTOptions(resource schema.GroupResource) (gener DeleteCollectionWorkers: 0, EnableGarbageCollection: false, // k8s expects forward slashes here, we'll convert them to os path separators in the storage - ResourcePrefix: "/" + resource.Group + "/" + resource.Resource, + ResourcePrefix: "/group/" + resource.Group + "/resource/" + resource.Resource, CountMetricPollPeriod: 1 * time.Second, StorageObjectCountTracker: storageConfig.Config.StorageObjectCountTracker, } diff --git a/pkg/apiserver/storage/file/watcher_test.go b/pkg/apiserver/storage/file/watcher_test.go index 51b72bf66ff..2eada00d284 100644 --- a/pkg/apiserver/storage/file/watcher_test.go +++ b/pkg/apiserver/storage/file/watcher_test.go @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/api/apitesting" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -24,7 +25,8 @@ import ( "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/apiserver/pkg/storage/storagebackend/factory" - storagetesting "k8s.io/apiserver/pkg/storage/testing" + + storagetesting "github.com/grafana/grafana/pkg/apiserver/storage/testing" ) var scheme = runtime.NewScheme() @@ -52,7 +54,7 @@ func withDefaults(options *setupOptions, t testing.TB) { options.newFunc = newPod options.newListFunc = newPodList options.prefix = t.TempDir() - options.resourcePrefix = "/pods" + options.resourcePrefix = storagetesting.KeyFunc("", "") options.groupResource = schema.GroupResource{Resource: "pods"} } @@ -70,7 +72,11 @@ func testSetup(t testing.TB, opts ...setupOption) (context.Context, storage.Inte config.ForResource(setupOpts.groupResource), setupOpts.resourcePrefix, func(obj runtime.Object) (string, error) { - return storage.NamespaceKeyFunc(setupOpts.resourcePrefix, obj) + accessor, err := meta.Accessor(obj) + if err != nil { + return "", err + } + return storagetesting.KeyFunc(accessor.GetNamespace(), accessor.GetName()), nil }, setupOpts.newFunc, setupOpts.newListFunc, @@ -80,7 +86,7 @@ func testSetup(t testing.TB, opts ...setupOption) 
(context.Context, storage.Inte ) // Some tests may start reading before writing - if err := os.MkdirAll(path.Join(setupOpts.prefix, "pods", "test-ns"), fs.ModePerm); err != nil { + if err := os.MkdirAll(path.Join(setupOpts.prefix, storagetesting.KeyFunc("test-ns", "")), fs.ModePerm); err != nil { return nil, nil, nil, err } @@ -95,7 +101,6 @@ func TestWatch(t *testing.T) { ctx, store, destroyFunc, err := testSetup(t) defer destroyFunc() assert.NoError(t, err) - storagetesting.RunTestWatch(ctx, t, store) } diff --git a/pkg/apiserver/storage/testing/store_tests.go b/pkg/apiserver/storage/testing/store_tests.go new file mode 100644 index 00000000000..1025c3781dd --- /dev/null +++ b/pkg/apiserver/storage/testing/store_tests.go @@ -0,0 +1,2691 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// Provenance-includes-location: https://github.com/kubernetes/apiserver/blob/master/pkg/storage/testing/store_tests.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: The Kubernetes Authors. + +package testing + +import ( + "context" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "testing" + + "github.com/google/go-cmp/cmp" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/apis/example" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/value" + "k8s.io/utils/ptr" +) + +type KeyValidation func(ctx context.Context, t *testing.T, key string) + +func RunTestCreate(ctx context.Context, t *testing.T, store storage.Interface, validation KeyValidation) { + tests := []struct { + name string + inputObj *example.Pod + expectedError error + }{{ + name: "successful create", + inputObj: &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}}, + }, { + name: "create with ResourceVersion set", + inputObj: &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "test-ns", ResourceVersion: "1"}}, + expectedError: storage.ErrResourceVersionSetOnCreate, + }} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + out := &example.Pod{} // reset + // verify that kv pair is empty before set + key := computePodKey(tt.inputObj) + if err := store.Get(ctx, key, storage.GetOptions{}, out); !storage.IsNotFound(err) { + t.Fatalf("expecting empty result on key %s, got %v", key, err) + } + + err := store.Create(ctx, key, tt.inputObj, out, 0) + if !errors.Is(err, tt.expectedError) { + t.Errorf("expecting error %v, but get: %v", tt.expectedError, err) + } + if err != nil { + return + } + // basic tests of the output + if tt.inputObj.ObjectMeta.Name != out.ObjectMeta.Name { + t.Errorf("pod name want=%s, get=%s", tt.inputObj.ObjectMeta.Name, out.ObjectMeta.Name) + } + if out.ResourceVersion == "" { + t.Errorf("output should have non-empty resource version") + } + validation(ctx, t, key) + }) + } +} + +func RunTestCreateWithTTL(ctx context.Context, t *testing.T, store storage.Interface) { + input := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}} + out := &example.Pod{} + + key := computePodKey(input) + if err := store.Create(ctx, key, input, out, 1); err != nil { + t.Fatalf("Create failed: %v", err) + } + + w, err := store.Watch(ctx, key, storage.ListOptions{ResourceVersion: out.ResourceVersion, Predicate: storage.Everything}) + if err != nil { + t.Fatalf("Watch failed: %v", 
err) + } + testCheckEventType(t, w, watch.Deleted) +} + +func RunTestCreateWithKeyExist(ctx context.Context, t *testing.T, store storage.Interface) { + obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}} + key, _ := testPropagateStore(ctx, t, store, obj) + out := &example.Pod{} + + err := store.Create(ctx, key, obj, out, 0) + if err == nil || !storage.IsExist(err) { + t.Errorf("expecting key exists error, but get: %s", err) + } +} + +func RunTestGet(ctx context.Context, t *testing.T, store storage.Interface) { + // create an object to test + key, createdObj := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}}) + // update the object once to allow get by exact resource version to be tested + updateObj := createdObj.DeepCopy() + updateObj.Annotations = map[string]string{"test-annotation": "1"} + storedObj := &example.Pod{} + err := store.GuaranteedUpdate(ctx, key, storedObj, true, nil, + func(_ runtime.Object, _ storage.ResponseMeta) (runtime.Object, *uint64, error) { + ttl := uint64(1) + return updateObj, &ttl, nil + }, nil) + if err != nil { + t.Fatalf("Update failed: %v", err) + } + // create an additional object to increment the resource version for pods above the resource version of the foo object + secondObj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "test-ns"}} + lastUpdatedObj := &example.Pod{} + if err := store.Create(ctx, computePodKey(secondObj), secondObj, lastUpdatedObj, 0); err != nil { + t.Fatalf("Set failed: %v", err) + } + + currentRV, _ := strconv.Atoi(storedObj.ResourceVersion) + lastUpdatedCurrentRV, _ := strconv.Atoi(lastUpdatedObj.ResourceVersion) + + // TODO(jpbetz): Add exact test cases + tests := []struct { + name string + key string + ignoreNotFound bool + expectNotFoundErr bool + expectRVTooLarge bool + expectedOut *example.Pod + expectedAlternatives []*example.Pod + rv string + }{{ + name: "get existing", + key: key, + ignoreNotFound: false, + expectNotFoundErr: false, + expectedOut: storedObj, + }, { + // For RV=0 arbitrarily old version is allowed, including from the moment + // when the object didn't yet exist. + // As a result, we allow it by setting ignoreNotFound and allowing an empty + // object in expectedOut. + name: "resource version 0", + key: key, + ignoreNotFound: true, + expectedAlternatives: []*example.Pod{{}, createdObj, storedObj}, + rv: "0", + }, { + // Given that Get with set ResourceVersion is effectively always + // NotOlderThan semantic, both versions of object are allowed. 
+ name: "object created resource version", + key: key, + expectedAlternatives: []*example.Pod{createdObj, storedObj}, + rv: createdObj.ResourceVersion, + }, { + name: "current object resource version, match=NotOlderThan", + key: key, + expectedOut: storedObj, + rv: fmt.Sprintf("%d", currentRV), + }, { + name: "latest resource version", + key: key, + expectedOut: storedObj, + rv: fmt.Sprintf("%d", lastUpdatedCurrentRV), + }, { + name: "too high resource version", + key: key, + expectRVTooLarge: true, + rv: strconv.FormatInt(math.MaxInt64, 10), + }, { + name: "get non-existing", + key: "/non-existing", + ignoreNotFound: false, + expectNotFoundErr: true, + }, { + name: "get non-existing, ignore not found", + key: "/non-existing", + ignoreNotFound: true, + expectNotFoundErr: false, + expectedOut: &example.Pod{}, + }} + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + // For some asynchronous implementations of storage interface (in particular watchcache), + // certain requests may impact result of further requests. As an example, if we first + // ensure that watchcache is synchronized up to ResourceVersion X (using Get/List requests + // with NotOlderThan semantic), the further requests (even specifying earlier resource + // version) will also return the result synchronized to at least ResourceVersion X. + // By parallelizing test cases we ensure that the order in which test cases are defined + // doesn't automatically preclude some scenarios from happening. + t.Parallel() + + out := &example.Pod{} + err := store.Get(ctx, tt.key, storage.GetOptions{IgnoreNotFound: tt.ignoreNotFound, ResourceVersion: tt.rv}, out) + if tt.expectNotFoundErr { + if err == nil || !storage.IsNotFound(err) { + t.Errorf("expecting not found error, but get: %v", err) + } + return + } + if tt.expectRVTooLarge { + if err == nil || !storage.IsTooLargeResourceVersion(err) { + t.Errorf("expecting resource version too high error, but get: %v", err) + } + return + } + if err != nil { + t.Fatalf("Get failed: %v", err) + } + + if tt.expectedAlternatives == nil { + expectNoDiff(t, fmt.Sprintf("%s: incorrect pod", tt.name), tt.expectedOut, out) + } else { + ExpectContains(t, fmt.Sprintf("%s: incorrect pod", tt.name), toInterfaceSlice(tt.expectedAlternatives), out) + } + }) + } +} + +func RunTestUnconditionalDelete(ctx context.Context, t *testing.T, store storage.Interface) { + key, storedObj := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}}) + + tests := []struct { + name string + key string + expectedObj *example.Pod + expectNotFoundErr bool + }{{ + name: "existing key", + key: key, + expectedObj: storedObj, + expectNotFoundErr: false, + }, { + name: "non-existing key", + key: "/non-existing", + expectedObj: nil, + expectNotFoundErr: true, + }} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + out := &example.Pod{} // reset + err := store.Delete(ctx, tt.key, out, nil, storage.ValidateAllObjectFunc, nil) + if tt.expectNotFoundErr { + if err == nil || !storage.IsNotFound(err) { + t.Errorf("expecting not found error, but get: %s", err) + } + return + } + if err != nil { + t.Fatalf("Delete failed: %v", err) + } + // We expect the resource version of the returned object to be + // updated compared to the last existing object. 
+ if storedObj.ResourceVersion == out.ResourceVersion { + t.Errorf("expecting resource version to be updated, but get: %s", out.ResourceVersion) + } + out.ResourceVersion = storedObj.ResourceVersion + expectNoDiff(t, "incorrect pod:", tt.expectedObj, out) + }) + } +} + +func RunTestConditionalDelete(ctx context.Context, t *testing.T, store storage.Interface) { + obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns", UID: "A"}} + key, storedObj := testPropagateStore(ctx, t, store, obj) + + tests := []struct { + name string + precondition *storage.Preconditions + expectInvalidObjErr bool + }{{ + name: "UID match", + precondition: storage.NewUIDPreconditions("A"), + expectInvalidObjErr: false, + }, { + name: "UID mismatch", + precondition: storage.NewUIDPreconditions("B"), + expectInvalidObjErr: true, + }} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + out := &example.Pod{} + err := store.Delete(ctx, key, out, tt.precondition, storage.ValidateAllObjectFunc, nil) + if tt.expectInvalidObjErr { + if err == nil || !storage.IsInvalidObj(err) { + t.Errorf("expecting invalid UID error, but get: %s", err) + } + return + } + if err != nil { + t.Fatalf("Delete failed: %v", err) + } + // We expect the resource version of the returned object to be + // updated compared to the last existing object. + if storedObj.ResourceVersion == out.ResourceVersion { + t.Errorf("expecting resource version to be updated, but get: %s", out.ResourceVersion) + } + out.ResourceVersion = storedObj.ResourceVersion + expectNoDiff(t, "incorrect pod:", storedObj, out) + obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns", UID: "A"}} + key, storedObj = testPropagateStore(ctx, t, store, obj) + }) + } +} + +// The following set of Delete tests are testing the logic of adding `suggestion` +// as a parameter with probably value of the current state. +// Introducing it for GuaranteedUpdate cause a number of issues, so we're addressing +// all of those upfront by adding appropriate tests: +// - https://github.com/kubernetes/kubernetes/pull/35415 +// [DONE] Lack of tests originally - added TestDeleteWithSuggestion. +// - https://github.com/kubernetes/kubernetes/pull/40664 +// [DONE] Irrelevant for delete, as Delete doesn't write data (nor compare it). +// - https://github.com/kubernetes/kubernetes/pull/47703 +// [DONE] Irrelevant for delete, because Delete doesn't persist data. +// - https://github.com/kubernetes/kubernetes/pull/48394/ +// [DONE] Irrelevant for delete, because Delete doesn't compare data. +// - https://github.com/kubernetes/kubernetes/pull/43152 +// [DONE] Added TestDeleteWithSuggestionAndConflict +// - https://github.com/kubernetes/kubernetes/pull/54780 +// [DONE] Irrelevant for delete, because Delete doesn't compare data. +// - https://github.com/kubernetes/kubernetes/pull/58375 +// [DONE] Irrelevant for delete, because Delete doesn't compare data. +// - https://github.com/kubernetes/kubernetes/pull/77619 +// [DONE] Added TestValidateDeletionWithSuggestion for corresponding delete checks. +// - https://github.com/kubernetes/kubernetes/pull/78713 +// [DONE] Bug was in getState function which is shared with the new code. 
+// - https://github.com/kubernetes/kubernetes/pull/78713 +// [DONE] Added TestPreconditionalDeleteWithSuggestion + +func RunTestDeleteWithSuggestion(ctx context.Context, t *testing.T, store storage.Interface) { + key, originalPod := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: "test-ns"}}) + + out := &example.Pod{} + if err := store.Delete(ctx, key, out, nil, storage.ValidateAllObjectFunc, originalPod); err != nil { + t.Errorf("Unexpected failure during deletion: %v", err) + } + + if err := store.Get(ctx, key, storage.GetOptions{}, &example.Pod{}); !storage.IsNotFound(err) { + t.Errorf("Unexpected error on reading object: %v", err) + } +} + +func RunTestDeleteWithSuggestionAndConflict(ctx context.Context, t *testing.T, store storage.Interface) { + key, originalPod := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: "test-ns"}}) + + // First update, so originalPod is outdated. + updatedPod := &example.Pod{} + if err := store.GuaranteedUpdate(ctx, key, updatedPod, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + pod := obj.(*example.Pod) + pod.ObjectMeta.Labels = map[string]string{"foo": "bar"} + return pod, nil + }), nil); err != nil { + t.Errorf("Unexpected failure during updated: %v", err) + } + + out := &example.Pod{} + if err := store.Delete(ctx, key, out, nil, storage.ValidateAllObjectFunc, originalPod); err != nil { + t.Errorf("Unexpected failure during deletion: %v", err) + } + + if err := store.Get(ctx, key, storage.GetOptions{}, &example.Pod{}); !storage.IsNotFound(err) { + t.Errorf("Unexpected error on reading object: %v", err) + } + updatedPod.ObjectMeta.ResourceVersion = out.ObjectMeta.ResourceVersion + expectNoDiff(t, "incorrect pod:", updatedPod, out) +} + +// RunTestDeleteWithConflict tests the case when another conflicting update happened before the delete completed. +func RunTestDeleteWithConflict(ctx context.Context, t *testing.T, store storage.Interface) { + key, _ := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: "test-ns"}}) + + // First update, so originalPod is outdated. + updatedPod := &example.Pod{} + validateCount := 0 + updateCount := 0 + // Simulate a conflicting update in the middle of delete. 
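+	// Descriptive note: the validation callback defined below issues a GuaranteedUpdate the
+	// first time it is invoked, so the Delete call runs into a conflict and has to re-validate
+	// against the fresh object. The counters checked further down expect validation to run
+	// twice and the injected update to run exactly once.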
+ validateAllWithUpdate := func(_ context.Context, _ runtime.Object) error { + validateCount++ + if validateCount > 1 { + return nil + } + if err := store.GuaranteedUpdate(ctx, key, updatedPod, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + pod := obj.(*example.Pod) + pod.ObjectMeta.Labels = map[string]string{"foo": "bar"} + return pod, nil + }), nil); err != nil { + t.Errorf("Unexpected failure during updated: %v", err) + } + updateCount++ + return nil + } + + out := &example.Pod{} + if err := store.Delete(ctx, key, out, nil, validateAllWithUpdate, nil); err != nil { + t.Errorf("Unexpected failure during deletion: %v", err) + } + + if validateCount != 2 { + t.Errorf("Expect validateCount = %d, but got %d", 2, validateCount) + } + if updateCount != 1 { + t.Errorf("Expect updateCount = %d, but got %d", 1, updateCount) + } + + if err := store.Get(ctx, key, storage.GetOptions{}, &example.Pod{}); !storage.IsNotFound(err) { + t.Errorf("Unexpected error on reading object: %v", err) + } + updatedPod.ObjectMeta.ResourceVersion = out.ObjectMeta.ResourceVersion + expectNoDiff(t, "incorrect pod:", updatedPod, out) +} + +func RunTestDeleteWithSuggestionOfDeletedObject(ctx context.Context, t *testing.T, store storage.Interface) { + key, originalPod := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: "test-ns"}}) + + // First delete, so originalPod is outdated. + deletedPod := &example.Pod{} + if err := store.Delete(ctx, key, deletedPod, nil, storage.ValidateAllObjectFunc, originalPod); err != nil { + t.Errorf("Unexpected failure during deletion: %v", err) + } + + // Now try deleting with stale object. + out := &example.Pod{} + if err := store.Delete(ctx, key, out, nil, storage.ValidateAllObjectFunc, originalPod); !storage.IsNotFound(err) { + t.Errorf("Unexpected error during deletion: %v, expected not-found", err) + } +} + +func RunTestValidateDeletionWithSuggestion(ctx context.Context, t *testing.T, store storage.Interface) { + key, originalPod := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: "test-ns"}}) + + // Check that validaing fresh object fails is called once and fails. + validationCalls := 0 + validationError := fmt.Errorf("validation error") + validateNothing := func(_ context.Context, _ runtime.Object) error { + validationCalls++ + return validationError + } + out := &example.Pod{} + if err := store.Delete(ctx, key, out, nil, validateNothing, originalPod); !errors.Is(err, validationError) { + t.Errorf("Unexpected failure during deletion: %v", err) + } + if validationCalls != 1 { + t.Errorf("validate function should have been called once, called %d", validationCalls) + } + + // First update, so originalPod is outdated. 
+ updatedPod := &example.Pod{} + if err := store.GuaranteedUpdate(ctx, key, updatedPod, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + pod := obj.(*example.Pod) + pod.ObjectMeta.Labels = map[string]string{"foo": "bar"} + return pod, nil + }), nil); err != nil { + t.Errorf("Unexpected failure during updated: %v", err) + } + + calls := 0 + validateFresh := func(_ context.Context, obj runtime.Object) error { + calls++ + pod := obj.(*example.Pod) + if pod.ObjectMeta.Labels == nil || pod.ObjectMeta.Labels["foo"] != "bar" { + return fmt.Errorf("stale object") + } + return nil + } + + if err := store.Delete(ctx, key, out, nil, validateFresh, originalPod); err != nil { + t.Errorf("Unexpected failure during deletion: %v", err) + } + + // Implementations of the storage interface are allowed to ignore the suggestion, + // in which case just one validation call is possible. + if calls > 2 { + t.Errorf("validate function should have been called at most twice, called %d", calls) + } + + if err := store.Get(ctx, key, storage.GetOptions{}, &example.Pod{}); !storage.IsNotFound(err) { + t.Errorf("Unexpected error on reading object: %v", err) + } +} + +// RunTestValidateDeletionWithOnlySuggestionValid tests the case of delete with validateDeletion function, +// when the suggested cachedExistingObject passes the validate function while the current version does not pass the validate function. +func RunTestValidateDeletionWithOnlySuggestionValid(ctx context.Context, t *testing.T, store storage.Interface) { + key, originalPod := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: "test-ns", Labels: map[string]string{"foo": "bar"}}}) + + // Check that validaing fresh object fails is called once and fails. + validationCalls := 0 + validationError := fmt.Errorf("validation error") + validateNothing := func(_ context.Context, _ runtime.Object) error { + validationCalls++ + return validationError + } + out := &example.Pod{} + if err := store.Delete(ctx, key, out, nil, validateNothing, originalPod); !errors.Is(err, validationError) { + t.Errorf("Unexpected failure during deletion: %v", err) + } + if validationCalls != 1 { + t.Errorf("validate function should have been called once, called %d", validationCalls) + } + + // First update, so originalPod is outdated. + updatedPod := &example.Pod{} + if err := store.GuaranteedUpdate(ctx, key, updatedPod, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + pod := obj.(*example.Pod) + pod.ObjectMeta.Labels = map[string]string{"foo": "barbar"} + return pod, nil + }), nil); err != nil { + t.Errorf("Unexpected failure during updated: %v", err) + } + + calls := 0 + validateFresh := func(_ context.Context, obj runtime.Object) error { + calls++ + pod := obj.(*example.Pod) + if pod.ObjectMeta.Labels == nil || pod.ObjectMeta.Labels["foo"] != "bar" { + return fmt.Errorf("stale object") + } + return nil + } + + err := store.Delete(ctx, key, out, nil, validateFresh, originalPod) + if err == nil || err.Error() != "stale object" { + t.Errorf("expecting stale object error, but get: %s", err) + } + + // Implementations of the storage interface are allowed to ignore the suggestion, + // in which case just one validation call is possible. 
+ if calls > 2 { + t.Errorf("validate function should have been called at most twice, called %d", calls) + } + + if err = store.Get(ctx, key, storage.GetOptions{}, out); err != nil { + t.Errorf("Unexpected error on reading object: %v", err) + } + expectNoDiff(t, "incorrect pod:", updatedPod, out) +} + +func RunTestPreconditionalDeleteWithSuggestion(ctx context.Context, t *testing.T, store storage.Interface) { + key, originalPod := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: "test-ns"}}) + + // First update, so originalPod is outdated. + updatedPod := &example.Pod{} + if err := store.GuaranteedUpdate(ctx, key, updatedPod, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + pod := obj.(*example.Pod) + pod.ObjectMeta.UID = "myUID" + return pod, nil + }), nil); err != nil { + t.Errorf("Unexpected failure during updated: %v", err) + } + + prec := storage.NewUIDPreconditions("myUID") + + out := &example.Pod{} + if err := store.Delete(ctx, key, out, prec, storage.ValidateAllObjectFunc, originalPod); err != nil { + t.Errorf("Unexpected failure during deletion: %v", err) + } + + if err := store.Get(ctx, key, storage.GetOptions{}, &example.Pod{}); !storage.IsNotFound(err) { + t.Errorf("Unexpected error on reading object: %v", err) + } +} + +// RunTestPreconditionalDeleteWithOnlySuggestionPass tests the case of delete with preconditions, +// when the suggested cachedExistingObject passes the preconditions while the current version does not pass the preconditions. +func RunTestPreconditionalDeleteWithOnlySuggestionPass(ctx context.Context, t *testing.T, store storage.Interface) { + key, originalPod := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: "test-ns", UID: "myUID"}}) + + // First update, so originalPod is outdated. + updatedPod := &example.Pod{} + if err := store.GuaranteedUpdate(ctx, key, updatedPod, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + pod := obj.(*example.Pod) + pod.ObjectMeta.UID = "otherUID" + return pod, nil + }), nil); err != nil { + t.Errorf("Unexpected failure during updated: %v", err) + } + + prec := storage.NewUIDPreconditions("myUID") + // Although originalPod passes the precondition, its delete would fail due to conflict. + // The 2nd try with updatedPod would fail the precondition. + out := &example.Pod{} + err := store.Delete(ctx, key, out, prec, storage.ValidateAllObjectFunc, originalPod) + if err == nil || !storage.IsInvalidObj(err) { + t.Errorf("expecting invalid UID error, but get: %s", err) + } + + if err = store.Get(ctx, key, storage.GetOptions{}, out); err != nil { + t.Errorf("Unexpected error on reading object: %v", err) + } + expectNoDiff(t, "incorrect pod:", updatedPod, out) +} + +func RunTestList(ctx context.Context, t *testing.T, store storage.Interface, compaction Compaction, ignoreWatchCacheTests bool) { + initialRV, preset, err := seedMultiLevelData(ctx, store) + if err != nil { + t.Fatal(err) + } + + list := &example.PodList{} + storageOpts := storage.ListOptions{ + // Ensure we're listing from "now". 
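+		// An empty ResourceVersion requests a consistent read, so this initial list (and the
+		// continue token derived from it below) is expected to reflect the latest state rather
+		// than a possibly older revision.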
+ ResourceVersion: "", + Predicate: storage.Everything, + Recursive: true, + } + if err := store.GetList(ctx, "/second", storageOpts, list); err != nil { + t.Errorf("Unexpected error: %v", err) + } + continueRV, _ := strconv.Atoi(list.ResourceVersion) + secondContinuation, err := storage.EncodeContinue("/second/foo", "/second/", int64(continueRV)) + if err != nil { + t.Fatal(err) + } + + getAttrs := func(obj runtime.Object) (labels.Set, fields.Set, error) { + pod := obj.(*example.Pod) + return nil, fields.Set{"metadata.name": pod.Name, "spec.nodeName": pod.Spec.NodeName}, nil + } + // Use compact to increase etcd global revision without changes to any resources. + // The increase in resources version comes from Kubernetes compaction updating hidden key. + // Used to test consistent List to confirm it returns latest etcd revision. + compaction(ctx, t, initialRV) + currentRV := fmt.Sprintf("%d", continueRV+1) + + tests := []struct { + name string + rv string + rvMatch metav1.ResourceVersionMatch + prefix string + pred storage.SelectionPredicate + ignoreForWatchCache bool + expectedOut []example.Pod + expectedAlternatives [][]example.Pod + expectContinue bool + expectedRemainingItemCount *int64 + expectError bool + expectRVTooLarge bool + expectRV string + expectRVFunc func(string) error + }{ + { + name: "rejects invalid resource version", + prefix: "/pods", + pred: storage.Everything, + rv: "abc", + expectError: true, + }, + { + name: "rejects resource version and continue token", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: 1, + Continue: secondContinuation, + }, + rv: "1", + expectError: true, + }, + { + name: "rejects resource version set too high", + prefix: "/pods", + rv: strconv.FormatInt(math.MaxInt64, 10), + expectRVTooLarge: true, + }, + { + name: "test List on existing key", + prefix: "/pods/first/", + pred: storage.Everything, + expectedOut: []example.Pod{*preset[0]}, + }, + { + name: "test List on existing key with resource version set to 0", + prefix: "/pods/first/", + pred: storage.Everything, + expectedAlternatives: [][]example.Pod{{}, {*preset[0]}}, + rv: "0", + }, + { + name: "test List on existing key with resource version set before first write, match=Exact", + prefix: "/pods/first/", + pred: storage.Everything, + expectedOut: []example.Pod{}, + rv: initialRV, + rvMatch: metav1.ResourceVersionMatchExact, + expectRV: initialRV, + }, + { + name: "test List on existing key with resource version set to 0, match=NotOlderThan", + prefix: "/pods/first/", + pred: storage.Everything, + expectedAlternatives: [][]example.Pod{{}, {*preset[0]}}, + rv: "0", + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + }, + { + name: "test List on existing key with resource version set to 0, match=Invalid", + prefix: "/pods/first/", + pred: storage.Everything, + rv: "0", + rvMatch: "Invalid", + expectError: true, + }, + { + name: "test List on existing key with resource version set before first write, match=NotOlderThan", + prefix: "/pods/first/", + pred: storage.Everything, + expectedAlternatives: [][]example.Pod{{}, {*preset[0]}}, + rv: initialRV, + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + }, + { + name: "test List on existing key with resource version set before first write, match=Invalid", + prefix: "/pods/first/", + pred: storage.Everything, + rv: initialRV, + rvMatch: "Invalid", + expectError: true, + }, + { + name: "test List on existing key with resource version set to current resource version", + 
prefix: "/pods/first/", + pred: storage.Everything, + expectedOut: []example.Pod{*preset[0]}, + rv: list.ResourceVersion, + }, + { + name: "test List on existing key with resource version set to current resource version, match=Exact", + prefix: "/pods/first/", + pred: storage.Everything, + expectedOut: []example.Pod{*preset[0]}, + rv: list.ResourceVersion, + rvMatch: metav1.ResourceVersionMatchExact, + expectRV: list.ResourceVersion, + }, + { + name: "test List on existing key with resource version set to current resource version, match=NotOlderThan", + prefix: "/pods/first/", + pred: storage.Everything, + expectedOut: []example.Pod{*preset[0]}, + rv: list.ResourceVersion, + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + }, + { + name: "test List on non-existing key", + prefix: "/pods/non-existing/", + pred: storage.Everything, + expectedOut: []example.Pod{}, + }, + { + name: "test List with pod name matching", + prefix: "/pods/first/", + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.ParseSelectorOrDie("metadata.name!=bar"), + }, + expectedOut: []example.Pod{}, + }, + { + name: "test List with pod name matching with resource version set to current resource version, match=NotOlderThan", + prefix: "/pods/first/", + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.ParseSelectorOrDie("metadata.name!=bar"), + }, + expectedOut: []example.Pod{}, + rv: list.ResourceVersion, + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + }, + { + name: "test List with limit", + prefix: "/pods/second/", + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: 1, + }, + expectedOut: []example.Pod{*preset[1]}, + expectContinue: true, + expectedRemainingItemCount: ptr.To(int64(1)), + }, + { + name: "test List with limit at current resource version", + prefix: "/pods/second/", + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: 1, + }, + expectedOut: []example.Pod{*preset[1]}, + expectContinue: true, + expectedRemainingItemCount: ptr.To(int64(1)), + rv: list.ResourceVersion, + expectRV: list.ResourceVersion, + }, + { + name: "test List with limit at current resource version and match=Exact", + prefix: "/pods/second/", + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: 1, + }, + expectedOut: []example.Pod{*preset[1]}, + expectContinue: true, + expectedRemainingItemCount: ptr.To(int64(1)), + rv: list.ResourceVersion, + rvMatch: metav1.ResourceVersionMatchExact, + expectRV: list.ResourceVersion, + }, + { + name: "test List with limit at current resource version and match=NotOlderThan", + prefix: "/pods/second/", + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: 1, + }, + expectedOut: []example.Pod{*preset[1]}, + expectContinue: true, + expectedRemainingItemCount: ptr.To(int64(1)), + rv: list.ResourceVersion, + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + expectRVFunc: resourceVersionNotOlderThan(list.ResourceVersion), + }, + { + name: "test List with limit at resource version 0", + prefix: "/pods/second/", + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: 1, + }, + // TODO(#108003): As of now, watchcache is deliberately ignoring + // limit if RV=0 is specified, returning whole list of objects. 
+ // While this should eventually get fixed, for now we're explicitly + // ignoring this testcase for watchcache. + ignoreForWatchCache: true, + expectedOut: []example.Pod{*preset[1]}, + expectContinue: true, + expectedRemainingItemCount: ptr.To(int64(1)), + rv: "0", + expectRVFunc: resourceVersionNotOlderThan(list.ResourceVersion), + }, + { + name: "test List with limit at resource version 0 match=NotOlderThan", + prefix: "/pods/second/", + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: 1, + }, + // TODO(#108003): As of now, watchcache is deliberately ignoring + // limit if RV=0 is specified, returning whole list of objects. + // While this should eventually get fixed, for now we're explicitly + // ignoring this testcase for watchcache. + ignoreForWatchCache: true, + expectedOut: []example.Pod{*preset[1]}, + expectContinue: true, + expectedRemainingItemCount: ptr.To(int64(1)), + rv: "0", + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + expectRVFunc: resourceVersionNotOlderThan(list.ResourceVersion), + }, + { + name: "test List with limit at resource version before first write and match=Exact", + prefix: "/pods/second/", + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: 1, + }, + expectedOut: []example.Pod{}, + expectContinue: false, + rv: initialRV, + rvMatch: metav1.ResourceVersionMatchExact, + expectRV: initialRV, + }, + { + name: "test List with pregenerated continue token", + prefix: "/pods/second/", + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: 1, + Continue: secondContinuation, + }, + expectedOut: []example.Pod{*preset[2]}, + }, + { + name: "ignores resource version 0 for List with pregenerated continue token", + prefix: "/pods/second/", + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: 1, + Continue: secondContinuation, + }, + rv: "0", + expectedOut: []example.Pod{*preset[2]}, + }, + { + name: "test List with multiple levels of directories and expect flattened result", + prefix: "/pods/second/", + pred: storage.Everything, + expectedOut: []example.Pod{*preset[1], *preset[2]}, + }, + { + name: "test List with multiple levels of directories and expect flattened result with current resource version and match=NotOlderThan", + prefix: "/pods/second/", + pred: storage.Everything, + rv: list.ResourceVersion, + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + expectedOut: []example.Pod{*preset[1], *preset[2]}, + }, + { + name: "test List with filter returning only one item, ensure only a single page returned", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "barfoo"), + Label: labels.Everything(), + Limit: 1, + }, + expectedOut: []example.Pod{*preset[3]}, + expectContinue: true, + }, + { + name: "test List with filter returning only one item, ensure only a single page returned with current resource version and match=NotOlderThan", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "barfoo"), + Label: labels.Everything(), + Limit: 1, + }, + rv: list.ResourceVersion, + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + expectedOut: []example.Pod{*preset[3]}, + expectContinue: true, + }, + { + name: "test List with filter returning only one item, covers the entire list", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: 
fields.OneTermEqualSelector("metadata.name", "barfoo"), + Label: labels.Everything(), + Limit: 2, + }, + expectedOut: []example.Pod{*preset[3]}, + expectContinue: false, + }, + { + name: "test List with filter returning only one item, covers the entire list with current resource version and match=NotOlderThan", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "barfoo"), + Label: labels.Everything(), + Limit: 2, + }, + rv: list.ResourceVersion, + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + expectedOut: []example.Pod{*preset[3]}, + expectContinue: false, + }, + { + name: "test List with filter returning only one item, covers the entire list, with resource version 0", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "barfoo"), + Label: labels.Everything(), + Limit: 2, + }, + rv: "0", + expectedAlternatives: [][]example.Pod{{}, {*preset[3]}}, + expectContinue: false, + }, + { + name: "test List with filter returning two items, more pages possible", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "bar"), + Label: labels.Everything(), + Limit: 2, + }, + expectContinue: true, + expectedOut: []example.Pod{*preset[0], *preset[1]}, + }, + { + name: "test List with filter returning two items, more pages possible with current resource version and match=NotOlderThan", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "bar"), + Label: labels.Everything(), + Limit: 2, + }, + rv: list.ResourceVersion, + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + expectContinue: true, + expectedOut: []example.Pod{*preset[0], *preset[1]}, + }, + { + name: "filter returns two items split across multiple pages", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "foo"), + Label: labels.Everything(), + Limit: 2, + }, + expectedOut: []example.Pod{*preset[2], *preset[4]}, + }, + { + name: "filter returns two items split across multiple pages with current resource version and match=NotOlderThan", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "foo"), + Label: labels.Everything(), + Limit: 2, + }, + rv: list.ResourceVersion, + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + expectedOut: []example.Pod{*preset[2], *preset[4]}, + }, + { + name: "filter returns one item for last page, ends on last item, not full", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "foo"), + Label: labels.Everything(), + Limit: 2, + Continue: encodeContinueOrDie("third/barfoo", int64(continueRV)), + }, + expectedOut: []example.Pod{*preset[4]}, + }, + { + name: "filter returns one item for last page, starts on last item, full", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "foo"), + Label: labels.Everything(), + Limit: 1, + Continue: encodeContinueOrDie("third/barfoo", int64(continueRV)), + }, + expectedOut: []example.Pod{*preset[4]}, + }, + { + name: "filter returns one item for last page, starts on last item, partial page", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "foo"), + Label: labels.Everything(), + Limit: 2, + Continue: encodeContinueOrDie("third/barfoo", int64(continueRV)), + }, + 
expectedOut: []example.Pod{*preset[4]}, + }, + { + name: "filter returns two items, page size equal to total list size", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "foo"), + Label: labels.Everything(), + Limit: 5, + }, + expectedOut: []example.Pod{*preset[2], *preset[4]}, + }, + { + name: "filter returns two items, page size equal to total list size with current resource version and match=NotOlderThan", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "foo"), + Label: labels.Everything(), + Limit: 5, + }, + rv: list.ResourceVersion, + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + expectedOut: []example.Pod{*preset[2], *preset[4]}, + }, + { + name: "filter returns one item, page size equal to total list size", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "barfoo"), + Label: labels.Everything(), + Limit: 5, + }, + expectedOut: []example.Pod{*preset[3]}, + }, + { + name: "filter returns one item, page size equal to total list size with current resource version and match=NotOlderThan", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "barfoo"), + Label: labels.Everything(), + Limit: 5, + }, + rv: list.ResourceVersion, + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + expectedOut: []example.Pod{*preset[3]}, + }, + { + name: "list all items", + prefix: "/pods", + pred: storage.Everything, + expectedOut: []example.Pod{*preset[0], *preset[1], *preset[2], *preset[3], *preset[4]}, + }, + { + name: "list all items with current resource version and match=NotOlderThan", + prefix: "/pods", + pred: storage.Everything, + rv: list.ResourceVersion, + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + expectedOut: []example.Pod{*preset[0], *preset[1], *preset[2], *preset[3], *preset[4]}, + }, + { + name: "verify list returns updated version of object; filter returns one item, page size equal to total list size with current resource version and match=NotOlderThan", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("spec.nodeName", "fakeNode"), + Label: labels.Everything(), + Limit: 5, + }, + rv: list.ResourceVersion, + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + expectedOut: []example.Pod{*preset[0]}, + }, + { + name: "verify list does not return deleted object; filter for deleted object, page size equal to total list size with current resource version and match=NotOlderThan", + prefix: "/pods", + pred: storage.SelectionPredicate{ + Field: fields.OneTermEqualSelector("metadata.name", "baz"), + Label: labels.Everything(), + Limit: 5, + }, + rv: list.ResourceVersion, + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + expectedOut: []example.Pod{}, + }, + { + name: "test consistent List", + prefix: "/pods/empty", + pred: storage.Everything, + rv: "", + expectRV: currentRV, + expectedOut: []example.Pod{}, + }, + { + name: "test non-consistent List", + prefix: "/pods/empty", + pred: storage.Everything, + rv: "0", + expectRVFunc: resourceVersionNotOlderThan(list.ResourceVersion), + expectedOut: []example.Pod{}, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + // For some asynchronous implementations of storage interface (in particular watchcache), + // certain requests may impact result of further requests. 
As an example, if we first + // ensure that watchcache is synchronized up to ResourceVersion X (using Get/List requests + // with NotOlderThan semantic), the further requests (even specifying earlier resource + // version) will also return the result synchronized to at least ResourceVersion X. + // By parallelizing test cases we ensure that the order in which test cases are defined + // doesn't automatically preclude some scenarios from happening. + t.Parallel() + + if ignoreWatchCacheTests && tt.ignoreForWatchCache { + t.Skip() + } + + if tt.pred.GetAttrs == nil { + tt.pred.GetAttrs = getAttrs + } + + out := &example.PodList{} + storageOpts := storage.ListOptions{ + ResourceVersion: tt.rv, + ResourceVersionMatch: tt.rvMatch, + Predicate: tt.pred, + Recursive: true, + } + err := store.GetList(ctx, tt.prefix, storageOpts, out) + if tt.expectRVTooLarge { + if err == nil || !apierrors.IsTimeout(err) || !storage.IsTooLargeResourceVersion(err) { + t.Fatalf("expecting resource version too high error, but get: %s", err) + } + return + } + + if err != nil { + if !tt.expectError { + t.Fatalf("GetList failed: %v", err) + } + return + } + if tt.expectError { + t.Fatalf("expected error but got none") + } + if (len(out.Continue) > 0) != tt.expectContinue { + t.Errorf("unexpected continue token: %q", out.Continue) + } + + // If a client requests an exact resource version, it must be echoed back to them. + if tt.expectRV != "" { + if tt.expectRV != out.ResourceVersion { + t.Errorf("resourceVersion in list response want=%s, got=%s", tt.expectRV, out.ResourceVersion) + } + } + if tt.expectRVFunc != nil { + if err := tt.expectRVFunc(out.ResourceVersion); err != nil { + t.Errorf("resourceVersion in list response invalid: %v", err) + } + } + + if tt.expectedAlternatives == nil { + sort.Sort(sortablePodList(tt.expectedOut)) + expectNoDiff(t, "incorrect list pods", tt.expectedOut, out.Items) + } else { + ExpectContains(t, "incorrect list pods", toInterfaceSlice(tt.expectedAlternatives), out.Items) + } + if !cmp.Equal(tt.expectedRemainingItemCount, out.RemainingItemCount) { + t.Fatalf("unexpected remainingItemCount, diff: %s", cmp.Diff(tt.expectedRemainingItemCount, out.RemainingItemCount)) + } + }) + } +} + +func RunTestConsistentList(ctx context.Context, t *testing.T, store storage.Interface, compaction Compaction, cacheEnabled, consistentReadsSupported bool) { + outPod := &example.Pod{} + inPod := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "foo"}} + err := store.Create(ctx, computePodKey(inPod), inPod, outPod, 0) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + lastObjecRV := outPod.ResourceVersion + compaction(ctx, t, outPod.ResourceVersion) + parsedRV, _ := strconv.Atoi(outPod.ResourceVersion) + currentRV := fmt.Sprintf("%d", parsedRV+1) + + firstNonConsistentReadRV := lastObjecRV + if consistentReadsSupported && !cacheEnabled { + firstNonConsistentReadRV = currentRV + } + + secondNonConsistentReadRV := lastObjecRV + if consistentReadsSupported { + secondNonConsistentReadRV = currentRV + } + + tcs := []struct { + name string + requestRV string + expectResponseRV string + }{ + { + name: "Non-consistent list before sync", + requestRV: "0", + expectResponseRV: firstNonConsistentReadRV, + }, + { + name: "Consistent request returns currentRV", + requestRV: "", + expectResponseRV: currentRV, + }, + { + name: "Non-consistent request after sync returns currentRV", + requestRV: "0", + expectResponseRV: secondNonConsistentReadRV, + }, + } + for _, tc := range tcs { + 
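+		// Each case issues a list with the request RV above: an RV="0" read before any
+		// consistent read may still report the last object's RV, the RV="" read must report
+		// currentRV, and a later RV="0" read is expected to have caught up when consistent
+		// reads are supported (see the firstNonConsistentReadRV / secondNonConsistentReadRV
+		// derivation above).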
t.Run(tc.name, func(t *testing.T) { + out := &example.PodList{} + opts := storage.ListOptions{ + ResourceVersion: tc.requestRV, + Predicate: storage.Everything, + } + err = store.GetList(ctx, "/pods/empty", opts, out) + if err != nil { + t.Fatalf("GetList failed: %v", err) + } + if out.ResourceVersion != tc.expectResponseRV { + t.Errorf("resourceVersion in list response want=%s, got=%s", tc.expectResponseRV, out.ResourceVersion) + } + }) + } +} + +// seedMultiLevelData creates a set of keys with a multi-level structure, returning a resourceVersion +// from before any were created along with the full set of objects that were persisted +func seedMultiLevelData(ctx context.Context, store storage.Interface) (string, []*example.Pod, error) { + // Setup storage with the following structure: + // / + // - first/ + // | - bar + // | + // - second/ + // | - bar + // | - foo + // | - [deleted] baz + // | + // - third/ + // | - barfoo + // | - foo + barFirst := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "first", Name: "bar"}} + barSecond := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "second", Name: "bar"}} + fooSecond := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "second", Name: "foo"}} + bazSecond := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "second", Name: "baz"}} + barfooThird := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "third", Name: "barfoo"}} + fooThird := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "third", Name: "foo"}} + + preset := []struct { + key string + obj *example.Pod + storedObj *example.Pod + }{ + { + key: computePodKey(barFirst), + obj: barFirst, + }, + { + key: computePodKey(barSecond), + obj: barSecond, + }, + { + key: computePodKey(fooSecond), + obj: fooSecond, + }, + { + key: computePodKey(barfooThird), + obj: barfooThird, + }, + { + key: computePodKey(fooThird), + obj: fooThird, + }, + { + key: computePodKey(bazSecond), + obj: bazSecond, + }, + } + + // we want to figure out the resourceVersion before we create anything + initialList := &example.PodList{} + if err := store.GetList(ctx, "/pods", storage.ListOptions{Predicate: storage.Everything, Recursive: true}, initialList); err != nil { + return "", nil, fmt.Errorf("failed to determine starting resourceVersion: %w", err) + } + initialRV := initialList.ResourceVersion + + for i, ps := range preset { + preset[i].storedObj = &example.Pod{} + err := store.Create(ctx, ps.key, ps.obj, preset[i].storedObj, 0) + if err != nil { + return "", nil, fmt.Errorf("failed to create object: %w", err) + } + } + + // For barFirst, we first create it with key /pods/first/bar and then we update + // it by changing its spec.nodeName. The point of doing this is to be able to + // test that if a pod with key /pods/first/bar is in fact returned, the returned + // pod is the updated one (i.e. with spec.nodeName changed). + preset[0].storedObj = &example.Pod{} + if err := store.GuaranteedUpdate(ctx, computePodKey(barFirst), preset[0].storedObj, true, nil, + func(input runtime.Object, _ storage.ResponseMeta) (output runtime.Object, ttl *uint64, err error) { + pod := input.(*example.Pod).DeepCopy() + pod.Spec.NodeName = "fakeNode" + return pod, nil, nil + }, nil); err != nil { + return "", nil, fmt.Errorf("failed to update object: %w", err) + } + + // We now delete bazSecond provided it has been created first. We do this to enable + // testing cases that had an object exist initially and then was deleted and how this + // would be reflected in responses of different calls. 
+ if err := store.Delete(ctx, computePodKey(bazSecond), preset[len(preset)-1].storedObj, nil, storage.ValidateAllObjectFunc, nil); err != nil { + return "", nil, fmt.Errorf("failed to delete object: %w", err) + } + + // Since we deleted bazSecond (last element of preset), we remove it from preset. + preset = preset[:len(preset)-1] + // nolint:prealloc + var created []*example.Pod + for _, item := range preset { + created = append(created, item.storedObj) + } + return initialRV, created, nil +} + +func RunTestGetListNonRecursive(ctx context.Context, t *testing.T, compaction Compaction, store storage.Interface) { + key, prevStoredObj := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}}) + prevRV, _ := strconv.Atoi(prevStoredObj.ResourceVersion) + + storedObj := &example.Pod{} + if err := store.GuaranteedUpdate(ctx, key, storedObj, false, nil, + func(_ runtime.Object, _ storage.ResponseMeta) (runtime.Object, *uint64, error) { + newPod := prevStoredObj.DeepCopy() + newPod.Annotations = map[string]string{"version": "second"} + return newPod, nil, nil + }, nil); err != nil { + t.Fatalf("update failed: %v", err) + } + objRV, _ := strconv.Atoi(storedObj.ResourceVersion) + // Use compact to increase etcd global revision without changes to any resources. + // The increase in resources version comes from Kubernetes compaction updating hidden key. + // Used to test consistent List to confirm it returns latest etcd revision. + compaction(ctx, t, prevStoredObj.ResourceVersion) + + tests := []struct { + name string + key string + pred storage.SelectionPredicate + expectedOut []example.Pod + expectedAlternatives [][]example.Pod + rv string + rvMatch metav1.ResourceVersionMatch + expectRVTooLarge bool + }{{ + name: "existing key", + key: key, + pred: storage.Everything, + expectedOut: []example.Pod{*storedObj}, + }, { + name: "existing key, resourceVersion=0", + key: key, + pred: storage.Everything, + expectedAlternatives: [][]example.Pod{{}, {*prevStoredObj}, {*storedObj}}, + rv: "0", + }, { + name: "existing key, resourceVersion=0, resourceVersionMatch=notOlderThan", + key: key, + pred: storage.Everything, + expectedAlternatives: [][]example.Pod{{}, {*prevStoredObj}, {*storedObj}}, + rv: "0", + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + }, { + name: "existing key, resourceVersion=current", + key: key, + pred: storage.Everything, + expectedOut: []example.Pod{*storedObj}, + rv: fmt.Sprintf("%d", objRV), + }, { + name: "existing key, resourceVersion=current, resourceVersionMatch=notOlderThan", + key: key, + pred: storage.Everything, + expectedOut: []example.Pod{*storedObj}, + rv: fmt.Sprintf("%d", objRV), + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + }, { + name: "existing key, resourceVersion=previous, resourceVersionMatch=notOlderThan", + key: key, + pred: storage.Everything, + expectedAlternatives: [][]example.Pod{{*prevStoredObj}, {*storedObj}}, + rv: fmt.Sprintf("%d", prevRV), + rvMatch: metav1.ResourceVersionMatchNotOlderThan, + }, { + name: "existing key, resourceVersion=current, resourceVersionMatch=exact", + key: key, + pred: storage.Everything, + expectedOut: []example.Pod{*storedObj}, + rv: fmt.Sprintf("%d", objRV), + rvMatch: metav1.ResourceVersionMatchExact, + }, { + name: "existing key, resourceVersion=previous, resourceVersionMatch=exact", + key: key, + pred: storage.Everything, + expectedOut: []example.Pod{*prevStoredObj}, + rv: fmt.Sprintf("%d", prevRV), + rvMatch: metav1.ResourceVersionMatchExact, + }, { + 
name: "existing key, resourceVersion=too high", + key: key, + pred: storage.Everything, + expectedOut: []example.Pod{*storedObj}, + rv: strconv.FormatInt(math.MaxInt64, 10), + expectRVTooLarge: true, + }, { + name: "non-existing key", + key: "/non-existing", + pred: storage.Everything, + expectedOut: []example.Pod{}, + }, { + name: "with matching pod name", + key: "/non-existing", + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.ParseSelectorOrDie("metadata.name!=" + storedObj.Name), + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { + pod := obj.(*example.Pod) + return nil, fields.Set{"metadata.name": pod.Name}, nil + }, + }, + expectedOut: []example.Pod{}, + }, { + name: "existing key, resourceVersion=current, with not matching pod name", + key: key, + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.ParseSelectorOrDie("metadata.name!=" + storedObj.Name), + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { + pod := obj.(*example.Pod) + return nil, fields.Set{"metadata.name": pod.Name}, nil + }, + }, + expectedOut: []example.Pod{}, + rv: fmt.Sprintf("%d", objRV), + }} + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + // For some asynchronous implementations of storage interface (in particular watchcache), + // certain requests may impact result of further requests. As an example, if we first + // ensure that watchcache is synchronized up to ResourceVersion X (using Get/List requests + // with NotOlderThan semantic), the further requests (even specifying earlier resource + // version) will also return the result synchronized to at least ResourceVersion X. + // By parallelizing test cases we ensure that the order in which test cases are defined + // doesn't automatically preclude some scenarios from happening. + t.Parallel() + + out := &example.PodList{} + storageOpts := storage.ListOptions{ + ResourceVersion: tt.rv, + ResourceVersionMatch: tt.rvMatch, + Predicate: tt.pred, + Recursive: false, + } + err := store.GetList(ctx, tt.key, storageOpts, out) + + if tt.expectRVTooLarge { + if err == nil || !storage.IsTooLargeResourceVersion(err) { + t.Errorf("%s: expecting resource version too high error, but get: %s", tt.name, err) + } + return + } + + if err != nil { + t.Fatalf("GetList failed: %v", err) + } + if len(out.ResourceVersion) == 0 { + t.Errorf("%s: unset resourceVersion", tt.name) + } + + if tt.expectedAlternatives == nil { + expectNoDiff(t, "incorrect list pods", tt.expectedOut, out.Items) + } else { + ExpectContains(t, "incorrect list pods", toInterfaceSlice(tt.expectedAlternatives), out.Items) + } + }) + } +} + +// RunTestGetListRecursivePrefix tests how recursive parameter works for object keys that are prefixes of each other. 
+func RunTestGetListRecursivePrefix(ctx context.Context, t *testing.T, store storage.Interface) {
+	fooKey, fooObj := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}})
+	fooBarKey, fooBarObj := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foobar", Namespace: "test-ns"}})
+	_, otherNamespaceObj := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "test-ns2"}})
+	lastRev := otherNamespaceObj.ResourceVersion
+
+	tests := []struct {
+		name        string
+		key         string
+		recursive   bool
+		expectedOut []example.Pod
+	}{
+		{
+			name:        "NonRecursive on resource prefix doesn't return any objects",
+			key:         "/pods/",
+			recursive:   false,
+			expectedOut: []example.Pod{},
+		},
+		{
+			name:        "Recursive on resource prefix returns all objects",
+			key:         "/pods/",
+			recursive:   true,
+			expectedOut: []example.Pod{*fooObj, *fooBarObj, *otherNamespaceObj},
+		},
+		{
+			name:        "NonRecursive on namespace prefix doesn't return any objects",
+			key:         "/pods/test-ns/",
+			recursive:   false,
+			expectedOut: []example.Pod{},
+		},
+		{
+			name:        "Recursive on namespace prefix returns objects in the namespace",
+			key:         "/pods/test-ns/",
+			recursive:   true,
+			expectedOut: []example.Pod{*fooObj, *fooBarObj},
+		},
+		{
+			name:        "NonRecursive on object key (prefix) returns object and no other objects with the same prefix",
+			key:         fooKey,
+			recursive:   false,
+			expectedOut: []example.Pod{*fooObj},
+		},
+		{
+			name:        "Recursive on object key (prefix) doesn't return anything",
+			key:         fooKey,
+			recursive:   true,
+			expectedOut: []example.Pod{},
+		},
+		{
+			name:        "NonRecursive on object key (no-prefix) returns object",
+			key:         fooBarKey,
+			recursive:   false,
+			expectedOut: []example.Pod{*fooBarObj},
+		},
+		{
+			name:        "Recursive on object key (no-prefix) doesn't return anything",
+			key:         fooBarKey,
+			recursive:   true,
+			expectedOut: []example.Pod{},
+		},
+	}
+
+	listTypes := []struct {
+		name            string
+		ResourceVersion string
+		Match           metav1.ResourceVersionMatch
+	}{
+		{
+			name:            "Exact",
+			ResourceVersion: lastRev,
+			Match:           metav1.ResourceVersionMatchExact,
+		},
+		{
+			name:            "Consistent",
+			ResourceVersion: "",
+		},
+		{
+			name:            "NotOlderThan",
+			ResourceVersion: "0",
+			Match:           metav1.ResourceVersionMatchNotOlderThan,
+		},
+	}
+
+	for _, listType := range listTypes {
+		listType := listType
+		t.Run(listType.name, func(t *testing.T) {
+			for _, tt := range tests {
+				tt := tt
+				t.Run(tt.name, func(t *testing.T) {
+					out := &example.PodList{}
+					storageOpts := storage.ListOptions{
+						ResourceVersion:      listType.ResourceVersion,
+						ResourceVersionMatch: listType.Match,
+						Recursive:            tt.recursive,
+						Predicate:            storage.Everything,
+					}
+					err := store.GetList(ctx, tt.key, storageOpts, out)
+					if err != nil {
+						t.Fatalf("GetList failed: %v", err)
+					}
+					expectNoDiff(t, "incorrect list pods", tt.expectedOut, out.Items)
+				})
+			}
+		})
+	}
+}
+
+type CallsValidation func(t *testing.T, pageSize, estimatedProcessedObjects uint64)
+
+func RunTestListContinuation(ctx context.Context, t *testing.T, store storage.Interface, validation CallsValidation) {
+	// Setup storage with the following structure:
+	//  /
+	//   - first/
+	//  |    - bar
+	//  |
+	//   - second/
+	//  |    - bar
+	//  |    - foo
+	barFirst := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "first", Name: "bar"}}
+	barSecond := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "second", Name: "bar"}}
+	fooSecond := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "second", Name: "foo"}}
+
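+	// Seed the store with the three pods defined above, keeping the stored copies (with server-assigned
+	// resourceVersions) so the paged results below can be compared against them.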
preset := []struct { + key string + obj *example.Pod + storedObj *example.Pod + }{ + { + key: computePodKey(barFirst), + obj: barFirst, + }, + { + key: computePodKey(barSecond), + obj: barSecond, + }, + { + key: computePodKey(fooSecond), + obj: fooSecond, + }, + } + + var currentRV string + for i, ps := range preset { + preset[i].storedObj = &example.Pod{} + err := store.Create(ctx, ps.key, ps.obj, preset[i].storedObj, 0) + if err != nil { + t.Fatalf("Set failed: %v", err) + } + currentRV = preset[i].storedObj.ResourceVersion + } + + // test continuations + out := &example.PodList{} + pred := func(limit int64, continueValue string) storage.SelectionPredicate { + return storage.SelectionPredicate{ + Limit: limit, + Continue: continueValue, + Label: labels.Everything(), + Field: fields.Everything(), + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { + pod := obj.(*example.Pod) + return nil, fields.Set{"metadata.name": pod.Name}, nil + }, + } + } + options := storage.ListOptions{ + // Limit is ignored when ResourceVersion is set to 0. + // Set it to consistent read. + ResourceVersion: "", + Predicate: pred(1, ""), + Recursive: true, + } + if err := store.GetList(ctx, "/pods", options, out); err != nil { + t.Fatalf("Unable to get initial list: %v", err) + } + if len(out.Continue) == 0 { + t.Fatalf("No continuation token set") + } + expectNoDiff(t, "incorrect first page", []example.Pod{*preset[0].storedObj}, out.Items) + if out.ResourceVersion != currentRV { + t.Errorf("Expect output.ResourceVersion = %s, but got %s", currentRV, out.ResourceVersion) + } + if validation != nil { + validation(t, 1, 1) + } + + continueFromSecondItem := out.Continue + + // no limit, should get two items + out = &example.PodList{} + options = storage.ListOptions{ + // ResourceVersion should be unset when setting continuation token. + ResourceVersion: "", + Predicate: pred(0, continueFromSecondItem), + Recursive: true, + } + if err := store.GetList(ctx, "/pods", options, out); err != nil { + t.Fatalf("Unable to get second page: %v", err) + } + if len(out.Continue) != 0 { + t.Fatalf("Unexpected continuation token set") + } + key, rv, err := storage.DecodeContinue(continueFromSecondItem, "/pods") + t.Logf("continue token was %d %s %v", rv, key, err) + expectNoDiff(t, "incorrect second page", []example.Pod{*preset[1].storedObj, *preset[2].storedObj}, out.Items) + if out.ResourceVersion != currentRV { + t.Errorf("Expect output.ResourceVersion = %s, but got %s", currentRV, out.ResourceVersion) + } + if validation != nil { + validation(t, 0, 2) + } + + // limit, should get two more pages + out = &example.PodList{} + options = storage.ListOptions{ + // ResourceVersion should be unset when setting continuation token. + ResourceVersion: "", + Predicate: pred(1, continueFromSecondItem), + Recursive: true, + } + if err := store.GetList(ctx, "/pods", options, out); err != nil { + t.Fatalf("Unable to get second page: %v", err) + } + if len(out.Continue) == 0 { + t.Fatalf("No continuation token set") + } + expectNoDiff(t, "incorrect second page", []example.Pod{*preset[1].storedObj}, out.Items) + if out.ResourceVersion != currentRV { + t.Errorf("Expect output.ResourceVersion = %s, but got %s", currentRV, out.ResourceVersion) + } + if validation != nil { + validation(t, 1, 1) + } + + continueFromThirdItem := out.Continue + + out = &example.PodList{} + options = storage.ListOptions{ + // ResourceVersion should be unset when setting continuation token. 
+ ResourceVersion: "", + Predicate: pred(1, continueFromThirdItem), + Recursive: true, + } + if err := store.GetList(ctx, "/pods", options, out); err != nil { + t.Fatalf("Unable to get second page: %v", err) + } + if len(out.Continue) != 0 { + t.Fatalf("Unexpected continuation token set") + } + expectNoDiff(t, "incorrect third page", []example.Pod{*preset[2].storedObj}, out.Items) + if out.ResourceVersion != currentRV { + t.Errorf("Expect output.ResourceVersion = %s, but got %s", currentRV, out.ResourceVersion) + } + if validation != nil { + validation(t, 1, 1) + } +} + +func RunTestListPaginationRareObject(ctx context.Context, t *testing.T, store storage.Interface, validation CallsValidation) { + podCount := 1000 + var pods []*example.Pod + for i := 0; i < podCount; i++ { + obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("pod-%d", i)}} + key := computePodKey(obj) + storedObj := &example.Pod{} + err := store.Create(ctx, key, obj, storedObj, 0) + if err != nil { + t.Fatalf("Set failed: %v", err) + } + pods = append(pods, storedObj) + } + + out := &example.PodList{} + options := storage.ListOptions{ + Predicate: storage.SelectionPredicate{ + Limit: 1, + Label: labels.Everything(), + Field: fields.OneTermEqualSelector("metadata.name", "pod-999"), + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { + pod := obj.(*example.Pod) + return nil, fields.Set{"metadata.name": pod.Name}, nil + }, + }, + Recursive: true, + } + if err := store.GetList(ctx, "/pods", options, out); err != nil { + t.Fatalf("Unable to get initial list: %v", err) + } + if len(out.Continue) != 0 { + t.Errorf("Unexpected continuation token set") + } + if len(out.Items) != 1 || !reflect.DeepEqual(&out.Items[0], pods[999]) { + t.Fatalf("Unexpected first page: %#v", out.Items) + } + if validation != nil { + validation(t, 1, uint64(podCount)) + } +} + +func RunTestListContinuationWithFilter(ctx context.Context, t *testing.T, store storage.Interface, validation CallsValidation) { + foo1 := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "1", Name: "foo"}} + bar2 := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "2", Name: "bar"}} // this should not match + foo3 := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "3", Name: "foo"}} + foo4 := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "4", Name: "foo"}} + preset := []struct { + key string + obj *example.Pod + storedObj *example.Pod + }{ + { + key: computePodKey(foo1), + obj: foo1, + }, + { + key: computePodKey(bar2), + obj: bar2, + }, + { + key: computePodKey(foo3), + obj: foo3, + }, + { + key: computePodKey(foo4), + obj: foo4, + }, + } + + var currentRV string + for i, ps := range preset { + preset[i].storedObj = &example.Pod{} + err := store.Create(ctx, ps.key, ps.obj, preset[i].storedObj, 0) + if err != nil { + t.Fatalf("Set failed: %v", err) + } + currentRV = preset[i].storedObj.ResourceVersion + } + + // the first list call should try to get 2 items from etcd (and only those items should be returned) + // the field selector should result in it reading 3 items via the transformer + // the chunking should result in 2 etcd Gets + // there should be a continueValue because there is more data + out := &example.PodList{} + pred := func(limit int64, continueValue string) storage.SelectionPredicate { + return storage.SelectionPredicate{ + Limit: limit, + Continue: continueValue, + Label: labels.Everything(), + Field: fields.OneTermNotEqualSelector("metadata.name", "bar"), + GetAttrs: func(obj runtime.Object) (labels.Set, 
fields.Set, error) { + pod := obj.(*example.Pod) + return nil, fields.Set{"metadata.name": pod.Name}, nil + }, + } + } + options := storage.ListOptions{ + // Limit is ignored when ResourceVersion is set to 0. + // Set it to consistent read. + ResourceVersion: "", + Predicate: pred(2, ""), + Recursive: true, + } + if err := store.GetList(ctx, "/pods", options, out); err != nil { + t.Errorf("Unable to get initial list: %v", err) + } + if len(out.Continue) == 0 { + t.Errorf("No continuation token set") + } + expectNoDiff(t, "incorrect first page", []example.Pod{*preset[0].storedObj, *preset[2].storedObj}, out.Items) + if out.ResourceVersion != currentRV { + t.Errorf("Expect output.ResourceVersion = %s, but got %s", currentRV, out.ResourceVersion) + } + if validation != nil { + validation(t, 2, 3) + } + + // the rest of the test does not make sense if the previous call failed + if t.Failed() { + return + } + + cont := out.Continue + + // the second list call should try to get 2 more items from etcd + // but since there is only one item left, that is all we should get with no continueValue + // both read counters should be incremented for the singular calls they make in this case + out = &example.PodList{} + options = storage.ListOptions{ + // ResourceVersion should be unset when setting continuation token. + ResourceVersion: "", + Predicate: pred(2, cont), + Recursive: true, + } + if err := store.GetList(ctx, "/pods", options, out); err != nil { + t.Errorf("Unable to get second page: %v", err) + } + if len(out.Continue) != 0 { + t.Errorf("Unexpected continuation token set") + } + expectNoDiff(t, "incorrect second page", []example.Pod{*preset[3].storedObj}, out.Items) + if out.ResourceVersion != currentRV { + t.Errorf("Expect output.ResourceVersion = %s, but got %s", currentRV, out.ResourceVersion) + } + if validation != nil { + validation(t, 2, 1) + } +} + +func RunTestListInconsistentContinuation(ctx context.Context, t *testing.T, store storage.Interface, compaction Compaction) { + if compaction == nil { + t.Skipf("compaction callback not provided") + } + + // Setup storage with the following structure: + // / + // - first/ + // | - bar + // | + // - second/ + // | - bar + // | - foo + barFirst := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "first", Name: "bar"}} + barSecond := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "second", Name: "bar"}} + fooSecond := &example.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "second", Name: "foo"}} + preset := []struct { + key string + obj *example.Pod + storedObj *example.Pod + }{ + { + key: computePodKey(barFirst), + obj: barFirst, + }, + { + key: computePodKey(barSecond), + obj: barSecond, + }, + { + key: computePodKey(fooSecond), + obj: fooSecond, + }, + } + for i, ps := range preset { + preset[i].storedObj = &example.Pod{} + err := store.Create(ctx, ps.key, ps.obj, preset[i].storedObj, 0) + if err != nil { + t.Fatalf("Set failed: %v", err) + } + } + + pred := func(limit int64, continueValue string) storage.SelectionPredicate { + return storage.SelectionPredicate{ + Limit: limit, + Continue: continueValue, + Label: labels.Everything(), + Field: fields.Everything(), + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { + pod := obj.(*example.Pod) + return nil, fields.Set{"metadata.name": pod.Name}, nil + }, + } + } + + out := &example.PodList{} + options := storage.ListOptions{ + ResourceVersion: "0", + Predicate: pred(1, ""), + Recursive: true, + } + if err := store.GetList(ctx, "/pods", options, out); err != nil { 
+ t.Fatalf("Unable to get initial list: %v", err) + } + if len(out.Continue) == 0 { + t.Fatalf("No continuation token set") + } + expectNoDiff(t, "incorrect first page", []example.Pod{*preset[0].storedObj}, out.Items) + + continueFromSecondItem := out.Continue + + // update /second/bar + oldName := preset[2].obj.Name + newPod := &example.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: oldName, + Labels: map[string]string{ + "state": "new", + }, + }, + } + if err := store.GuaranteedUpdate(ctx, preset[2].key, preset[2].storedObj, false, nil, + func(_ runtime.Object, _ storage.ResponseMeta) (runtime.Object, *uint64, error) { + return newPod, nil, nil + }, newPod); err != nil { + t.Fatalf("update failed: %v", err) + } + + // compact to latest revision. + lastRVString := preset[2].storedObj.ResourceVersion + compaction(ctx, t, lastRVString) + + // The old continue token should have expired + options = storage.ListOptions{ + ResourceVersion: "0", + Predicate: pred(0, continueFromSecondItem), + Recursive: true, + } + err := store.GetList(ctx, "/pods", options, out) + if err == nil { + t.Fatalf("unexpected no error") + } + if !strings.Contains(err.Error(), "The provided continue parameter is too old ") { + t.Fatalf("unexpected error message %v", err) + } + status, ok := err.(apierrors.APIStatus) + if !ok { + t.Fatalf("expect error of implements the APIStatus interface, got %v", reflect.TypeOf(err)) + } + inconsistentContinueFromSecondItem := status.Status().ListMeta.Continue + if len(inconsistentContinueFromSecondItem) == 0 { + t.Fatalf("expect non-empty continue token") + } + + out = &example.PodList{} + options = storage.ListOptions{ + ResourceVersion: "0", + Predicate: pred(1, inconsistentContinueFromSecondItem), + Recursive: true, + } + if err := store.GetList(ctx, "/pods", options, out); err != nil { + t.Fatalf("Unable to get second page: %v", err) + } + if len(out.Continue) == 0 { + t.Fatalf("No continuation token set") + } + validateResourceVersion := resourceVersionNotOlderThan(lastRVString) + expectNoDiff(t, "incorrect second page", []example.Pod{*preset[1].storedObj}, out.Items) + if err := validateResourceVersion(out.ResourceVersion); err != nil { + t.Fatal(err) + } + continueFromThirdItem := out.Continue + resolvedResourceVersionFromThirdItem := out.ResourceVersion + out = &example.PodList{} + options = storage.ListOptions{ + ResourceVersion: "0", + Predicate: pred(1, continueFromThirdItem), + Recursive: true, + } + if err := store.GetList(ctx, "/pods", options, out); err != nil { + t.Fatalf("Unable to get second page: %v", err) + } + if len(out.Continue) != 0 { + t.Fatalf("Unexpected continuation token set") + } + expectNoDiff(t, "incorrect third page", []example.Pod{*preset[2].storedObj}, out.Items) + if out.ResourceVersion != resolvedResourceVersionFromThirdItem { + t.Fatalf("Expected list resource version to be %s, got %s", resolvedResourceVersionFromThirdItem, out.ResourceVersion) + } +} + +func RunTestListResourceVersionMatch(ctx context.Context, t *testing.T, store InterfaceWithPrefixTransformer) { + nextPod := func(index uint32) (string, *example.Pod) { + obj := &example.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("pod-%d", index), + Labels: map[string]string{ + "even": strconv.FormatBool(index%2 == 0), + }, + }, + } + return computePodKey(obj), obj + } + + transformer := &reproducingTransformer{ + store: store, + nextObject: nextPod, + } + + revertTransformer := store.UpdatePrefixTransformer( + func(previousTransformer *PrefixTransformer) value.Transformer { + 
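+			// Keep a reference to the previous transformer so stored data still decodes normally while
+			// reproducingTransformer injects additional objects on every read.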
transformer.wrapped = previousTransformer + return transformer + }) + defer revertTransformer() + + for i := 0; i < 5; i++ { + if err := transformer.createObject(ctx); err != nil { + t.Fatalf("failed to create object: %v", err) + } + } + + getAttrs := func(obj runtime.Object) (labels.Set, fields.Set, error) { + pod, ok := obj.(*example.Pod) + if !ok { + return nil, nil, fmt.Errorf("invalid object") + } + return labels.Set(pod.Labels), nil, nil + } + predicate := storage.SelectionPredicate{ + Label: labels.Set{"even": "true"}.AsSelector(), + GetAttrs: getAttrs, + Limit: 4, + } + + result1 := example.PodList{} + options := storage.ListOptions{ + Predicate: predicate, + Recursive: true, + } + if err := store.GetList(ctx, "/pods", options, &result1); err != nil { + t.Fatalf("failed to list objects: %v", err) + } + + // List objects from the returned resource version. + options = storage.ListOptions{ + Predicate: predicate, + ResourceVersion: result1.ResourceVersion, + ResourceVersionMatch: metav1.ResourceVersionMatchExact, + Recursive: true, + } + + result2 := example.PodList{} + if err := store.GetList(ctx, "/pods", options, &result2); err != nil { + t.Fatalf("failed to list objects: %v", err) + } + + expectNoDiff(t, "incorrect lists", result1, result2) + + // Now also verify the ResourceVersionMatchNotOlderThan. + options.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan + + result3 := example.PodList{} + if err := store.GetList(ctx, "/pods", options, &result3); err != nil { + t.Fatalf("failed to list objects: %v", err) + } + + options.ResourceVersion = result3.ResourceVersion + options.ResourceVersionMatch = metav1.ResourceVersionMatchExact + + result4 := example.PodList{} + if err := store.GetList(ctx, "/pods", options, &result4); err != nil { + t.Fatalf("failed to list objects: %v", err) + } + + expectNoDiff(t, "incorrect lists", result3, result4) +} + +func RunTestGuaranteedUpdate(ctx context.Context, t *testing.T, store InterfaceWithPrefixTransformer, validation KeyValidation) { + inputObj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns", UID: "A"}} + key := computePodKey(inputObj) + + tests := []struct { + name string + key string + ignoreNotFound bool + precondition *storage.Preconditions + expectNotFoundErr bool + expectInvalidObjErr bool + expectNoUpdate bool + transformStale bool + hasSelfLink bool + }{{ + name: "non-existing key, ignoreNotFound=false", + key: "/non-existing", + ignoreNotFound: false, + precondition: nil, + expectNotFoundErr: true, + expectInvalidObjErr: false, + expectNoUpdate: false, + }, { + name: "non-existing key, ignoreNotFound=true", + key: "/non-existing", + ignoreNotFound: true, + precondition: nil, + expectNotFoundErr: false, + expectInvalidObjErr: false, + expectNoUpdate: false, + }, { + name: "existing key", + key: key, + ignoreNotFound: false, + precondition: nil, + expectNotFoundErr: false, + expectInvalidObjErr: false, + expectNoUpdate: false, + }, { + name: "same data", + key: key, + ignoreNotFound: false, + precondition: nil, + expectNotFoundErr: false, + expectInvalidObjErr: false, + expectNoUpdate: true, + }, { + name: "same data, a selfLink", + key: key, + ignoreNotFound: false, + precondition: nil, + expectNotFoundErr: false, + expectInvalidObjErr: false, + expectNoUpdate: true, + hasSelfLink: true, + }, { + name: "same data, stale", + key: key, + ignoreNotFound: false, + precondition: nil, + expectNotFoundErr: false, + expectInvalidObjErr: false, + expectNoUpdate: false, + transformStale: true, + }, 
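+	// The UID precondition cases below rely on inputObj having been created with UID "A".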
{ + name: "UID match", + key: key, + ignoreNotFound: false, + precondition: storage.NewUIDPreconditions("A"), + expectNotFoundErr: false, + expectInvalidObjErr: false, + expectNoUpdate: true, + }, { + name: "UID mismatch", + key: key, + ignoreNotFound: false, + precondition: storage.NewUIDPreconditions("B"), + expectNotFoundErr: false, + expectInvalidObjErr: true, + expectNoUpdate: true, + }} + + for i, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + key, storeObj := testPropagateStore(ctx, t, store, inputObj) + + out := &example.Pod{} + annotations := map[string]string{"version": fmt.Sprintf("%d", i)} + if tt.expectNoUpdate { + annotations = nil + } + + if tt.transformStale { + revertTransformer := store.UpdatePrefixTransformer( + func(transformer *PrefixTransformer) value.Transformer { + transformer.stale = true + return transformer + }) + defer revertTransformer() + } + + version := storeObj.ResourceVersion + err := store.GuaranteedUpdate(ctx, tt.key, out, tt.ignoreNotFound, tt.precondition, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + if tt.expectNotFoundErr && tt.ignoreNotFound { + if pod := obj.(*example.Pod); pod.Name != "" { + t.Errorf("%s: expecting zero value, but get=%#v", tt.name, pod) + } + } + pod := *storeObj + if tt.hasSelfLink { + // nolint:staticcheck + pod.SelfLink = "testlink" + } + pod.Annotations = annotations + return &pod, nil + }), nil) + + if tt.expectNotFoundErr { + if err == nil || !storage.IsNotFound(err) { + t.Errorf("%s: expecting not found error, but get: %v", tt.name, err) + } + return + } + if tt.expectInvalidObjErr { + if err == nil || !storage.IsInvalidObj(err) { + t.Errorf("%s: expecting invalid UID error, but get: %s", tt.name, err) + } + return + } + if err != nil { + t.Fatalf("%s: GuaranteedUpdate failed: %v", tt.name, err) + } + if !reflect.DeepEqual(out.ObjectMeta.Annotations, annotations) { + t.Errorf("%s: pod annotations want=%s, get=%s", tt.name, annotations, out.ObjectMeta.Annotations) + } + // nolint:staticcheck + if out.SelfLink != "" { + t.Errorf("%s: selfLink should not be set", tt.name) + } + + // verify that kv pair is not empty after set and that the underlying data matches expectations + validation(ctx, t, key) + + switch tt.expectNoUpdate { + case true: + if version != out.ResourceVersion { + t.Errorf("%s: expect no version change, before=%s, after=%s", tt.name, version, out.ResourceVersion) + } + case false: + if version == out.ResourceVersion { + t.Errorf("%s: expect version change, but get the same version=%s", tt.name, version) + } + } + }) + } +} + +func RunTestGuaranteedUpdateWithTTL(ctx context.Context, t *testing.T, store storage.Interface) { + input := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}} + key := computePodKey(input) + + out := &example.Pod{} + err := store.GuaranteedUpdate(ctx, key, out, true, nil, + func(_ runtime.Object, _ storage.ResponseMeta) (runtime.Object, *uint64, error) { + ttl := uint64(1) + return input, &ttl, nil + }, nil) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + + w, err := store.Watch(ctx, key, storage.ListOptions{ResourceVersion: out.ResourceVersion, Predicate: storage.Everything}) + if err != nil { + t.Fatalf("Watch failed: %v", err) + } + testCheckEventType(t, w, watch.Deleted) +} + +func RunTestGuaranteedUpdateChecksStoredData(ctx context.Context, t *testing.T, store InterfaceWithPrefixTransformer) { + input := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}} + key := 
computePodKey(input) + + // serialize input into etcd with data that would be normalized by a write - + // in this case, leading whitespace + revertTransformer := store.UpdatePrefixTransformer( + func(transformer *PrefixTransformer) value.Transformer { + transformer.prefix = []byte(string(transformer.prefix) + " ") + return transformer + }) + _, initial := testPropagateStore(ctx, t, store, input) + revertTransformer() + + // this update should write the canonical value to etcd because the new serialization differs + // from the stored serialization + input.ResourceVersion = initial.ResourceVersion + out := &example.Pod{} + err := store.GuaranteedUpdate(ctx, key, out, true, nil, + func(_ runtime.Object, _ storage.ResponseMeta) (runtime.Object, *uint64, error) { + return input, nil, nil + }, input) + if err != nil { + t.Fatalf("Update failed: %v", err) + } + if out.ResourceVersion == initial.ResourceVersion { + t.Errorf("guaranteed update should have updated the serialized data, got %#v", out) + } + + lastVersion := out.ResourceVersion + + // this update should not write to etcd because the input matches the stored data + input = out + out = &example.Pod{} + err = store.GuaranteedUpdate(ctx, key, out, true, nil, + func(_ runtime.Object, _ storage.ResponseMeta) (runtime.Object, *uint64, error) { + return input, nil, nil + }, input) + if err != nil { + t.Fatalf("Update failed: %v", err) + } + if out.ResourceVersion != lastVersion { + t.Errorf("guaranteed update should have short-circuited write, got %#v", out) + } + + revertTransformer = store.UpdatePrefixTransformer( + func(transformer *PrefixTransformer) value.Transformer { + transformer.stale = true + return transformer + }) + defer revertTransformer() + + // this update should write to etcd because the transformer reported stale + err = store.GuaranteedUpdate(ctx, key, out, true, nil, + func(_ runtime.Object, _ storage.ResponseMeta) (runtime.Object, *uint64, error) { + return input, nil, nil + }, input) + if err != nil { + t.Fatalf("Update failed: %v", err) + } + if out.ResourceVersion == lastVersion { + t.Errorf("guaranteed update should have written to etcd when transformer reported stale, got %#v", out) + } +} + +func RunTestGuaranteedUpdateWithConflict(ctx context.Context, t *testing.T, store storage.Interface) { + key, _ := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}}) + + errChan := make(chan error, 1) + var firstToFinish sync.WaitGroup + var secondToEnter sync.WaitGroup + firstToFinish.Add(1) + secondToEnter.Add(1) + + go func() { + err := store.GuaranteedUpdate(ctx, key, &example.Pod{}, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + pod := obj.(*example.Pod) + pod.Name = "foo-1" + secondToEnter.Wait() + return pod, nil + }), nil) + firstToFinish.Done() + errChan <- err + }() + + updateCount := 0 + err := store.GuaranteedUpdate(ctx, key, &example.Pod{}, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + if updateCount == 0 { + secondToEnter.Done() + firstToFinish.Wait() + } + updateCount++ + pod := obj.(*example.Pod) + pod.Name = "foo-2" + return pod, nil + }), nil) + if err != nil { + t.Fatalf("Second GuaranteedUpdate error %#v", err) + } + if err := <-errChan; err != nil { + t.Fatalf("First GuaranteedUpdate error %#v", err) + } + + if updateCount != 2 { + t.Errorf("Should have conflict and called update func twice") + } +} + +func RunTestGuaranteedUpdateWithSuggestionAndConflict(ctx 
context.Context, t *testing.T, store storage.Interface) { + key, originalPod := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}}) + + // First, update without a suggestion so originalPod is outdated + updatedPod := &example.Pod{} + err := store.GuaranteedUpdate(ctx, key, updatedPod, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + pod := obj.(*example.Pod) + pod.Generation = 2 + return pod, nil + }), + nil, + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // Second, update using the outdated originalPod as the suggestion. Return a conflict error when + // passed originalPod, and make sure that SimpleUpdate is called a second time after a live lookup + // with the value of updatedPod. + sawConflict := false + updatedPod2 := &example.Pod{} + err = store.GuaranteedUpdate(ctx, key, updatedPod2, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + pod := obj.(*example.Pod) + if pod.Generation != 2 { + if sawConflict { + t.Fatalf("unexpected second conflict") + } + sawConflict = true + // simulated stale object - return a conflict + return nil, apierrors.NewConflict(example.SchemeGroupVersion.WithResource("pods").GroupResource(), "name", errors.New("foo")) + } + pod.Generation = 3 + return pod, nil + }), + originalPod, + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if updatedPod2.Generation != 3 { + t.Errorf("unexpected pod generation: %q", updatedPod2.Generation) + } + + // Third, update using a current version as the suggestion. + // Return an error and make sure that SimpleUpdate is NOT called a second time, + // since the live lookup shows the suggestion was already up to date. + attempts := 0 + updatedPod3 := &example.Pod{} + err = store.GuaranteedUpdate(ctx, key, updatedPod3, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + pod := obj.(*example.Pod) + if pod.Generation != updatedPod2.Generation || pod.ResourceVersion != updatedPod2.ResourceVersion { + t.Logf("stale object (rv=%s), expected rv=%s", pod.ResourceVersion, updatedPod2.ResourceVersion) + } + attempts++ + return nil, fmt.Errorf("validation or admission error") + }), + updatedPod2, + ) + if err == nil { + t.Fatalf("expected error, got none") + } + // Implementations of the storage interface are allowed to ignore the suggestion, + // in which case two attempts are possible. 
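+	// The assertion below therefore only checks an upper bound on the number of update attempts.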
+ if attempts > 2 { + t.Errorf("update function should have been called at most twice, called %d", attempts) + } +} + +func RunTestTransformationFailure(ctx context.Context, t *testing.T, store InterfaceWithPrefixTransformer) { + barFirst := &example.Pod{ + ObjectMeta: metav1.ObjectMeta{Namespace: "first", Name: "bar"}, + Spec: DeepEqualSafePodSpec(), + } + bazSecond := &example.Pod{ + ObjectMeta: metav1.ObjectMeta{Namespace: "second", Name: "baz"}, + Spec: DeepEqualSafePodSpec(), + } + + preset := []struct { + key string + obj *example.Pod + storedObj *example.Pod + }{{ + key: computePodKey(barFirst), + obj: barFirst, + }, { + key: computePodKey(bazSecond), + obj: bazSecond, + }} + for i, ps := range preset[:1] { + preset[i].storedObj = &example.Pod{} + err := store.Create(ctx, ps.key, ps.obj, preset[:1][i].storedObj, 0) + if err != nil { + t.Fatalf("Set failed: %v", err) + } + } + + // create a second resource with an invalid prefix + revertTransformer := store.UpdatePrefixTransformer( + func(transformer *PrefixTransformer) value.Transformer { + return NewPrefixTransformer([]byte("otherprefix!"), false) + }) + for i, ps := range preset[1:] { + preset[1:][i].storedObj = &example.Pod{} + err := store.Create(ctx, ps.key, ps.obj, preset[1:][i].storedObj, 0) + if err != nil { + t.Fatalf("Set failed: %v", err) + } + } + revertTransformer() + + // List should fail + var got example.PodList + storageOpts := storage.ListOptions{ + Predicate: storage.Everything, + Recursive: true, + } + if err := store.GetList(ctx, "/pods", storageOpts, &got); !storage.IsInternalError(err) { + t.Errorf("Unexpected error %v", err) + } + + // Get should fail + if err := store.Get(ctx, preset[1].key, storage.GetOptions{}, &example.Pod{}); !storage.IsInternalError(err) { + t.Errorf("Unexpected error: %v", err) + } + + updateFunc := func(input runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) { + return input, nil, nil + } + // GuaranteedUpdate without suggestion should return an error + if err := store.GuaranteedUpdate(ctx, preset[1].key, &example.Pod{}, false, nil, updateFunc, nil); !storage.IsInternalError(err) { + t.Errorf("Unexpected error: %v", err) + } + // GuaranteedUpdate with suggestion should return an error if we don't change the object + if err := store.GuaranteedUpdate(ctx, preset[1].key, &example.Pod{}, false, nil, updateFunc, preset[1].obj); err == nil { + t.Errorf("Unexpected error: %v", err) + } + + // Delete fails with internal error. + if err := store.Delete(ctx, preset[1].key, &example.Pod{}, nil, storage.ValidateAllObjectFunc, nil); !storage.IsInternalError(err) { + t.Errorf("Unexpected error: %v", err) + } + if err := store.Get(ctx, preset[1].key, storage.GetOptions{}, &example.Pod{}); !storage.IsInternalError(err) { + t.Errorf("Unexpected error: %v", err) + } +} + +func RunTestCount(ctx context.Context, t *testing.T, store storage.Interface) { + resourceA := "/foo.bar.io/abc" + + // resourceA is intentionally a prefix of resourceB to ensure that the count + // for resourceA does not include any objects from resourceB. 
+ resourceB := fmt.Sprintf("%sdef", resourceA) + + resourceACountExpected := 5 + for i := 1; i <= resourceACountExpected; i++ { + obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("foo-%d", i)}} + + key := fmt.Sprintf("%s/%d", resourceA, i) + if err := store.Create(ctx, key, obj, nil, 0); err != nil { + t.Fatalf("Create failed: %v", err) + } + } + + resourceBCount := 4 + for i := 1; i <= resourceBCount; i++ { + obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("foo-%d", i)}} + + key := fmt.Sprintf("%s/%d", resourceB, i) + if err := store.Create(ctx, key, obj, nil, 0); err != nil { + t.Fatalf("Create failed: %v", err) + } + } + + resourceACountGot, err := store.Count(resourceA) + if err != nil { + t.Fatalf("store.Count failed: %v", err) + } + + // count for resourceA should not include the objects for resourceB + // even though resourceA is a prefix of resourceB. + if int64(resourceACountExpected) != resourceACountGot { + t.Fatalf("store.Count for resource %s: expected %d but got %d", resourceA, resourceACountExpected, resourceACountGot) + } +} diff --git a/pkg/apiserver/storage/testing/utils.go b/pkg/apiserver/storage/testing/utils.go new file mode 100644 index 00000000000..8dd8c2fdf1f --- /dev/null +++ b/pkg/apiserver/storage/testing/utils.go @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// Provenance-includes-location: https://github.com/kubernetes/apiserver/blob/master/pkg/storage/testing/utils.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: The Kubernetes Authors. + +package testing + +import ( + "bytes" + "context" + "fmt" + "path" + "reflect" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/api/meta" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/apis/example" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/value" + + grafanaregistry "github.com/grafana/grafana/pkg/apiserver/registry/generic" +) + +// KeyFunc is a function that generates keys for tests. +// All tests use the "pods" resource, so the resource is hardcoded to "pods". +var KeyFunc = func(namespace, name string) string { + k := grafanaregistry.Key{ + Resource: "pods", + Namespace: namespace, + Name: name, + } + return k.String() +} + +// CreateObjList will create a list from the array of objects. +func CreateObjList(prefix string, helper storage.Interface, items []runtime.Object) error { + for i := range items { + obj := items[i] + meta, err := meta.Accessor(obj) + if err != nil { + return err + } + err = helper.Create(context.Background(), path.Join(prefix, meta.GetName()), obj, obj, 0) + if err != nil { + return err + } + items[i] = obj + } + return nil +} + +// CreateList will properly create a list using the storage interface. +func CreateList(prefix string, helper storage.Interface, list runtime.Object) error { + items, err := meta.ExtractList(list) + if err != nil { + return err + } + err = CreateObjList(prefix, helper, items) + if err != nil { + return err + } + return meta.SetList(list, items) +} + +// DeepEqualSafePodSpec returns an example.PodSpec safe for deep-equal operations. 
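+// Setting these fields explicitly avoids spurious diffs from defaulted values when comparing stored and expected objects.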
+func DeepEqualSafePodSpec() example.PodSpec {
+	grace := int64(30)
+	return example.PodSpec{
+		RestartPolicy:                 "Always",
+		TerminationGracePeriodSeconds: &grace,
+		SchedulerName:                 "default-scheduler",
+	}
+}
+
+func computePodKey(obj *example.Pod) string {
+	return KeyFunc(obj.Namespace, obj.Name)
+}
+
+// testPropagateStore helps populate the store with an object, automates key generation, and returns
+// the key and the stored object.
+func testPropagateStore(ctx context.Context, t *testing.T, store storage.Interface, obj *example.Pod) (string, *example.Pod) {
+	// Compute the key under which the object will be stored.
+	key := computePodKey(obj)
+
+	// Clean up any previous object at that key, then create the object and grab the output for returning.
+	err := store.Delete(ctx, key, &example.Pod{}, nil, storage.ValidateAllObjectFunc, nil)
+	if err != nil && !storage.IsNotFound(err) {
+		t.Fatalf("Cleanup failed: %v", err)
+	}
+	setOutput := &example.Pod{}
+	if err := store.Create(ctx, key, obj, setOutput, 0); err != nil {
+		t.Fatalf("Set failed: %v", err)
+	}
+	return key, setOutput
+}
+
+func expectNoDiff(t *testing.T, msg string, expected, actual interface{}) {
+	t.Helper()
+	if !reflect.DeepEqual(expected, actual) {
+		if diff := cmp.Diff(expected, actual); diff != "" {
+			t.Errorf("%s: %s", msg, diff)
+		} else {
+			t.Errorf("%s:\nexpected: %#v\ngot: %#v", msg, expected, actual)
+		}
+	}
+}
+
+func ExpectContains(t *testing.T, msg string, expectedList []interface{}, got interface{}) {
+	t.Helper()
+	for _, expected := range expectedList {
+		if reflect.DeepEqual(expected, got) {
+			return
+		}
+	}
+	if len(expectedList) == 0 {
+		t.Errorf("%s: empty expectedList", msg)
+		return
+	}
+	if diff := cmp.Diff(expectedList[0], got); diff != "" {
+		t.Errorf("%s: differs from all items, with first: %s", msg, diff)
+	} else {
+		t.Errorf("%s: differs from all items, first: %#v\ngot: %#v", msg, expectedList[0], got)
+	}
+}
+
+const dummyPrefix = "adapter"
+
+func encodeContinueOrDie(key string, resourceVersion int64) string {
+	token, err := storage.EncodeContinue(dummyPrefix+key, dummyPrefix, resourceVersion)
+	if err != nil {
+		panic(err)
+	}
+	return token
+}
+
+func testCheckEventType(t *testing.T, w watch.Interface, expectEventType watch.EventType) {
+	select {
+	case res := <-w.ResultChan():
+		if res.Type != expectEventType {
+			t.Errorf("event type want=%v, get=%v", expectEventType, res.Type)
+		}
+	case <-time.After(wait.ForeverTestTimeout):
+		t.Errorf("time out after waiting %v on ResultChan", wait.ForeverTestTimeout)
+	}
+}
+
+func testCheckResult(t *testing.T, w watch.Interface, expectEvent watch.Event) {
+	testCheckResultFunc(t, w, func(actualEvent watch.Event) {
+		expectNoDiff(t, "incorrect event", expectEvent, actualEvent)
+	})
+}
+
+func testCheckResultFunc(t *testing.T, w watch.Interface, check func(actualEvent watch.Event)) {
+	select {
+	case res := <-w.ResultChan():
+		obj := res.Object
+		if co, ok := obj.(runtime.CacheableObject); ok {
+			res.Object = co.GetObject()
+		}
+		check(res)
+	case <-time.After(wait.ForeverTestTimeout):
+		t.Errorf("time out after waiting %v on ResultChan", wait.ForeverTestTimeout)
+	}
+}
+
+func testCheckStop(t *testing.T, w watch.Interface) {
+	select {
+	case e, ok := <-w.ResultChan():
+		if ok {
+			var obj string
+			switch e.Object.(type) {
+			case *example.Pod:
+				obj = e.Object.(*example.Pod).Name
+			case *v1.Status:
+				obj = e.Object.(*v1.Status).Message
+			}
+			t.Errorf("ResultChan should have been closed. Event: %s. 
Object: %s", e.Type, obj)
+		}
+	case <-time.After(wait.ForeverTestTimeout):
+		t.Errorf("time out after waiting %v on ResultChan", wait.ForeverTestTimeout)
+	}
+}
+
+func testCheckResultsInStrictOrder(t *testing.T, w watch.Interface, expectedEvents []watch.Event) {
+	for _, expectedEvent := range expectedEvents {
+		testCheckResult(t, w, expectedEvent)
+	}
+}
+
+func testCheckResultsInRandomOrder(t *testing.T, w watch.Interface, expectedEvents []watch.Event) {
+	for range expectedEvents {
+		testCheckResultFunc(t, w, func(actualEvent watch.Event) {
+			ExpectContains(t, "unexpected event", toInterfaceSlice(expectedEvents), actualEvent)
+		})
+	}
+}
+
+func testCheckNoMoreResults(t *testing.T, w watch.Interface) {
+	select {
+	case e := <-w.ResultChan():
+		t.Errorf("Unexpected: %#v event received, expected no events", e)
+	case <-time.After(time.Second):
+		return
+	}
+}
+
+func toInterfaceSlice[T any](s []T) []interface{} {
+	result := make([]interface{}, len(s))
+	for i, v := range s {
+		result[i] = v
+	}
+	return result
+}
+
+// resourceVersionNotOlderThan returns a function to validate resource versions. Resource versions
+// referring to points in logical time before the sentinel generate an error. All logical times as
+// new as the sentinel or newer generate no error.
+func resourceVersionNotOlderThan(sentinel string) func(string) error {
+	return func(resourceVersion string) error {
+		objectVersioner := storage.APIObjectVersioner{}
+		actualRV, err := objectVersioner.ParseResourceVersion(resourceVersion)
+		if err != nil {
+			return err
+		}
+		expectedRV, err := objectVersioner.ParseResourceVersion(sentinel)
+		if err != nil {
+			return err
+		}
+		if actualRV < expectedRV {
+			return fmt.Errorf("expected a resourceVersion no smaller than %d, but got %d", expectedRV, actualRV)
+		}
+		return nil
+	}
+}
+
+// StorageInjectingListErrors injects a dummy error for the first N GetList calls.
+type StorageInjectingListErrors struct {
+	storage.Interface
+
+	lock   sync.Mutex
+	Errors int
+}
+
+func (s *StorageInjectingListErrors) GetList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
+	err := func() error {
+		s.lock.Lock()
+		defer s.lock.Unlock()
+		if s.Errors > 0 {
+			s.Errors--
+			return fmt.Errorf("injected error")
+		}
+		return nil
+	}()
+	if err != nil {
+		return err
+	}
+	return s.Interface.GetList(ctx, key, opts, listObj)
+}
+
+func (s *StorageInjectingListErrors) ErrorsConsumed() (bool, error) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	return s.Errors == 0, nil
+}
+
+type Compaction func(ctx context.Context, t *testing.T, resourceVersion string)
+
+type PrefixTransformerModifier func(*PrefixTransformer) value.Transformer
+
+type InterfaceWithPrefixTransformer interface {
+	storage.Interface
+
+	UpdatePrefixTransformer(PrefixTransformerModifier) func()
+}
+
+// PrefixTransformer adds and verifies that all data has the correct prefix on its way in and out.
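+// It also counts how many times data is read back (see GetReadsAndReset), so tests can assert how many stored objects were decoded.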
+type PrefixTransformer struct { + prefix []byte + stale bool + err error + reads uint64 +} + +func NewPrefixTransformer(prefix []byte, stale bool) *PrefixTransformer { + return &PrefixTransformer{ + prefix: prefix, + stale: stale, + } +} + +func (p *PrefixTransformer) TransformFromStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, bool, error) { + atomic.AddUint64(&p.reads, 1) + if dataCtx == nil { + panic("no context provided") + } + if !bytes.HasPrefix(data, p.prefix) { + return nil, false, fmt.Errorf("value does not have expected prefix %q: %s,", p.prefix, string(data)) + } + return bytes.TrimPrefix(data, p.prefix), p.stale, p.err +} +func (p *PrefixTransformer) TransformToStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, error) { + if dataCtx == nil { + panic("no context provided") + } + if len(data) > 0 { + return append(append([]byte{}, p.prefix...), data...), p.err + } + return data, p.err +} + +func (p *PrefixTransformer) GetReadsAndReset() uint64 { + return atomic.SwapUint64(&p.reads, 0) +} + +// reproducingTransformer is a custom test-only transformer used purely +// for testing consistency. +// It allows for creating predefined objects on TransformFromStorage operations, +// which allows for precise in time injection of new objects in the middle of +// read operations. +type reproducingTransformer struct { + wrapped value.Transformer + store storage.Interface + + index uint32 + nextObject func(uint32) (string, *example.Pod) +} + +func (rt *reproducingTransformer) TransformFromStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, bool, error) { + if err := rt.createObject(ctx); err != nil { + return nil, false, err + } + return rt.wrapped.TransformFromStorage(ctx, data, dataCtx) +} + +func (rt *reproducingTransformer) TransformToStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, error) { + return rt.wrapped.TransformToStorage(ctx, data, dataCtx) +} + +func (rt *reproducingTransformer) createObject(ctx context.Context) error { + key, obj := rt.nextObject(atomic.AddUint32(&rt.index, 1)) + out := &example.Pod{} + return rt.store.Create(ctx, key, obj, out, 0) +} + +// failingTransformer is a custom test-only transformer that always returns +// an error on transforming data from storage. +type failingTransformer struct { +} + +func (ft *failingTransformer) TransformFromStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, bool, error) { + return nil, false, fmt.Errorf("failed transformation") +} + +func (ft *failingTransformer) TransformToStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, error) { + return data, nil +} + +type sortablePodList []example.Pod + +func (s sortablePodList) Len() int { + return len(s) +} + +func (s sortablePodList) Less(i, j int) bool { + return computePodKey(&s[i]) < computePodKey(&s[j]) +} + +func (s sortablePodList) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/pkg/apiserver/storage/testing/watcher_tests.go b/pkg/apiserver/storage/testing/watcher_tests.go new file mode 100644 index 00000000000..97d8ca6d1e1 --- /dev/null +++ b/pkg/apiserver/storage/testing/watcher_tests.go @@ -0,0 +1,1620 @@ +// SPDX-License-Identifier: AGPL-3.0-only +// Provenance-includes-location: https://github.com/kubernetes/apiserver/blob/master/pkg/storage/testing/watcher_tests.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: The Kubernetes Authors. 
+ +package testing + +import ( + "context" + "fmt" + "net/http" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/apis/example" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/storage" + "k8s.io/apiserver/pkg/storage/value" + utilfeature "k8s.io/apiserver/pkg/util/feature" + utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" + featuregatetesting "k8s.io/component-base/featuregate/testing" + "k8s.io/utils/ptr" +) + +func RunTestWatch(ctx context.Context, t *testing.T, store storage.Interface) { + testWatch(ctx, t, store, false) + testWatch(ctx, t, store, true) +} + +// It tests that +// - first occurrence of objects should notify Add event +// - update should trigger Modified event +// - update that gets filtered should trigger Deleted event +func testWatch(ctx context.Context, t *testing.T, store storage.Interface, recursive bool) { + basePod := &example.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + Spec: example.PodSpec{NodeName: ""}, + } + basePodAssigned := &example.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + Spec: example.PodSpec{NodeName: "bar"}, + } + + selectedPod := func(pod *example.Pod) *example.Pod { + result := pod.DeepCopy() + result.Labels = map[string]string{"select": "true"} + return result + } + + tests := []struct { + name string + namespace string + key string + pred storage.SelectionPredicate + watchTests []*testWatchStruct + }{{ + name: "create a key", + namespace: fmt.Sprintf("test-ns-1-%t", recursive), + watchTests: []*testWatchStruct{{basePod, true, watch.Added}}, + pred: storage.Everything, + }, { + name: "key updated to match predicate", + namespace: fmt.Sprintf("test-ns-2-%t", recursive), + watchTests: []*testWatchStruct{{basePod, false, ""}, {basePodAssigned, true, watch.Added}}, + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.ParseSelectorOrDie("spec.nodeName=bar"), + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { + pod := obj.(*example.Pod) + return nil, fields.Set{"spec.nodeName": pod.Spec.NodeName}, nil + }, + }, + }, { + name: "update", + namespace: fmt.Sprintf("test-ns-3-%t", recursive), + watchTests: []*testWatchStruct{{basePod, true, watch.Added}, {basePodAssigned, true, watch.Modified}}, + pred: storage.Everything, + }, { + name: "delete because of being filtered", + namespace: fmt.Sprintf("test-ns-4-%t", recursive), + watchTests: []*testWatchStruct{{basePod, true, watch.Added}, {basePodAssigned, true, watch.Deleted}}, + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.ParseSelectorOrDie("spec.nodeName!=bar"), + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { + pod := obj.(*example.Pod) + return nil, fields.Set{"spec.nodeName": pod.Spec.NodeName}, nil + }, + }, + }, { + name: "filtering", + namespace: fmt.Sprintf("test-ns-5-%t", recursive), + watchTests: []*testWatchStruct{ + {selectedPod(basePod), true, watch.Added}, + {basePod, true, watch.Deleted}, + {selectedPod(basePod), true, watch.Added}, + {selectedPod(basePodAssigned), true, watch.Modified}, + {nil, true, watch.Deleted}, + }, + pred: 
storage.SelectionPredicate{ + Label: labels.SelectorFromSet(labels.Set{"select": "true"}), + Field: fields.Everything(), + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { + pod := obj.(*example.Pod) + return labels.Set(pod.Labels), nil, nil + }, + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + watchKey := KeyFunc(tt.namespace, "") + key := KeyFunc(tt.namespace, "foo") + if !recursive { + watchKey = key + } + + // Get the current RV from which we can start watching. + out := &example.PodList{} + if err := store.GetList(ctx, watchKey, storage.ListOptions{ResourceVersion: "", Predicate: tt.pred, Recursive: recursive}, out); err != nil { + t.Fatalf("List failed: %v", err) + } + + w, err := store.Watch(ctx, watchKey, storage.ListOptions{ResourceVersion: out.ResourceVersion, Predicate: tt.pred, Recursive: recursive}) + if err != nil { + t.Fatalf("Watch failed: %v", err) + } + + // Create a pod in a different namespace first to ensure + // that its corresponding event will not be propagated. + badKey := KeyFunc(fmt.Sprintf("%s-bad", tt.namespace), "foo") + badOut := &example.Pod{} + err = store.GuaranteedUpdate(ctx, badKey, badOut, true, nil, storage.SimpleUpdate( + func(runtime.Object) (runtime.Object, error) { + obj := basePod.DeepCopy() + obj.Namespace = fmt.Sprintf("%s-bad", tt.namespace) + return obj, nil + }), nil) + if err != nil { + t.Fatalf("GuaranteedUpdate of bad pod failed: %v", err) + } + + var prevObj *example.Pod + for _, watchTest := range tt.watchTests { + out := &example.Pod{} + if watchTest.obj != nil { + err := store.GuaranteedUpdate(ctx, key, out, true, nil, storage.SimpleUpdate( + func(runtime.Object) (runtime.Object, error) { + obj := watchTest.obj.DeepCopy() + obj.Namespace = tt.namespace + return obj, nil + }), nil) + if err != nil { + t.Fatalf("GuaranteedUpdate failed: %v", err) + } + } else { + err := store.Delete(ctx, key, out, nil, storage.ValidateAllObjectFunc, nil) + if err != nil { + t.Fatalf("Delete failed: %v", err) + } + } + if watchTest.expectEvent { + expectObj := out + if watchTest.watchType == watch.Deleted { + expectObj = prevObj + expectObj.ResourceVersion = out.ResourceVersion + } + testCheckResult(t, w, watch.Event{Type: watchTest.watchType, Object: expectObj}) + } + prevObj = out + } + w.Stop() + testCheckStop(t, w) + }) + } +} + +// RunTestWatchFromZero tests that +// - watch from 0 should sync up and grab the object added before +// - For testing with etcd, watch from 0 is able to return events for objects +// whose previous version has been compacted. If testing with cacher, we +// expect compaction to be nil. 
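+//   - watch from a resourceVersion that has already been compacted should fail; the resulting event is expected to be
+//     either an "Expired" error or (currently, with etcd) an Internal error, as noted in the TODO inside the function.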
+func RunTestWatchFromZero(ctx context.Context, t *testing.T, store storage.Interface, compaction Compaction) {
+	key, storedObj := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}})
+
+	w, err := store.Watch(ctx, key, storage.ListOptions{ResourceVersion: "0", Predicate: storage.Everything})
+	if err != nil {
+		t.Fatalf("Watch failed: %v", err)
+	}
+	testCheckResult(t, w, watch.Event{Type: watch.Added, Object: storedObj})
+
+	// Update
+	out := &example.Pod{}
+	err = store.GuaranteedUpdate(ctx, key, out, true, nil, storage.SimpleUpdate(
+		func(runtime.Object) (runtime.Object, error) {
+			return &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns", Annotations: map[string]string{"a": "1"}}}, nil
+		}), nil)
+	if err != nil {
+		t.Fatalf("GuaranteedUpdate failed: %v", err)
+	}
+
+	// Check that we receive a modified watch event. This check also
+	// indirectly ensures that the cache is synced, which matters when
+	// testing with the Cacher: waiting for this event gives the update
+	// time to propagate to the watch cache.
+	testCheckResult(t, w, watch.Event{Type: watch.Modified, Object: out})
+	w.Stop()
+
+	// Make sure when we watch from 0 we receive an ADDED event
+	w, err = store.Watch(ctx, key, storage.ListOptions{ResourceVersion: "0", Predicate: storage.Everything})
+	if err != nil {
+		t.Fatalf("Watch failed: %v", err)
+	}
+
+	testCheckResult(t, w, watch.Event{Type: watch.Added, Object: out})
+	w.Stop()
+
+	// Compact previous versions
+	if compaction == nil {
+		t.Skip("compaction callback not provided")
+	}
+
+	// Update again
+	newOut := &example.Pod{}
+	err = store.GuaranteedUpdate(ctx, key, newOut, true, nil, storage.SimpleUpdate(
+		func(runtime.Object) (runtime.Object, error) {
+			return &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}}, nil
+		}), nil)
+	if err != nil {
+		t.Fatalf("GuaranteedUpdate failed: %v", err)
+	}
+
+	// Compact previous versions
+	compaction(ctx, t, newOut.ResourceVersion)
+
+	// Make sure we can still watch from 0 and receive an ADDED event
+	w, err = store.Watch(ctx, key, storage.ListOptions{ResourceVersion: "0", Predicate: storage.Everything})
+	defer w.Stop()
+	if err != nil {
+		t.Fatalf("Watch failed: %v", err)
+	}
+	testCheckResult(t, w, watch.Event{Type: watch.Added, Object: newOut})
+
+	// Make sure we can't watch from older resource versions anymore and get a "Gone" error.
+	tooOldWatcher, err := store.Watch(ctx, key, storage.ListOptions{ResourceVersion: out.ResourceVersion, Predicate: storage.Everything})
+	if err != nil {
+		t.Fatalf("Watch failed: %v", err)
+	}
+	defer tooOldWatcher.Stop()
+	expiredError := errors.NewResourceExpired("").ErrStatus
+	// TODO(wojtek-t): It seems that etcd is currently returning a different error,
+	// being an Internal error of "etcd event received with PrevKv=nil".
+	// We temporarily allow both but we should unify here.
+ internalError := metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusInternalServerError, + Reason: metav1.StatusReasonInternalError, + } + testCheckResultFunc(t, tooOldWatcher, func(actualEvent watch.Event) { + expectNoDiff(t, "incorrect event type", watch.Error, actualEvent.Type) + if !apiequality.Semantic.DeepDerivative(&expiredError, actualEvent.Object) && !apiequality.Semantic.DeepDerivative(&internalError, actualEvent.Object) { + t.Errorf("expected: %#v; got %#v", &expiredError, actualEvent.Object) + } + }) +} + +func RunTestDeleteTriggerWatch(ctx context.Context, t *testing.T, store storage.Interface) { + key, storedObj := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}}) + w, err := store.Watch(ctx, key, storage.ListOptions{ResourceVersion: storedObj.ResourceVersion, Predicate: storage.Everything}) + if err != nil { + t.Fatalf("Watch failed: %v", err) + } + if err := store.Delete(ctx, key, &example.Pod{}, nil, storage.ValidateAllObjectFunc, nil); err != nil { + t.Fatalf("Delete failed: %v", err) + } + testCheckEventType(t, w, watch.Deleted) +} + +func RunTestWatchFromNonZero(ctx context.Context, t *testing.T, store storage.Interface) { + key, storedObj := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}}) + + w, err := store.Watch(ctx, key, storage.ListOptions{ResourceVersion: storedObj.ResourceVersion, Predicate: storage.Everything}) + if err != nil { + t.Fatalf("Watch failed: %v", err) + } + out := &example.Pod{} + _ = store.GuaranteedUpdate(ctx, key, out, true, nil, storage.SimpleUpdate( + func(runtime.Object) (runtime.Object, error) { + newObj := storedObj.DeepCopy() + newObj.Annotations = map[string]string{"version": "2"} + return newObj, nil + }), nil) + testCheckResult(t, w, watch.Event{Type: watch.Modified, Object: out}) +} + +func RunTestDelayedWatchDelivery(ctx context.Context, t *testing.T, store storage.Interface) { + _, storedObj := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}}) + startRV := storedObj.ResourceVersion + + watcher, err := store.Watch(ctx, KeyFunc("test-ns", ""), storage.ListOptions{ResourceVersion: startRV, Predicate: storage.Everything, Recursive: true}) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + // Depending on the implementation, different number of events that + // should be delivered to the watcher can be created before it will + // block the implementation and as a result force the watcher to be + // closed (as otherwise events would have to be dropped). + // For now, this number is smallest for Cacher and it equals 21 for it. + totalPods := 21 + for i := 0; i < totalPods; i++ { + out := &example.Pod{} + pod := &example.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("foo-%d", i), Namespace: "test-ns"}, + } + err := store.GuaranteedUpdate(ctx, computePodKey(pod), out, true, nil, storage.SimpleUpdate( + func(runtime.Object) (runtime.Object, error) { + return pod, nil + }), nil) + if err != nil { + t.Errorf("GuaranteedUpdate failed: %v", err) + } + } + + // Now stop the watcher and check if the consecutive events are being delivered. 
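+	// Even after Stop, events that were already queued in the watcher's buffer should still be readable
+	// from ResultChan until the channel is closed.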
+ watcher.Stop() + + watched := 0 + for { + event, ok := <-watcher.ResultChan() + if !ok { + break + } + object := event.Object + if co, ok := object.(runtime.CacheableObject); ok { + object = co.GetObject() + } + if a, e := object.(*example.Pod).Name, fmt.Sprintf("foo-%d", watched); e != a { + t.Errorf("Unexpected object watched: %s, expected %s", a, e) + } + watched++ + } + // We expect at least N events to be delivered, depending on the implementation. + // For now, this number is smallest for Cacher and it equals 10 (size of the out buffer). + if watched < 10 { + t.Errorf("Unexpected number of events: %v, expected: %v", watched, totalPods) + } +} + +func RunTestWatchError(ctx context.Context, t *testing.T, store InterfaceWithPrefixTransformer) { + obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}} + key := computePodKey(obj) + + // Compute the initial resource version from which we can start watching later. + list := &example.PodList{} + storageOpts := storage.ListOptions{ + ResourceVersion: "0", + Predicate: storage.Everything, + Recursive: true, + } + if err := store.GetList(ctx, KeyFunc("", ""), storageOpts, list); err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if err := store.GuaranteedUpdate(ctx, key, &example.Pod{}, true, nil, storage.SimpleUpdate( + func(runtime.Object) (runtime.Object, error) { + return obj, nil + }), nil); err != nil { + t.Fatalf("GuaranteedUpdate failed: %v", err) + } + + // Now trigger watch error by injecting failing transformer. + revertTransformer := store.UpdatePrefixTransformer( + func(previousTransformer *PrefixTransformer) value.Transformer { + return &failingTransformer{} + }) + defer revertTransformer() + + w, err := store.Watch(ctx, key, storage.ListOptions{ResourceVersion: list.ResourceVersion, Predicate: storage.Everything}) + if err != nil { + t.Fatalf("Watch failed: %v", err) + } + testCheckEventType(t, w, watch.Error) +} + +func RunTestWatchContextCancel(ctx context.Context, t *testing.T, store storage.Interface) { + canceledCtx, cancel := context.WithCancel(ctx) + cancel() + // When we watch with a canceled context, we should detect that it's context canceled. + // We won't take it as error and also close the watcher. + w, err := store.Watch(canceledCtx, KeyFunc("not-existing", ""), storage.ListOptions{ + ResourceVersion: "0", + Predicate: storage.Everything, + }) + if err != nil { + t.Fatal(err) + } + + select { + case _, ok := <-w.ResultChan(): + if ok { + t.Error("ResultChan() should be closed") + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("timeout after %v", wait.ForeverTestTimeout) + } +} + +func RunTestWatcherTimeout(ctx context.Context, t *testing.T, store storage.Interface) { + // initialRV is used to initate the watcher at the beginning of the world. + podList := example.PodList{} + options := storage.ListOptions{ + Predicate: storage.Everything, + Recursive: true, + } + if err := store.GetList(ctx, KeyFunc("", ""), options, &podList); err != nil { + t.Fatalf("Failed to list pods: %v", err) + } + initialRV := podList.ResourceVersion + + options = storage.ListOptions{ + ResourceVersion: initialRV, + Predicate: storage.Everything, + Recursive: true, + } + + // Create a number of watchers that will not be reading any result. 
+ nonReadingWatchers := 50 + for i := 0; i < nonReadingWatchers; i++ { + watcher, err := store.Watch(ctx, KeyFunc("test-ns", ""), options) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer watcher.Stop() + } + + // Create a second watcher that will be reading result. + readingWatcher, err := store.Watch(ctx, KeyFunc("test-ns", ""), options) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer readingWatcher.Stop() + + // Depending on the implementation, different number of events that + // should be delivered to the watcher can be created before it will + // block the implementation and as a result force the watcher to be + // closed (as otherwise events would have to be dropped). + // For now, this number is smallest for Cacher and it equals 21 for it. + // + // Create more events to ensure that we're not blocking other watchers + // forever. + startTime := time.Now() + for i := 0; i < 22; i++ { + out := &example.Pod{} + pod := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("foo-%d", i), Namespace: "test-ns"}} + if err := store.Create(ctx, computePodKey(pod), pod, out, 0); err != nil { + t.Fatalf("Create failed: %v", err) + } + testCheckResult(t, readingWatcher, watch.Event{Type: watch.Added, Object: out}) + } + if time.Since(startTime) > time.Duration(250*nonReadingWatchers)*time.Millisecond { + t.Errorf("waiting for events took too long: %v", time.Since(startTime)) + } +} + +func RunTestWatchDeleteEventObjectHaveLatestRV(ctx context.Context, t *testing.T, store storage.Interface) { + key, storedObj := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}}) + + watchCtx, cancel := context.WithTimeout(ctx, wait.ForeverTestTimeout) + t.Cleanup(cancel) + w, err := store.Watch(watchCtx, key, storage.ListOptions{ResourceVersion: storedObj.ResourceVersion, Predicate: storage.Everything}) + if err != nil { + t.Fatalf("Watch failed: %v", err) + } + + deletedObj := &example.Pod{} + if err := store.Delete(ctx, key, deletedObj, &storage.Preconditions{}, storage.ValidateAllObjectFunc, nil); err != nil { + t.Fatalf("Delete failed: %v", err) + } + + // Verify that ResourceVersion has changed on deletion. + if storedObj.ResourceVersion == deletedObj.ResourceVersion { + t.Fatalf("ResourceVersion didn't changed on deletion: %s", deletedObj.ResourceVersion) + } + + testCheckResult(t, w, watch.Event{Type: watch.Deleted, Object: deletedObj}) +} + +func RunTestWatchInitializationSignal(ctx context.Context, t *testing.T, store storage.Interface) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + t.Cleanup(cancel) + initSignal := utilflowcontrol.NewInitializationSignal() + ctx = utilflowcontrol.WithInitializationSignal(ctx, initSignal) + + key, storedObj := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}}) + _, err := store.Watch(ctx, key, storage.ListOptions{ResourceVersion: storedObj.ResourceVersion, Predicate: storage.Everything}) + if err != nil { + t.Fatalf("Watch failed: %v", err) + } + + initSignal.Wait() +} + +// RunOptionalTestProgressNotify tests ProgressNotify feature of ListOptions. +// Given this feature is currently not explicitly used by higher layers of Kubernetes +// (it rather is used by wrappers of storage.Interface to implement its functionalities) +// this test is currently considered optional. 
+func RunOptionalTestProgressNotify(ctx context.Context, t *testing.T, store storage.Interface) {
+	input := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: "test-ns"}}
+	key := computePodKey(input)
+	out := &example.Pod{}
+	if err := store.Create(ctx, key, input, out, 0); err != nil {
+		t.Fatalf("Create failed: %v", err)
+	}
+	validateResourceVersion := resourceVersionNotOlderThan(out.ResourceVersion)
+
+	opts := storage.ListOptions{
+		ResourceVersion: out.ResourceVersion,
+		Predicate:       storage.Everything,
+		ProgressNotify:  true,
+	}
+	w, err := store.Watch(ctx, key, opts)
+	if err != nil {
+		t.Fatalf("Watch failed: %v", err)
+	}
+
+	// When we send a bookmark event, the client expects the event to contain an
+	// object of the correct type, but with no fields set other than the resourceVersion.
+	testCheckResultFunc(t, w, func(actualEvent watch.Event) {
+		expectNoDiff(t, "incorrect event type", watch.Bookmark, actualEvent.Type)
+		// First, check that we have the correct resource version.
+		obj, ok := actualEvent.Object.(metav1.Object)
+		if !ok {
+			t.Fatalf("got %T, not metav1.Object", actualEvent.Object)
+		}
+		if err := validateResourceVersion(obj.GetResourceVersion()); err != nil {
+			t.Fatal(err)
+		}
+
+		// Then, check that we have the right type and content.
+		pod, ok := actualEvent.Object.(*example.Pod)
+		if !ok {
+			t.Fatalf("got %T, not *example.Pod", actualEvent.Object)
+		}
+		pod.ResourceVersion = ""
+		expectNoDiff(t, "bookmark event should contain an object with no fields set other than resourceVersion", &example.Pod{}, pod)
+	})
+}
+
+// RunTestClusterScopedWatch tests watches of cluster-scoped resources.
+func RunTestClusterScopedWatch(ctx context.Context, t *testing.T, store storage.Interface) {
+	tests := []struct {
+		name string
+		// For a watch request, the name of the object is specified with the field
+		// selector "metadata.name=objectName". So in these watch tests, we should
+		// either set both requestedName and the field selector
+		// "metadata.name=requestedName", or set neither of them.
+ requestedName string + recursive bool + fieldSelector fields.Selector + indexFields []string + watchTests []*testWatchStruct + }{ + { + name: "cluster-wide watch, request without name, without field selector", + recursive: true, + fieldSelector: fields.Everything(), + watchTests: []*testWatchStruct{ + {basePod("t1-foo1"), true, watch.Added}, + {basePodUpdated("t1-foo1"), true, watch.Modified}, + {basePodAssigned("t1-foo2", "t1-bar1"), true, watch.Added}, + }, + }, + { + name: "cluster-wide watch, request without name, field selector with spec.nodeName", + recursive: true, + fieldSelector: fields.ParseSelectorOrDie("spec.nodeName=t2-bar1"), + indexFields: []string{"spec.nodeName"}, + watchTests: []*testWatchStruct{ + {basePod("t2-foo1"), false, ""}, + {basePodAssigned("t2-foo1", "t2-bar1"), true, watch.Added}, + }, + }, + { + name: "cluster-wide watch, request without name, field selector with spec.nodeName to filter out watch", + recursive: true, + fieldSelector: fields.ParseSelectorOrDie("spec.nodeName!=t3-bar1"), + indexFields: []string{"spec.nodeName"}, + watchTests: []*testWatchStruct{ + {basePod("t3-foo1"), true, watch.Added}, + {basePod("t3-foo2"), true, watch.Added}, + {basePodUpdated("t3-foo1"), true, watch.Modified}, + {basePodAssigned("t3-foo1", "t3-bar1"), true, watch.Deleted}, + }, + }, + { + name: "cluster-wide watch, request with name, field selector with metadata.name", + requestedName: "t4-foo1", + fieldSelector: fields.ParseSelectorOrDie("metadata.name=t4-foo1"), + watchTests: []*testWatchStruct{ + {basePod("t4-foo1"), true, watch.Added}, + {basePod("t4-foo2"), false, ""}, + {basePodUpdated("t4-foo1"), true, watch.Modified}, + {basePodUpdated("t4-foo2"), false, ""}, + }, + }, + { + name: "cluster-wide watch, request with name, field selector with metadata.name and spec.nodeName", + requestedName: "t5-foo1", + fieldSelector: fields.SelectorFromSet(fields.Set{ + "metadata.name": "t5-foo1", + "spec.nodeName": "t5-bar1", + }), + indexFields: []string{"spec.nodeName"}, + watchTests: []*testWatchStruct{ + {basePod("t5-foo1"), false, ""}, + {basePod("t5-foo2"), false, ""}, + {basePodUpdated("t5-foo1"), false, ""}, + {basePodUpdated("t5-foo2"), false, ""}, + {basePodAssigned("t5-foo1", "t5-bar1"), true, watch.Added}, + }, + }, + { + name: "cluster-wide watch, request with name, field selector with metadata.name, and with spec.nodeName to filter out watch", + requestedName: "t6-foo1", + fieldSelector: fields.AndSelectors( + fields.ParseSelectorOrDie("spec.nodeName!=t6-bar1"), + fields.SelectorFromSet(fields.Set{"metadata.name": "t6-foo1"}), + ), + indexFields: []string{"spec.nodeName"}, + watchTests: []*testWatchStruct{ + {basePod("t6-foo1"), true, watch.Added}, + {basePod("t6-foo2"), false, ""}, + {basePodUpdated("t6-foo1"), true, watch.Modified}, + {basePodAssigned("t6-foo1", "t6-bar1"), true, watch.Deleted}, + {basePodAssigned("t6-foo2", "t6-bar1"), false, ""}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + requestInfo := &genericapirequest.RequestInfo{} + requestInfo.Name = tt.requestedName + requestInfo.Namespace = "" + ctx = genericapirequest.WithRequestInfo(ctx, requestInfo) + ctx = genericapirequest.WithNamespace(ctx, "") + + watchKey := KeyFunc("", tt.requestedName) + + predicate := createPodPredicate(tt.fieldSelector, false, tt.indexFields) + + list := &example.PodList{} + opts := storage.ListOptions{ + ResourceVersion: "", + Predicate: predicate, + Recursive: true, + } + if err := store.GetList(ctx, KeyFunc("", ""), opts, list); err 
!= nil { + t.Errorf("Unexpected error: %v", err) + } + + opts.ResourceVersion = list.ResourceVersion + opts.Recursive = tt.recursive + + w, err := store.Watch(ctx, watchKey, opts) + if err != nil { + t.Fatalf("Watch failed: %v", err) + } + + currentObjs := map[string]*example.Pod{} + for _, watchTest := range tt.watchTests { + out := &example.Pod{} + key := "pods/" + watchTest.obj.Name + err := store.GuaranteedUpdate(ctx, key, out, true, nil, storage.SimpleUpdate( + func(runtime.Object) (runtime.Object, error) { + obj := watchTest.obj.DeepCopy() + return obj, nil + }), nil) + if err != nil { + t.Fatalf("GuaranteedUpdate failed: %v", err) + } + + expectObj := out + if watchTest.watchType == watch.Deleted { + expectObj = currentObjs[watchTest.obj.Name] + expectObj.ResourceVersion = out.ResourceVersion + delete(currentObjs, watchTest.obj.Name) + } else { + currentObjs[watchTest.obj.Name] = out + } + if watchTest.expectEvent { + testCheckResult(t, w, watch.Event{Type: watchTest.watchType, Object: expectObj}) + } + } + w.Stop() + testCheckStop(t, w) + }) + } +} + +// It tests watch of namespace-scoped resources. +func RunTestNamespaceScopedWatch(ctx context.Context, t *testing.T, store storage.Interface) { + tests := []struct { + name string + // For watch request, the name of object is specified with field selector + // "metadata.name=objectName". So in this watch tests, we should set the + // requestedName and field selector "metadata.name=requestedName" at the + // same time or set neighter of them. + requestedName string + requestedNamespace string + recursive bool + fieldSelector fields.Selector + indexFields []string + watchTests []*testWatchStruct + }{ + { + name: "namespaced watch, request without name, request without namespace, without field selector", + recursive: true, + fieldSelector: fields.Everything(), + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t1-foo1", "t1-ns1"), true, watch.Added}, + {baseNamespacedPod("t1-foo2", "t1-ns2"), true, watch.Added}, + {baseNamespacedPodUpdated("t1-foo1", "t1-ns1"), true, watch.Modified}, + {baseNamespacedPodUpdated("t1-foo2", "t1-ns2"), true, watch.Modified}, + }, + }, + { + name: "namespaced watch, request without name, request without namespace, field selector with metadata.namespace", + recursive: true, + fieldSelector: fields.ParseSelectorOrDie("metadata.namespace=t2-ns1"), + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t2-foo1", "t2-ns1"), true, watch.Added}, + {baseNamespacedPod("t2-foo1", "t2-ns2"), false, ""}, + {baseNamespacedPodUpdated("t2-foo1", "t2-ns1"), true, watch.Modified}, + {baseNamespacedPodUpdated("t2-foo1", "t2-ns2"), false, ""}, + }, + }, + { + name: "namespaced watch, request without name, request without namespace, field selector with spec.nodename", + recursive: true, + fieldSelector: fields.ParseSelectorOrDie("spec.nodeName=t3-bar1"), + indexFields: []string{"spec.nodeName"}, + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t3-foo1", "t3-ns1"), false, ""}, + {baseNamespacedPod("t3-foo2", "t3-ns2"), false, ""}, + {baseNamespacedPodAssigned("t3-foo1", "t3-ns1", "t3-bar1"), true, watch.Added}, + {baseNamespacedPodAssigned("t3-foo2", "t3-ns2", "t3-bar1"), true, watch.Added}, + }, + }, + { + name: "namespaced watch, request without name, request without namespace, field selector with spec.nodename to filter out watch", + recursive: true, + fieldSelector: fields.ParseSelectorOrDie("spec.nodeName!=t4-bar1"), + indexFields: []string{"spec.nodeName"}, + watchTests: []*testWatchStruct{ + 
{baseNamespacedPod("t4-foo1", "t4-ns1"), true, watch.Added}, + {baseNamespacedPod("t4-foo2", "t4-ns1"), true, watch.Added}, + {baseNamespacedPodUpdated("t4-foo1", "t4-ns1"), true, watch.Modified}, + {baseNamespacedPodAssigned("t4-foo1", "t4-ns1", "t4-bar1"), true, watch.Deleted}, + }, + }, + { + name: "namespaced watch, request without name, request with namespace, without field selector", + requestedNamespace: "t5-ns1", + recursive: true, + fieldSelector: fields.Everything(), + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t5-foo1", "t5-ns1"), true, watch.Added}, + {baseNamespacedPod("t5-foo1", "t5-ns2"), false, ""}, + {baseNamespacedPod("t5-foo2", "t5-ns1"), true, watch.Added}, + {baseNamespacedPodUpdated("t5-foo1", "t5-ns1"), true, watch.Modified}, + {baseNamespacedPodUpdated("t5-foo1", "t5-ns2"), false, ""}, + }, + }, + { + name: "namespaced watch, request without name, request with namespace, field selector with matched metadata.namespace", + requestedNamespace: "t6-ns1", + recursive: true, + fieldSelector: fields.ParseSelectorOrDie("metadata.namespace=t6-ns1"), + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t6-foo1", "t6-ns1"), true, watch.Added}, + {baseNamespacedPod("t6-foo1", "t6-ns2"), false, ""}, + {baseNamespacedPodUpdated("t6-foo1", "t6-ns1"), true, watch.Modified}, + }, + }, + { + name: "namespaced watch, request without name, request with namespace, field selector with non-matched metadata.namespace", + requestedNamespace: "t7-ns1", + recursive: true, + fieldSelector: fields.ParseSelectorOrDie("metadata.namespace=t7-ns2"), + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t7-foo1", "t7-ns1"), false, ""}, + {baseNamespacedPod("t7-foo1", "t7-ns2"), false, ""}, + {baseNamespacedPodUpdated("t7-foo1", "t7-ns1"), false, ""}, + {baseNamespacedPodUpdated("t7-foo1", "t7-ns2"), false, ""}, + }, + }, + { + name: "namespaced watch, request without name, request with namespace, field selector with spec.nodename", + requestedNamespace: "t8-ns1", + recursive: true, + fieldSelector: fields.ParseSelectorOrDie("spec.nodeName=t8-bar2"), + indexFields: []string{"spec.nodeName"}, + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t8-foo1", "t8-ns1"), false, ""}, + {baseNamespacedPodAssigned("t8-foo1", "t8-ns1", "t8-bar1"), false, ""}, + {baseNamespacedPodAssigned("t8-foo1", "t8-ns2", "t8-bar2"), false, ""}, + {baseNamespacedPodAssigned("t8-foo1", "t8-ns1", "t8-bar2"), true, watch.Added}, + }, + }, + { + name: "namespaced watch, request without name, request with namespace, field selector with spec.nodename to filter out watch", + requestedNamespace: "t9-ns2", + recursive: true, + fieldSelector: fields.ParseSelectorOrDie("spec.nodeName!=t9-bar1"), + indexFields: []string{"spec.nodeName"}, + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t9-foo1", "t9-ns1"), false, ""}, + {baseNamespacedPod("t9-foo1", "t9-ns2"), true, watch.Added}, + {baseNamespacedPodAssigned("t9-foo1", "t9-ns2", "t9-bar1"), true, watch.Deleted}, + {baseNamespacedPodAssigned("t9-foo1", "t9-ns2", "t9-bar2"), true, watch.Added}, + }, + }, + { + name: "namespaced watch, request with name, request without namespace, field selector with metadata.name", + requestedName: "t10-foo1", + recursive: true, + fieldSelector: fields.ParseSelectorOrDie("metadata.name=t10-foo1"), + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t10-foo1", "t10-ns1"), true, watch.Added}, + {baseNamespacedPod("t10-foo1", "t10-ns2"), true, watch.Added}, + {baseNamespacedPod("t10-foo2", "t10-ns1"), false, ""}, + 
{baseNamespacedPodUpdated("t10-foo1", "t10-ns1"), true, watch.Modified}, + {baseNamespacedPodAssigned("t10-foo1", "t10-ns1", "t10-bar1"), true, watch.Modified}, + }, + }, + { + name: "namespaced watch, request with name, request without namespace, field selector with metadata.name and metadata.namespace", + requestedName: "t11-foo1", + recursive: true, + fieldSelector: fields.SelectorFromSet(fields.Set{ + "metadata.name": "t11-foo1", + "metadata.namespace": "t11-ns1", + }), + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t11-foo1", "t11-ns1"), true, watch.Added}, + {baseNamespacedPod("t11-foo2", "t11-ns1"), false, ""}, + {baseNamespacedPod("t11-foo1", "t11-ns2"), false, ""}, + {baseNamespacedPodUpdated("t11-foo1", "t11-ns1"), true, watch.Modified}, + {baseNamespacedPodAssigned("t11-foo1", "t11-ns1", "t11-bar1"), true, watch.Modified}, + }, + }, + { + name: "namespaced watch, request with name, request without namespace, field selector with metadata.name and spec.nodeName", + requestedName: "t12-foo1", + recursive: true, + fieldSelector: fields.SelectorFromSet(fields.Set{ + "metadata.name": "t12-foo1", + "spec.nodeName": "t12-bar1", + }), + indexFields: []string{"spec.nodeName"}, + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t12-foo1", "t12-ns1"), false, ""}, + {baseNamespacedPodUpdated("t12-foo1", "t12-ns1"), false, ""}, + {baseNamespacedPodAssigned("t12-foo1", "t12-ns1", "t12-bar1"), true, watch.Added}, + }, + }, + { + name: "namespaced watch, request with name, request without namespace, field selector with metadata.name, and with spec.nodeName to filter out watch", + requestedName: "t15-foo1", + recursive: true, + fieldSelector: fields.AndSelectors( + fields.ParseSelectorOrDie("spec.nodeName!=t15-bar1"), + fields.SelectorFromSet(fields.Set{"metadata.name": "t15-foo1"}), + ), + indexFields: []string{"spec.nodeName"}, + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t15-foo1", "t15-ns1"), true, watch.Added}, + {baseNamespacedPod("t15-foo2", "t15-ns1"), false, ""}, + {baseNamespacedPodUpdated("t15-foo1", "t15-ns1"), true, watch.Modified}, + {baseNamespacedPodAssigned("t15-foo1", "t15-ns1", "t15-bar1"), true, watch.Deleted}, + {baseNamespacedPodAssigned("t15-foo1", "t15-ns1", "t15-bar2"), true, watch.Added}, + }, + }, + { + name: "namespaced watch, request with name, request with namespace, with field selector metadata.name", + requestedName: "t16-foo1", + requestedNamespace: "t16-ns1", + fieldSelector: fields.ParseSelectorOrDie("metadata.name=t16-foo1"), + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t16-foo1", "t16-ns1"), true, watch.Added}, + {baseNamespacedPod("t16-foo2", "t16-ns1"), false, ""}, + {baseNamespacedPodUpdated("t16-foo1", "t16-ns1"), true, watch.Modified}, + {baseNamespacedPodAssigned("t16-foo1", "t16-ns1", "t16-bar1"), true, watch.Modified}, + }, + }, + { + name: "namespaced watch, request with name, request with namespace, with field selector metadata.name and metadata.namespace", + requestedName: "t17-foo2", + requestedNamespace: "t17-ns1", + fieldSelector: fields.SelectorFromSet(fields.Set{ + "metadata.name": "t17-foo2", + "metadata.namespace": "t17-ns1", + }), + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t17-foo1", "t17-ns1"), false, ""}, + {baseNamespacedPod("t17-foo2", "t17-ns1"), true, watch.Added}, + {baseNamespacedPodUpdated("t17-foo1", "t17-ns1"), false, ""}, + {baseNamespacedPodAssigned("t17-foo2", "t17-ns1", "t17-bar1"), true, watch.Modified}, + }, + }, + { + name: "namespaced watch, request with name, request with 
namespace, with field selector metadata.name, metadata.namespace and spec.nodename", + requestedName: "t18-foo1", + requestedNamespace: "t18-ns1", + fieldSelector: fields.SelectorFromSet(fields.Set{ + "metadata.name": "t18-foo1", + "metadata.namespace": "t18-ns1", + "spec.nodeName": "t18-bar1", + }), + indexFields: []string{"spec.nodeName"}, + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t18-foo1", "t18-ns1"), false, ""}, + {baseNamespacedPod("t18-foo2", "t18-ns1"), false, ""}, + {baseNamespacedPod("t18-foo1", "t18-ns2"), false, ""}, + {baseNamespacedPodUpdated("t18-foo1", "t18-ns1"), false, ""}, + {baseNamespacedPodAssigned("t18-foo1", "t18-ns1", "t18-bar1"), true, watch.Added}, + }, + }, + { + name: "namespaced watch, request with name, request with namespace, with field selector metadata.name, metadata.namespace, and with spec.nodename to filter out watch", + requestedName: "t19-foo2", + requestedNamespace: "t19-ns1", + fieldSelector: fields.AndSelectors( + fields.ParseSelectorOrDie("spec.nodeName!=t19-bar1"), + fields.SelectorFromSet(fields.Set{"metadata.name": "t19-foo2", "metadata.namespace": "t19-ns1"}), + ), + indexFields: []string{"spec.nodeName"}, + watchTests: []*testWatchStruct{ + {baseNamespacedPod("t19-foo1", "t19-ns1"), false, ""}, + {baseNamespacedPod("t19-foo2", "t19-ns2"), false, ""}, + {baseNamespacedPod("t19-foo2", "t19-ns1"), true, watch.Added}, + {baseNamespacedPodUpdated("t19-foo2", "t19-ns1"), true, watch.Modified}, + {baseNamespacedPodAssigned("t19-foo2", "t19-ns1", "t19-bar1"), true, watch.Deleted}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + requestInfo := &genericapirequest.RequestInfo{} + requestInfo.Name = tt.requestedName + requestInfo.Namespace = tt.requestedNamespace + ctx = genericapirequest.WithRequestInfo(ctx, requestInfo) + ctx = genericapirequest.WithNamespace(ctx, tt.requestedNamespace) + + watchKey := KeyFunc(tt.requestedNamespace, tt.requestedName) + + predicate := createPodPredicate(tt.fieldSelector, true, tt.indexFields) + + list := &example.PodList{} + opts := storage.ListOptions{ + ResourceVersion: "", + Predicate: predicate, + Recursive: true, + } + if err := store.GetList(ctx, KeyFunc("", ""), opts, list); err != nil { + t.Errorf("Unexpected error: %v", err) + } + + opts.ResourceVersion = list.ResourceVersion + opts.Recursive = tt.recursive + + w, err := store.Watch(ctx, watchKey, opts) + if err != nil { + t.Fatalf("Watch failed: %v", err) + } + + currentObjs := map[string]*example.Pod{} + for _, watchTest := range tt.watchTests { + out := &example.Pod{} + key := "pods/" + watchTest.obj.Namespace + "/" + watchTest.obj.Name + err := store.GuaranteedUpdate(ctx, key, out, true, nil, storage.SimpleUpdate( + func(runtime.Object) (runtime.Object, error) { + obj := watchTest.obj.DeepCopy() + return obj, nil + }), nil) + if err != nil { + t.Fatalf("GuaranteedUpdate failed: %v", err) + } + + expectObj := out + podIdentifier := watchTest.obj.Namespace + "/" + watchTest.obj.Name + if watchTest.watchType == watch.Deleted { + expectObj = currentObjs[podIdentifier] + expectObj.ResourceVersion = out.ResourceVersion + delete(currentObjs, podIdentifier) + } else { + currentObjs[podIdentifier] = out + } + if watchTest.expectEvent { + testCheckResult(t, w, watch.Event{Type: watchTest.watchType, Object: expectObj}) + } + } + w.Stop() + testCheckStop(t, w) + }) + } +} + +// RunOptionalTestWatchDispatchBookmarkEvents tests whether bookmark events are sent. 
+// This feature is currently implemented in watch cache layer, so this is optional. +// +// TODO(#109831): ProgressNotify feature is effectively implementing the same +// +// functionality, so we should refactor this functionality to share the same input. +func RunTestWatchDispatchBookmarkEvents(ctx context.Context, t *testing.T, store storage.Interface, expectedWatchBookmarks bool) { + key, storedObj := testPropagateStore(ctx, t, store, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test-ns"}}) + startRV := storedObj.ResourceVersion + + tests := []struct { + name string + timeout time.Duration + expected bool + allowWatchBookmarks bool + }{ + { // test old client won't get Bookmark event + name: "allowWatchBookmarks=false", + timeout: 3 * time.Second, + expected: false, + allowWatchBookmarks: false, + }, + { + name: "allowWatchBookmarks=true", + timeout: 3 * time.Second, + expected: expectedWatchBookmarks, + allowWatchBookmarks: true, + }, + } + + for i, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pred := storage.Everything + pred.AllowWatchBookmarks = tt.allowWatchBookmarks + ctx, cancel := context.WithTimeout(context.Background(), tt.timeout) + defer cancel() + + watcher, err := store.Watch(ctx, key, storage.ListOptions{ResourceVersion: startRV, Predicate: pred}) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer watcher.Stop() + + // Create events of pods in a different namespace + out := &example.Pod{} + obj := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: fmt.Sprintf("other-ns-%d", i)}} + objKey := computePodKey(obj) + + if err := store.Create(ctx, objKey, obj, out, 0); err != nil { + t.Fatalf("Create failed: %v", err) + } + + // Now wait for Bookmark event + select { + case event, ok := <-watcher.ResultChan(): + if !ok && tt.expected { + t.Errorf("Unexpected object watched (no objects)") + } + if tt.expected && event.Type != watch.Bookmark { + t.Errorf("Unexpected object watched %#v", event) + } + case <-time.After(time.Second * 3): + if tt.expected { + t.Errorf("Unexpected object watched (timeout)") + } + } + }) + } +} + +// RunOptionalTestWatchBookmarksWithCorrectResourceVersion tests whether bookmark events are +// sent with correct resource versions. +// This feature is currently implemented in watch cache layer, so this is optional. +// +// TODO(#109831): ProgressNotify feature is effectively implementing the same +// +// functionality, so we should refactor this functionality to share the same input. +func RunTestOptionalWatchBookmarksWithCorrectResourceVersion(ctx context.Context, t *testing.T, store storage.Interface) { + // Compute the initial resource version. 
+	list := &example.PodList{}
+	storageOpts := storage.ListOptions{
+		Predicate: storage.Everything,
+		Recursive: true,
+	}
+	if err := store.GetList(ctx, KeyFunc("", ""), storageOpts, list); err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	startRV := list.ResourceVersion
+
+	key := KeyFunc("test-ns", "")
+	pred := storage.Everything
+	pred.AllowWatchBookmarks = true
+
+	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+	defer cancel()
+
+	watcher, err := store.Watch(ctx, key, storage.ListOptions{ResourceVersion: startRV, Predicate: pred, Recursive: true})
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	defer watcher.Stop()
+
+	done := make(chan struct{})
+	errc := make(chan error, 1)
+	var wg sync.WaitGroup
+	wg.Add(1)
+	// We must wait for the waitgroup to exit before we terminate the cache or the server in prior defers.
+	defer wg.Wait()
+	// Call close first, so the goroutine knows to exit.
+	defer close(done)
+
+	go func() {
+		defer wg.Done()
+		for i := 0; i < 100; i++ {
+			select {
+			case <-done:
+				return
+			default:
+				out := &example.Pod{}
+				pod := &example.Pod{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      fmt.Sprintf("foo-%d", i),
+						Namespace: "test-ns",
+					},
+				}
+				podKey := computePodKey(pod)
+				if err := store.Create(ctx, podKey, pod, out, 0); err != nil {
+					errc <- fmt.Errorf("failed to create pod %v: %v", pod, err)
+					return
+				}
+				time.Sleep(10 * time.Millisecond)
+			}
+		}
+	}()
+
+	bookmarkReceived := false
+	lastObservedResourceVersion := uint64(0)
+
+	for {
+		select {
+		case err := <-errc:
+			t.Fatal(err)
+		case event, ok := <-watcher.ResultChan():
+			if !ok {
+				// Make sure we have received a bookmark event.
+				if !bookmarkReceived {
+					t.Fatalf("Unexpected error: we did not receive a bookmark event")
+				}
+				return
+			}
+			rv, err := storage.APIObjectVersioner{}.ObjectResourceVersion(event.Object)
+			if err != nil {
+				t.Fatalf("failed to parse resourceVersion from %#v", event)
+			}
+			if event.Type == watch.Bookmark {
+				bookmarkReceived = true
+				// A bookmark event has an RV greater than or equal to the previous one.
+				if rv < lastObservedResourceVersion {
+					t.Fatalf("Unexpected bookmark resourceVersion %v less than observed %v", rv, lastObservedResourceVersion)
+				}
+			} else {
+				// A non-bookmark event has an RV greater than anything observed before.
+				if rv <= lastObservedResourceVersion {
+					t.Fatalf("Unexpected event resourceVersion %v less than or equal to the last observed %v", rv, lastObservedResourceVersion)
+				}
+			}
+			lastObservedResourceVersion = rv
+		}
+	}
+}
+
+// RunSendInitialEventsBackwardCompatibility tests backward compatibility
+// when the SendInitialEvents option is set, against various implementations.
+// Backward compatibility is defined as RV = "" || RV = "0" with AllowWatchBookmarks set to false.
+// In that case we expect a watch request to be established.
+func RunSendInitialEventsBackwardCompatibility(ctx context.Context, t *testing.T, store storage.Interface) { + opts := storage.ListOptions{Predicate: storage.Everything} + opts.SendInitialEvents = ptr.To(true) + w, err := store.Watch(ctx, KeyFunc("", ""), opts) + require.NoError(t, err) + w.Stop() +} + +// RunWatchSemantics test the following cases: +// +// +-----------------+---------------------+-------------------+ +// | ResourceVersion | AllowWatchBookmarks | SendInitialEvents | +// +=================+=====================+===================+ +// | Unset | true/false | true/false | +// | 0 | true/false | true/false | +// | 1 | true/false | true/false | +// | Current | true/false | true/false | +// +-----------------+---------------------+-------------------+ +// where: +// - false indicates the value of the param was set to "false" by a test case +// - true indicates the value of the param was set to "true" by a test case +func RunWatchSemantics(ctx context.Context, t *testing.T, store storage.Interface) { + trueVal, falseVal := true, false + addEventsFromCreatedPods := func(createdInitialPods []*example.Pod) []watch.Event { + var ret []watch.Event + for _, createdPod := range createdInitialPods { + ret = append(ret, watch.Event{Type: watch.Added, Object: createdPod}) + } + return ret + } + initialEventsEndFromLastCreatedPod := func(createdInitialPods []*example.Pod) watch.Event { + return watch.Event{ + Type: watch.Bookmark, + Object: &example.Pod{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: createdInitialPods[len(createdInitialPods)-1].ResourceVersion, + Annotations: map[string]string{"k8s.io/initial-events-end": "true"}, + }, + }, + } + } + scenarios := []struct { + name string + allowWatchBookmarks bool + sendInitialEvents *bool + resourceVersion string + // useCurrentRV if set gets the current RV from the storage + // after adding the initial pods which is then used to establish a new watch request + useCurrentRV bool + + initialPods []*example.Pod + podsAfterEstablishingWatch []*example.Pod + + expectedInitialEventsInRandomOrder func(createdInitialPods []*example.Pod) []watch.Event + expectedInitialEventsInStrictOrder func(createdInitialPods []*example.Pod) []watch.Event + expectedEventsAfterEstablishingWatch func(createdPodsAfterWatch []*example.Pod) []watch.Event + }{ + { + name: "allowWatchBookmarks=true, sendInitialEvents=true, RV=unset", + allowWatchBookmarks: true, + sendInitialEvents: &trueVal, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + expectedInitialEventsInStrictOrder: func(createdInitialPods []*example.Pod) []watch.Event { + return []watch.Event{initialEventsEndFromLastCreatedPod(createdInitialPods)} + }, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=true, sendInitialEvents=false, RV=unset", + allowWatchBookmarks: true, + sendInitialEvents: &falseVal, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=false, RV=unset", + sendInitialEvents: &falseVal, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + 
expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=true, RV=unset", + sendInitialEvents: &trueVal, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + + { + name: "allowWatchBookmarks=true, sendInitialEvents=true, RV=0", + allowWatchBookmarks: true, + sendInitialEvents: &trueVal, + resourceVersion: "0", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + expectedInitialEventsInStrictOrder: func(createdInitialPods []*example.Pod) []watch.Event { + return []watch.Event{initialEventsEndFromLastCreatedPod(createdInitialPods)} + }, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=true, sendInitialEvents=false, RV=0", + allowWatchBookmarks: true, + sendInitialEvents: &falseVal, + resourceVersion: "0", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=false, RV=0", + sendInitialEvents: &falseVal, + resourceVersion: "0", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=true, RV=0", + sendInitialEvents: &trueVal, + resourceVersion: "0", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + + { + name: "allowWatchBookmarks=true, sendInitialEvents=true, RV=1", + allowWatchBookmarks: true, + sendInitialEvents: &trueVal, + resourceVersion: "1", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + expectedInitialEventsInStrictOrder: func(createdInitialPods []*example.Pod) []watch.Event { + return []watch.Event{initialEventsEndFromLastCreatedPod(createdInitialPods)} + }, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=true, sendInitialEvents=false, RV=1", + allowWatchBookmarks: true, + sendInitialEvents: &falseVal, + resourceVersion: "1", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInStrictOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=false, RV=1", + sendInitialEvents: &falseVal, + resourceVersion: "1", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInStrictOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: 
[]*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=true, RV=1", + sendInitialEvents: &trueVal, + resourceVersion: "1", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + + { + name: "allowWatchBookmarks=true, sendInitialEvents=true, RV=useCurrentRV", + allowWatchBookmarks: true, + sendInitialEvents: &trueVal, + useCurrentRV: true, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + expectedInitialEventsInStrictOrder: func(createdInitialPods []*example.Pod) []watch.Event { + return []watch.Event{initialEventsEndFromLastCreatedPod(createdInitialPods)} + }, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=true, sendInitialEvents=false, RV=useCurrentRV", + allowWatchBookmarks: true, + sendInitialEvents: &falseVal, + useCurrentRV: true, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=false, RV=useCurrentRV", + sendInitialEvents: &falseVal, + useCurrentRV: true, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "allowWatchBookmarks=false, sendInitialEvents=true, RV=useCurrentRV", + sendInitialEvents: &trueVal, + useCurrentRV: true, + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + + { + name: "legacy, RV=0", + resourceVersion: "0", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + { + name: "legacy, RV=unset", + initialPods: []*example.Pod{makePod("1"), makePod("2"), makePod("3")}, + expectedInitialEventsInRandomOrder: addEventsFromCreatedPods, + podsAfterEstablishingWatch: []*example.Pod{makePod("4"), makePod("5")}, + expectedEventsAfterEstablishingWatch: addEventsFromCreatedPods, + }, + } + for idx, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + // set up env + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WatchList, true)() + if scenario.expectedInitialEventsInStrictOrder == nil { + scenario.expectedInitialEventsInStrictOrder = func(_ []*example.Pod) []watch.Event { return nil } + } + if scenario.expectedInitialEventsInRandomOrder == nil { + scenario.expectedInitialEventsInRandomOrder = func(_ []*example.Pod) []watch.Event { return nil } + } + if scenario.expectedEventsAfterEstablishingWatch == nil 
{ + scenario.expectedEventsAfterEstablishingWatch = func(_ []*example.Pod) []watch.Event { return nil } + } + + var createdPods []*example.Pod + ns := fmt.Sprintf("ns-%v", idx) + for _, obj := range scenario.initialPods { + obj.Namespace = ns + out := &example.Pod{} + err := store.Create(ctx, computePodKey(obj), obj, out, 0) + require.NoError(t, err, "failed to add a pod: %v", obj) + createdPods = append(createdPods, out) + } + + if scenario.useCurrentRV { + currentStorageRV, err := storage.GetCurrentResourceVersionFromStorage(ctx, store, func() runtime.Object { return &example.PodList{} }, KeyFunc("", ""), "") + require.NoError(t, err) + scenario.resourceVersion = fmt.Sprintf("%d", currentStorageRV) + } + + opts := storage.ListOptions{Predicate: storage.Everything, Recursive: true} + opts.SendInitialEvents = scenario.sendInitialEvents + opts.Predicate.AllowWatchBookmarks = scenario.allowWatchBookmarks + if len(scenario.resourceVersion) > 0 { + opts.ResourceVersion = scenario.resourceVersion + } + + w, err := store.Watch(context.Background(), KeyFunc(ns, ""), opts) + require.NoError(t, err, "failed to create watch: %v") + defer w.Stop() + + // make sure we only get initial events + testCheckResultsInRandomOrder(t, w, scenario.expectedInitialEventsInRandomOrder(createdPods)) + testCheckResultsInStrictOrder(t, w, scenario.expectedInitialEventsInStrictOrder(createdPods)) + testCheckNoMoreResults(t, w) + + createdPods = []*example.Pod{} + // add a pod that is greater than the storage's RV when the watch was started + for _, obj := range scenario.podsAfterEstablishingWatch { + obj.Namespace = ns + out := &example.Pod{} + err = store.Create(ctx, computePodKey(obj), obj, out, 0) + require.NoError(t, err, "failed to add a pod: %v") + createdPods = append(createdPods, out) + } + testCheckResultsInStrictOrder(t, w, scenario.expectedEventsAfterEstablishingWatch(createdPods)) + testCheckNoMoreResults(t, w) + }) + } +} + +// RunWatchSemanticInitialEventsExtended checks if the bookmark event +// marking the end of the list stream contains the global RV. 
+// +// note that this scenario differs from the one in RunWatchSemantics +// by adding the pod to a different ns to advance the global RV +func RunWatchSemanticInitialEventsExtended(ctx context.Context, t *testing.T, store storage.Interface) { + trueVal := true + expectedInitialEventsInStrictOrder := func(firstPod, secondPod *example.Pod) []watch.Event { + return []watch.Event{ + {Type: watch.Added, Object: firstPod}, + {Type: watch.Bookmark, Object: &example.Pod{ + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: secondPod.ResourceVersion, + Annotations: map[string]string{"k8s.io/initial-events-end": "true"}, + }, + }}, + } + } + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.WatchList, true)() + + ns := "ns-foo" + pod := makePod("1") + pod.Namespace = ns + firstPod := &example.Pod{} + err := store.Create(ctx, computePodKey(pod), pod, firstPod, 0) + require.NoError(t, err, "failed to add a pod: %v") + + // add the pod to a different ns to advance the global RV + pod = makePod("2") + pod.Namespace = "other-ns-foo" + secondPod := &example.Pod{} + err = store.Create(ctx, computePodKey(pod), pod, secondPod, 0) + require.NoError(t, err, "failed to add a pod: %v") + + opts := storage.ListOptions{Predicate: storage.Everything, Recursive: true} + opts.SendInitialEvents = &trueVal + opts.Predicate.AllowWatchBookmarks = true + + w, err := store.Watch(context.Background(), KeyFunc(ns, ""), opts) + require.NoError(t, err, "failed to create watch: %v") + defer w.Stop() + + // make sure we only get initial events from the first ns + // followed by the bookmark with the global RV + testCheckResultsInStrictOrder(t, w, expectedInitialEventsInStrictOrder(firstPod, secondPod)) + testCheckNoMoreResults(t, w) +} + +func makePod(namePrefix string) *example.Pod { + return &example.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("pod-%s", namePrefix), + }, + } +} + +type testWatchStruct struct { + obj *example.Pod + expectEvent bool + watchType watch.EventType +} + +func createPodPredicate(field fields.Selector, namespaceScoped bool, indexField []string) storage.SelectionPredicate { + return storage.SelectionPredicate{ + Label: labels.Everything(), + Field: field, + GetAttrs: determinePodGetAttrFunc(namespaceScoped, indexField), + IndexFields: indexField, + } +} + +func determinePodGetAttrFunc(namespaceScoped bool, indexField []string) storage.AttrFunc { + if indexField != nil { + if namespaceScoped { + return namespacedScopedNodeNameAttrFunc + } + return clusterScopedNodeNameAttrFunc + } + if namespaceScoped { + return storage.DefaultNamespaceScopedAttr + } + return storage.DefaultClusterScopedAttr +} + +func namespacedScopedNodeNameAttrFunc(obj runtime.Object) (labels.Set, fields.Set, error) { + pod := obj.(*example.Pod) + return nil, fields.Set{ + "spec.nodeName": pod.Spec.NodeName, + "metadata.name": pod.ObjectMeta.Name, + "metadata.namespace": pod.ObjectMeta.Namespace, + }, nil +} + +func clusterScopedNodeNameAttrFunc(obj runtime.Object) (labels.Set, fields.Set, error) { + pod := obj.(*example.Pod) + return nil, fields.Set{ + "spec.nodeName": pod.Spec.NodeName, + "metadata.name": pod.ObjectMeta.Name, + }, nil +} + +func basePod(podName string) *example.Pod { + return baseNamespacedPod(podName, "") +} + +func basePodUpdated(podName string) *example.Pod { + return baseNamespacedPodUpdated(podName, "") +} + +func basePodAssigned(podName, nodeName string) *example.Pod { + return baseNamespacedPodAssigned(podName, "", nodeName) +} + +func 
baseNamespacedPod(podName, namespace string) *example.Pod { + return &example.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: namespace}, + } +} + +func baseNamespacedPodUpdated(podName, namespace string) *example.Pod { + return &example.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: namespace}, + Status: example.PodStatus{Phase: "Running"}, + } +} + +func baseNamespacedPodAssigned(podName, namespace, nodeName string) *example.Pod { + return &example.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: namespace}, + Spec: example.PodSpec{NodeName: nodeName}, + } +} diff --git a/pkg/build/go.mod b/pkg/build/go.mod index 531adb6618e..4542a341f4c 100644 --- a/pkg/build/go.mod +++ b/pkg/build/go.mod @@ -37,9 +37,9 @@ require ( github.com/urfave/cli v1.22.15 // @grafana/grafana-backend-group github.com/urfave/cli/v2 v2.27.1 // @grafana/grafana-backend-group go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 // indirect; @grafana/plugins-platform-backend - go.opentelemetry.io/otel v1.26.0 // indirect; @grafana/grafana-backend-group - go.opentelemetry.io/otel/sdk v1.26.0 // indirect; @grafana/grafana-backend-group - go.opentelemetry.io/otel/trace v1.26.0 // indirect; @grafana/grafana-backend-group + go.opentelemetry.io/otel v1.28.0 // indirect; @grafana/grafana-backend-group + go.opentelemetry.io/otel/sdk v1.28.0 // indirect; @grafana/grafana-backend-group + go.opentelemetry.io/otel/trace v1.28.0 // indirect; @grafana/grafana-backend-group golang.org/x/crypto v0.24.0 // indirect; @grafana/grafana-backend-group golang.org/x/mod v0.18.0 // @grafana/grafana-backend-group golang.org/x/net v0.26.0 // indirect; @grafana/oss-big-tent @grafana/partner-datasources @@ -50,7 +50,7 @@ require ( golang.org/x/tools v0.22.0 // indirect; @grafana/grafana-as-code google.golang.org/api v0.176.0 // @grafana/grafana-backend-group google.golang.org/grpc v1.64.0 // indirect; @grafana/plugins-platform-backend - google.golang.org/protobuf v1.34.1 // indirect; @grafana/plugins-platform-backend + google.golang.org/protobuf v1.34.2 // indirect; @grafana/plugins-platform-backend gopkg.in/yaml.v3 v3.0.1 // @grafana/alerting-backend ) @@ -74,7 +74,7 @@ require ( github.com/drone/runner-go v1.12.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect - github.com/go-logr/logr v1.4.1 // indirect; @grafana/grafana-app-platform-squad + github.com/go-logr/logr v1.4.2 // indirect; @grafana/grafana-app-platform-squad github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/go-querystring v1.1.0 // indirect @@ -87,13 +87,13 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect - go.opentelemetry.io/otel/metric v1.26.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect golang.org/x/sys v0.21.0 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect; @grafana/grafana-backend-group - google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect - google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240604185151-ef581f913117 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) @@ -112,12 +112,12 @@ require ( github.com/vektah/gqlparser/v2 v2.5.11 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.0.0-20240518090000-14441aefdf88 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.2.0-alpha // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 // indirect go.opentelemetry.io/otel/log v0.2.0-alpha // indirect go.opentelemetry.io/otel/sdk/log v0.2.0-alpha // indirect - go.opentelemetry.io/proto/otlp v1.2.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect gotest.tools/v3 v3.5.1 // indirect ) diff --git a/pkg/build/go.sum b/pkg/build/go.sum index 4e40f12ad02..46495e12664 100644 --- a/pkg/build/go.sum +++ b/pkg/build/go.sum @@ -89,8 +89,7 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/gogo/protobuf v0.0.0-20170307180453-100ba4e88506/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -218,31 +217,24 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 h1:A3SayB3rNyt+1S6qpI9mHPkeHTZbD7XILEqWnYZb2l0= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0/go.mod h1:27iA5uvhuRNmalO+iEUdVn5ZMj2qy10Mm+XRIpRmyuU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= -go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= -go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.0.0-20240518090000-14441aefdf88 h1:oM0GTNKGlc5qHctWeIGTVyda4iFFalOzMZ3Ehj5rwB4= 
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.0.0-20240518090000-14441aefdf88/go.mod h1:JGG8ebaMO5nXOPnvKEl+DiA4MGwFjCbjsxT1WHIEBPY= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.2.0-alpha h1:z2s6Zba+OUyayRv5m1AXWNUTGh57K1iMhy6emU5QT5Y= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.2.0-alpha/go.mod h1:paOXXyUgPW6jYxYkP0pB47H2zHE1fPvMJ4E4G9LHOi0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 h1:Waw9Wfpo/IXzOI8bCB7DIk+0JZcqqsyn1JFnAc+iam8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 h1:1wp/gyxsuYtuE/JFxsQRtcCDtMrO2qMvlfXALU5wkzI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0/go.mod h1:gbTHmghkGgqxMomVQQMur1Nba4M0MQ8AYThXDUjsJ38= go.opentelemetry.io/otel/log v0.2.0-alpha h1:ixOPvMzserpqA07SENHvRzkZOsnG0XbPr74hv1AQ+n0= go.opentelemetry.io/otel/log v0.2.0-alpha/go.mod h1:vbFZc65yq4c4ssvXY43y/nIqkNJLxORrqw0L85P59LA= -go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= -go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= -go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= -go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= go.opentelemetry.io/otel/sdk/log v0.2.0-alpha h1:jGTkL/jroJ31jnP6jDl34N/mDOfRGGYZHcHsCM+5kWA= go.opentelemetry.io/otel/sdk/log v0.2.0-alpha/go.mod h1:Hd8Lw9FPGUM3pfY7iGMRvFaC2Nyau4Ajb5WnQ9OdIho= -go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= -go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -327,10 +319,8 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU= 
-google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -348,8 +338,7 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/cmd/grafana-cli/commands/conflict_user_command.go b/pkg/cmd/grafana-cli/commands/conflict_user_command.go index 0aad8adfec7..9ad5fe87611 100644 --- a/pkg/cmd/grafana-cli/commands/conflict_user_command.go +++ b/pkg/cmd/grafana-cli/commands/conflict_user_command.go @@ -70,7 +70,7 @@ func initializeConflictResolver(cmd *utils.ContextCommandLine, f Formatter, ctx if err != nil { return nil, fmt.Errorf("%v: %w", "failed to load configuration", err) } - s, err := getSqlStore(cfg, tracer, features) + s, replstore, err := getSqlStore(cfg, tracer, features) if err != nil { return nil, fmt.Errorf("%v: %w", "failed to get to sql", err) } @@ -90,7 +90,7 @@ func initializeConflictResolver(cmd *utils.ContextCommandLine, f Formatter, ctx if err != nil { return nil, fmt.Errorf("%v: %w", "failed to initialize tracer service", err) } - acService, err := acimpl.ProvideService(cfg, s, routing, nil, nil, nil, features, tracer, zanzana.NewNoopClient()) + acService, err := acimpl.ProvideService(cfg, replstore, routing, nil, nil, nil, features, tracer, zanzana.NewNoopClient()) if err != nil { return nil, fmt.Errorf("%v: %w", "failed to get access control", err) } @@ -99,9 +99,15 @@ func initializeConflictResolver(cmd *utils.ContextCommandLine, f Formatter, ctx return &resolver, nil } -func getSqlStore(cfg *setting.Cfg, tracer tracing.Tracer, features featuremgmt.FeatureToggles) (*sqlstore.SQLStore, error) { +func getSqlStore(cfg *setting.Cfg, tracer tracing.Tracer, features featuremgmt.FeatureToggles) (*sqlstore.SQLStore, *sqlstore.ReplStore, error) { bus := bus.ProvideBus(tracer) - return sqlstore.ProvideService(cfg, features, &migrations.OSSMigrations{}, bus, 
tracer) + ss, err := sqlstore.ProvideService(cfg, features, &migrations.OSSMigrations{}, bus, tracer) + if err != nil { + return nil, nil, err + } + + replStore, err := sqlstore.ProvideServiceWithReadReplica(ss, cfg, features, &migrations.OSSMigrations{}, bus, tracer) + return ss, replStore, err } func runListConflictUsers() func(context *cli.Context) error { diff --git a/pkg/cmd/grafana-cli/commands/install_command.go b/pkg/cmd/grafana-cli/commands/install_command.go index c15bd99fd30..ed82430edbe 100644 --- a/pkg/cmd/grafana-cli/commands/install_command.go +++ b/pkg/cmd/grafana-cli/commands/install_command.go @@ -75,27 +75,57 @@ func installCommand(c utils.CommandLine) error { pluginID := c.Args().First() version := c.Args().Get(1) - err := installPlugin(context.Background(), pluginID, version, c) + err := installPlugin(context.Background(), pluginID, version, newInstallPluginOpts(c)) if err == nil { logRestartNotice() } return err } +type pluginInstallOpts struct { + insecure bool + repoURL string + pluginURL string + pluginDir string +} + +func newInstallPluginOpts(c utils.CommandLine) pluginInstallOpts { + return pluginInstallOpts{ + insecure: c.Bool("insecure"), + repoURL: c.PluginRepoURL(), + pluginURL: c.PluginURL(), + pluginDir: c.PluginDirectory(), + } +} + // installPlugin downloads the plugin code as a zip file from the Grafana.com API // and then extracts the zip into the plugin's directory. -func installPlugin(ctx context.Context, pluginID, version string, c utils.CommandLine) error { +func installPlugin(ctx context.Context, pluginID, version string, o pluginInstallOpts) error { + return doInstallPlugin(ctx, pluginID, version, o, map[string]bool{}) +} + +// doInstallPlugin is a recursive function that installs a plugin and its dependencies. +// installing is a map that keeps track of which plugins are currently being installed to avoid infinite loops. 
+func doInstallPlugin(ctx context.Context, pluginID, version string, o pluginInstallOpts, installing map[string]bool) error { + if installing[pluginID] { + return nil + } + installing[pluginID] = true + defer func() { + installing[pluginID] = false + }() + // If a version is specified, check if it is already installed if version != "" { - if services.PluginVersionInstalled(pluginID, version, c.PluginDirectory()) { + if services.PluginVersionInstalled(pluginID, version, o.pluginDir) { services.Logger.Successf("Plugin %s v%s already installed.", pluginID, version) return nil } } repository := repo.NewManager(repo.ManagerCfg{ - SkipTLSVerify: c.Bool("insecure"), - BaseURL: c.PluginRepoURL(), + SkipTLSVerify: o.insecure, + BaseURL: o.repoURL, Logger: services.Logger, }) @@ -103,7 +133,7 @@ func installPlugin(ctx context.Context, pluginID, version string, c utils.Comman var archive *repo.PluginArchive var err error - pluginZipURL := c.PluginURL() + pluginZipURL := o.pluginURL if pluginZipURL != "" { if archive, err = repository.GetPluginArchiveByURL(ctx, pluginZipURL, compatOpts); err != nil { return err @@ -114,23 +144,19 @@ func installPlugin(ctx context.Context, pluginID, version string, c utils.Comman } } - pluginFs := storage.FileSystem(services.Logger, c.PluginDirectory()) + pluginFs := storage.FileSystem(services.Logger, o.pluginDir) extractedArchive, err := pluginFs.Extract(ctx, pluginID, storage.SimpleDirNameGeneratorFunc, archive.File) if err != nil { return err } for _, dep := range extractedArchive.Dependencies { - services.Logger.Infof("Fetching %s dependency...", dep.ID) - d, err := repository.GetPluginArchive(ctx, dep.ID, dep.Version, compatOpts) - if err != nil { - return fmt.Errorf("%v: %w", fmt.Sprintf("failed to download plugin %s from repository", dep.ID), err) - } - - _, err = pluginFs.Extract(ctx, dep.ID, storage.SimpleDirNameGeneratorFunc, d.File) - if err != nil { - return err - } + services.Logger.Infof("Fetching %s dependency %s...", pluginID, dep.ID) + return doInstallPlugin(ctx, dep.ID, dep.Version, pluginInstallOpts{ + insecure: o.insecure, + repoURL: o.repoURL, + pluginDir: o.pluginDir, + }, installing) } return nil } diff --git a/pkg/cmd/grafana-cli/commands/upgrade_all_command.go b/pkg/cmd/grafana-cli/commands/upgrade_all_command.go index 74b3e65f038..87381dcd650 100644 --- a/pkg/cmd/grafana-cli/commands/upgrade_all_command.go +++ b/pkg/cmd/grafana-cli/commands/upgrade_all_command.go @@ -63,7 +63,7 @@ func upgradeAllCommand(c utils.CommandLine) error { return err } - err = installPlugin(ctx, p.JSONData.ID, "", c) + err = installPlugin(ctx, p.JSONData.ID, "", newInstallPluginOpts(c)) if err != nil { return err } diff --git a/pkg/cmd/grafana-cli/commands/upgrade_command.go b/pkg/cmd/grafana-cli/commands/upgrade_command.go index a0a3ba4500e..0832c5818b5 100644 --- a/pkg/cmd/grafana-cli/commands/upgrade_command.go +++ b/pkg/cmd/grafana-cli/commands/upgrade_command.go @@ -35,7 +35,7 @@ func upgradeCommand(c utils.CommandLine) error { return fmt.Errorf("failed to remove plugin '%s': %w", pluginID, err) } - err = installPlugin(ctx, pluginID, "", c) + err = installPlugin(ctx, pluginID, "", newInstallPluginOpts(c)) if err == nil { logRestartNotice() } diff --git a/pkg/cmd/grafana-server/commands/cli.go b/pkg/cmd/grafana-server/commands/cli.go index 313991db613..6e82a6362b4 100644 --- a/pkg/cmd/grafana-server/commands/cli.go +++ b/pkg/cmd/grafana-server/commands/cli.go @@ -77,7 +77,7 @@ func RunServer(opts ServerOptions) error { } }() - if err := setupProfiling(Profile, 
ProfileAddr, ProfilePort); err != nil { + if err := setupProfiling(Profile, ProfileAddr, ProfilePort, ProfileBlockRate, ProfileMutexFraction); err != nil { return err } if err := setupTracing(Tracing, TracingFile, logger); err != nil { diff --git a/pkg/cmd/grafana-server/commands/diagnostics.go b/pkg/cmd/grafana-server/commands/diagnostics.go index 06a992b45b8..9a4f33f29a7 100644 --- a/pkg/cmd/grafana-server/commands/diagnostics.go +++ b/pkg/cmd/grafana-server/commands/diagnostics.go @@ -12,24 +12,30 @@ import ( ) const ( - profilingEnabledEnvName = "GF_DIAGNOSTICS_PROFILING_ENABLED" - profilingAddrEnvName = "GF_DIAGNOSTICS_PROFILING_ADDR" - profilingPortEnvName = "GF_DIAGNOSTICS_PROFILING_PORT" - tracingEnabledEnvName = "GF_DIAGNOSTICS_TRACING_ENABLED" - tracingFileEnvName = "GF_DIAGNOSTICS_TRACING_FILE" + profilingEnabledEnvName = "GF_DIAGNOSTICS_PROFILING_ENABLED" + profilingAddrEnvName = "GF_DIAGNOSTICS_PROFILING_ADDR" + profilingPortEnvName = "GF_DIAGNOSTICS_PROFILING_PORT" + profilingBlockRateEnvName = "GF_DIAGNOSTICS_PROFILING_BLOCK_RATE" + profilingMutexRateEnvName = "GF_DIAGNOSTICS_PROFILING_MUTEX_RATE" + tracingEnabledEnvName = "GF_DIAGNOSTICS_TRACING_ENABLED" + tracingFileEnvName = "GF_DIAGNOSTICS_TRACING_FILE" ) type profilingDiagnostics struct { - enabled bool - addr string - port uint64 + enabled bool + addr string + port uint64 + blockRate int + mutexRate int } -func newProfilingDiagnostics(enabled bool, addr string, port uint64) *profilingDiagnostics { +func newProfilingDiagnostics(enabled bool, addr string, port uint64, blockRate int, mutexRate int) *profilingDiagnostics { return &profilingDiagnostics{ - enabled: enabled, - addr: addr, - port: port, + enabled: enabled, + addr: addr, + port: port, + blockRate: blockRate, + mutexRate: mutexRate, } } @@ -57,6 +63,24 @@ func (pd *profilingDiagnostics) overrideWithEnv() error { pd.port = port } + blockRateEnv := os.Getenv(profilingBlockRateEnvName) + if blockRateEnv != "" { + blockRate, err := strconv.Atoi(blockRateEnv) + if err != nil { + return fmt.Errorf("failed to parse %s environment variable as int", profilingBlockRateEnvName) + } + pd.blockRate = blockRate + } + + mutexFractionEnv := os.Getenv(profilingMutexRateEnvName) + if mutexFractionEnv != "" { + mutexProfileFraction, err := strconv.Atoi(mutexFractionEnv) + if err != nil { + return fmt.Errorf("failed to parse %s environment variable as int", profilingMutexRateEnvName) + } + pd.mutexRate = mutexProfileFraction + } + return nil } @@ -90,15 +114,17 @@ func (td *tracingDiagnostics) overrideWithEnv() error { return nil } -func setupProfiling(profile bool, profileAddr string, profilePort uint64) error { - profileDiagnostics := newProfilingDiagnostics(profile, profileAddr, profilePort) +func setupProfiling(profile bool, profileAddr string, profilePort uint64, blockRate int, mutexFraction int) error { + profileDiagnostics := newProfilingDiagnostics(profile, profileAddr, profilePort, blockRate, mutexFraction) if err := profileDiagnostics.overrideWithEnv(); err != nil { return err } if profileDiagnostics.enabled { - fmt.Println("diagnostics: pprof profiling enabled", "addr", profileDiagnostics.addr, "port", profileDiagnostics.port) - runtime.SetBlockProfileRate(1) + fmt.Println("diagnostics: pprof profiling enabled", "addr", profileDiagnostics.addr, "port", profileDiagnostics.port, "blockProfileRate", profileDiagnostics.blockRate, "mutexProfileRate", profileDiagnostics.mutexRate) + runtime.SetBlockProfileRate(profileDiagnostics.blockRate) + 
runtime.SetMutexProfileFraction(profileDiagnostics.mutexRate) + go func() { // TODO: We should enable the linter and fix G114 here. // G114: Use of net/http serve function that has no support for setting timeouts (gosec) diff --git a/pkg/cmd/grafana-server/commands/diagnostics_test.go b/pkg/cmd/grafana-server/commands/diagnostics_test.go index 0cd59f6a4e9..c4de07e34cc 100644 --- a/pkg/cmd/grafana-server/commands/diagnostics_test.go +++ b/pkg/cmd/grafana-server/commands/diagnostics_test.go @@ -9,17 +9,21 @@ import ( func TestProfilingDiagnostics(t *testing.T) { tcs := []struct { - defaults *profilingDiagnostics - enabledEnv string - addrEnv string - portEnv string - expected *profilingDiagnostics + defaults *profilingDiagnostics + enabledEnv string + addrEnv string + portEnv string + blockRateEnv string + mutexRateEnv string + expected *profilingDiagnostics }{ - {defaults: newProfilingDiagnostics(false, "localhost", 6060), enabledEnv: "", addrEnv: "", portEnv: "", expected: newProfilingDiagnostics(false, "localhost", 6060)}, - {defaults: newProfilingDiagnostics(true, "0.0.0.0", 8080), enabledEnv: "", addrEnv: "", portEnv: "", expected: newProfilingDiagnostics(true, "0.0.0.0", 8080)}, - {defaults: newProfilingDiagnostics(false, "", 6060), enabledEnv: "false", addrEnv: "", portEnv: "8080", expected: newProfilingDiagnostics(false, "", 8080)}, - {defaults: newProfilingDiagnostics(false, "localhost", 6060), enabledEnv: "true", addrEnv: "0.0.0.0", portEnv: "8080", expected: newProfilingDiagnostics(true, "0.0.0.0", 8080)}, - {defaults: newProfilingDiagnostics(false, "127.0.0.1", 6060), enabledEnv: "true", addrEnv: "", portEnv: "", expected: newProfilingDiagnostics(true, "127.0.0.1", 6060)}, + {defaults: newProfilingDiagnostics(false, "localhost", 6060, 0, 0), enabledEnv: "", addrEnv: "", portEnv: "", expected: newProfilingDiagnostics(false, "localhost", 6060, 0, 0)}, + {defaults: newProfilingDiagnostics(true, "0.0.0.0", 8080, 0, 0), enabledEnv: "", addrEnv: "", portEnv: "", expected: newProfilingDiagnostics(true, "0.0.0.0", 8080, 0, 0)}, + {defaults: newProfilingDiagnostics(false, "", 6060, 0, 0), enabledEnv: "false", addrEnv: "", portEnv: "8080", expected: newProfilingDiagnostics(false, "", 8080, 0, 0)}, + {defaults: newProfilingDiagnostics(false, "localhost", 6060, 0, 0), enabledEnv: "true", addrEnv: "0.0.0.0", portEnv: "8080", expected: newProfilingDiagnostics(true, "0.0.0.0", 8080, 0, 0)}, + {defaults: newProfilingDiagnostics(false, "127.0.0.1", 6060, 0, 0), enabledEnv: "true", addrEnv: "", portEnv: "", expected: newProfilingDiagnostics(true, "127.0.0.1", 6060, 0, 0)}, + {defaults: newProfilingDiagnostics(true, "localhost", 6060, 0, 0), enabledEnv: "", addrEnv: "", portEnv: "", blockRateEnv: "3", mutexRateEnv: "4", expected: newProfilingDiagnostics(true, "localhost", 6060, 3, 4)}, + {defaults: newProfilingDiagnostics(true, "localhost", 6060, 0, 0), enabledEnv: "", addrEnv: "", portEnv: "", expected: newProfilingDiagnostics(true, "localhost", 6060, 0, 0)}, } for i, tc := range tcs { @@ -33,6 +37,12 @@ func TestProfilingDiagnostics(t *testing.T) { if tc.portEnv != "" { t.Setenv(profilingPortEnvName, tc.portEnv) } + if tc.blockRateEnv != "" { + t.Setenv(profilingBlockRateEnvName, tc.blockRateEnv) + } + if tc.mutexRateEnv != "" { + t.Setenv(profilingMutexRateEnvName, tc.mutexRateEnv) + } err := tc.defaults.overrideWithEnv() assert.NoError(t, err) assert.Exactly(t, tc.expected, tc.defaults) diff --git a/pkg/cmd/grafana-server/commands/flags.go b/pkg/cmd/grafana-server/commands/flags.go index 
8749ce96ca0..b28b1d07787 100644 --- a/pkg/cmd/grafana-server/commands/flags.go +++ b/pkg/cmd/grafana-server/commands/flags.go @@ -1,21 +1,28 @@ package commands -import "github.com/urfave/cli/v2" +import ( + "runtime" + + "github.com/urfave/cli/v2" +) // flags for the grafana server command(s) var ( - ConfigFile string - HomePath string - PidFile string - Packaging string - ConfigOverrides string - Version bool - VerboseVersion bool - Profile bool - ProfileAddr string - ProfilePort uint64 - Tracing bool - TracingFile string + ConfigFile string + HomePath string + PidFile string + Packaging string + ConfigOverrides string + Version bool + VerboseVersion bool + Profile bool + ProfileAddr string + ProfilePort uint64 + ProfileBlockRate int + ProfileMutexFraction int + ProfileContention bool + Tracing bool + TracingFile string ) var commonFlags = []cli.Flag{ @@ -75,6 +82,18 @@ var commonFlags = []cli.Flag{ Usage: "Define custom port for profiling", Destination: &ProfilePort, }, + &cli.IntFlag{ + Name: "profile-block-rate", + Value: 1, + Usage: "Controls the fraction of goroutine blocking events that are reported in the blocking profile. The profiler aims to sample an average of one blocking event per rate nanoseconds spent blocked. To turn off profiling entirely, use 0", + Destination: &ProfileBlockRate, + }, + &cli.IntFlag{ + Name: "profile-mutex-rate", + Value: runtime.SetMutexProfileFraction(-1), + Usage: "Controls the fraction of mutex contention events that are reported in the mutex profile. On average 1/rate events are reported. To turn off mutex profiling entirely, use 0", + Destination: &ProfileMutexFraction, + }, &cli.BoolFlag{ Name: "tracing", Value: false, diff --git a/pkg/cmd/grafana-server/commands/target.go b/pkg/cmd/grafana-server/commands/target.go index 6293eb78bcd..25df8f70e04 100644 --- a/pkg/cmd/grafana-server/commands/target.go +++ b/pkg/cmd/grafana-server/commands/target.go @@ -54,7 +54,7 @@ func RunTargetServer(opts ServerOptions) error { } }() - if err := setupProfiling(Profile, ProfileAddr, ProfilePort); err != nil { + if err := setupProfiling(Profile, ProfileAddr, ProfilePort, ProfileBlockRate, ProfileMutexFraction); err != nil { return err } if err := setupTracing(Tracing, TracingFile, logger); err != nil { diff --git a/pkg/cmd/grafana/apiserver/cmd.go b/pkg/cmd/grafana/apiserver/cmd.go index 9f09a382b4b..585e00ebb5a 100644 --- a/pkg/cmd/grafana/apiserver/cmd.go +++ b/pkg/cmd/grafana/apiserver/cmd.go @@ -3,6 +3,7 @@ package apiserver import ( "context" "os" + "sync" "github.com/spf13/cobra" "go.opentelemetry.io/otel" @@ -73,15 +74,15 @@ func newCommandStartExampleAPIServer(o *APIServerOptions, stopCh <-chan struct{} return err } - config, err := o.Config() - if err != nil { - return err - } - if o.Options.TracingOptions.TracingService != nil { tracer.InitTracer(o.Options.TracingOptions.TracingService) } + config, err := o.Config(tracer) + if err != nil { + return err + } + defer o.factory.Shutdown() if err := o.RunAPIServer(config, stopCh); err != nil { @@ -116,16 +117,17 @@ type lateInitializedTracingProvider struct { } func (tp lateInitializedTracingProvider) Tracer(name string, options ...trace.TracerOption) trace.Tracer { - return tp.tracer + return tp.tracer.getTracer() } type lateInitializedTracingService struct { tracing.Tracer + mutex sync.RWMutex } func newLateInitializedTracingService() *lateInitializedTracingService { ts := &lateInitializedTracingService{ - Tracer: tracing.InitializeTracerForTest(), + Tracer: tracing.NewNoopTracerService(), } tp := 
&lateInitializedTracingProvider{ @@ -137,8 +139,17 @@ func newLateInitializedTracingService() *lateInitializedTracingService { return ts } -func (s *lateInitializedTracingService) InitTracer(tracer tracing.Tracer) { +func (s *lateInitializedTracingService) getTracer() tracing.Tracer { + s.mutex.RLock() + t := s.Tracer + s.mutex.RUnlock() + return t +} + +func (s *lateInitializedTracingService) InitTracer(tracer *tracing.TracingService) { + s.mutex.Lock() s.Tracer = tracer + s.mutex.Unlock() } var _ tracing.Tracer = &lateInitializedTracingService{} diff --git a/pkg/cmd/grafana/apiserver/server.go b/pkg/cmd/grafana/apiserver/server.go index 8578651f7c9..790c2fe2be8 100644 --- a/pkg/cmd/grafana/apiserver/server.go +++ b/pkg/cmd/grafana/apiserver/server.go @@ -76,7 +76,7 @@ func (o *APIServerOptions) loadAPIGroupBuilders(ctx context.Context, tracer trac return nil } -func (o *APIServerOptions) Config() (*genericapiserver.RecommendedConfig, error) { +func (o *APIServerOptions) Config(tracer tracing.Tracer) (*genericapiserver.RecommendedConfig, error) { if err := o.Options.RecommendedOptions.SecureServing.MaybeDefaultWithSelfSignedCerts( "localhost", o.AlternateDNS, []net.IP{netutils.ParseIPSloppy("127.0.0.1")}, ); err != nil { @@ -122,6 +122,7 @@ func (o *APIServerOptions) Config() (*genericapiserver.RecommendedConfig, error) setting.BuildVersion, setting.BuildCommit, setting.BuildBranch, + o.factory.GetOptionalMiddlewares(tracer)..., ) return serverConfig, err } diff --git a/pkg/infra/tracing/tracing.go b/pkg/infra/tracing/tracing.go index 29dbee5e9e6..624f892214c 100644 --- a/pkg/infra/tracing/tracing.go +++ b/pkg/infra/tracing/tracing.go @@ -99,38 +99,20 @@ func ProvideService(tracingCfg *TracingConfig) (*TracingService, error) { return ots, nil } +func NewNoopTracerService() *TracingService { + tp := &noopTracerProvider{TracerProvider: noop.NewTracerProvider()} + otel.SetTracerProvider(tp) + + cfg := NewEmptyTracingConfig() + ots := &TracingService{cfg: cfg, tracerProvider: tp} + _ = ots.initOpentelemetryTracer() + return ots +} + func (ots *TracingService) GetTracerProvider() tracerProvider { return ots.tracerProvider } -func TraceIDFromContext(ctx context.Context, requireSampled bool) string { - spanCtx := trace.SpanContextFromContext(ctx) - if !spanCtx.HasTraceID() || !spanCtx.IsValid() || (requireSampled && !spanCtx.IsSampled()) { - return "" - } - - return spanCtx.TraceID().String() -} - -// Error sets the status to error and record the error as an exception in the provided span. -func Error(span trace.Span, err error) error { - attr := []attribute.KeyValue{} - grafanaErr := errutil.Error{} - if errors.As(err, &grafanaErr) { - attr = append(attr, attribute.String("message_id", grafanaErr.MessageID)) - } - - span.SetStatus(codes.Error, err.Error()) - span.RecordError(err, trace.WithAttributes(attr...)) - return err -} - -// Errorf wraps fmt.Errorf and also sets the status to error and record the error as an exception in the provided span. -func Errorf(span trace.Span, format string, args ...any) error { - err := fmt.Errorf(format, args...) 
- return Error(span, err) -} - type noopTracerProvider struct { trace.TracerProvider } @@ -389,3 +371,31 @@ func (rl *rateLimiter) ShouldSample(p tracesdk.SamplingParameters) tracesdk.Samp } func (rl *rateLimiter) Description() string { return rl.description } + +func TraceIDFromContext(ctx context.Context, requireSampled bool) string { + spanCtx := trace.SpanContextFromContext(ctx) + if !spanCtx.HasTraceID() || !spanCtx.IsValid() || (requireSampled && !spanCtx.IsSampled()) { + return "" + } + + return spanCtx.TraceID().String() +} + +// Error sets the status to error and record the error as an exception in the provided span. +func Error(span trace.Span, err error) error { + attr := []attribute.KeyValue{} + grafanaErr := errutil.Error{} + if errors.As(err, &grafanaErr) { + attr = append(attr, attribute.String("message_id", grafanaErr.MessageID)) + } + + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err, trace.WithAttributes(attr...)) + return err +} + +// Errorf wraps fmt.Errorf and also sets the status to error and record the error as an exception in the provided span. +func Errorf(span trace.Span, format string, args ...any) error { + err := fmt.Errorf(format, args...) + return Error(span, err) +} diff --git a/pkg/infra/usagestats/service/usage_stats_test.go b/pkg/infra/usagestats/service/usage_stats_test.go index 32d62354caf..b85c9fd3eef 100644 --- a/pkg/infra/usagestats/service/usage_stats_test.go +++ b/pkg/infra/usagestats/service/usage_stats_test.go @@ -23,6 +23,7 @@ import ( "github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/infra/usagestats" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/supportbundles/supportbundlestest" "github.com/grafana/grafana/pkg/setting" @@ -247,7 +248,7 @@ func createService(t *testing.T, sqlStore db.DB, withDB bool) *UsageStats { kvstore.ProvideService(sqlStore), routing.NewRouteRegister(), tracing.InitializeTracerForTest(), - acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), + acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), supportbundlestest.NewFakeBundleService(), ) diff --git a/pkg/login/social/connectors/azuread_oauth.go b/pkg/login/social/connectors/azuread_oauth.go index cd91c95e922..484fb2fd3b8 100644 --- a/pkg/login/social/connectors/azuread_oauth.go +++ b/pkg/login/social/connectors/azuread_oauth.go @@ -189,13 +189,18 @@ func (s *SocialAzureAD) Reload(ctx context.Context, settings ssoModels.SSOSettin return nil } -func (s *SocialAzureAD) Validate(ctx context.Context, settings ssoModels.SSOSettings, _ ssoModels.SSOSettings, requester identity.Requester) error { - info, err := CreateOAuthInfoFromKeyValues(settings.Settings) +func (s *SocialAzureAD) Validate(ctx context.Context, newSettings ssoModels.SSOSettings, oldSettings ssoModels.SSOSettings, requester identity.Requester) error { + info, err := CreateOAuthInfoFromKeyValues(newSettings.Settings) if err != nil { return ssosettings.ErrInvalidSettings.Errorf("SSO settings map cannot be converted to OAuthInfo: %v", err) } - err = validateInfo(info, requester) + oldInfo, err := CreateOAuthInfoFromKeyValues(oldSettings.Settings) + if err != nil { + oldInfo = &social.OAuthInfo{} + } + + err = validateInfo(info, oldInfo, requester) if err != nil { return err } diff --git a/pkg/login/social/connectors/generic_oauth.go 
b/pkg/login/social/connectors/generic_oauth.go index e9d7d1513eb..84a8e00d3fd 100644 --- a/pkg/login/social/connectors/generic_oauth.go +++ b/pkg/login/social/connectors/generic_oauth.go @@ -75,13 +75,18 @@ func NewGenericOAuthProvider(info *social.OAuthInfo, cfg *setting.Cfg, orgRoleMa return provider } -func (s *SocialGenericOAuth) Validate(ctx context.Context, settings ssoModels.SSOSettings, _ ssoModels.SSOSettings, requester identity.Requester) error { - info, err := CreateOAuthInfoFromKeyValues(settings.Settings) +func (s *SocialGenericOAuth) Validate(ctx context.Context, newSettings ssoModels.SSOSettings, oldSettings ssoModels.SSOSettings, requester identity.Requester) error { + info, err := CreateOAuthInfoFromKeyValues(newSettings.Settings) if err != nil { return ssosettings.ErrInvalidSettings.Errorf("SSO settings map cannot be converted to OAuthInfo: %v", err) } - err = validateInfo(info, requester) + oldInfo, err := CreateOAuthInfoFromKeyValues(oldSettings.Settings) + if err != nil { + oldInfo = &social.OAuthInfo{} + } + + err = validateInfo(info, oldInfo, requester) if err != nil { return err } diff --git a/pkg/login/social/connectors/github_oauth.go b/pkg/login/social/connectors/github_oauth.go index 80e840e80e4..348e59f1ddd 100644 --- a/pkg/login/social/connectors/github_oauth.go +++ b/pkg/login/social/connectors/github_oauth.go @@ -82,13 +82,18 @@ func NewGitHubProvider(info *social.OAuthInfo, cfg *setting.Cfg, orgRoleMapper * return provider } -func (s *SocialGithub) Validate(ctx context.Context, settings ssoModels.SSOSettings, _ ssoModels.SSOSettings, requester identity.Requester) error { - info, err := CreateOAuthInfoFromKeyValues(settings.Settings) +func (s *SocialGithub) Validate(ctx context.Context, newSettings ssoModels.SSOSettings, oldSettings ssoModels.SSOSettings, requester identity.Requester) error { + info, err := CreateOAuthInfoFromKeyValues(newSettings.Settings) if err != nil { return ssosettings.ErrInvalidSettings.Errorf("SSO settings map cannot be converted to OAuthInfo: %v", err) } - err = validateInfo(info, requester) + oldInfo, err := CreateOAuthInfoFromKeyValues(oldSettings.Settings) + if err != nil { + oldInfo = &social.OAuthInfo{} + } + + err = validateInfo(info, oldInfo, requester) if err != nil { return err } diff --git a/pkg/login/social/connectors/gitlab_oauth.go b/pkg/login/social/connectors/gitlab_oauth.go index 357ab458611..d51544dd7c7 100644 --- a/pkg/login/social/connectors/gitlab_oauth.go +++ b/pkg/login/social/connectors/gitlab_oauth.go @@ -64,13 +64,18 @@ func NewGitLabProvider(info *social.OAuthInfo, cfg *setting.Cfg, orgRoleMapper * return provider } -func (s *SocialGitlab) Validate(ctx context.Context, settings ssoModels.SSOSettings, _ ssoModels.SSOSettings, requester identity.Requester) error { - info, err := CreateOAuthInfoFromKeyValues(settings.Settings) +func (s *SocialGitlab) Validate(ctx context.Context, newSettings ssoModels.SSOSettings, oldSettings ssoModels.SSOSettings, requester identity.Requester) error { + info, err := CreateOAuthInfoFromKeyValues(newSettings.Settings) if err != nil { return ssosettings.ErrInvalidSettings.Errorf("SSO settings map cannot be converted to OAuthInfo: %v", err) } - err = validateInfo(info, requester) + oldInfo, err := CreateOAuthInfoFromKeyValues(oldSettings.Settings) + if err != nil { + oldInfo = &social.OAuthInfo{} + } + + err = validateInfo(info, oldInfo, requester) if err != nil { return err } diff --git a/pkg/login/social/connectors/google_oauth.go b/pkg/login/social/connectors/google_oauth.go 
index ac369137ecf..3044548948b 100644 --- a/pkg/login/social/connectors/google_oauth.go +++ b/pkg/login/social/connectors/google_oauth.go @@ -65,13 +65,17 @@ func NewGoogleProvider(info *social.OAuthInfo, cfg *setting.Cfg, orgRoleMapper * return provider } -func (s *SocialGoogle) Validate(ctx context.Context, settings ssoModels.SSOSettings, _ ssoModels.SSOSettings, requester identity.Requester) error { - info, err := CreateOAuthInfoFromKeyValues(settings.Settings) +func (s *SocialGoogle) Validate(ctx context.Context, newSettings ssoModels.SSOSettings, oldSettings ssoModels.SSOSettings, requester identity.Requester) error { + info, err := CreateOAuthInfoFromKeyValues(newSettings.Settings) if err != nil { return ssosettings.ErrInvalidSettings.Errorf("SSO settings map cannot be converted to OAuthInfo: %v", err) } + oldInfo, err := CreateOAuthInfoFromKeyValues(oldSettings.Settings) + if err != nil { + oldInfo = &social.OAuthInfo{} + } - err = validateInfo(info, requester) + err = validateInfo(info, oldInfo, requester) if err != nil { return err } diff --git a/pkg/login/social/connectors/grafana_com_oauth.go b/pkg/login/social/connectors/grafana_com_oauth.go index b6d4b32d3fa..1ded4a18f7b 100644 --- a/pkg/login/social/connectors/grafana_com_oauth.go +++ b/pkg/login/social/connectors/grafana_com_oauth.go @@ -54,13 +54,18 @@ func NewGrafanaComProvider(info *social.OAuthInfo, cfg *setting.Cfg, orgRoleMapp return provider } -func (s *SocialGrafanaCom) Validate(ctx context.Context, settings ssoModels.SSOSettings, _ ssoModels.SSOSettings, requester identity.Requester) error { - info, err := CreateOAuthInfoFromKeyValues(settings.Settings) +func (s *SocialGrafanaCom) Validate(ctx context.Context, newSettings ssoModels.SSOSettings, oldSettings ssoModels.SSOSettings, requester identity.Requester) error { + info, err := CreateOAuthInfoFromKeyValues(newSettings.Settings) if err != nil { return ssosettings.ErrInvalidSettings.Errorf("SSO settings map cannot be converted to OAuthInfo: %v", err) } - err = validateInfo(info, requester) + oldInfo, err := CreateOAuthInfoFromKeyValues(oldSettings.Settings) + if err != nil { + oldInfo = &social.OAuthInfo{} + } + + err = validateInfo(info, oldInfo, requester) if err != nil { return err } diff --git a/pkg/login/social/connectors/okta_oauth.go b/pkg/login/social/connectors/okta_oauth.go index 4110ef5fec7..b126c2acd1d 100644 --- a/pkg/login/social/connectors/okta_oauth.go +++ b/pkg/login/social/connectors/okta_oauth.go @@ -61,13 +61,18 @@ func NewOktaProvider(info *social.OAuthInfo, cfg *setting.Cfg, orgRoleMapper *Or return provider } -func (s *SocialOkta) Validate(ctx context.Context, settings ssoModels.SSOSettings, _ ssoModels.SSOSettings, requester identity.Requester) error { - info, err := CreateOAuthInfoFromKeyValues(settings.Settings) +func (s *SocialOkta) Validate(ctx context.Context, newSettings ssoModels.SSOSettings, oldSettings ssoModels.SSOSettings, requester identity.Requester) error { + info, err := CreateOAuthInfoFromKeyValues(newSettings.Settings) if err != nil { return ssosettings.ErrInvalidSettings.Errorf("SSO settings map cannot be converted to OAuthInfo: %v", err) } - err = validateInfo(info, requester) + oldInfo, err := CreateOAuthInfoFromKeyValues(oldSettings.Settings) + if err != nil { + oldInfo = &social.OAuthInfo{} + } + + err = validateInfo(info, oldInfo, requester) if err != nil { return err } diff --git a/pkg/login/social/connectors/org_role_mapper.go b/pkg/login/social/connectors/org_role_mapper.go index bb3d7974cb7..af08ee7a514 100644 --- 
a/pkg/login/social/connectors/org_role_mapper.go +++ b/pkg/login/social/connectors/org_role_mapper.go @@ -3,6 +3,7 @@ package connectors import ( "context" "fmt" + "regexp" "strconv" "strings" @@ -11,7 +12,12 @@ import ( "github.com/grafana/grafana/pkg/setting" ) -const mapperMatchAllOrgID = -1 +const ( + mapperMatchAllOrgID = -1 + escapeStr = `\` +) + +var separatorRegexp = regexp.MustCompile(":") // OrgRoleMapper maps external orgs/groups to Grafana orgs and basic roles. type OrgRoleMapper struct { @@ -132,7 +138,7 @@ func (m *OrgRoleMapper) ParseOrgMappingSettings(ctx context.Context, mappings [] res := map[string]map[int64]org.RoleType{} for _, v := range mappings { - kv := strings.Split(v, ":") + kv := splitOrgMapping(v) if !isValidOrgMappingFormat(kv) { m.logger.Error("Skipping org mapping due to invalid format.", "mapping", fmt.Sprintf("%v", v)) if roleStrict { @@ -203,6 +209,29 @@ func (m *OrgRoleMapper) getAllOrgs() (map[int64]bool, error) { return allOrgIDs, nil } +func splitOrgMapping(mapping string) []string { + result := make([]string, 0, 3) + matches := separatorRegexp.FindAllStringIndex(mapping, -1) + from := 0 + + for _, match := range matches { + // match[0] is the start, match[1] is the end of the match + start, end := match[0], match[1] + // Check if the match is not preceded by the escape character (a backslash) + if start == 0 || mapping[start-1:start] != escapeStr { + result = append(result, strings.ReplaceAll(mapping[from:end-1], escapeStr, "")) + from = end + } + } + + result = append(result, mapping[from:]) + if len(result) > 3 { + return []string{} + } + + return result +} + func getRoleForInternalOrgMapping(kv []string) org.RoleType { if len(kv) > 2 && org.RoleType(kv[2]).IsValid() { return org.RoleType(kv[2]) diff --git a/pkg/login/social/connectors/org_role_mapper_test.go b/pkg/login/social/connectors/org_role_mapper_test.go index 75aebe596be..3c9760e5d4a 100644 --- a/pkg/login/social/connectors/org_role_mapper_test.go +++ b/pkg/login/social/connectors/org_role_mapper_test.go @@ -268,6 +268,29 @@ func TestOrgRoleMapper_ParseOrgMappingSettings(t *testing.T) { strictRoleMapping: false, }, }, + { + name: "should return correct mapping when the first part contains multiple colons", + rawMapping: []string{"Groups\\:IT\\:ops:1:Viewer"}, + roleStrict: false, + expected: &MappingConfiguration{ + orgMapping: map[string]map[int64]org.RoleType{"Groups:IT:ops": {1: org.RoleViewer}}, + strictRoleMapping: false, + }, + }, + { + name: "should return correct mapping when the org name contains multiple colons", + rawMapping: []string{`Group1:Org\:1:Viewer`}, + roleStrict: false, + setupMock: func(orgService *orgtest.MockService) { + orgService.On("GetByName", mock.Anything, mock.MatchedBy(func(query *org.GetOrgByNameQuery) bool { + return query.Name == "Org:1" + })).Return(&org.Org{ID: 1}, nil) + }, + expected: &MappingConfiguration{ + orgMapping: map[string]map[int64]org.RoleType{"Group1": {1: org.RoleViewer}}, + strictRoleMapping: false, + }, + }, { name: "should return empty mapping when org mapping is nil", rawMapping: nil, @@ -277,8 +300,8 @@ func TestOrgRoleMapper_ParseOrgMappingSettings(t *testing.T) { }, }, { - name: "should return empty mapping when the org mapping format is invalid and strict role mapping is enabled", - rawMapping: []string{"External:Org1:First:Organization:Editor"}, + name: "should return empty mapping when one of the org mappings is not in the correct format and strict role mapping is enabled", + rawMapping: []string{"Second:Group:1:SuperEditor", "Second:1:Viewer"}, 
roleStrict: true, expected: &MappingConfiguration{ orgMapping: map[string]map[int64]org.RoleType{}, @@ -286,11 +309,11 @@ func TestOrgRoleMapper_ParseOrgMappingSettings(t *testing.T) { }, }, { - name: "should return only the valid mappings from the raw mappings when strict role mapping is disabled", - rawMapping: []string{"External:Org1:First:Organization:Editor", "Second:1:Editor"}, + name: "should skip org mapping when one of the org mappings is not in the correct format and strict role mapping is disabled", + rawMapping: []string{"Second:Group:1:SuperEditor", "Second:1:Admin"}, roleStrict: false, expected: &MappingConfiguration{ - orgMapping: map[string]map[int64]org.RoleType{"Second": {1: org.RoleEditor}}, + orgMapping: map[string]map[int64]org.RoleType{"Second": {1: org.RoleAdmin}}, strictRoleMapping: false, }, }, diff --git a/pkg/login/social/connectors/social_base.go b/pkg/login/social/connectors/social_base.go index 92fe4b6d6e7..46410c985eb 100644 --- a/pkg/login/social/connectors/social_base.go +++ b/pkg/login/social/connectors/social_base.go @@ -259,9 +259,11 @@ func getRoleFromSearch(role string) (org.RoleType, bool) { return org.RoleType(cases.Title(language.Und).String(role)), false } -func validateInfo(info *social.OAuthInfo, requester identity.Requester) error { +func validateInfo(info *social.OAuthInfo, oldInfo *social.OAuthInfo, requester identity.Requester) error { return validation.Validate(info, requester, validation.RequiredValidator(info.ClientId, "Client Id"), - validation.AllowAssignGrafanaAdminValidator, - validation.SkipOrgRoleSyncAllowAssignGrafanaAdminValidator) + validation.AllowAssignGrafanaAdminValidator(info, oldInfo, requester), + validation.SkipOrgRoleSyncAllowAssignGrafanaAdminValidator, + validation.OrgAttributePathValidator(info, oldInfo, requester), + validation.OrgMappingValidator(info, oldInfo, requester)) } diff --git a/pkg/login/social/socialimpl/service_test.go b/pkg/login/social/socialimpl/service_test.go index 1984704104f..374fe6b6ba9 100644 --- a/pkg/login/social/socialimpl/service_test.go +++ b/pkg/login/social/socialimpl/service_test.go @@ -13,6 +13,7 @@ import ( "github.com/grafana/grafana/pkg/login/social" "github.com/grafana/grafana/pkg/login/social/connectors" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/licensing" secretsfake "github.com/grafana/grafana/pkg/services/secrets/fakes" @@ -67,7 +68,7 @@ func TestSocialService_ProvideService(t *testing.T) { cfg.Raw = iniFile secrets := secretsfake.NewMockService(t) - accessControl := acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + accessControl := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) sqlStore := db.InitTestDB(t) ssoSettingsSvc := ssosettingsimpl.ProvideService( @@ -179,7 +180,7 @@ func TestSocialService_ProvideService_GrafanaComGrafanaNet(t *testing.T) { cfg := setting.NewCfg() secrets := secretsfake.NewMockService(t) - accessControl := acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + accessControl := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) sqlStore := db.InitTestDB(t) ssoSettingsSvc := ssosettingsimpl.ProvideService( diff --git a/pkg/middleware/auth_test.go b/pkg/middleware/auth_test.go index c5fc39ae508..d52c95b9905 100644 --- a/pkg/middleware/auth_test.go +++ b/pkg/middleware/auth_test.go @@ -26,7 +26,7 @@ import ( ) 
func setupAuthMiddlewareTest(t *testing.T, identity *authn.Identity, authErr error) *contexthandler.ContextHandler { - return contexthandler.ProvideService(setting.NewCfg(), tracing.InitializeTracerForTest(), featuremgmt.WithFeatures(), &authntest.FakeService{ + return contexthandler.ProvideService(setting.NewCfg(), tracing.InitializeTracerForTest(), &authntest.FakeService{ ExpectedErr: authErr, ExpectedIdentity: identity, }) diff --git a/pkg/middleware/middleware_test.go b/pkg/middleware/middleware_test.go index 722c9f09335..06adf52a7a3 100644 --- a/pkg/middleware/middleware_test.go +++ b/pkg/middleware/middleware_test.go @@ -19,7 +19,6 @@ import ( "github.com/grafana/grafana/pkg/services/authn/authntest" "github.com/grafana/grafana/pkg/services/contexthandler" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" - "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/navtree" "github.com/grafana/grafana/pkg/services/user/usertest" "github.com/grafana/grafana/pkg/setting" @@ -273,5 +272,5 @@ func getContextHandler(t *testing.T, cfg *setting.Cfg, authnService authn.Servic t.Helper() tracer := tracing.InitializeTracerForTest() - return contexthandler.ProvideService(cfg, tracer, featuremgmt.WithFeatures(), authnService) + return contexthandler.ProvideService(cfg, tracer, authnService) } diff --git a/pkg/middleware/request_tracing.go b/pkg/middleware/request_tracing.go index d97db94c908..2985fe0ad60 100644 --- a/pkg/middleware/request_tracing.go +++ b/pkg/middleware/request_tracing.go @@ -87,10 +87,6 @@ func RequestTracing(tracer tracing.Tracer) web.Middleware { // generic span name for requests where there's no route operation name spanName := fmt.Sprintf("HTTP %s ", req.Method) - // TODO: do not depend on web.Context from the future - if routeOperation, exists := RouteOperationName(web.FromContext(req.Context()).Req); exists { - spanName = fmt.Sprintf("HTTP %s %s", req.Method, routeOperation) - } ctx, span := tracer.Start(ctx, spanName, trace.WithAttributes( semconv.HTTPURLKey.String(req.RequestURI), @@ -105,6 +101,13 @@ func RequestTracing(tracer tracing.Tracer) web.Middleware { next.ServeHTTP(rw, req) + // Reset the span name after the request has been processed, as + // the route operation may have been injected by middleware. + // TODO: do not depend on web.Context from the future + if routeOperation, exists := RouteOperationName(web.FromContext(req.Context()).Req); exists { + span.SetName(fmt.Sprintf("HTTP %s %s", req.Method, routeOperation)) + } + status := rw.Status() span.SetAttributes(semconv.HTTPStatusCode(status)) diff --git a/pkg/plugins/backendplugin/grpcplugin/client_proto.go b/pkg/plugins/backendplugin/grpcplugin/client_proto.go index e208e674fbe..9384292ec06 100644 --- a/pkg/plugins/backendplugin/grpcplugin/client_proto.go +++ b/pkg/plugins/backendplugin/grpcplugin/client_proto.go @@ -25,6 +25,7 @@ type ProtoClient interface { pluginv2.ResourceClient pluginv2.DiagnosticsClient pluginv2.StreamClient + pluginv2.AdmissionControlClient PID(context.Context) (string, error) PluginID() string @@ -184,3 +185,27 @@ func (r *protoClient) PublishStream(ctx context.Context, in *pluginv2.PublishStr } return c.StreamClient.PublishStream(ctx, in, opts...) 
} + +func (r *protoClient) ValidateAdmission(ctx context.Context, in *pluginv2.AdmissionRequest, opts ...grpc.CallOption) (*pluginv2.ValidationResponse, error) { + c, exists := r.client(ctx) + if !exists { + return nil, errClientNotStarted + } + return c.AdmissionClient.ValidateAdmission(ctx, in, opts...) +} + +func (r *protoClient) MutateAdmission(ctx context.Context, in *pluginv2.AdmissionRequest, opts ...grpc.CallOption) (*pluginv2.MutationResponse, error) { + c, exists := r.client(ctx) + if !exists { + return nil, errClientNotStarted + } + return c.AdmissionClient.MutateAdmission(ctx, in, opts...) +} + +func (r *protoClient) ConvertObject(ctx context.Context, in *pluginv2.ConversionRequest, opts ...grpc.CallOption) (*pluginv2.ConversionResponse, error) { + c, exists := r.client(ctx) + if !exists { + return nil, errClientNotStarted + } + return c.AdmissionClient.ConvertObject(ctx, in, opts...) +} diff --git a/pkg/plugins/instrumentationutils/request_status.go b/pkg/plugins/instrumentationutils/request_status.go new file mode 100644 index 00000000000..d8da79cb71a --- /dev/null +++ b/pkg/plugins/instrumentationutils/request_status.go @@ -0,0 +1,104 @@ +package instrumentationutils + +import ( + "context" + "errors" + "strings" + + grpccodes "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" + + "github.com/grafana/grafana-plugin-sdk-go/backend" + "github.com/grafana/grafana-plugin-sdk-go/genproto/pluginv2" +) + +type RequestStatus int + +const ( + RequestStatusOK RequestStatus = iota + RequestStatusCancelled + RequestStatusError +) + +func (status RequestStatus) String() string { + names := [...]string{"ok", "cancelled", "error"} + if status < RequestStatusOK || status > RequestStatusError { + return "" + } + + return names[status] +} + +func RequestStatusFromError(err error) RequestStatus { + status := RequestStatusOK + if err != nil { + status = RequestStatusError + if errors.Is(err, context.Canceled) || grpcstatus.Code(err) == grpccodes.Canceled { + status = RequestStatusCancelled + } + } + + return status +} + +func RequestStatusFromErrorString(errString string) RequestStatus { + status := RequestStatusOK + if errString != "" { + status = RequestStatusError + if strings.Contains(errString, context.Canceled.Error()) || strings.Contains(errString, "code = Canceled") { + status = RequestStatusCancelled + } + } + + return status +} + +func RequestStatusFromQueryDataResponse(res *backend.QueryDataResponse, err error) RequestStatus { + if err != nil { + return RequestStatusFromError(err) + } + + status := RequestStatusOK + + if res != nil { + for _, dr := range res.Responses { + if dr.Error != nil { + s := RequestStatusFromError(dr.Error) + if s > status { + status = s + } + + if status == RequestStatusError { + break + } + } + } + } + + return status +} + +func RequestStatusFromProtoQueryDataResponse(res *pluginv2.QueryDataResponse, err error) RequestStatus { + if err != nil { + return RequestStatusFromError(err) + } + + status := RequestStatusOK + + if res != nil { + for _, dr := range res.Responses { + if dr.Error != "" { + s := RequestStatusFromErrorString(dr.Error) + if s > status { + status = s + } + + if status == RequestStatusError { + break + } + } + } + } + + return status +} diff --git a/pkg/services/pluginsintegration/clientmiddleware/utils_test.go b/pkg/plugins/instrumentationutils/request_status_test.go similarity index 59% rename from pkg/services/pluginsintegration/clientmiddleware/utils_test.go rename to 
pkg/plugins/instrumentationutils/request_status_test.go index 818a52b4ca5..c21ea9da9b2 100644 --- a/pkg/services/pluginsintegration/clientmiddleware/utils_test.go +++ b/pkg/plugins/instrumentationutils/request_status_test.go @@ -1,4 +1,4 @@ -package clientmiddleware +package instrumentationutils import ( "context" @@ -9,23 +9,25 @@ import ( "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/grafana-plugin-sdk-go/data" "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) func TestRequestStatus(t *testing.T) { tcs := []struct { - s requestStatus + s RequestStatus expectedLabel string }{ { - s: requestStatusOK, + s: RequestStatusOK, expectedLabel: "ok", }, { - s: requestStatusError, + s: RequestStatusError, expectedLabel: "error", }, { - s: requestStatusCancelled, + s: RequestStatusCancelled, expectedLabel: "cancelled", }, } @@ -42,28 +44,33 @@ func TestRequestStatusFromError(t *testing.T) { tcs := []struct { desc string err error - expectedStatus requestStatus + expectedStatus RequestStatus }{ { desc: "no error should be status ok", err: nil, - expectedStatus: requestStatusOK, + expectedStatus: RequestStatusOK, }, { desc: "error should be status error", err: errors.New("boom"), - expectedStatus: requestStatusError, + expectedStatus: RequestStatusError, }, { desc: "context canceled should be status cancelled", err: context.Canceled, - expectedStatus: requestStatusCancelled, + expectedStatus: RequestStatusCancelled, + }, + { + desc: "gRPC canceled should be status cancelled", + err: status.Error(codes.Canceled, "canceled"), + expectedStatus: RequestStatusCancelled, }, } for _, tc := range tcs { t.Run(tc.desc, func(t *testing.T) { - status := requestStatusFromError(tc.err) + status := RequestStatusFromError(tc.err) require.Equal(t, tc.expectedStatus, status) }) } @@ -94,43 +101,79 @@ func TestRequestStatusFromQueryDataResponse(t *testing.T) { desc string resp *backend.QueryDataResponse err error - expectedStatus requestStatus + expectedStatus RequestStatus }{ { desc: "no error should be status ok", err: nil, - expectedStatus: requestStatusOK, + expectedStatus: RequestStatusOK, }, { desc: "error should be status error", err: errors.New("boom"), - expectedStatus: requestStatusError, + expectedStatus: RequestStatusError, }, { desc: "context canceled should be status cancelled", err: context.Canceled, - expectedStatus: requestStatusCancelled, + expectedStatus: RequestStatusCancelled, }, { desc: "response without error should be status ok", resp: responseWithoutError, - expectedStatus: requestStatusOK, + expectedStatus: RequestStatusOK, }, { desc: "response with error should be status error", resp: responseWithError, - expectedStatus: requestStatusError, + expectedStatus: RequestStatusError, }, { desc: "response with multiple error should pick the highest status cancelled", resp: responseWithMultipleErrors, - expectedStatus: requestStatusError, + expectedStatus: RequestStatusError, }, } for _, tc := range tcs { t.Run(tc.desc, func(t *testing.T) { - status := requestStatusFromQueryDataResponse(tc.resp, tc.err) + status := RequestStatusFromQueryDataResponse(tc.resp, tc.err) + require.Equal(t, tc.expectedStatus, status) + }) + } +} + +func TestRequestStatusFromErrorString(t *testing.T) { + tcs := []struct { + desc string + err string + expectedStatus RequestStatus + }{ + { + desc: "no error should be status ok", + err: "", + expectedStatus: RequestStatusOK, + }, + { + desc: "error should be status error", + err: 
errors.New("boom").Error(), + expectedStatus: RequestStatusError, + }, + { + desc: "context canceled should be status cancelled", + err: context.Canceled.Error(), + expectedStatus: RequestStatusCancelled, + }, + { + desc: "gRPC canceled should be status cancelled", + err: status.Error(codes.Canceled, "canceled").Error(), + expectedStatus: RequestStatusCancelled, + }, + } + + for _, tc := range tcs { + t.Run(tc.desc, func(t *testing.T) { + status := RequestStatusFromErrorString(tc.err) require.Equal(t, tc.expectedStatus, status) }) } diff --git a/pkg/plugins/manager/client/client.go b/pkg/plugins/manager/client/client.go index 75504972c2a..c10ed8a6e28 100644 --- a/pkg/plugins/manager/client/client.go +++ b/pkg/plugins/manager/client/client.go @@ -94,7 +94,7 @@ func (s *Service) CallResource(ctx context.Context, req *backend.CallResourceReq removeNonAllowedHeaders(req.Headers) processedStreams := 0 - wrappedSender := callResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { + wrappedSender := backend.CallResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { // Expected that headers and status are only part of first stream if processedStreams == 0 && res != nil { if len(res.Headers) > 0 { @@ -354,9 +354,3 @@ func ensureContentTypeHeader(res *backend.CallResourceResponse) { res.Headers[contentTypeHeaderName] = []string{defaultContentType} } } - -type callResourceResponseSenderFunc func(res *backend.CallResourceResponse) error - -func (fn callResourceResponseSenderFunc) Send(res *backend.CallResourceResponse) error { - return fn(res) -} diff --git a/pkg/plugins/manager/client/client_test.go b/pkg/plugins/manager/client/client_test.go index c6649b2f9dd..20401d34250 100644 --- a/pkg/plugins/manager/client/client_test.go +++ b/pkg/plugins/manager/client/client_test.go @@ -169,7 +169,7 @@ func TestCallResource(t *testing.T) { } responses := []*backend.CallResourceResponse{} - sender := callResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { + sender := backend.CallResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { responses = append(responses, res) return nil }) @@ -232,7 +232,7 @@ func TestCallResource(t *testing.T) { } responses := []*backend.CallResourceResponse{} - sender := callResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { + sender := backend.CallResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { responses = append(responses, res) return nil }) @@ -280,7 +280,7 @@ func TestCallResource(t *testing.T) { } responses := []*backend.CallResourceResponse{} - sender := callResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { + sender := backend.CallResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { responses = append(responses, res) return nil }) @@ -348,7 +348,7 @@ func TestCallResource(t *testing.T) { } responses := []*backend.CallResourceResponse{} - sender := callResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { + sender := backend.CallResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { responses = append(responses, res) return nil }) diff --git a/pkg/plugins/manager/client/decorator.go b/pkg/plugins/manager/client/decorator.go index e44ad56a02c..5f4f7642e11 100644 --- a/pkg/plugins/manager/client/decorator.go +++ b/pkg/plugins/manager/client/decorator.go @@ -35,6 +35,9 @@ func (d *Decorator) QueryData(ctx context.Context, req *backend.QueryDataRequest if req == nil 
{ return nil, errNilRequest } + ctx = backend.WithEndpoint(ctx, backend.EndpointQueryData) + ctx = backend.WithPluginContext(ctx, req.PluginContext) + ctx = backend.WithUser(ctx, req.PluginContext.User) client := clientFromMiddlewares(d.middlewares, d.client) @@ -46,6 +49,10 @@ func (d *Decorator) CallResource(ctx context.Context, req *backend.CallResourceR return errNilRequest } + ctx = backend.WithEndpoint(ctx, backend.EndpointCallResource) + ctx = backend.WithPluginContext(ctx, req.PluginContext) + ctx = backend.WithUser(ctx, req.PluginContext.User) + if sender == nil { return errors.New("sender cannot be nil") } @@ -59,6 +66,10 @@ func (d *Decorator) CollectMetrics(ctx context.Context, req *backend.CollectMetr return nil, errNilRequest } + ctx = backend.WithEndpoint(ctx, backend.EndpointCollectMetrics) + ctx = backend.WithPluginContext(ctx, req.PluginContext) + ctx = backend.WithUser(ctx, req.PluginContext.User) + client := clientFromMiddlewares(d.middlewares, d.client) return client.CollectMetrics(ctx, req) } @@ -68,6 +79,10 @@ func (d *Decorator) CheckHealth(ctx context.Context, req *backend.CheckHealthReq return nil, errNilRequest } + ctx = backend.WithEndpoint(ctx, backend.EndpointCheckHealth) + ctx = backend.WithPluginContext(ctx, req.PluginContext) + ctx = backend.WithUser(ctx, req.PluginContext.User) + client := clientFromMiddlewares(d.middlewares, d.client) return client.CheckHealth(ctx, req) } @@ -77,6 +92,10 @@ func (d *Decorator) SubscribeStream(ctx context.Context, req *backend.SubscribeS return nil, errNilRequest } + ctx = backend.WithEndpoint(ctx, backend.EndpointSubscribeStream) + ctx = backend.WithPluginContext(ctx, req.PluginContext) + ctx = backend.WithUser(ctx, req.PluginContext.User) + client := clientFromMiddlewares(d.middlewares, d.client) return client.SubscribeStream(ctx, req) } @@ -86,6 +105,10 @@ func (d *Decorator) PublishStream(ctx context.Context, req *backend.PublishStrea return nil, errNilRequest } + ctx = backend.WithEndpoint(ctx, backend.EndpointPublishStream) + ctx = backend.WithPluginContext(ctx, req.PluginContext) + ctx = backend.WithUser(ctx, req.PluginContext.User) + client := clientFromMiddlewares(d.middlewares, d.client) return client.PublishStream(ctx, req) } @@ -95,6 +118,10 @@ func (d *Decorator) RunStream(ctx context.Context, req *backend.RunStreamRequest return errNilRequest } + ctx = backend.WithEndpoint(ctx, backend.EndpointRunStream) + ctx = backend.WithPluginContext(ctx, req.PluginContext) + ctx = backend.WithUser(ctx, req.PluginContext.User) + if sender == nil { return errors.New("sender cannot be nil") } @@ -108,6 +135,10 @@ func (d *Decorator) ValidateAdmission(ctx context.Context, req *backend.Admissio return nil, errNilRequest } + ctx = backend.WithEndpoint(ctx, backend.EndpointValidateAdmission) + ctx = backend.WithPluginContext(ctx, req.PluginContext) + ctx = backend.WithUser(ctx, req.PluginContext.User) + client := clientFromMiddlewares(d.middlewares, d.client) return client.ValidateAdmission(ctx, req) } @@ -117,6 +148,10 @@ func (d *Decorator) MutateAdmission(ctx context.Context, req *backend.AdmissionR return nil, errNilRequest } + ctx = backend.WithEndpoint(ctx, backend.EndpointMutateAdmission) + ctx = backend.WithPluginContext(ctx, req.PluginContext) + ctx = backend.WithUser(ctx, req.PluginContext.User) + client := clientFromMiddlewares(d.middlewares, d.client) return client.MutateAdmission(ctx, req) } @@ -126,6 +161,10 @@ func (d *Decorator) ConvertObject(ctx context.Context, req *backend.ConversionRe return nil, 
errNilRequest } + ctx = backend.WithEndpoint(ctx, backend.EndpointConvertObject) + ctx = backend.WithPluginContext(ctx, req.PluginContext) + ctx = backend.WithUser(ctx, req.PluginContext.User) + client := clientFromMiddlewares(d.middlewares, d.client) return client.ConvertObject(ctx, req) } diff --git a/pkg/plugins/manager/client/decorator_test.go b/pkg/plugins/manager/client/decorator_test.go index 91c472779a4..03a5e4f3c92 100644 --- a/pkg/plugins/manager/client/decorator_test.go +++ b/pkg/plugins/manager/client/decorator_test.go @@ -43,7 +43,7 @@ func TestDecorator(t *testing.T) { _, _ = d.QueryData(context.Background(), &backend.QueryDataRequest{}) require.True(t, queryDataCalled) - sender := callResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { + sender := backend.CallResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { return nil }) diff --git a/pkg/plugins/manager/installer.go b/pkg/plugins/manager/installer.go index 02aafee6733..b0be555af16 100644 --- a/pkg/plugins/manager/installer.go +++ b/pkg/plugins/manager/installer.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "sync" "github.com/grafana/grafana/pkg/plugins" "github.com/grafana/grafana/pkg/plugins/auth" @@ -24,6 +25,7 @@ type PluginInstaller struct { pluginStorageDirFunc storage.DirNameGeneratorFunc pluginRegistry registry.Service pluginLoader loader.Service + installing sync.Map log log.Logger serviceRegistry auth.ExternalServiceRegistry } @@ -43,6 +45,7 @@ func New(pluginRegistry registry.Service, pluginLoader loader.Service, pluginRep pluginRepo: pluginRepo, pluginStorage: pluginStorage, pluginStorageDirFunc: pluginStorageDirFunc, + installing: sync.Map{}, log: log.New("plugin.installer"), serviceRegistry: serviceRegistry, } @@ -54,14 +57,46 @@ func (m *PluginInstaller) Add(ctx context.Context, pluginID, version string, opt return err } + if ok, _ := m.installing.Load(pluginID); ok != nil { + return nil + } + m.installing.Store(pluginID, true) + defer func() { + m.installing.Delete(pluginID) + }() + + archive, err := m.install(ctx, pluginID, version, compatOpts) + if err != nil { + return err + } + + for _, dep := range archive.Dependencies { + m.log.Info(fmt.Sprintf("Fetching %s dependency %s...", pluginID, dep.ID)) + + err = m.Add(ctx, dep.ID, dep.Version, opts) + if err != nil { + return fmt.Errorf("%v: %w", fmt.Sprintf("failed to download plugin %s from repository", dep.ID), err) + } + } + + _, err = m.pluginLoader.Load(ctx, sources.NewLocalSource(plugins.ClassExternal, []string{archive.Path})) + if err != nil { + m.log.Error("Could not load plugins", "path", archive.Path, "error", err) + return err + } + + return nil +} + +func (m *PluginInstaller) install(ctx context.Context, pluginID, version string, compatOpts repo.CompatOpts) (*storage.ExtractedPluginArchive, error) { var pluginArchive *repo.PluginArchive if plugin, exists := m.plugin(ctx, pluginID, version); exists { if plugin.IsCorePlugin() || plugin.IsBundledPlugin() { - return plugins.ErrInstallCorePlugin + return nil, plugins.ErrInstallCorePlugin } if plugin.Info.Version == version { - return plugins.DuplicateError{ + return nil, plugins.DuplicateError{ PluginID: plugin.ID, } } @@ -69,74 +104,51 @@ func (m *PluginInstaller) Add(ctx context.Context, pluginID, version string, opt // get plugin update information to confirm if target update is possible pluginArchiveInfo, err := m.pluginRepo.GetPluginArchiveInfo(ctx, pluginID, version, compatOpts) if err != nil { - return err + return nil, err } // if existing 
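// Editor's note: a stripped-down sketch of the guard introduced in PluginInstaller.Add
// above. An in-flight entry in the sync.Map makes a re-entrant Add for the same plugin
// ID a no-op, which is what breaks dependency cycles, while recursion installs
// dependencies before their dependents. The deps map and the install bookkeeping here
// are hypothetical fixtures, not the real repo/storage/loader services.
package main

import (
	"fmt"
	"sync"
)

var deps = map[string][]string{
	"foo-app":        {"foo-datasource"},
	"foo-datasource": {"foo-panel"},
	"foo-panel":      {"foo-datasource"}, // deliberate cycle, as in the livelock test
}

type installer struct{ installing sync.Map }

func (i *installer) add(id string, order *[]string) {
	if _, busy := i.installing.Load(id); busy {
		return // already being installed further up the call stack
	}
	i.installing.Store(id, true)
	defer i.installing.Delete(id)

	for _, dep := range deps[id] {
		i.add(dep, order) // dependencies are installed (and later loaded) first
	}
	*order = append(*order, id)
}

func main() {
	var order []string
	(&installer{}).add("foo-app", &order)
	fmt.Println(order) // [foo-panel foo-datasource foo-app]
}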
plugin version is the same as the target update version if pluginArchiveInfo.Version == plugin.Info.Version { - return plugins.DuplicateError{ + return nil, plugins.DuplicateError{ PluginID: plugin.ID, } } if pluginArchiveInfo.URL == "" && pluginArchiveInfo.Version == "" { - return fmt.Errorf("could not determine update options for %s", pluginID) + return nil, fmt.Errorf("could not determine update options for %s", pluginID) } // remove existing installation of plugin err = m.Remove(ctx, plugin.ID, plugin.Info.Version) if err != nil { - return err + return nil, err } if pluginArchiveInfo.URL != "" { pluginArchive, err = m.pluginRepo.GetPluginArchiveByURL(ctx, pluginArchiveInfo.URL, compatOpts) if err != nil { - return err + return nil, err } } else { pluginArchive, err = m.pluginRepo.GetPluginArchive(ctx, pluginID, pluginArchiveInfo.Version, compatOpts) if err != nil { - return err + return nil, err } } } else { var err error pluginArchive, err = m.pluginRepo.GetPluginArchive(ctx, pluginID, version, compatOpts) if err != nil { - return err + return nil, err } } extractedArchive, err := m.pluginStorage.Extract(ctx, pluginID, m.pluginStorageDirFunc, pluginArchive.File) if err != nil { - return err + return nil, err } - // download dependency plugins - pathsToScan := []string{extractedArchive.Path} - for _, dep := range extractedArchive.Dependencies { - m.log.Info(fmt.Sprintf("Fetching %s dependencies...", dep.ID)) - d, err := m.pluginRepo.GetPluginArchive(ctx, dep.ID, dep.Version, compatOpts) - if err != nil { - return fmt.Errorf("%v: %w", fmt.Sprintf("failed to download plugin %s from repository", dep.ID), err) - } - - depArchive, err := m.pluginStorage.Extract(ctx, dep.ID, m.pluginStorageDirFunc, d.File) - if err != nil { - return err - } - - pathsToScan = append(pathsToScan, depArchive.Path) - } - - _, err = m.pluginLoader.Load(ctx, sources.NewLocalSource(plugins.ClassExternal, pathsToScan)) - if err != nil { - m.log.Error("Could not load plugins", "paths", pathsToScan, "error", err) - return err - } - - return nil + return extractedArchive, nil } func (m *PluginInstaller) Remove(ctx context.Context, pluginID, version string) error { diff --git a/pkg/plugins/manager/installer_test.go b/pkg/plugins/manager/installer_test.go index 7bfadece3af..f90c3ba11c3 100644 --- a/pkg/plugins/manager/installer_test.go +++ b/pkg/plugins/manager/installer_test.go @@ -182,6 +182,103 @@ func TestPluginManager_Add_Remove(t *testing.T) { }) } }) + + t.Run("Can install multiple dependency levels", func(t *testing.T) { + const ( + p1, p1Zip = "foo-panel", "foo-panel.zip" + p2, p2Zip = "foo-datasource", "foo-datasource.zip" + p3, p3Zip = "foo-app", "foo-app.zip" + ) + + var loadedPaths []string + loader := &fakes.FakeLoader{ + LoadFunc: func(ctx context.Context, src plugins.PluginSource) ([]*plugins.Plugin, error) { + loadedPaths = append(loadedPaths, src.PluginURIs(ctx)...) 
+ return []*plugins.Plugin{}, nil + }, + } + + pluginRepo := &fakes.FakePluginRepo{ + GetPluginArchiveFunc: func(_ context.Context, id, version string, _ repo.CompatOpts) (*repo.PluginArchive, error) { + return &repo.PluginArchive{File: &zip.ReadCloser{Reader: zip.Reader{File: []*zip.File{{ + FileHeader: zip.FileHeader{Name: fmt.Sprintf("%s.zip", id)}, + }}}}}, nil + }, + } + + fs := &fakes.FakePluginStorage{ + ExtractFunc: func(_ context.Context, id string, _ storage.DirNameGeneratorFunc, z *zip.ReadCloser) (*storage.ExtractedPluginArchive, error) { + switch id { + case p1: + return &storage.ExtractedPluginArchive{Path: p1Zip}, nil + case p2: + return &storage.ExtractedPluginArchive{ + Dependencies: []*storage.Dependency{{ID: p1}}, + Path: p2Zip, + }, nil + case p3: + return &storage.ExtractedPluginArchive{ + Dependencies: []*storage.Dependency{{ID: p2}}, + Path: p3Zip, + }, nil + default: + return nil, fmt.Errorf("unknown plugin %s", id) + } + }, + } + + inst := New(fakes.NewFakePluginRegistry(), loader, pluginRepo, fs, storage.SimpleDirNameGeneratorFunc, &fakes.FakeAuthService{}) + err := inst.Add(context.Background(), p3, "", testCompatOpts()) + require.NoError(t, err) + require.Equal(t, []string{p1Zip, p2Zip, p3Zip}, loadedPaths) + }) + + t.Run("Livelock prevented when two plugins depend on each other", func(t *testing.T) { + const ( + p1, p1Zip = "foo-panel", "foo-panel.zip" + p2, p2Zip = "foo-datasource", "foo-datasource.zip" + ) + + var loadedPaths []string + loader := &fakes.FakeLoader{ + LoadFunc: func(ctx context.Context, src plugins.PluginSource) ([]*plugins.Plugin, error) { + loadedPaths = append(loadedPaths, src.PluginURIs(ctx)...) + return []*plugins.Plugin{}, nil + }, + } + + pluginRepo := &fakes.FakePluginRepo{ + GetPluginArchiveFunc: func(_ context.Context, id, version string, _ repo.CompatOpts) (*repo.PluginArchive, error) { + return &repo.PluginArchive{File: &zip.ReadCloser{Reader: zip.Reader{File: []*zip.File{{ + FileHeader: zip.FileHeader{Name: fmt.Sprintf("%s.zip", id)}, + }}}}}, nil + }, + } + + fs := &fakes.FakePluginStorage{ + ExtractFunc: func(_ context.Context, id string, _ storage.DirNameGeneratorFunc, z *zip.ReadCloser) (*storage.ExtractedPluginArchive, error) { + switch id { + case p1: + return &storage.ExtractedPluginArchive{ + Dependencies: []*storage.Dependency{{ID: p2}}, + Path: p1Zip, + }, nil + case p2: + return &storage.ExtractedPluginArchive{ + Dependencies: []*storage.Dependency{{ID: p1}}, + Path: p2Zip, + }, nil + default: + return nil, fmt.Errorf("unknown plugin %s", id) + } + }, + } + + inst := New(fakes.NewFakePluginRegistry(), loader, pluginRepo, fs, storage.SimpleDirNameGeneratorFunc, &fakes.FakeAuthService{}) + err := inst.Add(context.Background(), p1, "", testCompatOpts()) + require.NoError(t, err) + require.Equal(t, []string{p2Zip, p1Zip}, loadedPaths) + }) } func createPlugin(t *testing.T, pluginID string, class plugins.Class, managed, backend bool, cbs ...func(*plugins.Plugin)) *plugins.Plugin { @@ -196,11 +293,13 @@ func createPlugin(t *testing.T, pluginID string, class plugins.Class, managed, b }, } p.SetLogger(log.NewTestLogger()) - p.RegisterClient(&fakes.FakePluginClient{ - ID: pluginID, - Managed: managed, - Log: p.Logger(), - }) + if p.Backend { + p.RegisterClient(&fakes.FakePluginClient{ + ID: pluginID, + Managed: managed, + Log: p.Logger(), + }) + } for _, cb := range cbs { cb(p) diff --git a/pkg/plugins/models.go b/pkg/plugins/models.go index a7583ef4a9c..101dd6185a7 100644 --- a/pkg/plugins/models.go +++ b/pkg/plugins/models.go 
@@ -191,6 +191,7 @@ type DataSourceDTO struct { Module string `json:"module,omitempty"` JSONData map[string]any `json:"jsonData"` ReadOnly bool `json:"readOnly"` + APIVersion string `json:"apiVersion,omitempty"` BasicAuth string `json:"basicAuth,omitempty"` WithCredentials bool `json:"withCredentials,omitempty"` diff --git a/pkg/promlib/admission_handler.go b/pkg/promlib/admission_handler.go index 4cfa68b1d53..fb8511373b5 100644 --- a/pkg/promlib/admission_handler.go +++ b/pkg/promlib/admission_handler.go @@ -48,8 +48,8 @@ func (s *Service) MutateAdmission(ctx context.Context, req *backend.AdmissionReq default: return getBadRequest(fmt.Sprintf("expected apiVersion: v0alpha1, found: %s", settings.APIVersion)), nil } - if settings.URL != "" { - return getBadRequest("unsupported URL value"), nil + if settings.URL == "" { + return getBadRequest("missing URL value"), nil } pb, err := backend.DataSourceInstanceSettingsToProtoBytes(settings) diff --git a/pkg/promlib/go.mod b/pkg/promlib/go.mod index 201f24f3496..5839f9bf919 100644 --- a/pkg/promlib/go.mod +++ b/pkg/promlib/go.mod @@ -3,15 +3,15 @@ module github.com/grafana/grafana/pkg/promlib go 1.21.10 require ( - github.com/grafana/grafana-plugin-sdk-go v0.235.0 + github.com/grafana/grafana-plugin-sdk-go v0.237.0 github.com/json-iterator/go v1.1.12 github.com/patrickmn/go-cache v2.1.0+incompatible - github.com/prometheus/client_golang v1.19.0 - github.com/prometheus/common v0.53.0 + github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/common v0.54.0 github.com/prometheus/prometheus v1.8.2-0.20221021121301-51a44e6657c3 github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/otel v1.26.0 - go.opentelemetry.io/otel/trace v1.26.0 + go.opentelemetry.io/otel v1.28.0 + go.opentelemetry.io/otel/trace v1.28.0 golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f k8s.io/apimachinery v0.29.3 ) @@ -39,7 +39,7 @@ require ( github.com/getkin/kin-openapi v0.124.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect @@ -97,14 +97,14 @@ require ( github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.51.0 // indirect - go.opentelemetry.io/contrib/propagators/jaeger v1.26.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.53.0 // indirect + go.opentelemetry.io/contrib/propagators/jaeger v1.28.0 // indirect go.opentelemetry.io/contrib/samplers/jaegerremote v0.20.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 // indirect - go.opentelemetry.io/otel/metric v1.26.0 // indirect - go.opentelemetry.io/otel/sdk v1.26.0 // indirect - go.opentelemetry.io/proto/otlp v1.2.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect 
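// Editor's note: the promlib admission_handler.go hunk above inverts the URL check so
// that an empty URL is rejected rather than a populated one. This is a minimal sketch
// of that corrected validation in isolation; the settings struct and error results are
// illustrative placeholders rather than the backend SDK types the real handler uses.
package main

import "fmt"

type settings struct {
	APIVersion string
	URL        string
}

func validate(s settings) error {
	if s.APIVersion != "v0alpha1" {
		return fmt.Errorf("expected apiVersion: v0alpha1, found: %s", s.APIVersion)
	}
	if s.URL == "" { // the old code rejected non-empty URLs instead
		return fmt.Errorf("missing URL value")
	}
	return nil
}

func main() {
	fmt.Println(validate(settings{APIVersion: "v0alpha1"}))                               // missing URL value
	fmt.Println(validate(settings{APIVersion: "v0alpha1", URL: "http://localhost:9090"})) // <nil>
}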
go.uber.org/atomic v1.11.0 // indirect go.uber.org/goleak v1.3.0 // indirect golang.org/x/mod v0.18.0 // indirect @@ -114,10 +114,11 @@ require ( golang.org/x/text v0.16.0 // indirect golang.org/x/tools v0.22.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect + gonum.org/v1/gonum v0.14.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect google.golang.org/grpc v1.64.0 // indirect - google.golang.org/protobuf v1.34.1 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/pkg/promlib/go.sum b/pkg/promlib/go.sum index a4775ee92f8..4b21b883c52 100644 --- a/pkg/promlib/go.sum +++ b/pkg/promlib/go.sum @@ -49,8 +49,7 @@ github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBj github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= @@ -81,7 +80,7 @@ github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1 github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grafana/grafana-plugin-sdk-go v0.235.0 h1:UnZ/iBDvCkfDgwR94opi8trAWJXv4V8Qr1ocJKRRmqA= +github.com/grafana/grafana-plugin-sdk-go v0.237.0 h1:sxif4tl9ocYSVyeCtGijWQbW2ygfEOFGKQTCQ/ZX99M= github.com/grafana/otel-profiling-go v0.5.1 h1:stVPKAFZSa7eGiqbYuG25VcqYksR6iWvF3YH66t4qL8= github.com/grafana/pyroscope-go/godeltaprof v0.1.7 h1:C11j63y7gymiW8VugJ9ZW0pWfxTZugdSJyC48olk5KY= github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db h1:7aN5cccjIqCLTzedH7MZzRZt5/lsAHch6Z3L2ZGn5FA= @@ -165,9 +164,9 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 
-github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.14.0 h1:Lw4VdGGoKEZilJsayHf0B+9YgLGREba2C6xr+Fdfq6s= @@ -218,17 +217,17 @@ github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 h1:A3SayB3rNyt+1S6qpI9mHPkeHTZbD7XILEqWnYZb2l0= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.51.0 h1:974XTyIwHI4nHa1+uSLxHtUnlJ2DiVtAJjk7fd07p/8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= -go.opentelemetry.io/contrib/propagators/jaeger v1.26.0 h1:RH76Cl2pfOLLoCtxAPax9c7oYzuL1tiI7/ZPJEmEmOw= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.53.0 h1:IVtyPth4Rs5P8wIf0mP2KVKFNTJ4paX9qQ4Hkh5gFdc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/propagators/jaeger v1.28.0 h1:xQ3ktSVS128JWIaN1DiPGIjcH+GsvkibIAVRWFjS9eM= go.opentelemetry.io/contrib/samplers/jaegerremote v0.20.0 h1:ja+d7Aea/9PgGxB63+E0jtRFpma717wubS0KFkZpmYw= -go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 h1:Waw9Wfpo/IXzOI8bCB7DIk+0JZcqqsyn1JFnAc+iam8= -go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= -go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= -go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -279,12 +278,11 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors 
v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= -gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU= +gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/promlib/instrumentation/instrumentation.go b/pkg/promlib/instrumentation/instrumentation.go index 1cab64aef18..1c61d46af3c 100644 --- a/pkg/promlib/instrumentation/instrumentation.go +++ b/pkg/promlib/instrumentation/instrumentation.go @@ -18,8 +18,8 @@ const ( StatusOK = "ok" StatusError = "error" - EndpointCallResource = "callResource" - EndpointQueryData = "queryData" + EndpointCallResource = string(backend.EndpointCallResource) + EndpointQueryData = string(backend.EndpointQueryData) PluginSource = "plugin" ExternalSource = "external" diff --git a/pkg/registry/apis/alerting/notifications/receiver/conversions.go b/pkg/registry/apis/alerting/notifications/receiver/conversions.go index cd38ad5e805..b4a19200381 100644 --- a/pkg/registry/apis/alerting/notifications/receiver/conversions.go +++ b/pkg/registry/apis/alerting/notifications/receiver/conversions.go @@ -54,19 +54,18 @@ func convertToK8sResource(orgID int64, receiver definitions.GettableApiReceiver, } uid := getUID(receiver) // TODO replace to stable UID when we switch to normal storage - return &model.Receiver{ + r := &model.Receiver{ TypeMeta: resourceInfo.TypeMeta(), ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(uid), // This is needed to make PATCH work - Name: uid, // TODO replace to stable UID when we switch to normal storage - Namespace: namespacer(orgID), - Annotations: map[string]string{ // TODO find a better place for provenance? - "grafana.com/provenance": string(provenance), - }, + UID: types.UID(uid), // This is needed to make PATCH work + Name: uid, // TODO replace to stable UID when we switch to normal storage + Namespace: namespacer(orgID), ResourceVersion: "", // TODO: Implement optimistic concurrency. 
}, Spec: spec, - }, nil + } + r.SetProvenanceStatus(string(provenance)) + return r, nil } func convertToDomainModel(receiver *model.Receiver) (definitions.GettableApiReceiver, error) { diff --git a/pkg/registry/apis/alerting/notifications/timeinterval/conversions.go b/pkg/registry/apis/alerting/notifications/timeinterval/conversions.go index cacb6f4369a..ae8ed546c98 100644 --- a/pkg/registry/apis/alerting/notifications/timeinterval/conversions.go +++ b/pkg/registry/apis/alerting/notifications/timeinterval/conversions.go @@ -6,6 +6,7 @@ import ( "hash/fnv" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/types" model "github.com/grafana/grafana/pkg/apis/alerting_notifications/v0alpha1" @@ -19,7 +20,7 @@ func getIntervalUID(t definitions.MuteTimeInterval) string { return fmt.Sprintf("%016x", sum.Sum64()) } -func convertToK8sResources(orgID int64, intervals []definitions.MuteTimeInterval, namespacer request.NamespaceMapper) (*model.TimeIntervalList, error) { +func convertToK8sResources(orgID int64, intervals []definitions.MuteTimeInterval, namespacer request.NamespaceMapper, selector fields.Selector) (*model.TimeIntervalList, error) { data, err := json.Marshal(intervals) if err != nil { return nil, err @@ -30,23 +31,15 @@ func convertToK8sResources(orgID int64, intervals []definitions.MuteTimeInterval return nil, err } result := &model.TimeIntervalList{} + for idx := range specs { interval := intervals[idx] spec := specs[idx] - uid := getIntervalUID(interval) // TODO replace to stable UID when we switch to normal storage - result.Items = append(result.Items, model.TimeInterval{ - TypeMeta: resourceInfo.TypeMeta(), - ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(uid), // TODO This is needed to make PATCH work - Name: uid, // TODO replace to stable UID when we switch to normal storage - Namespace: namespacer(orgID), - Annotations: map[string]string{ // TODO find a better place for provenance? - "grafana.com/provenance": string(interval.Provenance), - }, - ResourceVersion: interval.Version, - }, - Spec: spec, - }) + item := buildTimeInterval(orgID, interval, spec, namespacer) + if selector != nil && !selector.Empty() && !selector.Matches(model.SelectableTimeIntervalsFields(&item)) { + continue + } + result.Items = append(result.Items, item) } return result, nil } @@ -61,21 +54,24 @@ func convertToK8sResource(orgID int64, interval definitions.MuteTimeInterval, na if err != nil { return nil, err } + result := buildTimeInterval(orgID, interval, spec, namespacer) + return &result, nil +} +func buildTimeInterval(orgID int64, interval definitions.MuteTimeInterval, spec model.TimeIntervalSpec, namespacer request.NamespaceMapper) model.TimeInterval { uid := getIntervalUID(interval) // TODO replace to stable UID when we switch to normal storage - return &model.TimeInterval{ + i := model.TimeInterval{ TypeMeta: resourceInfo.TypeMeta(), ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(uid), // TODO This is needed to make PATCH work - Name: uid, // TODO replace to stable UID when we switch to normal storage - Namespace: namespacer(orgID), - Annotations: map[string]string{ // TODO find a better place for provenance? 
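// Editor's note: the receiver and time-interval conversions above stop writing the
// provenance annotation inline and call SetProvenanceStatus on the resource instead.
// The helper below is a hypothetical version of what such an accessor might do,
// centralising the "grafana.com/provenance" key shown in the removed lines; the real
// method lives on the generated v0alpha1 types and may differ.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type timeInterval struct {
	metav1.ObjectMeta
}

func (t *timeInterval) SetProvenanceStatus(p string) {
	if t.Annotations == nil {
		t.Annotations = map[string]string{}
	}
	t.Annotations["grafana.com/provenance"] = p
}

func main() {
	i := &timeInterval{}
	i.SetProvenanceStatus("api")
	fmt.Println(i.Annotations["grafana.com/provenance"]) // api
}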
- "grafana.com/provenance": string(interval.Provenance), - }, + UID: types.UID(uid), // TODO This is needed to make PATCH work + Name: uid, // TODO replace to stable UID when we switch to normal storage + Namespace: namespacer(orgID), ResourceVersion: interval.Version, }, Spec: spec, - }, nil + } + i.SetProvenanceStatus(string(interval.Provenance)) + return i } func convertToDomainModel(interval *model.TimeInterval) (definitions.MuteTimeInterval, error) { diff --git a/pkg/registry/apis/alerting/notifications/timeinterval/legacy_storage.go b/pkg/registry/apis/alerting/notifications/timeinterval/legacy_storage.go index 64d95e17a12..ac2137a2cb0 100644 --- a/pkg/registry/apis/alerting/notifications/timeinterval/legacy_storage.go +++ b/pkg/registry/apis/alerting/notifications/timeinterval/legacy_storage.go @@ -59,7 +59,7 @@ func (s *legacyStorage) ConvertToTable(ctx context.Context, object runtime.Objec return s.tableConverter.ConvertToTable(ctx, object, tableOptions) } -func (s *legacyStorage) List(ctx context.Context, _ *internalversion.ListOptions) (runtime.Object, error) { +func (s *legacyStorage) List(ctx context.Context, opts *internalversion.ListOptions) (runtime.Object, error) { orgId, err := request.OrgIDForList(ctx) if err != nil { return nil, err @@ -70,7 +70,7 @@ func (s *legacyStorage) List(ctx context.Context, _ *internalversion.ListOptions return nil, err } - return convertToK8sResources(orgId, res, s.namespacer) + return convertToK8sResources(orgId, res, s.namespacer, opts.FieldSelector) } func (s *legacyStorage) Get(ctx context.Context, uid string, _ *metav1.GetOptions) (runtime.Object, error) { diff --git a/pkg/registry/apis/alerting/notifications/timeinterval/storage.go b/pkg/registry/apis/alerting/notifications/timeinterval/storage.go index efbddd08f14..d51c75d7732 100644 --- a/pkg/registry/apis/alerting/notifications/timeinterval/storage.go +++ b/pkg/registry/apis/alerting/notifications/timeinterval/storage.go @@ -4,10 +4,13 @@ import ( "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" + apistore "k8s.io/apiserver/pkg/storage" "github.com/prometheus/client_golang/prometheus" @@ -63,7 +66,7 @@ func NewStorage( NewListFunc: resourceInfo.NewListFunc, KeyRootFunc: grafanaregistry.KeyRootFunc(resourceInfo.GroupResource()), KeyFunc: grafanaregistry.NamespaceKeyFunc(resourceInfo.GroupResource()), - PredicateFunc: grafanaregistry.Matcher, + PredicateFunc: Matcher, DefaultQualifiedResource: resourceInfo.GroupResource(), SingularQualifiedResource: resourceInfo.SingularGroupResource(), TableConvertor: legacyStore.tableConverter, @@ -71,7 +74,7 @@ func NewStorage( UpdateStrategy: strategy, DeleteStrategy: strategy, } - options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: grafanaregistry.GetAttrs} + options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: GetAttrs} if err := s.CompleteWithOptions(options); err != nil { return nil, err } @@ -79,3 +82,19 @@ func NewStorage( } return legacyStore, nil } + +func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) { + if s, ok := obj.(*model.TimeInterval); ok { + return s.Labels, model.SelectableTimeIntervalsFields(s), nil + } + return nil, nil, fmt.Errorf("object of type %T is not supported", obj) +} + +// Matcher returns a generic.SelectionPredicate that matches 
on label and field selectors. +func Matcher(label labels.Selector, field fields.Selector) apistore.SelectionPredicate { + return apistore.SelectionPredicate{ + Label: label, + Field: field, + GetAttrs: GetAttrs, + } +} diff --git a/pkg/server/wire.go b/pkg/server/wire.go index a8bf87483eb..8ca6910b160 100644 --- a/pkg/server/wire.go +++ b/pkg/server/wire.go @@ -360,6 +360,7 @@ var wireBasicSet = wire.NewSet( authnimpl.ProvideService, authnimpl.ProvideIdentitySynchronizer, authnimpl.ProvideAuthnService, + authnimpl.ProvideAuthnServiceAuthenticateOnly, authnimpl.ProvideRegistration, supportbundlesimpl.ProvideService, extsvcaccounts.ProvideExtSvcAccountsService, @@ -396,6 +397,7 @@ var wireSet = wire.NewSet( wire.Bind(new(notifications.WebhookSender), new(*notifications.NotificationService)), wire.Bind(new(notifications.EmailSender), new(*notifications.NotificationService)), wire.Bind(new(db.DB), new(*sqlstore.SQLStore)), + wire.Bind(new(db.ReplDB), new(*sqlstore.ReplStore)), prefimpl.ProvideService, oauthtoken.ProvideService, wire.Bind(new(oauthtoken.OAuthTokenService), new(*oauthtoken.Service)), @@ -412,6 +414,7 @@ var wireCLISet = wire.NewSet( wire.Bind(new(notifications.WebhookSender), new(*notifications.NotificationService)), wire.Bind(new(notifications.EmailSender), new(*notifications.NotificationService)), wire.Bind(new(db.DB), new(*sqlstore.SQLStore)), + wire.Bind(new(db.ReplDB), new(*sqlstore.ReplStore)), prefimpl.ProvideService, oauthtoken.ProvideService, wire.Bind(new(oauthtoken.OAuthTokenService), new(*oauthtoken.Service)), diff --git a/pkg/services/accesscontrol/accesscontrol.go b/pkg/services/accesscontrol/accesscontrol.go index 0c2b31efe42..41fe41bd214 100644 --- a/pkg/services/accesscontrol/accesscontrol.go +++ b/pkg/services/accesscontrol/accesscontrol.go @@ -6,15 +6,16 @@ import ( "fmt" "strings" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "github.com/grafana/grafana/pkg/apimachinery/identity" "github.com/grafana/grafana/pkg/registry" "github.com/grafana/grafana/pkg/services/authn" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" "github.com/grafana/grafana/pkg/services/org" "github.com/grafana/grafana/pkg/services/user" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" ) var tracer = otel.Tracer("github.com/grafana/grafana/pkg/services/accesscontrol") diff --git a/pkg/services/accesscontrol/acimpl/accesscontrol.go b/pkg/services/accesscontrol/acimpl/accesscontrol.go index fe2d8697972..338ea891a44 100644 --- a/pkg/services/accesscontrol/acimpl/accesscontrol.go +++ b/pkg/services/accesscontrol/acimpl/accesscontrol.go @@ -3,32 +3,63 @@ package acimpl import ( "context" "errors" + "time" + openfgav1 "github.com/openfga/api/proto/openfga/v1" "github.com/prometheus/client_golang/prometheus" "github.com/grafana/grafana/pkg/apimachinery/identity" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/infra/metrics" "github.com/grafana/grafana/pkg/services/accesscontrol" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/featuremgmt" ) +var ( + errAccessNotImplemented = errors.New("access control not implemented for resource") +) + var _ accesscontrol.AccessControl = new(AccessControl) -func ProvideAccessControl(features featuremgmt.FeatureToggles) *AccessControl { +func ProvideAccessControl(features featuremgmt.FeatureToggles, zclient zanzana.Client) *AccessControl { logger := 
log.New("accesscontrol") - return &AccessControl{ - features, logger, accesscontrol.NewResolvers(logger), + + var m *acMetrics + if features.IsEnabledGlobally(featuremgmt.FlagZanzana) { + m = initMetrics() } + + return &AccessControl{ + features, + logger, + accesscontrol.NewResolvers(logger), + zclient, + m, + } +} + +func ProvideAccessControlTest() *AccessControl { + return ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) } type AccessControl struct { features featuremgmt.FeatureToggles log log.Logger resolvers accesscontrol.Resolvers + zclient zanzana.Client + metrics *acMetrics } func (a *AccessControl) Evaluate(ctx context.Context, user identity.Requester, evaluator accesscontrol.Evaluator) (bool, error) { + if a.features.IsEnabledGlobally(featuremgmt.FlagZanzana) { + return a.evaluateCompare(ctx, user, evaluator) + } + + return a.evaluate(ctx, user, evaluator) +} + +func (a *AccessControl) evaluate(ctx context.Context, user identity.Requester, evaluator accesscontrol.Evaluator) (bool, error) { timer := prometheus.NewTimer(metrics.MAccessEvaluationsSummary) defer timer.ObserveDuration() metrics.MAccessEvaluationCount.Inc() @@ -66,6 +97,95 @@ func (a *AccessControl) Evaluate(ctx context.Context, user identity.Requester, e return resolvedEvaluator.Evaluate(permissions), nil } +func (a *AccessControl) evaluateZanzana(ctx context.Context, user identity.Requester, evaluator accesscontrol.Evaluator) (bool, error) { + eval, err := evaluator.MutateScopes(ctx, a.resolvers.GetScopeAttributeMutator(user.GetOrgID())) + if err != nil { + if !errors.Is(err, accesscontrol.ErrResolverNotFound) { + return false, err + } + eval = evaluator + } + + return eval.EvaluateCustom(func(action, scope string) (bool, error) { + kind, _, identifier := accesscontrol.SplitScope(scope) + key, ok := zanzana.TranslateToTuple(user.GetUID().String(), action, kind, identifier, user.GetOrgID()) + if !ok { + // unsupported translation + return false, errAccessNotImplemented + } + + res, err := a.zclient.Check(ctx, &openfgav1.CheckRequest{ + TupleKey: &openfgav1.CheckRequestTupleKey{ + User: key.User, + Relation: key.Relation, + Object: key.Object, + }, + }) + + if err != nil { + return false, err + } + + return res.Allowed, nil + }) +} + +type evalResult struct { + runner string + decision bool + err error + duration time.Duration +} + +// evaluateCompare run RBAC and zanzana checks in parallel and then compare result +func (a *AccessControl) evaluateCompare(ctx context.Context, user identity.Requester, evaluator accesscontrol.Evaluator) (bool, error) { + res := make(chan evalResult, 2) + go func() { + timer := prometheus.NewTimer(a.metrics.mAccessEngineEvaluationsSeconds.WithLabelValues("zanzana")) + defer timer.ObserveDuration() + start := time.Now() + + hasAccess, err := a.evaluateZanzana(ctx, user, evaluator) + res <- evalResult{"zanzana", hasAccess, err, time.Since(start)} + }() + + go func() { + timer := prometheus.NewTimer(a.metrics.mAccessEngineEvaluationsSeconds.WithLabelValues("grafana")) + defer timer.ObserveDuration() + start := time.Now() + + hasAccess, err := a.evaluate(ctx, user, evaluator) + res <- evalResult{"grafana", hasAccess, err, time.Since(start)} + }() + first, second := <-res, <-res + close(res) + + if second.runner == "grafana" { + first, second = second, first + } + + if !errors.Is(second.err, errAccessNotImplemented) { + if second.err != nil { + a.log.Error("zanzana evaluation failed", "error", second.err) + } else if first.decision != second.decision { + 
a.metrics.mZanzanaEvaluationStatusTotal.WithLabelValues("error").Inc()
+			a.log.Warn(
+				"zanzana evaluation result does not match grafana",
+				"grafana_decision", first.decision,
+				"zanzana_decision", second.decision,
+				"grafana_ms", first.duration,
+				"zanzana_ms", second.duration,
+				"eval", evaluator.GoString(),
+			)
+		} else {
+			a.metrics.mZanzanaEvaluationStatusTotal.WithLabelValues("success").Inc()
+			a.log.Debug("zanzana evaluation is correct", "grafana_ms", first.duration, "zanzana_ms", second.duration)
+		}
+	}
+
+	return first.decision, first.err
+}
+
 func (a *AccessControl) RegisterScopeAttributeResolver(prefix string, resolver accesscontrol.ScopeAttributeResolver) {
 	a.resolvers.AddScopeAttributeResolver(prefix, resolver)
 }
diff --git a/pkg/services/accesscontrol/acimpl/accesscontrol_test.go b/pkg/services/accesscontrol/acimpl/accesscontrol_test.go
index 9bc60141a14..22f2e5d4858 100644
--- a/pkg/services/accesscontrol/acimpl/accesscontrol_test.go
+++ b/pkg/services/accesscontrol/acimpl/accesscontrol_test.go
@@ -8,6 +8,7 @@ import (
 	"github.com/grafana/grafana/pkg/services/accesscontrol"
 	"github.com/grafana/grafana/pkg/services/accesscontrol/acimpl"
+	"github.com/grafana/grafana/pkg/services/authz/zanzana"
 	"github.com/grafana/grafana/pkg/services/featuremgmt"
 	"github.com/grafana/grafana/pkg/services/user"
 )
@@ -65,7 +66,7 @@ func TestAccessControl_Evaluate(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.desc, func(t *testing.T) {
-			ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(featuremgmt.FlagAccessActionSets))
+			ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(featuremgmt.FlagAccessActionSets), zanzana.NewNoopClient())
 
 			if tt.scopeResolver != nil {
 				ac.RegisterScopeAttributeResolver(tt.resolverPrefix, tt.scopeResolver)
diff --git a/pkg/services/accesscontrol/acimpl/metrics.go b/pkg/services/accesscontrol/acimpl/metrics.go
new file mode 100644
index 00000000000..253f1160f1a
--- /dev/null
+++ b/pkg/services/accesscontrol/acimpl/metrics.go
@@ -0,0 +1,53 @@
+package acimpl
+
+import (
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/grafana/grafana/pkg/infra/metrics/metricutil"
+)
+
+const (
+	metricsSubSystem = "authz"
+	metricsNamespace = "grafana"
+)
+
+type acMetrics struct {
+	// mAccessEngineEvaluationsSeconds is a histogram of evaluation time for a specific engine (RBAC and zanzana)
+	mAccessEngineEvaluationsSeconds *prometheus.HistogramVec
+	// mZanzanaEvaluationStatusTotal is a metric for zanzana evaluation status
+	mZanzanaEvaluationStatusTotal *prometheus.CounterVec
+}
+
+var once sync.Once
+
+// TODO: use prometheus.Registerer
+func initMetrics() *acMetrics {
+	m := &acMetrics{}
+	once.Do(func() {
+		m.mAccessEngineEvaluationsSeconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+			Name:      "engine_evaluations_seconds",
+			Help:      "Histogram for evaluation time for the specific access control engine (RBAC and zanzana).",
+			Namespace: metricsNamespace,
+			Subsystem: metricsSubSystem,
+			Buckets:   prometheus.ExponentialBuckets(0.00001, 4, 10),
+		},
+			[]string{"engine"},
+		)
+
+		m.mZanzanaEvaluationStatusTotal = metricutil.NewCounterVecStartingAtZero(
+			prometheus.CounterOpts{
+				Name:      "zanzana_evaluation_status_total",
+				Help:      "evaluation status (success or error) for zanzana",
+				Namespace: metricsNamespace,
+				Subsystem: metricsSubSystem,
+			}, []string{"status"}, map[string][]string{"status": {"success", "error"}})
+
+		prometheus.MustRegister(
+			m.mAccessEngineEvaluationsSeconds,
+			m.mZanzanaEvaluationStatusTotal,
+		)
+	})
+
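// Editor's note: a compact sketch of the "compare" strategy evaluateCompare uses above:
// both engines run concurrently, the RBAC ("grafana") answer is always the one returned,
// and a mismatch from the shadow engine is only logged or counted. The two evaluator
// functions here are stand-ins for the real RBAC and zanzana checks.
package main

import (
	"fmt"
	"time"
)

type result struct {
	runner   string
	decision bool
	err      error
	elapsed  time.Duration
}

func compare(primary, shadow func() (bool, error)) (bool, error) {
	res := make(chan result, 2)
	run := func(name string, fn func() (bool, error)) {
		start := time.Now()
		ok, err := fn()
		res <- result{name, ok, err, time.Since(start)}
	}
	go run("grafana", primary)
	go run("zanzana", shadow)

	first, second := <-res, <-res
	if second.runner == "grafana" { // make sure "first" is always the primary engine
		first, second = second, first
	}
	if second.err == nil && first.decision != second.decision {
		fmt.Printf("shadow mismatch: grafana=%v zanzana=%v\n", first.decision, second.decision)
	}
	return first.decision, first.err
}

func main() {
	allowed, _ := compare(
		func() (bool, error) { return true, nil },
		func() (bool, error) { return false, nil },
	)
	fmt.Println(allowed) // true: the primary decision wins even on mismatch
}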
return m +} diff --git a/pkg/services/accesscontrol/acimpl/service.go b/pkg/services/accesscontrol/acimpl/service.go index 69f392bb40f..ec22b8a81f2 100644 --- a/pkg/services/accesscontrol/acimpl/service.go +++ b/pkg/services/accesscontrol/acimpl/service.go @@ -48,11 +48,11 @@ var SharedWithMeFolderPermission = accesscontrol.Permission{ var OSSRolesPrefixes = []string{accesscontrol.ManagedRolePrefix, accesscontrol.ExternalServiceRolePrefix} func ProvideService( - cfg *setting.Cfg, db db.DB, routeRegister routing.RouteRegister, cache *localcache.CacheService, + cfg *setting.Cfg, db db.ReplDB, routeRegister routing.RouteRegister, cache *localcache.CacheService, accessControl accesscontrol.AccessControl, actionResolver accesscontrol.ActionResolver, features featuremgmt.FeatureToggles, tracer tracing.Tracer, zclient zanzana.Client, ) (*Service, error) { - service := ProvideOSSService(cfg, database.ProvideService(db), actionResolver, cache, features, tracer, zclient, db) + service := ProvideOSSService(cfg, database.ProvideService(db), actionResolver, cache, features, tracer, zclient, db.DB()) api.NewAccessControlAPI(routeRegister, accessControl, service, features).RegisterAPIEndpoints() if err := accesscontrol.DeclareFixedRoles(service, cfg); err != nil { diff --git a/pkg/services/accesscontrol/acimpl/service_bench_test.go b/pkg/services/accesscontrol/acimpl/service_bench_test.go index c3b1a103b84..78e153936c7 100644 --- a/pkg/services/accesscontrol/acimpl/service_bench_test.go +++ b/pkg/services/accesscontrol/acimpl/service_bench_test.go @@ -25,7 +25,7 @@ import ( // - each managed role will have 3 permissions {"resources:action2", "resources:id:x"} where x belongs to [1, 3] func setupBenchEnv(b *testing.B, usersCount, resourceCount int) (accesscontrol.Service, *user.SignedInUser) { now := time.Now() - sqlStore := db.InitTestDB(b) + sqlStore := db.InitTestReplDB(b) store := database.ProvideService(sqlStore) acService := &Service{ cfg: setting.NewCfg(), diff --git a/pkg/services/accesscontrol/acimpl/service_test.go b/pkg/services/accesscontrol/acimpl/service_test.go index 7e7a6cdd43c..74382c44a28 100644 --- a/pkg/services/accesscontrol/acimpl/service_test.go +++ b/pkg/services/accesscontrol/acimpl/service_test.go @@ -41,8 +41,8 @@ func setupTestEnv(t testing.TB) *Service { log: log.New("accesscontrol"), registrations: accesscontrol.RegistrationList{}, roles: accesscontrol.BuildBasicRoleDefinitions(), - store: database.ProvideService(db.InitTestDB(t)), tracer: tracing.InitializeTracerForTest(), + store: database.ProvideService(db.InitTestReplDB(t)), } require.NoError(t, ac.RegisterFixedRoles(context.Background())) return ac @@ -65,7 +65,7 @@ func TestUsageMetrics(t *testing.T) { s := ProvideOSSService( cfg, - database.ProvideService(db.InitTestDB(t)), + database.ProvideService(db.InitTestReplDB(t)), &resourcepermissions.FakeActionSetSvc{}, localcache.ProvideService(), featuremgmt.WithFeatures(), diff --git a/pkg/services/accesscontrol/authorize_in_org_test.go b/pkg/services/accesscontrol/authorize_in_org_test.go index f9cc616c73a..47d117766f2 100644 --- a/pkg/services/accesscontrol/authorize_in_org_test.go +++ b/pkg/services/accesscontrol/authorize_in_org_test.go @@ -14,6 +14,7 @@ import ( "github.com/grafana/grafana/pkg/services/accesscontrol/actest" "github.com/grafana/grafana/pkg/services/authn" "github.com/grafana/grafana/pkg/services/authn/authntest" + "github.com/grafana/grafana/pkg/services/authz/zanzana" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" 
"github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/team" @@ -24,7 +25,7 @@ import ( ) func TestAuthorizeInOrgMiddleware(t *testing.T) { - ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) // Define test cases testCases := []struct { diff --git a/pkg/services/accesscontrol/database/database.go b/pkg/services/accesscontrol/database/database.go index 43e8564505f..4b3c6479de6 100644 --- a/pkg/services/accesscontrol/database/database.go +++ b/pkg/services/accesscontrol/database/database.go @@ -36,17 +36,17 @@ const ( WHERE br.role = ?` ) -func ProvideService(sql db.DB) *AccessControlStore { +func ProvideService(sql db.ReplDB) *AccessControlStore { return &AccessControlStore{sql} } type AccessControlStore struct { - sql db.DB + sql db.ReplDB } func (s *AccessControlStore) GetUserPermissions(ctx context.Context, query accesscontrol.GetUserPermissionsQuery) ([]accesscontrol.Permission, error) { result := make([]accesscontrol.Permission, 0) - err := s.sql.WithDbSession(ctx, func(sess *db.Session) error { + err := s.sql.ReadReplica().WithDbSession(ctx, func(sess *db.Session) error { if query.UserID == 0 && len(query.TeamIDs) == 0 && len(query.Roles) == 0 { // no permission to fetch return nil @@ -104,7 +104,7 @@ func (s *AccessControlStore) GetTeamsPermissions(ctx context.Context, query acce orgID := query.OrgID rolePrefixes := query.RolePrefixes result := make([]teamPermission, 0) - err := s.sql.WithDbSession(ctx, func(sess *db.Session) error { + err := s.sql.ReadReplica().WithDbSession(ctx, func(sess *db.Session) error { if len(teams) == 0 { // no permission to fetch return nil @@ -172,7 +172,7 @@ func (s *AccessControlStore) SearchUsersPermissions(ctx context.Context, orgID i } } - if err := s.sql.WithDbSession(ctx, func(sess *db.Session) error { + if err := s.sql.ReadReplica().WithDbSession(ctx, func(sess *db.Session) error { roleNameFilterJoin := "" if len(options.RolePrefixes) > 0 { roleNameFilterJoin = "INNER JOIN role AS r ON up.role_id = r.id" @@ -198,7 +198,7 @@ func (s *AccessControlStore) SearchUsersPermissions(ctx context.Context, orgID i params = append(params, userID) } - grafanaAdmin := fmt.Sprintf(grafanaAdminAssignsSQL, s.sql.Quote("user")) + grafanaAdmin := fmt.Sprintf(grafanaAdminAssignsSQL, s.sql.ReadReplica().Quote("user")) params = append(params, accesscontrol.RoleGrafanaAdmin) if options.NamespacedID != "" { grafanaAdmin += " AND sa.user_id = ?" @@ -284,11 +284,11 @@ func (s *AccessControlStore) GetUsersBasicRoles(ctx context.Context, userFilter IsAdmin bool `xorm:"is_admin"` } dbRoles := make([]UserOrgRole, 0) - if err := s.sql.WithDbSession(ctx, func(sess *db.Session) error { + if err := s.sql.ReadReplica().WithDbSession(ctx, func(sess *db.Session) error { // Find roles q := ` SELECT u.id, ou.role, u.is_admin - FROM ` + s.sql.GetDialect().Quote("user") + ` AS u + FROM ` + s.sql.ReadReplica().GetDialect().Quote("user") + ` AS u LEFT JOIN org_user AS ou ON u.id = ou.user_id WHERE (u.is_admin OR ou.org_id = ?) ` @@ -318,7 +318,7 @@ func (s *AccessControlStore) GetUsersBasicRoles(ctx context.Context, userFilter } func (s *AccessControlStore) DeleteUserPermissions(ctx context.Context, orgID, userID int64) error { - err := s.sql.WithDbSession(ctx, func(sess *db.Session) error { + err := s.sql.DB().WithDbSession(ctx, func(sess *db.Session) error { roleDeleteQuery := "DELETE FROM user_role WHERE user_id = ?" 
roleDeleteParams := []any{roleDeleteQuery, userID} if orgID != accesscontrol.GlobalOrgID { @@ -383,7 +383,7 @@ func (s *AccessControlStore) DeleteUserPermissions(ctx context.Context, orgID, u } func (s *AccessControlStore) DeleteTeamPermissions(ctx context.Context, orgID, teamID int64) error { - err := s.sql.WithDbSession(ctx, func(sess *db.Session) error { + err := s.sql.DB().WithDbSession(ctx, func(sess *db.Session) error { roleDeleteQuery := "DELETE FROM team_role WHERE team_id = ? AND org_id = ?" roleDeleteParams := []any{roleDeleteQuery, teamID, orgID} diff --git a/pkg/services/accesscontrol/database/database_test.go b/pkg/services/accesscontrol/database/database_test.go index 8f8b237cd35..39d25fa3029 100644 --- a/pkg/services/accesscontrol/database/database_test.go +++ b/pkg/services/accesscontrol/database/database_test.go @@ -470,8 +470,8 @@ func createUsersAndTeams(t *testing.T, store db.DB, svcs helperServices, orgID i return res } -func setupTestEnv(t testing.TB) (*database.AccessControlStore, rs.Store, user.Service, team.Service, org.Service, *sqlstore.SQLStore) { - sql, cfg := db.InitTestDBWithCfg(t) +func setupTestEnv(t testing.TB) (*database.AccessControlStore, rs.Store, user.Service, team.Service, org.Service, *sqlstore.ReplStore) { + sql, cfg := db.InitTestReplDBWithCfg(t) cfg.AutoAssignOrg = true cfg.AutoAssignOrgRole = "Viewer" cfg.AutoAssignOrgId = 1 diff --git a/pkg/services/accesscontrol/database/externalservices.go b/pkg/services/accesscontrol/database/externalservices.go index ea69ff79fc5..622db51684e 100644 --- a/pkg/services/accesscontrol/database/externalservices.go +++ b/pkg/services/accesscontrol/database/externalservices.go @@ -18,7 +18,7 @@ func extServiceRoleName(externalServiceID string) string { func (s *AccessControlStore) DeleteExternalServiceRole(ctx context.Context, externalServiceID string) error { uid := accesscontrol.PrefixedRoleUID(extServiceRoleName(externalServiceID)) - return s.sql.WithDbSession(ctx, func(sess *db.Session) error { + return s.sql.DB().WithDbSession(ctx, func(sess *db.Session) error { stored, errGet := getRoleByUID(ctx, sess, uid) if errGet != nil { // Role not found, nothing to do @@ -55,7 +55,7 @@ func (s *AccessControlStore) SaveExternalServiceRole(ctx context.Context, cmd ac role := genExternalServiceRole(cmd) assignment := genExternalServiceAssignment(cmd) - return s.sql.WithDbSession(ctx, func(sess *db.Session) error { + return s.sql.DB().WithDbSession(ctx, func(sess *db.Session) error { // Create or update the role existingRole, errSaveRole := s.saveRole(ctx, sess, &role) if errSaveRole != nil { diff --git a/pkg/services/accesscontrol/database/externalservices_test.go b/pkg/services/accesscontrol/database/externalservices_test.go index 21bbbda171a..0df0860d21b 100644 --- a/pkg/services/accesscontrol/database/externalservices_test.go +++ b/pkg/services/accesscontrol/database/externalservices_test.go @@ -7,9 +7,10 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/grafana/grafana/pkg/infra/db" "github.com/grafana/grafana/pkg/services/accesscontrol" - "github.com/stretchr/testify/require" ) func TestAccessControlStore_SaveExternalServiceRole(t *testing.T) { @@ -114,7 +115,7 @@ func TestAccessControlStore_SaveExternalServiceRole(t *testing.T) { t.Run(tt.name, func(t *testing.T) { ctx := context.Background() s := &AccessControlStore{ - sql: db.InitTestDB(t), + sql: db.InitTestReplDB(t), } for i := range tt.runs { @@ -125,7 +126,7 @@ func TestAccessControlStore_SaveExternalServiceRole(t 
*testing.T) { } require.NoError(t, err) - errDBSession := s.sql.WithDbSession(ctx, func(sess *db.Session) error { + errDBSession := s.sql.DB().WithDbSession(ctx, func(sess *db.Session) error { storedRole, err := getRoleByUID(ctx, sess, accesscontrol.PrefixedRoleUID(extServiceRoleName(tt.runs[i].cmd.ExternalServiceID))) require.NoError(t, err) require.NotNil(t, storedRole) @@ -187,13 +188,13 @@ func TestAccessControlStore_DeleteExternalServiceRole(t *testing.T) { t.Run(tt.name, func(t *testing.T) { ctx := context.Background() s := &AccessControlStore{ - sql: db.InitTestDB(t), + sql: db.InitTestReplDB(t), } if tt.init != nil { tt.init(t, ctx, s) } roleID := int64(-1) - err := s.sql.WithDbSession(ctx, func(sess *db.Session) error { + err := s.sql.DB().WithDbSession(ctx, func(sess *db.Session) error { role, err := getRoleByUID(ctx, sess, accesscontrol.PrefixedRoleUID(extServiceRoleName(tt.id))) if err != nil && !errors.Is(err, accesscontrol.ErrRoleNotFound) { return err @@ -217,7 +218,7 @@ func TestAccessControlStore_DeleteExternalServiceRole(t *testing.T) { } // Assignments should be deleted - _ = s.sql.WithDbSession(ctx, func(sess *db.Session) error { + _ = s.sql.DB().WithDbSession(ctx, func(sess *db.Session) error { var assignment accesscontrol.UserRole count, err := sess.Where("role_id = ?", roleID).Count(&assignment) require.NoError(t, err) @@ -226,7 +227,7 @@ func TestAccessControlStore_DeleteExternalServiceRole(t *testing.T) { }) // Permissions should be deleted - _ = s.sql.WithDbSession(ctx, func(sess *db.Session) error { + _ = s.sql.DB().WithDbSession(ctx, func(sess *db.Session) error { var permission accesscontrol.Permission count, err := sess.Where("role_id = ?", roleID).Count(&permission) require.NoError(t, err) @@ -235,7 +236,7 @@ func TestAccessControlStore_DeleteExternalServiceRole(t *testing.T) { }) // Role should be deleted - _ = s.sql.WithDbSession(ctx, func(sess *db.Session) error { + _ = s.sql.DB().WithDbSession(ctx, func(sess *db.Session) error { storedRole, err := getRoleByUID(ctx, sess, accesscontrol.PrefixedRoleUID(extServiceRoleName(tt.id))) require.ErrorIs(t, err, accesscontrol.ErrRoleNotFound) require.Nil(t, storedRole) diff --git a/pkg/services/accesscontrol/evaluator.go b/pkg/services/accesscontrol/evaluator.go index 4b082e267d4..9a49cc15f0e 100644 --- a/pkg/services/accesscontrol/evaluator.go +++ b/pkg/services/accesscontrol/evaluator.go @@ -11,9 +11,13 @@ import ( var logger = log.New("accesscontrol.evaluator") +type CheckerFn func(action string, scope string) (bool, error) + type Evaluator interface { // Evaluate permissions that are grouped by action Evaluate(permissions map[string][]string) bool + // EvaluateCustom allows to perform evaluation with custom check function + EvaluateCustom(fn CheckerFn) (bool, error) // MutateScopes executes a sequence of ScopeModifier functions on all embedded scopes of an evaluator and returns a new Evaluator MutateScopes(ctx context.Context, mutate ScopeAttributeMutator) (Evaluator, error) // String returns a string representation of permission required by the evaluator @@ -80,6 +84,25 @@ func match(scope, target string) bool { return scope == target } +func (p permissionEvaluator) EvaluateCustom(fn CheckerFn) (bool, error) { + if len(p.Scopes) == 0 { + return fn(p.Action, "") + } + + for _, target := range p.Scopes { + matches, err := fn(p.Action, target) + if err != nil { + return false, err + } + + if matches { + return true, nil + } + } + + return false, nil +} + func (p permissionEvaluator) MutateScopes(ctx 
context.Context, mutate ScopeAttributeMutator) (Evaluator, error) { if p.Scopes == nil { return EvalPermission(p.Action), nil @@ -135,6 +158,19 @@ func (a allEvaluator) Evaluate(permissions map[string][]string) bool { return true } +func (a allEvaluator) EvaluateCustom(fn CheckerFn) (bool, error) { + for _, e := range a.allOf { + allowed, err := e.EvaluateCustom(fn) + if err != nil { + return false, err + } + if !allowed { + return false, nil + } + } + return true, nil +} + func (a allEvaluator) MutateScopes(ctx context.Context, mutate ScopeAttributeMutator) (Evaluator, error) { resolved := false modified := make([]Evaluator, 0, len(a.allOf)) @@ -195,6 +231,19 @@ func (a anyEvaluator) Evaluate(permissions map[string][]string) bool { return false } +func (a anyEvaluator) EvaluateCustom(fn CheckerFn) (bool, error) { + for _, e := range a.anyOf { + allowed, err := e.EvaluateCustom(fn) + if err != nil { + return false, err + } + if allowed { + return true, nil + } + } + return false, nil +} + func (a anyEvaluator) MutateScopes(ctx context.Context, mutate ScopeAttributeMutator) (Evaluator, error) { resolved := false modified := make([]Evaluator, 0, len(a.anyOf)) diff --git a/pkg/services/accesscontrol/middleware_test.go b/pkg/services/accesscontrol/middleware_test.go index 6374f165a5c..afdb8ef5063 100644 --- a/pkg/services/accesscontrol/middleware_test.go +++ b/pkg/services/accesscontrol/middleware_test.go @@ -10,6 +10,7 @@ import ( "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/contexthandler/ctxkey" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" "github.com/grafana/grafana/pkg/services/featuremgmt" @@ -25,7 +26,7 @@ type middlewareTestCase struct { } func TestMiddleware(t *testing.T) { - ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) tests := []middlewareTestCase{ { @@ -81,7 +82,7 @@ func TestMiddleware_forceLogin(t *testing.T) { {url: "/endpoint"}, } - ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) for _, tc := range tests { t.Run(tc.url, func(t *testing.T) { diff --git a/pkg/services/accesscontrol/migrator/migrator.go b/pkg/services/accesscontrol/migrator/migrator.go index 8ecc20ad553..33317b4b5be 100644 --- a/pkg/services/accesscontrol/migrator/migrator.go +++ b/pkg/services/accesscontrol/migrator/migrator.go @@ -19,14 +19,14 @@ const ( maxLen = 40 ) -func MigrateScopeSplit(db db.DB, log log.Logger) error { +func MigrateScopeSplit(db db.ReplDB, log log.Logger) error { t := time.Now() ctx := context.Background() cnt := 0 // Search for the permissions to update var permissions []ac.Permission - if errFind := db.WithTransactionalDbSession(ctx, func(sess *sqlstore.DBSession) error { + if errFind := db.DB().WithTransactionalDbSession(ctx, func(sess *sqlstore.DBSession) error { return sess.SQL("SELECT * FROM permission WHERE NOT scope = '' AND identifier = ''").Find(&permissions) }); errFind != nil { log.Error("Could not search for permissions to update", "migration", "scopeSplit", "error", errFind) @@ -76,7 +76,7 @@ func MigrateScopeSplit(db db.DB, log log.Logger) error { delQuery = delQuery[:len(delQuery)-1] + ")" // Batch update the permissions - if 
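// Editor's note: EvaluateCustom (added in the evaluator hunks above) lets callers supply
// their own per-(action, scope) check instead of the in-memory permission map, which is
// how evaluateZanzana routes checks to OpenFGA. Below is a self-contained miniature of
// the same idea, not the real accesscontrol package: a checker closure decides each pair.
package main

import "fmt"

type checkerFn func(action, scope string) (bool, error)

type permission struct {
	action string
	scopes []string
}

// evaluateCustom returns true if any of the permission's scopes passes the checker,
// mirroring permissionEvaluator.EvaluateCustom in the hunk above.
func (p permission) evaluateCustom(fn checkerFn) (bool, error) {
	if len(p.scopes) == 0 {
		return fn(p.action, "")
	}
	for _, scope := range p.scopes {
		ok, err := fn(p.action, scope)
		if err != nil {
			return false, err
		}
		if ok {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ev := permission{action: "dashboards:read", scopes: []string{"dashboards:uid:abc", "dashboards:uid:def"}}

	// A checker that could just as well call out to an external authorization engine.
	allowed, err := ev.evaluateCustom(func(action, scope string) (bool, error) {
		return scope == "dashboards:uid:def", nil
	})
	fmt.Println(allowed, err) // true <nil>
}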
errBatchUpdate := db.GetSqlxSession().WithTransaction(ctx, func(tx *session.SessionTx) error { + if errBatchUpdate := db.DB().GetSqlxSession().WithTransaction(ctx, func(tx *session.SessionTx) error { if _, errDel := tx.Exec(ctx, delQuery, delArgs...); errDel != nil { log.Error("Error deleting permissions", "migration", "scopeSplit", "error", errDel) return errDel diff --git a/pkg/services/accesscontrol/migrator/migrator_bench_test.go b/pkg/services/accesscontrol/migrator/migrator_bench_test.go index 5257a2fb2dd..7246b0812e3 100644 --- a/pkg/services/accesscontrol/migrator/migrator_bench_test.go +++ b/pkg/services/accesscontrol/migrator/migrator_bench_test.go @@ -10,7 +10,7 @@ import ( ) func benchScopeSplitConcurrent(b *testing.B, count int) { - store := db.InitTestDB(b) + store := db.InitTestReplDB(b) // Populate permissions require.NoError(b, batchInsertPermissions(count, store), "could not insert permissions") logger := log.New("migrator.test") diff --git a/pkg/services/accesscontrol/migrator/migrator_test.go b/pkg/services/accesscontrol/migrator/migrator_test.go index bfa35f17efc..0c98aa24a26 100644 --- a/pkg/services/accesscontrol/migrator/migrator_test.go +++ b/pkg/services/accesscontrol/migrator/migrator_test.go @@ -46,7 +46,7 @@ func batchInsertPermissions(cnt int, sqlStore db.DB) error { // TestIntegrationMigrateScopeSplit tests the scope split migration // also tests the scope split truncation logic func TestIntegrationMigrateScopeSplitTruncation(t *testing.T) { - sqlStore := db.InitTestDB(t) + sqlStore := db.InitTestReplDB(t) logger := log.New("accesscontrol.migrator.test") batchSize = 20 diff --git a/pkg/services/accesscontrol/models.go b/pkg/services/accesscontrol/models.go index 7e68c0d8091..bac02e9096c 100644 --- a/pkg/services/accesscontrol/models.go +++ b/pkg/services/accesscontrol/models.go @@ -214,19 +214,7 @@ func (p Permission) OSSPermission() Permission { // SplitScope returns kind, attribute and Identifier func (p Permission) SplitScope() (string, string, string) { - if p.Scope == "" { - return "", "", "" - } - - fragments := strings.Split(p.Scope, ":") - switch l := len(fragments); l { - case 1: // Splitting a wildcard scope "*" -> kind: "*"; attribute: "*"; identifier: "*" - return fragments[0], fragments[0], fragments[0] - case 2: // Splitting a wildcard scope with specified kind "dashboards:*" -> kind: "dashboards"; attribute: "*"; identifier: "*" - return fragments[0], fragments[1], fragments[1] - default: // Splitting a scope with all fields specified "dashboards:uid:my_dash" -> kind: "dashboards"; attribute: "uid"; identifier: "my_dash" - return fragments[0], fragments[1], strings.Join(fragments[2:], ":") - } + return SplitScope(p.Scope) } type GetUserPermissionsQuery struct { diff --git a/pkg/services/accesscontrol/resourcepermissions/service_test.go b/pkg/services/accesscontrol/resourcepermissions/service_test.go index ec208395cb8..039530fe1bf 100644 --- a/pkg/services/accesscontrol/resourcepermissions/service_test.go +++ b/pkg/services/accesscontrol/resourcepermissions/service_test.go @@ -13,6 +13,7 @@ import ( "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" "github.com/grafana/grafana/pkg/services/accesscontrol/actest" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/licensing/licensingtest" "github.com/grafana/grafana/pkg/services/org/orgimpl" @@ -289,7 +290,7 @@ func 
TestService_RegisterActionSets(t *testing.T) { if tt.actionSetsEnabled { features = featuremgmt.WithFeatures(featuremgmt.FlagAccessActionSets) } - ac := acimpl.ProvideAccessControl(features) + ac := acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()) actionSets := NewActionSetService() _, err := New( setting.NewCfg(), tt.options, features, routing.NewRouteRegister(), licensingtest.NewFakeLicensing(), @@ -335,7 +336,7 @@ func setupTestEnvironment(t *testing.T, ops Options) (*Service, user.Service, te license := licensingtest.NewFakeLicensing() license.On("FeatureEnabled", "accesscontrol.enforcement").Return(true).Maybe() acService := &actest.FakeService{} - ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) service, err := New( cfg, ops, featuremgmt.WithFeatures(), routing.NewRouteRegister(), license, ac, acService, sql, teamSvc, userSvc, NewActionSetService(), diff --git a/pkg/services/accesscontrol/scope.go b/pkg/services/accesscontrol/scope.go index 4805b25f2a1..367fd2eecfc 100644 --- a/pkg/services/accesscontrol/scope.go +++ b/pkg/services/accesscontrol/scope.go @@ -10,6 +10,23 @@ const ( maxPrefixParts = 2 ) +// SplitScope returns kind, attribute and Identifier +func SplitScope(scope string) (string, string, string) { + if scope == "" { + return "", "", "" + } + + fragments := strings.Split(scope, ":") + switch l := len(fragments); l { + case 1: // Splitting a wildcard scope "*" -> kind: "*"; attribute: "*"; identifier: "*" + return fragments[0], fragments[0], fragments[0] + case 2: // Splitting a wildcard scope with specified kind "dashboards:*" -> kind: "dashboards"; attribute: "*"; identifier: "*" + return fragments[0], fragments[1], fragments[1] + default: // Splitting a scope with all fields specified "dashboards:uid:my_dash" -> kind: "dashboards"; attribute: "uid"; identifier: "my_dash" + return fragments[0], fragments[1], strings.Join(fragments[2:], ":") + } +} + func ParseScopeID(scope string) (int64, error) { id, err := strconv.ParseInt(ScopeSuffix(scope), 10, 64) if err != nil { diff --git a/pkg/services/annotations/annotationsimpl/annotations_test.go b/pkg/services/annotations/annotationsimpl/annotations_test.go index eb603a14664..d98af424ad5 100644 --- a/pkg/services/annotations/annotationsimpl/annotations_test.go +++ b/pkg/services/annotations/annotationsimpl/annotations_test.go @@ -18,6 +18,7 @@ import ( "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" "github.com/grafana/grafana/pkg/services/annotations" "github.com/grafana/grafana/pkg/services/annotations/testutil" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/dashboards" dashboardstore "github.com/grafana/grafana/pkg/services/dashboards/database" "github.com/grafana/grafana/pkg/services/featuremgmt" @@ -225,7 +226,7 @@ func TestIntegrationAnnotationListingWithInheritedRBAC(t *testing.T) { guardian.New = origNewGuardian }) - ac := acimpl.ProvideAccessControl(features) + ac := acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()) folderSvc := folderimpl.ProvideService(ac, bus.ProvideBus(tracing.InitializeTracerForTest()), dashStore, folderimpl.ProvideDashboardFolderStore(sql), sql, features, supportbundlestest.NewFakeBundleService(), nil) cfg.AnnotationMaximumTagsLength = 60 diff --git a/pkg/services/apiserver/builder/helper.go b/pkg/services/apiserver/builder/helper.go index 8c0e84db2e2..52c47813d0e 100644 --- 
a/pkg/services/apiserver/builder/helper.go +++ b/pkg/services/apiserver/builder/helper.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/grafana/grafana/pkg/web" "github.com/prometheus/client_golang/prometheus" "golang.org/x/mod/semver" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -53,6 +54,7 @@ func SetupConfig( buildVersion string, buildCommit string, buildBranch string, + optionalMiddlewares ...web.Middleware, ) error { defsGetter := GetOpenAPIDefinitions(builders) serverConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig( @@ -98,6 +100,14 @@ func SetupConfig( handler := filters.WithTracingHTTPLoggingAttributes(requestHandler) handler = filters.WithRequester(handler) handler = genericapiserver.DefaultBuildHandlerChain(handler, c) + + // If optional middlewares include auth function, they need to happen before DefaultBuildHandlerChain + if len(optionalMiddlewares) > 0 { + for _, m := range optionalMiddlewares { + handler = m(handler) + } + } + handler = filters.WithAcceptHeader(handler) handler = filters.WithPathRewriters(handler, pathRewriters) handler = k8stracing.WithTracing(handler, serverConfig.TracerProvider, "KubernetesAPI") diff --git a/pkg/services/apiserver/options/aggregator.go b/pkg/services/apiserver/options/aggregator.go index 56728814696..7ff47ce76d6 100644 --- a/pkg/services/apiserver/options/aggregator.go +++ b/pkg/services/apiserver/options/aggregator.go @@ -81,8 +81,8 @@ func (o *AggregatorServerOptions) ApplyTo(aggregatorConfig *aggregatorapiserver. } // override the RESTOptionsGetter to use the file storage options getter restOptionsGetter, err := filestorage.NewRESTOptionsGetter(dataPath, etcdOptions.StorageConfig, - "apiregistration.k8s.io/apiservices", - "service.grafana.app/externalnames", + "/group/apiregistration.k8s.io/resource/apiservices", + "/group/service.grafana.app/resource/externalnames", ) if err != nil { return err diff --git a/pkg/services/apiserver/service.go b/pkg/services/apiserver/service.go index a4bf740a162..c5ae9a3cd35 100644 --- a/pkg/services/apiserver/service.go +++ b/pkg/services/apiserver/service.go @@ -45,6 +45,7 @@ import ( "github.com/grafana/grafana/pkg/services/store/entity/sqlstash" "github.com/grafana/grafana/pkg/setting" "github.com/grafana/grafana/pkg/storage/unified/apistore" + "github.com/grafana/grafana/pkg/storage/unified/entitybridge" "github.com/grafana/grafana/pkg/storage/unified/resource" "github.com/grafana/grafana/pkg/storage/unified/sql" ) @@ -292,40 +293,46 @@ func (s *service) start(ctx context.Context) error { serverConfig.Config.RESTOptionsGetter = apistore.NewRESTOptionsGetter(client, o.RecommendedOptions.Etcd.StorageConfig.Codec) - case grafanaapiserveroptions.StorageTypeUnified: - if !s.features.IsEnabledGlobally(featuremgmt.FlagUnifiedStorage) { - return fmt.Errorf("unified storage requires the unifiedStorage feature flag") + case grafanaapiserveroptions.StorageTypeUnified, grafanaapiserveroptions.StorageTypeUnifiedGrpc: + var client entity.EntityStoreClient + var entityServer sqlstash.SqlEntityServer + + if o.StorageOptions.StorageType == grafanaapiserveroptions.StorageTypeUnifiedGrpc { + conn, err := grpc.NewClient(o.StorageOptions.Address, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return err + } + client = entity.NewEntityStoreClientGRPC(conn) + } else { + if !s.features.IsEnabledGlobally(featuremgmt.FlagUnifiedStorage) { + return fmt.Errorf("unified storage requires the unifiedStorage feature flag") + } + + eDB, err := dbimpl.ProvideEntityDB(s.db, 
s.cfg, s.features, s.tracing) + if err != nil { + return err + } + + entityServer, err = sqlstash.ProvideSQLEntityServer(eDB, s.tracing) + if err != nil { + return err + } + client = entity.NewEntityStoreClientLocal(entityServer) } - eDB, err := dbimpl.ProvideEntityDB(s.db, s.cfg, s.features, s.tracing) - if err != nil { - return err + if false { + // Use the entity bridge + server, err := entitybridge.EntityAsResourceServer(client, entityServer, s.tracing) + if err != nil { + return err + } + serverConfig.Config.RESTOptionsGetter = apistore.NewRESTOptionsGetterForServer(server, + o.RecommendedOptions.Etcd.StorageConfig.Codec) + } else { + serverConfig.Config.RESTOptionsGetter = entitystorage.NewRESTOptionsGetter(s.cfg, + client, o.RecommendedOptions.Etcd.StorageConfig.Codec) } - storeServer, err := sqlstash.ProvideSQLEntityServer(eDB, s.tracing) - if err != nil { - return err - } - - store := entity.NewEntityStoreClientLocal(storeServer) - - serverConfig.Config.RESTOptionsGetter = entitystorage.NewRESTOptionsGetter(s.cfg, store, o.RecommendedOptions.Etcd.StorageConfig.Codec) - - case grafanaapiserveroptions.StorageTypeUnifiedGrpc: - // Create a connection to the gRPC server - conn, err := grpc.NewClient(o.StorageOptions.Address, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return err - } - - // TODO: determine when to close the connection, we cannot defer it here - // defer conn.Close() - - // Create a client instance - store := entity.NewEntityStoreClientGRPC(conn) - - serverConfig.Config.RESTOptionsGetter = entitystorage.NewRESTOptionsGetter(s.cfg, store, o.RecommendedOptions.Etcd.StorageConfig.Codec) - case grafanaapiserveroptions.StorageTypeLegacy: fallthrough case grafanaapiserveroptions.StorageTypeFile: diff --git a/pkg/services/apiserver/standalone/factory.go b/pkg/services/apiserver/standalone/factory.go index edd421e0333..1458859bcd5 100644 --- a/pkg/services/apiserver/standalone/factory.go +++ b/pkg/services/apiserver/standalone/factory.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/grafana/grafana-plugin-sdk-go/backend" + "github.com/grafana/grafana/pkg/web" "github.com/prometheus/client_golang/prometheus" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -33,6 +34,9 @@ type APIServerFactory interface { // Given the flags, what can we produce GetEnabled(runtime []RuntimeConfig) ([]schema.GroupVersion, error) + // Any optional middlewares this factory wants configured via apiserver's BuildHandlerChain facility + GetOptionalMiddlewares(tracer tracing.Tracer) []web.Middleware + // Make an API server for a given group+version MakeAPIServer(ctx context.Context, tracer tracing.Tracer, gv schema.GroupVersion) (builder.APIGroupBuilder, error) @@ -50,6 +54,10 @@ func (p *DummyAPIFactory) GetOptions() options.OptionsProvider { return nil } +func (p *DummyAPIFactory) GetOptionalMiddlewares(_ tracing.Tracer) []web.Middleware { + return []web.Middleware{} +} + func (p *DummyAPIFactory) GetEnabled(runtime []RuntimeConfig) ([]schema.GroupVersion, error) { gv := []schema.GroupVersion{} for _, cfg := range runtime { diff --git a/pkg/services/authn/authn.go b/pkg/services/authn/authn.go index 15974b1b983..87e3491dd0b 100644 --- a/pkg/services/authn/authn.go +++ b/pkg/services/authn/authn.go @@ -14,7 +14,6 @@ import ( "github.com/grafana/grafana/pkg/models/usertoken" "github.com/grafana/grafana/pkg/services/login" "github.com/grafana/grafana/pkg/setting" - "github.com/grafana/grafana/pkg/web" ) const ( @@ -74,9 +73,13 @@ type 
PostAuthHookFn func(ctx context.Context, identity *Identity, r *Request) er type PostLoginHookFn func(ctx context.Context, identity *Identity, r *Request, err error) type PreLogoutHookFn func(ctx context.Context, requester identity.Requester, sessionToken *usertoken.UserToken) error -type Service interface { +type Authenticator interface { // Authenticate authenticates a request Authenticate(ctx context.Context, r *Request) (*Identity, error) +} + +type Service interface { + Authenticator // RegisterPostAuthHook registers a hook with a priority that is called after a successful authentication. // A lower number means higher priority. RegisterPostAuthHook(hook PostAuthHookFn, priority uint) @@ -116,10 +119,9 @@ type IdentitySynchronizer interface { } type Client interface { + Authenticator // Name returns the name of a client Name() string - // Authenticate performs the authentication for the request - Authenticate(ctx context.Context, r *Request) (*Identity, error) // IsEnabled returns the enabled status of the client IsEnabled() bool } @@ -186,11 +188,6 @@ type Request struct { OrgID int64 // HTTPRequest is the original HTTP request to authenticate HTTPRequest *http.Request - - // Resp is the response writer to use for the request - // Used to set cookies and headers - Resp web.ResponseWriter - // metadata is additional information about the auth request metadata map[string]string } diff --git a/pkg/services/authn/authnimpl/service.go b/pkg/services/authn/authnimpl/service.go index 847d61fcf9a..18bfab199a2 100644 --- a/pkg/services/authn/authnimpl/service.go +++ b/pkg/services/authn/authnimpl/service.go @@ -39,6 +39,11 @@ func ProvideAuthnService(s *Service) authn.Service { return s } +// make sure service also implements authn.ServiceAuthenticateOnly interface +func ProvideAuthnServiceAuthenticateOnly(s *Service) authn.Authenticator { + return s +} + // make sure service implements authn.IdentitySynchronizer interface func ProvideIdentitySynchronizer(s *Service) authn.IdentitySynchronizer { return s diff --git a/pkg/services/authn/authnimpl/sync/oauth_token_sync.go b/pkg/services/authn/authnimpl/sync/oauth_token_sync.go index 2b4220fdfd0..1a3891570ab 100644 --- a/pkg/services/authn/authnimpl/sync/oauth_token_sync.go +++ b/pkg/services/authn/authnimpl/sync/oauth_token_sync.go @@ -55,8 +55,10 @@ func (s *OAuthTokenSync) SyncOauthTokenHook(ctx context.Context, identity *authn return nil } + ctxLogger := s.log.FromContext(ctx).New("userID", identity.ID.ID()) + _, err, _ := s.singleflightGroup.Do(identity.ID.String(), func() (interface{}, error) { - s.log.Debug("Singleflight request for OAuth token sync", "key", identity.ID.String()) + ctxLogger.Debug("Singleflight request for OAuth token sync") // FIXME: Consider using context.WithoutCancel instead of context.Background after Go 1.21 update updateCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second) @@ -69,7 +71,7 @@ func (s *OAuthTokenSync) SyncOauthTokenHook(ctx context.Context, identity *authn token, _, err := s.service.HasOAuthEntry(ctx, identity) if err != nil { - s.log.Error("Failed to get OAuth entry for verifying if token has already been refreshed", "id", identity.ID, "error", err) + ctxLogger.Error("Failed to get OAuth entry for verifying if token has already been refreshed", "id", identity.ID, "error", err) return nil, err } @@ -79,14 +81,14 @@ func (s *OAuthTokenSync) SyncOauthTokenHook(ctx context.Context, identity *authn return nil, nil } - s.log.Error("Failed to refresh OAuth access token", "id", 
identity.ID, "error", refreshErr) + ctxLogger.Error("Failed to refresh OAuth access token", "id", identity.ID, "error", refreshErr) if err := s.service.InvalidateOAuthTokens(ctx, token); err != nil { - s.log.Warn("Failed to invalidate OAuth tokens", "id", identity.ID, "error", err) + ctxLogger.Warn("Failed to invalidate OAuth tokens", "id", identity.ID, "error", err) } if err := s.sessionService.RevokeToken(ctx, identity.SessionToken, false); err != nil { - s.log.Warn("Failed to revoke session token", "id", identity.ID, "tokenId", identity.SessionToken.Id, "error", err) + ctxLogger.Warn("Failed to revoke session token", "id", identity.ID, "tokenId", identity.SessionToken.Id, "error", err) } return nil, refreshErr diff --git a/pkg/services/authn/clients/ext_jwt.go b/pkg/services/authn/clients/ext_jwt.go index f58fc48b892..bbe639ff432 100644 --- a/pkg/services/authn/clients/ext_jwt.go +++ b/pkg/services/authn/clients/ext_jwt.go @@ -20,9 +20,8 @@ import ( var _ authn.Client = new(ExtendedJWT) const ( - extJWTAuthenticationHeaderName = "X-Access-Token" - extJWTAuthorizationHeaderName = "X-Grafana-Id" - extJWTAccessTokenExpectAudience = "grafana" + ExtJWTAuthenticationHeaderName = "X-Access-Token" + ExtJWTAuthorizationHeaderName = "X-Grafana-Id" ) var ( @@ -46,7 +45,7 @@ func ProvideExtendedJWT(cfg *setting.Cfg) *ExtendedJWT { }) accessTokenVerifier := authlib.NewAccessTokenVerifier(authlib.VerifierConfig{ - AllowedAudiences: []string{extJWTAccessTokenExpectAudience}, + AllowedAudiences: cfg.ExtJWTAuth.Audiences, }, keys) // For ID tokens, we explicitly do not validate audience, hence an empty AllowedAudiences @@ -129,11 +128,19 @@ func (s *ExtendedJWT) authenticateAsUser( return nil, errExtJWTInvalidSubject.Errorf("unexpected identity: %s", userID.String()) } + // For use in service layer, allow higher privilege + allowedKubernetesNamespace := accessTokenClaims.Rest.Namespace + if len(s.cfg.StackID) > 0 { + // For single-tenant cloud use, choose the lower of the two (id token will always have the specific namespace) + allowedKubernetesNamespace = idTokenClaims.Rest.Namespace + } + return &authn.Identity{ - ID: userID, - OrgID: s.getDefaultOrgID(), - AuthenticatedBy: login.ExtendedJWTModule, - AuthID: accessID.String(), + ID: userID, + OrgID: s.getDefaultOrgID(), + AuthenticatedBy: login.ExtendedJWTModule, + AuthID: accessID.String(), + AllowedKubernetesNamespace: allowedKubernetesNamespace, ClientParams: authn.ClientParams{ SyncPermissions: true, FetchPermissionsParams: authn.FetchPermissionsParams{ @@ -159,11 +166,12 @@ func (s *ExtendedJWT) authenticateAsService(claims *authlib.Claims[authlib.Acces } return &authn.Identity{ - ID: id, - UID: id, - OrgID: s.getDefaultOrgID(), - AuthenticatedBy: login.ExtendedJWTModule, - AuthID: claims.Subject, + ID: id, + UID: id, + OrgID: s.getDefaultOrgID(), + AuthenticatedBy: login.ExtendedJWTModule, + AuthID: claims.Subject, + AllowedKubernetesNamespace: claims.Rest.Namespace, ClientParams: authn.ClientParams{ SyncPermissions: true, FetchPermissionsParams: authn.FetchPermissionsParams{ @@ -208,7 +216,7 @@ func (s *ExtendedJWT) Priority() uint { // retrieveAuthenticationToken retrieves the JWT token from the request. func (s *ExtendedJWT) retrieveAuthenticationToken(httpRequest *http.Request) string { - jwtToken := httpRequest.Header.Get(extJWTAuthenticationHeaderName) + jwtToken := httpRequest.Header.Get(ExtJWTAuthenticationHeaderName) // Strip the 'Bearer' prefix if it exists. 
return strings.TrimPrefix(jwtToken, "Bearer ") @@ -216,7 +224,7 @@ func (s *ExtendedJWT) retrieveAuthenticationToken(httpRequest *http.Request) str // retrieveAuthorizationToken retrieves the JWT token from the request. func (s *ExtendedJWT) retrieveAuthorizationToken(httpRequest *http.Request) string { - jwtToken := httpRequest.Header.Get(extJWTAuthorizationHeaderName) + jwtToken := httpRequest.Header.Get(ExtJWTAuthorizationHeaderName) // Strip the 'Bearer' prefix if it exists. return strings.TrimPrefix(jwtToken, "Bearer ") diff --git a/pkg/services/authn/clients/ext_jwt_test.go b/pkg/services/authn/clients/ext_jwt_test.go index 7f072a6b5e8..2413fb38e23 100644 --- a/pkg/services/authn/clients/ext_jwt_test.go +++ b/pkg/services/authn/clients/ext_jwt_test.go @@ -51,6 +51,17 @@ var ( Namespace: "default", // org ID of 1 is special and translates to default }, } + validIDTokenClaimsWithStackSet = idTokenClaims{ + Claims: &jwt.Claims{ + Subject: "user:2", + Expiry: jwt.NewNumericDate(time.Date(2023, 5, 3, 0, 0, 0, 0, time.UTC)), + IssuedAt: jwt.NewNumericDate(time.Date(2023, 5, 2, 0, 0, 0, 0, time.UTC)), + }, + Rest: authnlib.IDTokenClaims{ + AuthenticatedBy: "extended_jwt", + Namespace: "stack-1234", + }, + } validAcessTokenClaimsWildcard = accessTokenClaims{ Claims: &jwt.Claims{ Subject: "access-policy:this-uid", @@ -173,7 +184,6 @@ func TestExtendedJWT_Test(t *testing.T) { actual := env.s.Test(context.Background(), &authn.Request{ HTTPRequest: validHTTPReq, - Resp: nil, }) assert.Equal(t, tc.want, actual) @@ -184,6 +194,7 @@ func TestExtendedJWT_Test(t *testing.T) { func TestExtendedJWT_Authenticate(t *testing.T) { type testCase struct { name string + cfg *setting.Cfg // optional, only used when overriding the cfg provided by default test setup accessToken *accessTokenClaims idToken *idTokenClaims orgID int64 @@ -196,11 +207,12 @@ func TestExtendedJWT_Authenticate(t *testing.T) { accessToken: &validAccessTokenClaims, orgID: 1, want: &authn.Identity{ - ID: authn.MustParseNamespaceID("access-policy:this-uid"), - UID: authn.MustParseNamespaceID("access-policy:this-uid"), - OrgID: 1, - AuthenticatedBy: "extendedjwt", - AuthID: "access-policy:this-uid", + ID: authn.MustParseNamespaceID("access-policy:this-uid"), + UID: authn.MustParseNamespaceID("access-policy:this-uid"), + OrgID: 1, + AllowedKubernetesNamespace: "default", + AuthenticatedBy: "extendedjwt", + AuthID: "access-policy:this-uid", ClientParams: authn.ClientParams{ SyncPermissions: true, FetchPermissionsParams: authn.FetchPermissionsParams{Roles: []string{"fixed:folders:reader"}}}, @@ -211,11 +223,12 @@ func TestExtendedJWT_Authenticate(t *testing.T) { accessToken: &validAcessTokenClaimsWildcard, orgID: 1, want: &authn.Identity{ - ID: authn.MustParseNamespaceID("access-policy:this-uid"), - UID: authn.MustParseNamespaceID("access-policy:this-uid"), - OrgID: 1, - AuthenticatedBy: "extendedjwt", - AuthID: "access-policy:this-uid", + ID: authn.MustParseNamespaceID("access-policy:this-uid"), + UID: authn.MustParseNamespaceID("access-policy:this-uid"), + OrgID: 1, + AllowedKubernetesNamespace: "*", + AuthenticatedBy: "extendedjwt", + AuthID: "access-policy:this-uid", ClientParams: authn.ClientParams{ SyncPermissions: true, }, @@ -227,10 +240,11 @@ func TestExtendedJWT_Authenticate(t *testing.T) { idToken: &validIDTokenClaims, orgID: 1, want: &authn.Identity{ - ID: authn.MustParseNamespaceID("user:2"), - OrgID: 1, - AuthenticatedBy: "extendedjwt", - AuthID: "access-policy:this-uid", + ID: authn.MustParseNamespaceID("user:2"), + OrgID: 1, + 
AllowedKubernetesNamespace: "default", + AuthenticatedBy: "extendedjwt", + AuthID: "access-policy:this-uid", ClientParams: authn.ClientParams{ FetchSyncedUser: true, SyncPermissions: true, @@ -246,10 +260,36 @@ func TestExtendedJWT_Authenticate(t *testing.T) { idToken: &validIDTokenClaims, orgID: 1, want: &authn.Identity{ - ID: authn.MustParseNamespaceID("user:2"), - OrgID: 1, - AuthenticatedBy: "extendedjwt", - AuthID: "access-policy:this-uid", + ID: authn.MustParseNamespaceID("user:2"), + OrgID: 1, + AllowedKubernetesNamespace: "*", + AuthenticatedBy: "extendedjwt", + AuthID: "access-policy:this-uid", + ClientParams: authn.ClientParams{ + FetchSyncedUser: true, + SyncPermissions: true, + }, + }, + }, + { + name: "should authenticate as user using wildcard namespace for access token, setting allowed namespace to specific", + accessToken: &validAcessTokenClaimsWildcard, + idToken: &validIDTokenClaimsWithStackSet, + orgID: 1, + cfg: &setting.Cfg{ + // default org set up by the authenticator is 1 + StackID: "1234", + ExtJWTAuth: setting.ExtJWTSettings{ + Enabled: true, + ExpectIssuer: "http://localhost:3000", + }, + }, + want: &authn.Identity{ + ID: authn.MustParseNamespaceID("user:2"), + OrgID: 1, + AllowedKubernetesNamespace: "stack-1234", + AuthenticatedBy: "extendedjwt", + AuthID: "access-policy:this-uid", ClientParams: authn.ClientParams{ FetchSyncedUser: true, SyncPermissions: true, @@ -302,7 +342,7 @@ func TestExtendedJWT_Authenticate(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - env := setupTestCtx(nil) + env := setupTestCtx(tc.cfg) validHTTPReq := &http.Request{ Header: map[string][]string{ @@ -314,13 +354,12 @@ func TestExtendedJWT_Authenticate(t *testing.T) { if tc.idToken != nil { env.s.accessTokenVerifier = &mockVerifier{Claims: *tc.accessToken} env.s.idTokenVerifier = &mockIDVerifier{Claims: *tc.idToken} - validHTTPReq.Header.Add(extJWTAuthorizationHeaderName, generateIDToken(*tc.idToken, pk, jose.RS256)) + validHTTPReq.Header.Add(ExtJWTAuthorizationHeaderName, generateIDToken(*tc.idToken, pk, jose.RS256)) } id, err := env.s.Authenticate(context.Background(), &authn.Request{ OrgID: tc.orgID, HTTPRequest: validHTTPReq, - Resp: nil, }) if tc.wantErr != nil { assert.ErrorIs(t, err, tc.wantErr) diff --git a/pkg/services/authn/clients/jwt_test.go b/pkg/services/authn/clients/jwt_test.go index 73cac5818f2..c4381f3945e 100644 --- a/pkg/services/authn/clients/jwt_test.go +++ b/pkg/services/authn/clients/jwt_test.go @@ -155,7 +155,6 @@ func TestAuthenticateJWT(t *testing.T) { id, err := jwtClient.Authenticate(context.Background(), &authn.Request{ OrgID: 1, HTTPRequest: validHTTPReq, - Resp: nil, }) require.NoError(t, err) @@ -267,7 +266,6 @@ func TestJWTClaimConfig(t *testing.T) { _, err := jwtClient.Authenticate(context.Background(), &authn.Request{ OrgID: 1, HTTPRequest: httpReq, - Resp: nil, }) if tc.valid { require.NoError(t, err) @@ -384,7 +382,6 @@ func TestJWTTest(t *testing.T) { got := jwtClient.Test(context.Background(), &authn.Request{ OrgID: 1, HTTPRequest: httpReq, - Resp: nil, }) require.Equal(t, tc.want, got) @@ -432,7 +429,6 @@ func TestJWTStripParam(t *testing.T) { _, err := jwtClient.Authenticate(context.Background(), &authn.Request{ OrgID: 1, HTTPRequest: httpReq, - Resp: nil, }) require.NoError(t, err) // auth_token should be removed from the query string @@ -489,7 +485,6 @@ func TestJWTSubClaimsConfig(t *testing.T) { identity, err := jwtClient.Authenticate(context.Background(), &authn.Request{ OrgID: 1, HTTPRequest: httpReq, - 
Resp: nil, }) require.NoError(t, err) require.Equal(t, "mainemail+extraemail02@gmail.com", identity.Email) diff --git a/pkg/services/authn/identity.go b/pkg/services/authn/identity.go index 6133b58d1d1..d1094025c5c 100644 --- a/pkg/services/authn/identity.go +++ b/pkg/services/authn/identity.go @@ -47,6 +47,8 @@ type Identity struct { // AuthId is the unique identifier for the entity in the external system. // Empty if the identity is provided by Grafana. AuthID string + // AllowedKubernetesNamespace + AllowedKubernetesNamespace string // IsDisabled is true if the entity is disabled. IsDisabled bool // HelpFlags1 is the help flags for the entity. @@ -127,6 +129,10 @@ func (i *Identity) GetLogin() string { return i.Login } +func (i *Identity) GetAllowedKubernetesNamespace() string { + return i.AllowedKubernetesNamespace +} + func (i *Identity) GetOrgID() int64 { return i.OrgID } diff --git a/pkg/services/authz/zanzana/client/client.go b/pkg/services/authz/zanzana/client/client.go index e4c60780a40..cc31cc53d93 100644 --- a/pkg/services/authz/zanzana/client/client.go +++ b/pkg/services/authz/zanzana/client/client.go @@ -28,9 +28,16 @@ func WithLogger(logger log.Logger) ClientOption { } } +func WithSchema(dsl string) ClientOption { + return func(c *Client) { + c.dsl = dsl + } +} + type Client struct { logger log.Logger client openfgav1.OpenFGAServiceClient + dsl string tenantID string storeID string modelID string @@ -53,6 +60,10 @@ func New(ctx context.Context, cc grpc.ClientConnInterface, opts ...ClientOption) c.tenantID = "stack-default" } + if c.dsl == "" { + c.dsl = schema.DSL + } + store, err := c.getOrCreateStore(ctx, c.tenantID) if err != nil { return nil, err @@ -60,7 +71,7 @@ func New(ctx context.Context, cc grpc.ClientConnInterface, opts ...ClientOption) c.storeID = store.GetId() - modelID, err := c.loadModel(ctx, c.storeID, schema.DSL) + modelID, err := c.loadModel(ctx, c.storeID, c.dsl) if err != nil { return nil, err } @@ -165,7 +176,7 @@ func (c *Client) loadModel(ctx context.Context, storeID string, dsl string) (str // If provided dsl is equal to a stored dsl we use that as the authorization id if schema.EqualModels(dsl, storedDSL) { - return res.AuthorizationModels[0].GetId(), nil + return model.GetId(), nil } } diff --git a/pkg/services/authz/zanzana/client/client_test.go b/pkg/services/authz/zanzana/client/client_test.go new file mode 100644 index 00000000000..523fc665e36 --- /dev/null +++ b/pkg/services/authz/zanzana/client/client_test.go @@ -0,0 +1,113 @@ +package client + +import ( + "context" + "testing" + + openfgav1 "github.com/openfga/api/proto/openfga/v1" + + "github.com/fullstorydev/grpchan/inprocgrpc" + "github.com/grafana/grafana/pkg/infra/db" + "github.com/grafana/grafana/pkg/infra/log" + "github.com/grafana/grafana/pkg/tests/testsuite" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + zserver "github.com/grafana/grafana/pkg/services/authz/zanzana/server" + zstore "github.com/grafana/grafana/pkg/services/authz/zanzana/store" + "github.com/grafana/grafana/pkg/services/sqlstore/migrator" +) + +func TestMain(m *testing.M) { + testsuite.Run(m) +} + +func TestIntegrationClient(t *testing.T) { + conn := zanzanaServerIntegrationTest(t) + + var ( + prevStoreID string + prevModelID string + ) + + t.Run("should create default store and authorization model on first startup", func(t *testing.T) { + c, err := New(context.Background(), conn) + require.NoError(t, err) + + assert.NotEmpty(t, c.storeID) + assert.NotEmpty(t, c.modelID) + + 
prevStoreID, prevModelID = c.storeID, c.modelID + }) + + t.Run("should reuse existing store and authorization model", func(t *testing.T) { + c, err := New(context.Background(), conn) + require.NoError(t, err) + + assert.Equal(t, prevStoreID, c.storeID) + assert.Equal(t, prevModelID, c.modelID) + }) + + t.Run("should create new store and authorization model when new tenant id is used", func(t *testing.T) { + c, err := New(context.Background(), conn, WithTenantID("new")) + require.NoError(t, err) + + assert.NotEmpty(t, c.storeID) + assert.NotEmpty(t, c.modelID) + + assert.NotEqual(t, prevStoreID, c.storeID) + assert.NotEqual(t, prevModelID, c.modelID) + + prevStoreID, prevModelID = c.storeID, c.modelID + }) + + t.Run("should update authorization model if it has new changes", func(t *testing.T) { + dsl := ` +model + schema 1.1 + +type user + ` + c, err := New(context.Background(), conn, WithTenantID("new"), WithSchema(dsl)) + require.NoError(t, err) + + assert.Equal(t, prevStoreID, c.storeID) + assert.NotEqual(t, prevModelID, c.modelID) + }) + + t.Run("should load older authorization model", func(t *testing.T) { + c, err := New(context.Background(), conn, WithTenantID("new")) + require.NoError(t, err) + + assert.Equal(t, prevStoreID, c.storeID) + assert.Equal(t, prevModelID, c.modelID) + }) +} + +func zanzanaServerIntegrationTest(tb testing.TB) *inprocgrpc.Channel { + if testing.Short() { + tb.Skip("skipping integration test") + } + + db, cfg := db.InitTestDBWithCfg(tb) + + // Hack to skip these tests on mysql 5.7 + if db.GetDialect().DriverName() == migrator.MySQL { + if supported, err := db.RecursiveQueriesAreSupported(); !supported || err != nil { + tb.Skip("skipping integration test") + } + } + + logger := log.NewNopLogger() + + store, err := zstore.NewEmbeddedStore(cfg, db, logger) + require.NoError(tb, err) + + srv, err := zserver.New(store, logger) + require.NoError(tb, err) + + channel := &inprocgrpc.Channel{} + openfgav1.RegisterOpenFGAServiceServer(channel, srv) + + return channel +} diff --git a/pkg/services/authz/zanzana/logger.go b/pkg/services/authz/zanzana/logger/logger.go similarity index 59% rename from pkg/services/authz/zanzana/logger.go rename to pkg/services/authz/zanzana/logger/logger.go index 79936df85e0..ac953ed4f15 100644 --- a/pkg/services/authz/zanzana/logger.go +++ b/pkg/services/authz/zanzana/logger/logger.go @@ -1,4 +1,4 @@ -package zanzana +package logger import ( "context" @@ -8,13 +8,13 @@ import ( "github.com/grafana/grafana/pkg/infra/log" ) -// zanzanaLogger is a grafana logger wrapper compatible with OpenFGA logger interface -type zanzanaLogger struct { +// ZanzanaLogger is a grafana logger wrapper compatible with OpenFGA logger interface +type ZanzanaLogger struct { logger log.Logger } -func newZanzanaLogger(logger log.Logger) *zanzanaLogger { - return &zanzanaLogger{ +func New(logger log.Logger) *ZanzanaLogger { + return &ZanzanaLogger{ logger: logger, } } @@ -36,50 +36,50 @@ func zapFieldsToArgs(fields []zap.Field) []any { return args } -func (l *zanzanaLogger) Debug(msg string, fields ...zap.Field) { +func (l *ZanzanaLogger) Debug(msg string, fields ...zap.Field) { l.logger.Debug(msg, zapFieldsToArgs(fields)...) } -func (l *zanzanaLogger) Info(msg string, fields ...zap.Field) { +func (l *ZanzanaLogger) Info(msg string, fields ...zap.Field) { l.logger.Info(msg, zapFieldsToArgs(fields)...) 
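The integration test above also exercises the new client options; outside of tests the same options let a caller pin the tenant and supply its own authorization-model DSL instead of the built-in schema.DSL. A minimal sketch, assuming a gRPC connection (or in-process channel) to an OpenFGA-compatible server is already available; the tenant ID, DSL and function name are placeholders:

package clientexample

import (
	"context"

	"google.golang.org/grpc"

	zclient "github.com/grafana/grafana/pkg/services/authz/zanzana/client"
)

// customDSL is a deliberately tiny OpenFGA model, mirroring the one used in
// the test above; any valid DSL can be supplied.
const customDSL = `
model
  schema 1.1

type user
`

// newTenantClient builds a zanzana client against its own store: WithTenantID
// selects (or creates) the store, WithSchema replaces the default schema.DSL
// when the authorization model is written.
func newTenantClient(ctx context.Context, cc grpc.ClientConnInterface) (*zclient.Client, error) {
	return zclient.New(ctx, cc,
		zclient.WithTenantID("stack-1234"),
		zclient.WithSchema(customDSL),
	)
}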
} -func (l *zanzanaLogger) Warn(msg string, fields ...zap.Field) { +func (l *ZanzanaLogger) Warn(msg string, fields ...zap.Field) { l.logger.Warn(msg, zapFieldsToArgs(fields)...) } -func (l *zanzanaLogger) Error(msg string, fields ...zap.Field) { +func (l *ZanzanaLogger) Error(msg string, fields ...zap.Field) { l.logger.Error(msg, zapFieldsToArgs(fields)...) } -func (l *zanzanaLogger) Panic(msg string, fields ...zap.Field) { +func (l *ZanzanaLogger) Panic(msg string, fields ...zap.Field) { l.logger.Error(msg, zapFieldsToArgs(fields)...) } -func (l *zanzanaLogger) Fatal(msg string, fields ...zap.Field) { +func (l *ZanzanaLogger) Fatal(msg string, fields ...zap.Field) { l.logger.Error(msg, zapFieldsToArgs(fields)...) } -func (l *zanzanaLogger) DebugWithContext(ctx context.Context, msg string, fields ...zap.Field) { +func (l *ZanzanaLogger) DebugWithContext(ctx context.Context, msg string, fields ...zap.Field) { l.logger.Debug(msg, zapFieldsToArgs(fields)...) } -func (l *zanzanaLogger) InfoWithContext(ctx context.Context, msg string, fields ...zap.Field) { +func (l *ZanzanaLogger) InfoWithContext(ctx context.Context, msg string, fields ...zap.Field) { l.logger.Info(msg, zapFieldsToArgs(fields)...) } -func (l *zanzanaLogger) WarnWithContext(ctx context.Context, msg string, fields ...zap.Field) { +func (l *ZanzanaLogger) WarnWithContext(ctx context.Context, msg string, fields ...zap.Field) { l.logger.Warn(msg, zapFieldsToArgs(fields)...) } -func (l *zanzanaLogger) ErrorWithContext(ctx context.Context, msg string, fields ...zap.Field) { +func (l *ZanzanaLogger) ErrorWithContext(ctx context.Context, msg string, fields ...zap.Field) { l.logger.Error(msg, zapFieldsToArgs(fields)...) } -func (l *zanzanaLogger) PanicWithContext(ctx context.Context, msg string, fields ...zap.Field) { +func (l *ZanzanaLogger) PanicWithContext(ctx context.Context, msg string, fields ...zap.Field) { l.logger.Error(msg, zapFieldsToArgs(fields)...) } -func (l *zanzanaLogger) FatalWithContext(ctx context.Context, msg string, fields ...zap.Field) { +func (l *ZanzanaLogger) FatalWithContext(ctx context.Context, msg string, fields ...zap.Field) { l.logger.Error(msg, zapFieldsToArgs(fields)...) 
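The renamed logger package keeps the old behaviour: zap fields coming from OpenFGA are flattened into Grafana's key/value pairs by zapFieldsToArgs. A minimal sketch of the exported constructor being used directly; the logger name and field are arbitrary:

package loggerexample

import (
	"go.uber.org/zap"

	"github.com/grafana/grafana/pkg/infra/log"
	zlogger "github.com/grafana/grafana/pkg/services/authz/zanzana/logger"
)

func logStoreCreated(storeID string) {
	l := zlogger.New(log.New("zanzana"))
	// The zap.String field below ends up as an ordinary key/value pair on the
	// underlying Grafana logger.
	l.Info("store created", zap.String("storeID", storeID))
}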
} diff --git a/pkg/services/authz/zanzana/server.go b/pkg/services/authz/zanzana/server.go index 787ff6acab9..3d6b0dc4a6a 100644 --- a/pkg/services/authz/zanzana/server.go +++ b/pkg/services/authz/zanzana/server.go @@ -1,110 +1,20 @@ package zanzana import ( - "context" - "fmt" - "net/http" - "time" - - "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" - openfgav1 "github.com/openfga/api/proto/openfga/v1" - httpmiddleware "github.com/openfga/openfga/pkg/middleware/http" "github.com/openfga/openfga/pkg/server" - serverErrors "github.com/openfga/openfga/pkg/server/errors" "github.com/openfga/openfga/pkg/storage" - "github.com/rs/cors" - "go.uber.org/zap/zapcore" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - healthv1pb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/status" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/services/grpcserver" "github.com/grafana/grafana/pkg/setting" + + zserver "github.com/grafana/grafana/pkg/services/authz/zanzana/server" ) func NewServer(store storage.OpenFGADatastore, logger log.Logger) (*server.Server, error) { - // FIXME(kalleep): add support for more options, tracing etc - opts := []server.OpenFGAServiceV1Option{ - server.WithDatastore(store), - server.WithLogger(newZanzanaLogger(logger)), - } - - // FIXME(kalleep): Interceptors - // We probably need to at least need to add store id interceptor also - // would be nice to inject our own requestid? - srv, err := server.NewServerWithOpts(opts...) - if err != nil { - return nil, err - } - - return srv, nil + return zserver.New(store, logger) } -// StartOpenFGAHttpSever starts HTTP server which allows to use fga cli. func StartOpenFGAHttpSever(cfg *setting.Cfg, srv grpcserver.Provider, logger log.Logger) error { - dialOpts := []grpc.DialOption{ - grpc.WithTransportCredentials(insecure.NewCredentials()), - } - - addr := srv.GetAddress() - // Wait until GRPC server is initialized - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - maxRetries := 100 - retries := 0 - for addr == "" && retries < maxRetries { - <-ticker.C - addr = srv.GetAddress() - retries++ - } - if addr == "" { - return fmt.Errorf("failed to start HTTP server: GRPC server unavailable") - } - - conn, err := grpc.NewClient(addr, dialOpts...) - if err != nil { - return fmt.Errorf("unable to dial GRPC: %w", err) - } - - muxOpts := []runtime.ServeMuxOption{ - runtime.WithForwardResponseOption(httpmiddleware.HTTPResponseModifier), - runtime.WithErrorHandler(func(c context.Context, - sr *runtime.ServeMux, mm runtime.Marshaler, w http.ResponseWriter, r *http.Request, e error) { - intCode := serverErrors.ConvertToEncodedErrorCode(status.Convert(e)) - httpmiddleware.CustomHTTPErrorHandler(c, w, r, serverErrors.NewEncodedError(intCode, e.Error())) - }), - runtime.WithStreamErrorHandler(func(ctx context.Context, e error) *status.Status { - intCode := serverErrors.ConvertToEncodedErrorCode(status.Convert(e)) - encodedErr := serverErrors.NewEncodedError(intCode, e.Error()) - return status.Convert(encodedErr) - }), - runtime.WithHealthzEndpoint(healthv1pb.NewHealthClient(conn)), - runtime.WithOutgoingHeaderMatcher(func(s string) (string, bool) { return s, true }), - } - mux := runtime.NewServeMux(muxOpts...) 
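Putting the refactored pieces together, an embedded setup follows the same shape as the client integration test above: build the datastore, wrap it in the OpenFGA server, and talk to it over an in-process gRPC channel. A condensed sketch, assuming a *setting.Cfg, a Grafana db.DB and a logger are already available; the helper name is illustrative:

package zanzanaexample

import (
	"context"

	"github.com/fullstorydev/grpchan/inprocgrpc"
	openfgav1 "github.com/openfga/api/proto/openfga/v1"

	"github.com/grafana/grafana/pkg/infra/db"
	"github.com/grafana/grafana/pkg/infra/log"
	zclient "github.com/grafana/grafana/pkg/services/authz/zanzana/client"
	zserver "github.com/grafana/grafana/pkg/services/authz/zanzana/server"
	zstore "github.com/grafana/grafana/pkg/services/authz/zanzana/store"
	"github.com/grafana/grafana/pkg/setting"
)

// newEmbeddedClient wires datastore -> OpenFGA server -> in-process gRPC
// channel -> zanzana client, mirroring zanzanaServerIntegrationTest above.
func newEmbeddedClient(ctx context.Context, cfg *setting.Cfg, sqlDB db.DB, logger log.Logger) (*zclient.Client, error) {
	store, err := zstore.NewEmbeddedStore(cfg, sqlDB, logger)
	if err != nil {
		return nil, err
	}
	srv, err := zserver.New(store, logger)
	if err != nil {
		return nil, err
	}
	channel := &inprocgrpc.Channel{}
	openfgav1.RegisterOpenFGAServiceServer(channel, srv)
	return zclient.New(ctx, channel, zclient.WithLogger(logger))
}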
- if err := openfgav1.RegisterOpenFGAServiceHandler(context.TODO(), mux, conn); err != nil { - return fmt.Errorf("failed to register gateway handler: %w", err) - } - - httpServer := &http.Server{ - Addr: cfg.Zanzana.HttpAddr, - Handler: cors.New(cors.Options{ - AllowedOrigins: []string{"*"}, - AllowCredentials: true, - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{http.MethodGet, http.MethodPost, - http.MethodHead, http.MethodPatch, http.MethodDelete, http.MethodPut}, - }).Handler(mux), - ReadHeaderTimeout: 30 * time.Second, - } - go func() { - err = httpServer.ListenAndServe() - if err != nil { - logger.Error("failed to start http server", zapcore.Field{Key: "err", Type: zapcore.ErrorType, Interface: err}) - } - }() - logger.Info(fmt.Sprintf("OpenFGA HTTP server listening on '%s'...", httpServer.Addr)) - return nil + return zserver.StartOpenFGAHttpSever(cfg, srv, logger) } diff --git a/pkg/services/authz/zanzana/server/server.go b/pkg/services/authz/zanzana/server/server.go new file mode 100644 index 00000000000..49baf954fde --- /dev/null +++ b/pkg/services/authz/zanzana/server/server.go @@ -0,0 +1,113 @@ +package server + +import ( + "context" + "fmt" + "net/http" + "time" + + openfgav1 "github.com/openfga/api/proto/openfga/v1" + httpmiddleware "github.com/openfga/openfga/pkg/middleware/http" + "github.com/openfga/openfga/pkg/server" + serverErrors "github.com/openfga/openfga/pkg/server/errors" + "github.com/openfga/openfga/pkg/storage" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/rs/cors" + "go.uber.org/zap/zapcore" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + healthv1pb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" + + "github.com/grafana/grafana/pkg/infra/log" + "github.com/grafana/grafana/pkg/services/grpcserver" + "github.com/grafana/grafana/pkg/setting" + + zlogger "github.com/grafana/grafana/pkg/services/authz/zanzana/logger" +) + +func New(store storage.OpenFGADatastore, logger log.Logger) (*server.Server, error) { + // FIXME(kalleep): add support for more options, tracing etc + opts := []server.OpenFGAServiceV1Option{ + server.WithDatastore(store), + server.WithLogger(zlogger.New(logger)), + } + + // FIXME(kalleep): Interceptors + // We probably need to at least need to add store id interceptor also + // would be nice to inject our own requestid? + srv, err := server.NewServerWithOpts(opts...) + if err != nil { + return nil, err + } + + return srv, nil +} + +// StartOpenFGAHttpSever starts HTTP server which allows to use fga cli. +func StartOpenFGAHttpSever(cfg *setting.Cfg, srv grpcserver.Provider, logger log.Logger) error { + dialOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + } + + addr := srv.GetAddress() + // Wait until GRPC server is initialized + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + maxRetries := 100 + retries := 0 + for addr == "" && retries < maxRetries { + <-ticker.C + addr = srv.GetAddress() + retries++ + } + if addr == "" { + return fmt.Errorf("failed to start HTTP server: GRPC server unavailable") + } + + conn, err := grpc.NewClient(addr, dialOpts...) 
+ if err != nil { + return fmt.Errorf("unable to dial GRPC: %w", err) + } + + muxOpts := []runtime.ServeMuxOption{ + runtime.WithForwardResponseOption(httpmiddleware.HTTPResponseModifier), + runtime.WithErrorHandler(func(c context.Context, + sr *runtime.ServeMux, mm runtime.Marshaler, w http.ResponseWriter, r *http.Request, e error) { + intCode := serverErrors.ConvertToEncodedErrorCode(status.Convert(e)) + httpmiddleware.CustomHTTPErrorHandler(c, w, r, serverErrors.NewEncodedError(intCode, e.Error())) + }), + runtime.WithStreamErrorHandler(func(ctx context.Context, e error) *status.Status { + intCode := serverErrors.ConvertToEncodedErrorCode(status.Convert(e)) + encodedErr := serverErrors.NewEncodedError(intCode, e.Error()) + return status.Convert(encodedErr) + }), + runtime.WithHealthzEndpoint(healthv1pb.NewHealthClient(conn)), + runtime.WithOutgoingHeaderMatcher(func(s string) (string, bool) { return s, true }), + } + mux := runtime.NewServeMux(muxOpts...) + if err := openfgav1.RegisterOpenFGAServiceHandler(context.TODO(), mux, conn); err != nil { + return fmt.Errorf("failed to register gateway handler: %w", err) + } + + httpServer := &http.Server{ + Addr: cfg.Zanzana.HttpAddr, + Handler: cors.New(cors.Options{ + AllowedOrigins: []string{"*"}, + AllowCredentials: true, + AllowedHeaders: []string{"*"}, + AllowedMethods: []string{http.MethodGet, http.MethodPost, + http.MethodHead, http.MethodPatch, http.MethodDelete, http.MethodPut}, + }).Handler(mux), + ReadHeaderTimeout: 30 * time.Second, + } + go func() { + err = httpServer.ListenAndServe() + if err != nil { + logger.Error("failed to start http server", zapcore.Field{Key: "err", Type: zapcore.ErrorType, Interface: err}) + } + }() + logger.Info(fmt.Sprintf("OpenFGA HTTP server listening on '%s'...", httpServer.Addr)) + return nil +} diff --git a/pkg/services/authz/zanzana/store.go b/pkg/services/authz/zanzana/store.go index 380d3d08ba1..67361386744 100644 --- a/pkg/services/authz/zanzana/store.go +++ b/pkg/services/authz/zanzana/store.go @@ -1,128 +1,18 @@ package zanzana import ( - "fmt" - "time" - - "xorm.io/xorm" - - "github.com/openfga/openfga/assets" "github.com/openfga/openfga/pkg/storage" - "github.com/openfga/openfga/pkg/storage/mysql" - "github.com/openfga/openfga/pkg/storage/postgres" - "github.com/openfga/openfga/pkg/storage/sqlcommon" "github.com/grafana/grafana/pkg/infra/db" "github.com/grafana/grafana/pkg/infra/log" - zstore "github.com/grafana/grafana/pkg/services/authz/zanzana/store" - "github.com/grafana/grafana/pkg/services/authz/zanzana/store/migration" - "github.com/grafana/grafana/pkg/services/authz/zanzana/store/sqlite" - "github.com/grafana/grafana/pkg/services/sqlstore" - "github.com/grafana/grafana/pkg/services/sqlstore/migrator" "github.com/grafana/grafana/pkg/setting" + + "github.com/grafana/grafana/pkg/services/authz/zanzana/store" ) -// FIXME(kalleep): Add sqlite data store. 
-// There is no support for sqlite atm but we are working on adding it: https://github.com/openfga/openfga/pull/1615 func NewStore(cfg *setting.Cfg, logger log.Logger) (storage.OpenFGADatastore, error) { - grafanaDBCfg, zanzanaDBCfg, err := parseConfig(cfg, logger) - if err != nil { - return nil, fmt.Errorf("failed to parse database config: %w", err) - } - - switch grafanaDBCfg.Type { - case migrator.SQLite: - connStr := grafanaDBCfg.ConnectionString - // Initilize connection using xorm engine so we can reuse it for both migrations and data store - engine, err := xorm.NewEngine(grafanaDBCfg.Type, connStr) - if err != nil { - return nil, fmt.Errorf("failed to connect to database: %w", err) - } - - m := migrator.NewMigrator(engine, cfg) - if err := migration.RunWithMigrator(m, cfg, zstore.EmbedMigrations, zstore.SQLiteMigrationDir); err != nil { - return nil, fmt.Errorf("failed to run migrations: %w", err) - } - - return sqlite.NewWithDB(engine.DB().DB, &sqlite.Config{ - Config: zanzanaDBCfg, - QueryRetries: grafanaDBCfg.QueryRetries, - }) - case migrator.MySQL: - // For mysql we need to pass parseTime parameter in connection string - connStr := grafanaDBCfg.ConnectionString + "&parseTime=true" - if err := migration.Run(cfg, migrator.MySQL, connStr, assets.EmbedMigrations, assets.MySQLMigrationDir); err != nil { - return nil, fmt.Errorf("failed to run migrations: %w", err) - } - - return mysql.New(connStr, zanzanaDBCfg) - case migrator.Postgres: - connStr := grafanaDBCfg.ConnectionString - if err := migration.Run(cfg, migrator.Postgres, connStr, assets.EmbedMigrations, assets.PostgresMigrationDir); err != nil { - return nil, fmt.Errorf("failed to run migrations: %w", err) - } - - return postgres.New(connStr, zanzanaDBCfg) - } - - // Should never happen - return nil, fmt.Errorf("unsupported database engine: %s", grafanaDBCfg.Type) + return store.NewStore(cfg, logger) } - func NewEmbeddedStore(cfg *setting.Cfg, db db.DB, logger log.Logger) (storage.OpenFGADatastore, error) { - grafanaDBCfg, zanzanaDBCfg, err := parseConfig(cfg, logger) - if err != nil { - return nil, fmt.Errorf("failed to parse database config: %w", err) - } - - m := migrator.NewMigrator(db.GetEngine(), cfg) - - switch grafanaDBCfg.Type { - case migrator.SQLite: - if err := migration.RunWithMigrator(m, cfg, zstore.EmbedMigrations, zstore.SQLiteMigrationDir); err != nil { - return nil, fmt.Errorf("failed to run migrations: %w", err) - } - - // FIXME(kalleep): We should work on getting sqlite implemtation merged upstream and replace this one - return sqlite.NewWithDB(db.GetEngine().DB().DB, &sqlite.Config{ - Config: zanzanaDBCfg, - QueryRetries: grafanaDBCfg.QueryRetries, - }) - case migrator.MySQL: - if err := migration.RunWithMigrator(m, cfg, assets.EmbedMigrations, assets.MySQLMigrationDir); err != nil { - return nil, fmt.Errorf("failed to run migrations: %w", err) - } - - // For mysql we need to pass parseTime parameter in connection string - return mysql.New(grafanaDBCfg.ConnectionString+"&parseTime=true", zanzanaDBCfg) - case migrator.Postgres: - if err := migration.RunWithMigrator(m, cfg, assets.EmbedMigrations, assets.PostgresMigrationDir); err != nil { - return nil, fmt.Errorf("failed to run migrations: %w", err) - } - - return postgres.New(grafanaDBCfg.ConnectionString, zanzanaDBCfg) - } - - // Should never happen - return nil, fmt.Errorf("unsupported database engine: %s", db.GetDialect().DriverName()) -} - -func parseConfig(cfg *setting.Cfg, logger log.Logger) (*sqlstore.DatabaseConfig, *sqlcommon.Config, error) { - 
sec := cfg.Raw.Section("database") - grafanaDBCfg, err := sqlstore.NewDatabaseConfig(cfg, nil) - if err != nil { - return nil, nil, nil - } - - zanzanaDBCfg := &sqlcommon.Config{ - Logger: newZanzanaLogger(logger), - MaxTuplesPerWriteField: 100, - MaxTypesPerModelField: 100, - MaxOpenConns: grafanaDBCfg.MaxOpenConn, - MaxIdleConns: grafanaDBCfg.MaxIdleConn, - ConnMaxLifetime: time.Duration(grafanaDBCfg.ConnMaxLifetime) * time.Second, - ExportMetrics: sec.Key("instrument_queries").MustBool(false), - } - - return grafanaDBCfg, zanzanaDBCfg, nil + return store.NewEmbeddedStore(cfg, db, logger) } diff --git a/pkg/services/authz/zanzana/store/assets.go b/pkg/services/authz/zanzana/store/assets/assets.go similarity index 91% rename from pkg/services/authz/zanzana/store/assets.go rename to pkg/services/authz/zanzana/store/assets/assets.go index e61b32e5b93..fb55d16eef2 100644 --- a/pkg/services/authz/zanzana/store/assets.go +++ b/pkg/services/authz/zanzana/store/assets/assets.go @@ -1,4 +1,4 @@ -package store +package assets import "embed" diff --git a/pkg/services/authz/zanzana/store/migrations/sqlite/001_initialize_schema.sql b/pkg/services/authz/zanzana/store/assets/migrations/sqlite/001_initialize_schema.sql similarity index 100% rename from pkg/services/authz/zanzana/store/migrations/sqlite/001_initialize_schema.sql rename to pkg/services/authz/zanzana/store/assets/migrations/sqlite/001_initialize_schema.sql diff --git a/pkg/services/authz/zanzana/store/migrations/sqlite/002_add_authorization_model_version.sql b/pkg/services/authz/zanzana/store/assets/migrations/sqlite/002_add_authorization_model_version.sql similarity index 100% rename from pkg/services/authz/zanzana/store/migrations/sqlite/002_add_authorization_model_version.sql rename to pkg/services/authz/zanzana/store/assets/migrations/sqlite/002_add_authorization_model_version.sql diff --git a/pkg/services/authz/zanzana/store/migrations/sqlite/003_add_reverse_lookup_index.sql b/pkg/services/authz/zanzana/store/assets/migrations/sqlite/003_add_reverse_lookup_index.sql similarity index 100% rename from pkg/services/authz/zanzana/store/migrations/sqlite/003_add_reverse_lookup_index.sql rename to pkg/services/authz/zanzana/store/assets/migrations/sqlite/003_add_reverse_lookup_index.sql diff --git a/pkg/services/authz/zanzana/store/migrations/sqlite/004_add_authorization_model_serialized_protobuf.sql b/pkg/services/authz/zanzana/store/assets/migrations/sqlite/004_add_authorization_model_serialized_protobuf.sql similarity index 100% rename from pkg/services/authz/zanzana/store/migrations/sqlite/004_add_authorization_model_serialized_protobuf.sql rename to pkg/services/authz/zanzana/store/assets/migrations/sqlite/004_add_authorization_model_serialized_protobuf.sql diff --git a/pkg/services/authz/zanzana/store/migrations/sqlite/005_add_conditions_to_tuples.sql b/pkg/services/authz/zanzana/store/assets/migrations/sqlite/005_add_conditions_to_tuples.sql similarity index 100% rename from pkg/services/authz/zanzana/store/migrations/sqlite/005_add_conditions_to_tuples.sql rename to pkg/services/authz/zanzana/store/assets/migrations/sqlite/005_add_conditions_to_tuples.sql diff --git a/pkg/services/authz/zanzana/store/sqlite/store_test.go b/pkg/services/authz/zanzana/store/sqlite/store_test.go index 2274fec3822..5388114d188 100644 --- a/pkg/services/authz/zanzana/store/sqlite/store_test.go +++ b/pkg/services/authz/zanzana/store/sqlite/store_test.go @@ -19,10 +19,11 @@ import ( "github.com/openfga/openfga/pkg/typesystem" 
"github.com/grafana/grafana/pkg/infra/db" - "github.com/grafana/grafana/pkg/services/authz/zanzana/store" - "github.com/grafana/grafana/pkg/services/authz/zanzana/store/migration" "github.com/grafana/grafana/pkg/services/sqlstore/migrator" "github.com/grafana/grafana/pkg/tests/testsuite" + + zassets "github.com/grafana/grafana/pkg/services/authz/zanzana/store/assets" + "github.com/grafana/grafana/pkg/services/authz/zanzana/store/migration" ) func TestMain(m *testing.M) { @@ -287,7 +288,7 @@ func sqliteIntegrationTest(tb testing.TB) *sql.DB { db, cfg := db.InitTestDBWithCfg(tb) m := migrator.NewMigrator(db.GetEngine(), cfg) - err := migration.RunWithMigrator(m, cfg, store.EmbedMigrations, store.SQLiteMigrationDir) + err := migration.RunWithMigrator(m, cfg, zassets.EmbedMigrations, zassets.SQLiteMigrationDir) require.NoError(tb, err) return db.GetEngine().DB().DB diff --git a/pkg/services/authz/zanzana/store/store.go b/pkg/services/authz/zanzana/store/store.go new file mode 100644 index 00000000000..fe7aadc0cee --- /dev/null +++ b/pkg/services/authz/zanzana/store/store.go @@ -0,0 +1,128 @@ +package store + +import ( + "fmt" + "time" + + "xorm.io/xorm" + + "github.com/openfga/openfga/assets" + "github.com/openfga/openfga/pkg/storage" + "github.com/openfga/openfga/pkg/storage/mysql" + "github.com/openfga/openfga/pkg/storage/postgres" + "github.com/openfga/openfga/pkg/storage/sqlcommon" + + "github.com/grafana/grafana/pkg/infra/db" + "github.com/grafana/grafana/pkg/infra/log" + "github.com/grafana/grafana/pkg/services/sqlstore" + "github.com/grafana/grafana/pkg/services/sqlstore/migrator" + "github.com/grafana/grafana/pkg/setting" + + zlogger "github.com/grafana/grafana/pkg/services/authz/zanzana/logger" + zassets "github.com/grafana/grafana/pkg/services/authz/zanzana/store/assets" + "github.com/grafana/grafana/pkg/services/authz/zanzana/store/migration" + "github.com/grafana/grafana/pkg/services/authz/zanzana/store/sqlite" +) + +func NewStore(cfg *setting.Cfg, logger log.Logger) (storage.OpenFGADatastore, error) { + grafanaDBCfg, zanzanaDBCfg, err := parseConfig(cfg, logger) + if err != nil { + return nil, fmt.Errorf("failed to parse database config: %w", err) + } + + switch grafanaDBCfg.Type { + case migrator.SQLite: + connStr := grafanaDBCfg.ConnectionString + // Initilize connection using xorm engine so we can reuse it for both migrations and data store + engine, err := xorm.NewEngine(grafanaDBCfg.Type, connStr) + if err != nil { + return nil, fmt.Errorf("failed to connect to database: %w", err) + } + + m := migrator.NewMigrator(engine, cfg) + if err := migration.RunWithMigrator(m, cfg, zassets.EmbedMigrations, zassets.SQLiteMigrationDir); err != nil { + return nil, fmt.Errorf("failed to run migrations: %w", err) + } + + return sqlite.NewWithDB(engine.DB().DB, &sqlite.Config{ + Config: zanzanaDBCfg, + QueryRetries: grafanaDBCfg.QueryRetries, + }) + case migrator.MySQL: + // For mysql we need to pass parseTime parameter in connection string + connStr := grafanaDBCfg.ConnectionString + "&parseTime=true" + if err := migration.Run(cfg, migrator.MySQL, connStr, assets.EmbedMigrations, assets.MySQLMigrationDir); err != nil { + return nil, fmt.Errorf("failed to run migrations: %w", err) + } + + return mysql.New(connStr, zanzanaDBCfg) + case migrator.Postgres: + connStr := grafanaDBCfg.ConnectionString + if err := migration.Run(cfg, migrator.Postgres, connStr, assets.EmbedMigrations, assets.PostgresMigrationDir); err != nil { + return nil, fmt.Errorf("failed to run migrations: %w", err) + } + + 
+		return postgres.New(connStr, zanzanaDBCfg)
+	}
+
+	// Should never happen
+	return nil, fmt.Errorf("unsupported database engine: %s", grafanaDBCfg.Type)
+}
+
+func NewEmbeddedStore(cfg *setting.Cfg, db db.DB, logger log.Logger) (storage.OpenFGADatastore, error) {
+	grafanaDBCfg, zanzanaDBCfg, err := parseConfig(cfg, logger)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse database config: %w", err)
+	}
+
+	m := migrator.NewMigrator(db.GetEngine(), cfg)
+
+	switch grafanaDBCfg.Type {
+	case migrator.SQLite:
+		if err := migration.RunWithMigrator(m, cfg, zassets.EmbedMigrations, zassets.SQLiteMigrationDir); err != nil {
+			return nil, fmt.Errorf("failed to run migrations: %w", err)
+		}
+
+		// FIXME(kalleep): We should work on getting the sqlite implementation merged upstream and replace this one
+		return sqlite.NewWithDB(db.GetEngine().DB().DB, &sqlite.Config{
+			Config: zanzanaDBCfg,
+			QueryRetries: grafanaDBCfg.QueryRetries,
+		})
+	case migrator.MySQL:
+		if err := migration.RunWithMigrator(m, cfg, assets.EmbedMigrations, assets.MySQLMigrationDir); err != nil {
+			return nil, fmt.Errorf("failed to run migrations: %w", err)
+		}
+
+		// For MySQL we need to pass the parseTime parameter in the connection string
+		return mysql.New(grafanaDBCfg.ConnectionString+"&parseTime=true", zanzanaDBCfg)
+	case migrator.Postgres:
+		if err := migration.RunWithMigrator(m, cfg, assets.EmbedMigrations, assets.PostgresMigrationDir); err != nil {
+			return nil, fmt.Errorf("failed to run migrations: %w", err)
+		}
+
+		return postgres.New(grafanaDBCfg.ConnectionString, zanzanaDBCfg)
+	}
+
+	// Should never happen
+	return nil, fmt.Errorf("unsupported database engine: %s", db.GetDialect().DriverName())
+}
+
+func parseConfig(cfg *setting.Cfg, logger log.Logger) (*sqlstore.DatabaseConfig, *sqlcommon.Config, error) {
+	sec := cfg.Raw.Section("database")
+	grafanaDBCfg, err := sqlstore.NewDatabaseConfig(cfg, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	zanzanaDBCfg := &sqlcommon.Config{
+		Logger: zlogger.New(logger),
+		MaxTuplesPerWriteField: 100,
+		MaxTypesPerModelField: 100,
+		MaxOpenConns: grafanaDBCfg.MaxOpenConn,
+		MaxIdleConns: grafanaDBCfg.MaxIdleConn,
+		ConnMaxLifetime: time.Duration(grafanaDBCfg.ConnMaxLifetime) * time.Second,
+		ExportMetrics: sec.Key("instrument_queries").MustBool(false),
+	}
+
+	return grafanaDBCfg, zanzanaDBCfg, nil
+}
diff --git a/pkg/services/contexthandler/contexthandler.go b/pkg/services/contexthandler/contexthandler.go
index fe8f810872e..a58425e54ff 100644
--- a/pkg/services/contexthandler/contexthandler.go
+++ b/pkg/services/contexthandler/contexthandler.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"net/http"
+	authnClients "github.com/grafana/grafana/pkg/services/authn/clients"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/trace"
@@ -16,29 +17,26 @@ import (
 	"github.com/grafana/grafana/pkg/services/authn"
 	"github.com/grafana/grafana/pkg/services/contexthandler/ctxkey"
 	contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model"
-	"github.com/grafana/grafana/pkg/services/featuremgmt"
 	"github.com/grafana/grafana/pkg/services/login"
 	"github.com/grafana/grafana/pkg/services/user"
 	"github.com/grafana/grafana/pkg/setting"
 	"github.com/grafana/grafana/pkg/web"
 )
-func ProvideService(cfg *setting.Cfg, tracer tracing.Tracer, features featuremgmt.FeatureToggles, authnService authn.Service,
+func ProvideService(cfg *setting.Cfg, tracer tracing.Tracer, authenticator authn.Authenticator,
 ) *ContextHandler {
 	return &ContextHandler{
-		Cfg: cfg,
-		tracer: tracer,
-		features:
features, - authnService: authnService, + Cfg: cfg, + tracer: tracer, + authenticator: authenticator, } } // ContextHandler is a middleware. type ContextHandler struct { - Cfg *setting.Cfg - tracer tracing.Tracer - features featuremgmt.FeatureToggles - authnService authn.Service + Cfg *setting.Cfg + tracer tracing.Tracer + authenticator authn.Authenticator } type reqContextKey = ctxkey.Key @@ -112,7 +110,7 @@ func (h *ContextHandler) Middleware(next http.Handler) http.Handler { reqContext.Logger = reqContext.Logger.New("traceID", traceID) } - id, err := h.authnService.Authenticate(ctx, &authn.Request{HTTPRequest: reqContext.Req, Resp: reqContext.Resp}) + id, err := h.authenticator.Authenticate(ctx, &authn.Request{HTTPRequest: reqContext.Req}) if err != nil { // Hack: set all errors on LookupTokenErr, so we can check it in auth middlewares reqContext.LookupTokenErr = err @@ -124,6 +122,8 @@ func (h *ContextHandler) Middleware(next http.Handler) http.Handler { reqContext.IsRenderCall = id.IsAuthenticatedBy(login.RenderModule) } + h.excludeSensitiveHeadersFromRequest(reqContext.Req) + reqContext.Logger = reqContext.Logger.New("userId", reqContext.UserID, "orgId", reqContext.OrgID, "uname", reqContext.Login) span.AddEvent("user", trace.WithAttributes( attribute.String("uname", reqContext.Login), @@ -142,6 +142,11 @@ func (h *ContextHandler) Middleware(next http.Handler) http.Handler { }) } +func (h *ContextHandler) excludeSensitiveHeadersFromRequest(req *http.Request) { + req.Header.Del(authnClients.ExtJWTAuthenticationHeaderName) + req.Header.Del(authnClients.ExtJWTAuthorizationHeaderName) +} + func (h *ContextHandler) addIDHeaderEndOfRequestFunc(ident identity.Requester) web.BeforeFunc { return func(w web.ResponseWriter) { if w.Written() { diff --git a/pkg/services/contexthandler/contexthandler_test.go b/pkg/services/contexthandler/contexthandler_test.go index 0dcdf896d61..c237660e237 100644 --- a/pkg/services/contexthandler/contexthandler_test.go +++ b/pkg/services/contexthandler/contexthandler_test.go @@ -15,7 +15,6 @@ import ( "github.com/grafana/grafana/pkg/services/authn/authntest" "github.com/grafana/grafana/pkg/services/contexthandler" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" - "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/login" "github.com/grafana/grafana/pkg/services/user" "github.com/grafana/grafana/pkg/setting" @@ -27,7 +26,6 @@ func TestContextHandler(t *testing.T) { handler := contexthandler.ProvideService( setting.NewCfg(), tracing.InitializeTracerForTest(), - featuremgmt.WithFeatures(), &authntest.FakeService{ExpectedErr: errors.New("some error")}, ) @@ -49,7 +47,6 @@ func TestContextHandler(t *testing.T) { handler := contexthandler.ProvideService( setting.NewCfg(), tracing.InitializeTracerForTest(), - featuremgmt.WithFeatures(), &authntest.FakeService{ExpectedIdentity: id}, ) @@ -75,7 +72,6 @@ func TestContextHandler(t *testing.T) { handler := contexthandler.ProvideService( setting.NewCfg(), tracing.InitializeTracerForTest(), - featuremgmt.WithFeatures(), &authntest.FakeService{ExpectedIdentity: identity}, ) @@ -97,7 +93,6 @@ func TestContextHandler(t *testing.T) { handler := contexthandler.ProvideService( setting.NewCfg(), tracing.InitializeTracerForTest(), - featuremgmt.WithFeatures(), &authntest.FakeService{ExpectedIdentity: identity}, ) @@ -128,7 +123,6 @@ func TestContextHandler(t *testing.T) { handler := contexthandler.ProvideService( cfg, tracing.InitializeTracerForTest(), - 
featuremgmt.WithFeatures(), &authntest.FakeService{ExpectedIdentity: &authn.Identity{}}, ) @@ -154,7 +148,6 @@ func TestContextHandler(t *testing.T) { handler := contexthandler.ProvideService( cfg, tracing.InitializeTracerForTest(), - featuremgmt.WithFeatures(), &authntest.FakeService{ExpectedIdentity: &authn.Identity{ID: authn.MustParseNamespaceID(id)}}, ) diff --git a/pkg/services/correlations/correlationstest/fake.go b/pkg/services/correlations/correlationstest/fake.go index 6263f9db41f..f5f134bf51b 100644 --- a/pkg/services/correlations/correlationstest/fake.go +++ b/pkg/services/correlations/correlationstest/fake.go @@ -5,6 +5,7 @@ import ( "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/infra/db" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/correlations" "github.com/grafana/grafana/pkg/services/datasources" fakeDatasources "github.com/grafana/grafana/pkg/services/datasources/fakes" @@ -20,6 +21,6 @@ func New(db db.DB, cfg *setting.Cfg, bus bus.Bus) *correlations.CorrelationsServ }, } - correlationsSvc, _ := correlations.ProvideService(db, routing.NewRouteRegister(), ds, acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), bus, quotatest.New(false, nil), cfg) + correlationsSvc, _ := correlations.ProvideService(db, routing.NewRouteRegister(), ds, acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), bus, quotatest.New(false, nil), cfg) return correlationsSvc } diff --git a/pkg/services/dashboards/database/database.go b/pkg/services/dashboards/database/database.go index c3ae1f976b3..bad463c509d 100644 --- a/pkg/services/dashboards/database/database.go +++ b/pkg/services/dashboards/database/database.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/grafana/grafana/pkg/apimachinery/identity" "github.com/grafana/grafana/pkg/infra/db" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/infra/metrics" @@ -876,6 +877,11 @@ func (d *dashboardStore) FindDashboards(ctx context.Context, query *dashboards.F }) } + // only list k6 folders when requested by a service account - prevents showing k6 folders in the UI for users + if query.SignedInUser == nil || query.SignedInUser.GetID().Namespace() != identity.NamespaceServiceAccount { + filters = append(filters, searchstore.K6FolderFilter{}) + } + filters = append(filters, permissions.NewAccessControlDashboardPermissionFilter(query.SignedInUser, query.Permission, query.Type, d.features, recursiveQueriesAreSupported)) filters = append(filters, searchstore.DeletedFilter{Deleted: query.IsDeleted}) diff --git a/pkg/services/dashboards/database/database_test.go b/pkg/services/dashboards/database/database_test.go index f955b99152c..e7050847483 100644 --- a/pkg/services/dashboards/database/database_test.go +++ b/pkg/services/dashboards/database/database_test.go @@ -16,6 +16,7 @@ import ( "github.com/grafana/grafana/pkg/infra/db" "github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/dashboards" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/folder" @@ -827,7 +828,7 @@ func TestIntegrationFindDashboardsByTitle(t *testing.T) { orgID := int64(1) insertTestDashboard(t, dashboardStore, "dashboard under general", orgID, 0, "", false) - ac := 
acimpl.ProvideAccessControl(features) + ac := acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()) folderStore := folderimpl.ProvideDashboardFolderStore(sqlStore) folderServiceWithFlagOn := folderimpl.ProvideService(ac, bus.ProvideBus(tracing.InitializeTracerForTest()), dashboardStore, folderStore, sqlStore, features, supportbundlestest.NewFakeBundleService(), nil) @@ -944,7 +945,7 @@ func TestIntegrationFindDashboardsByFolder(t *testing.T) { orgID := int64(1) insertTestDashboard(t, dashboardStore, "dashboard under general", orgID, 0, "", false) - ac := acimpl.ProvideAccessControl(features) + ac := acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()) folderStore := folderimpl.ProvideDashboardFolderStore(sqlStore) folderServiceWithFlagOn := folderimpl.ProvideService(ac, bus.ProvideBus(tracing.InitializeTracerForTest()), dashboardStore, folderStore, sqlStore, features, supportbundlestest.NewFakeBundleService(), nil) diff --git a/pkg/services/dashboards/service/dashboard_service.go b/pkg/services/dashboards/service/dashboard_service.go index c94d8b4df7f..f9d2168b710 100644 --- a/pkg/services/dashboards/service/dashboard_service.go +++ b/pkg/services/dashboards/service/dashboard_service.go @@ -700,16 +700,9 @@ func makeQueryResult(query *dashboards.FindPersistedDashboardsQuery, res []dashb hitList := make([]*model.Hit, 0) hits := make(map[int64]*model.Hit) - requesterIsSvcAccount := query.SignedInUser.GetID().Namespace() == identity.NamespaceServiceAccount - for _, item := range res { hit, exists := hits[item.ID] if !exists { - // Don't list k6 items for users, we don't want users to interact with k6 folders directly through folder UI - if (item.UID == accesscontrol.K6FolderUID || item.FolderUID == accesscontrol.K6FolderUID) && !requesterIsSvcAccount { - continue - } - metrics.MFolderIDsServiceCount.WithLabelValues(metrics.Dashboard).Inc() hit = &model.Hit{ ID: item.ID, diff --git a/pkg/services/folder/folderimpl/folder.go b/pkg/services/folder/folderimpl/folder.go index 073e6ca239f..8b70af72be3 100644 --- a/pkg/services/folder/folderimpl/folder.go +++ b/pkg/services/folder/folderimpl/folder.go @@ -180,17 +180,7 @@ func (s *Service) GetFolders(ctx context.Context, q folder.GetFoldersQuery) ([]* } } - // only list k6 folders when requested by a service account - prevents showing k6 folders in the UI for users - result := make([]*folder.Folder, 0, len(dashFolders)) - requesterIsSvcAccount := qry.SignedInUser.GetID().Namespace() == identity.NamespaceServiceAccount - for _, folder := range dashFolders { - if (folder.UID == accesscontrol.K6FolderUID || folder.ParentUID == accesscontrol.K6FolderUID) && !requesterIsSvcAccount { - continue - } - result = append(result, folder) - } - - return result, nil + return dashFolders, nil } func (s *Service) Get(ctx context.Context, q *folder.GetFolderQuery) (*folder.Folder, error) { diff --git a/pkg/services/folder/folderimpl/folder_test.go b/pkg/services/folder/folderimpl/folder_test.go index f47d4698769..c3c07dc89c4 100644 --- a/pkg/services/folder/folderimpl/folder_test.go +++ b/pkg/services/folder/folderimpl/folder_test.go @@ -27,6 +27,7 @@ import ( "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" "github.com/grafana/grafana/pkg/services/accesscontrol/actest" acmock "github.com/grafana/grafana/pkg/services/accesscontrol/mock" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/dashboards" "github.com/grafana/grafana/pkg/services/dashboards/dashboardaccess" 
"github.com/grafana/grafana/pkg/services/dashboards/database" @@ -97,7 +98,7 @@ func TestIntegrationFolderService(t *testing.T) { features: features, bus: bus.ProvideBus(tracing.InitializeTracerForTest()), db: db, - accessControl: acimpl.ProvideAccessControl(features), + accessControl: acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()), metrics: newFoldersMetrics(nil), registry: make(map[string]folder.RegistryService), } @@ -427,7 +428,7 @@ func TestIntegrationNestedFolderService(t *testing.T) { nestedFolderStore := ProvideStore(db) b := bus.ProvideBus(tracing.InitializeTracerForTest()) - ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) serviceWithFlagOn := &Service{ log: slog.New(logtest.NewTestHandler(t)).With("logger", "test-folder-service"), @@ -803,7 +804,7 @@ func TestNestedFolderServiceFeatureToggle(t *testing.T) { dashboardStore: &dashStore, dashboardFolderStore: dashboardFolderStore, features: featuremgmt.WithFeatures(featuremgmt.FlagNestedFolders), - accessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), + accessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), metrics: newFoldersMetrics(nil), } t.Run("create folder", func(t *testing.T) { @@ -839,7 +840,7 @@ func TestFolderServiceDualWrite(t *testing.T) { dashboardStore: dashStore, dashboardFolderStore: dashboardFolderStore, features: featuremgmt.WithFeatures(featuremgmt.FlagNestedFolders), - accessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), + accessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), metrics: newFoldersMetrics(nil), bus: bus.ProvideBus(tracing.InitializeTracerForTest()), } @@ -903,7 +904,7 @@ func TestNestedFolderService(t *testing.T) { features := featuremgmt.WithFeatures() db, _ := sqlstore.InitTestDB(t) - folderSvc := setup(t, dashStore, dashboardFolderStore, nestedFolderStore, features, acimpl.ProvideAccessControl(features), db) + folderSvc := setup(t, dashStore, dashboardFolderStore, nestedFolderStore, features, acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()), db) _, err := folderSvc.Create(context.Background(), &folder.CreateFolderCommand{ OrgID: orgID, Title: dash.Title, @@ -937,7 +938,7 @@ func TestNestedFolderService(t *testing.T) { features := featuremgmt.WithFeatures("nestedFolders") db, _ := sqlstore.InitTestDB(t) - folderSvc := setup(t, dashStore, dashboardFolderStore, nestedFolderStore, features, acimpl.ProvideAccessControl(features), db) + folderSvc := setup(t, dashStore, dashboardFolderStore, nestedFolderStore, features, acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()), db) _, err := folderSvc.Create(context.Background(), &folder.CreateFolderCommand{ OrgID: orgID, Title: dash.Title, @@ -969,7 +970,7 @@ func TestNestedFolderService(t *testing.T) { dashStore.On("SaveDashboard", mock.Anything, mock.AnythingOfType("dashboards.SaveDashboardCommand")).Return(&dashboards.Dashboard{}, nil) features := featuremgmt.WithFeatures("nestedFolders") - folderSvc := setup(t, dashStore, nil, nil, features, acimpl.ProvideAccessControl(features), dbtest.NewFakeDB()) + folderSvc := setup(t, dashStore, nil, nil, features, acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()), dbtest.NewFakeDB()) _, err := folderSvc.Create(context.Background(), &folder.CreateFolderCommand{ OrgID: orgID, Title: dash.Title, @@ -1005,7 +1006,7 @@ func 
TestNestedFolderService(t *testing.T) { nestedFolderStore := NewFakeStore() db, _ := sqlstore.InitTestDB(t) features := featuremgmt.WithFeatures("nestedFolders") - folderSvc := setup(t, dashStore, dashboardFolderStore, nestedFolderStore, features, acimpl.ProvideAccessControl(features), db) + folderSvc := setup(t, dashStore, dashboardFolderStore, nestedFolderStore, features, acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()), db) _, err := folderSvc.Create(context.Background(), &folder.CreateFolderCommand{ OrgID: orgID, Title: dash.Title, @@ -1141,7 +1142,7 @@ func TestNestedFolderService(t *testing.T) { nestedFolderUser.Permissions[orgID] = map[string][]string{dashboards.ActionFoldersWrite: {dashboards.ScopeFoldersProvider.GetResourceScopeUID("wrong_uid")}} features := featuremgmt.WithFeatures("nestedFolders") - folderSvc := setup(t, dashStore, dashboardFolderStore, nestedFolderStore, features, acimpl.ProvideAccessControl(features), dbtest.NewFakeDB()) + folderSvc := setup(t, dashStore, dashboardFolderStore, nestedFolderStore, features, acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()), dbtest.NewFakeDB()) _, err := folderSvc.Move(context.Background(), &folder.MoveFolderCommand{UID: "myFolder", NewParentUID: "newFolder", OrgID: orgID, SignedInUser: nestedFolderUser}) require.ErrorIs(t, err, dashboards.ErrFolderAccessDenied) }) @@ -1162,7 +1163,7 @@ func TestNestedFolderService(t *testing.T) { nestedFolderUser.Permissions[orgID] = map[string][]string{dashboards.ActionFoldersWrite: {dashboards.ScopeFoldersProvider.GetResourceScopeUID("newFolder")}} features := featuremgmt.WithFeatures("nestedFolders") - folderSvc := setup(t, dashStore, dashboardFolderStore, nestedFolderStore, features, acimpl.ProvideAccessControl(features), dbtest.NewFakeDB()) + folderSvc := setup(t, dashStore, dashboardFolderStore, nestedFolderStore, features, acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()), dbtest.NewFakeDB()) _, err := folderSvc.Move(context.Background(), &folder.MoveFolderCommand{UID: "myFolder", NewParentUID: "newFolder", OrgID: orgID, SignedInUser: nestedFolderUser}) require.NoError(t, err) // the folder is set inside InTransaction() but the fake one is called @@ -1174,7 +1175,7 @@ func TestNestedFolderService(t *testing.T) { nestedFolderUser.Permissions[orgID] = map[string][]string{dashboards.ActionFoldersWrite: {dashboards.ScopeFoldersProvider.GetResourceAllScope()}} features := featuremgmt.WithFeatures("nestedFolders") - folderSvc := setup(t, &dashboards.FakeDashboardStore{}, foldertest.NewFakeFolderStore(t), NewFakeStore(), features, acimpl.ProvideAccessControl(features), dbtest.NewFakeDB()) + folderSvc := setup(t, &dashboards.FakeDashboardStore{}, foldertest.NewFakeFolderStore(t), NewFakeStore(), features, acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()), dbtest.NewFakeDB()) _, err := folderSvc.Move(context.Background(), &folder.MoveFolderCommand{UID: accesscontrol.K6FolderUID, NewParentUID: "newFolder", OrgID: orgID, SignedInUser: nestedFolderUser}) require.Error(t, err, folder.ErrBadRequest) }) @@ -1192,7 +1193,7 @@ func TestNestedFolderService(t *testing.T) { } features := featuremgmt.WithFeatures("nestedFolders") - folderSvc := setup(t, &dashboards.FakeDashboardStore{}, foldertest.NewFakeFolderStore(t), nestedFolderStore, features, acimpl.ProvideAccessControl(features), dbtest.NewFakeDB()) + folderSvc := setup(t, &dashboards.FakeDashboardStore{}, foldertest.NewFakeFolderStore(t), nestedFolderStore, features, 
acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()), dbtest.NewFakeDB()) _, err := folderSvc.Move(context.Background(), &folder.MoveFolderCommand{UID: childUID, NewParentUID: "newFolder", OrgID: orgID, SignedInUser: nestedFolderUser}) require.Error(t, err, folder.ErrBadRequest) }) @@ -1208,7 +1209,7 @@ func TestNestedFolderService(t *testing.T) { nestedFolderUser.Permissions[orgID] = map[string][]string{dashboards.ActionFoldersWrite: {dashboards.ScopeFoldersProvider.GetResourceScopeUID("")}} features := featuremgmt.WithFeatures("nestedFolders") - folderSvc := setup(t, dashStore, dashboardFolderStore, nestedFolderStore, features, acimpl.ProvideAccessControl(features), dbtest.NewFakeDB()) + folderSvc := setup(t, dashStore, dashboardFolderStore, nestedFolderStore, features, acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()), dbtest.NewFakeDB()) _, err := folderSvc.Move(context.Background(), &folder.MoveFolderCommand{UID: "myFolder", NewParentUID: "", OrgID: orgID, SignedInUser: nestedFolderUser}) require.Error(t, err, dashboards.ErrFolderAccessDenied) }) @@ -1229,7 +1230,7 @@ func TestNestedFolderService(t *testing.T) { nestedFolderUser.Permissions[orgID] = map[string][]string{dashboards.ActionFoldersCreate: {}} features := featuremgmt.WithFeatures("nestedFolders") - folderSvc := setup(t, dashStore, dashboardFolderStore, nestedFolderStore, features, acimpl.ProvideAccessControl(features), dbtest.NewFakeDB()) + folderSvc := setup(t, dashStore, dashboardFolderStore, nestedFolderStore, features, acimpl.ProvideAccessControl(features, zanzana.NewNoopClient()), dbtest.NewFakeDB()) _, err := folderSvc.Move(context.Background(), &folder.MoveFolderCommand{UID: "myFolder", NewParentUID: "", OrgID: orgID, SignedInUser: nestedFolderUser}) require.NoError(t, err) // the folder is set inside InTransaction() but the fake one is called @@ -1387,7 +1388,7 @@ func TestIntegrationNestedFolderSharedWithMe(t *testing.T) { nestedFolderStore := ProvideStore(db) b := bus.ProvideBus(tracing.InitializeTracerForTest()) - ac := acimpl.ProvideAccessControl(featuresFlagOn) + ac := acimpl.ProvideAccessControl(featuresFlagOn, zanzana.NewNoopClient()) serviceWithFlagOn := &Service{ log: slog.New(logtest.NewTestHandler(t)).With("logger", "test-folder-service"), @@ -1807,7 +1808,7 @@ func TestFolderServiceGetFolder(t *testing.T) { nestedFolderStore := ProvideStore(db) b := bus.ProvideBus(tracing.InitializeTracerForTest()) - ac := acimpl.ProvideAccessControl(featuresFlagOff) + ac := acimpl.ProvideAccessControl(featuresFlagOff, zanzana.NewNoopClient()) return Service{ log: slog.New(logtest.NewTestHandler(t)).With("logger", "test-folder-service"), @@ -1889,7 +1890,7 @@ func TestFolderServiceGetFolders(t *testing.T) { nestedFolderStore := ProvideStore(db) b := bus.ProvideBus(tracing.InitializeTracerForTest()) - ac := acimpl.ProvideAccessControl(featuresFlagOff) + ac := acimpl.ProvideAccessControl(featuresFlagOff, zanzana.NewNoopClient()) serviceWithFlagOff := &Service{ log: slog.New(logtest.NewTestHandler(t)).With("logger", "test-folder-service"), @@ -1973,7 +1974,7 @@ func TestGetChildrenFilterByPermission(t *testing.T) { nestedFolderStore := ProvideStore(db) b := bus.ProvideBus(tracing.InitializeTracerForTest()) - ac := acimpl.ProvideAccessControl(featuresFlagOff) + ac := acimpl.ProvideAccessControl(featuresFlagOff, zanzana.NewNoopClient()) features := featuremgmt.WithFeatures(featuremgmt.FlagNestedFolders) diff --git a/pkg/services/folder/folderimpl/sqlstore.go b/pkg/services/folder/folderimpl/sqlstore.go 
index a554c6470f2..41f732b8ccc 100644 --- a/pkg/services/folder/folderimpl/sqlstore.go +++ b/pkg/services/folder/folderimpl/sqlstore.go @@ -9,9 +9,11 @@ import ( "github.com/grafana/dskit/concurrency" + "github.com/grafana/grafana/pkg/apimachinery/identity" "github.com/grafana/grafana/pkg/infra/db" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/infra/metrics" + "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/dashboards" "github.com/grafana/grafana/pkg/services/folder" "github.com/grafana/grafana/pkg/services/sqlstore/migrator" @@ -319,6 +321,13 @@ func (ss *sqlStore) GetChildren(ctx context.Context, q folder.GetChildrenQuery) } sql.WriteString(")") } + + // only list k6 folders when requested by a service account - prevents showing k6 folders in the UI for users + if q.SignedInUser == nil || q.SignedInUser.GetID().Namespace() != identity.NamespaceServiceAccount { + sql.WriteString(" AND uid != ?") + args = append(args, accesscontrol.K6FolderUID) + } + sql.WriteString(" ORDER BY title ASC") if q.Limit != 0 { @@ -474,6 +483,12 @@ func (ss *sqlStore) GetFolders(ctx context.Context, q getFoldersQuery) ([]*folde } } + // only list k6 folders when requested by a service account - prevents showing k6 folders in the UI for users + if q.SignedInUser == nil || q.SignedInUser.GetID().Namespace() != identity.NamespaceServiceAccount { + s.WriteString(" AND f0.uid != ? AND (f0.parent_uid != ? OR f0.parent_uid IS NULL)") + args = append(args, accesscontrol.K6FolderUID, accesscontrol.K6FolderUID) + } + if len(q.ancestorUIDs) == 0 { if q.OrderByTitle { s.WriteString(` ORDER BY f0.title ASC`) diff --git a/pkg/services/folder/folderimpl/sqlstore_test.go b/pkg/services/folder/folderimpl/sqlstore_test.go index e346c91a960..c966bbf679b 100644 --- a/pkg/services/folder/folderimpl/sqlstore_test.go +++ b/pkg/services/folder/folderimpl/sqlstore_test.go @@ -14,11 +14,13 @@ import ( "github.com/stretchr/testify/require" "github.com/grafana/grafana/pkg/infra/db" + "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/folder" "github.com/grafana/grafana/pkg/services/org" "github.com/grafana/grafana/pkg/services/org/orgimpl" "github.com/grafana/grafana/pkg/services/quota/quotatest" "github.com/grafana/grafana/pkg/services/sqlstore" + "github.com/grafana/grafana/pkg/services/user" "github.com/grafana/grafana/pkg/setting" "github.com/grafana/grafana/pkg/util" ) @@ -731,6 +733,68 @@ func TestIntegrationGetChildren(t *testing.T) { t.Errorf("Result mismatch (-want +got):\n%s", diff) } }) + + t.Run("should hide k6-app folder for users but not for service accounts", func(t *testing.T) { + _, err = folderStore.Create(context.Background(), folder.CreateFolderCommand{ + Title: "k6-app-folder", + OrgID: orgID, + UID: accesscontrol.K6FolderUID, + }) + require.NoError(t, err) + + children, err := folderStore.GetChildren(context.Background(), folder.GetChildrenQuery{ + OrgID: orgID, + SignedInUser: usr, + }) + require.NoError(t, err) + require.Equal(t, 1, len(children)) + assert.Equal(t, parent.UID, children[0].UID) + + // Service account should be able to list k6 folder + children, err = folderStore.GetChildren(context.Background(), folder.GetChildrenQuery{ + OrgID: orgID, + SignedInUser: &user.SignedInUser{UserID: 2, OrgID: orgID, IsServiceAccount: true}, + }) + require.NoError(t, err) + require.Equal(t, 2, len(children)) + childrenUIDs := make([]string, 0, len(children)) + for _, child := range children { + 
childrenUIDs = append(childrenUIDs, child.UID) + } + assert.EqualValues(t, []string{parent.UID, accesscontrol.K6FolderUID}, childrenUIDs) + }) + + t.Run("pagination works if k6-app folder is hidden", func(t *testing.T) { + for i := 0; i < 4; i++ { + _, err = folderStore.Create(context.Background(), folder.CreateFolderCommand{ + Title: fmt.Sprintf("root-%d", i), + OrgID: orgID, + UID: fmt.Sprintf("root-%d", i), + }) + require.NoError(t, err) + } + + // Should skip k6-app folder but get parent folder and two more folders + children, err := folderStore.GetChildren(context.Background(), folder.GetChildrenQuery{ + OrgID: orgID, + SignedInUser: usr, + Limit: 3, + }) + require.NoError(t, err) + require.Equal(t, 3, len(children)) + assert.EqualValues(t, []string{parent.UID, "root-0", "root-1"}, []string{children[0].UID, children[1].UID, children[2].UID}) + + // Should get the two remaining folders + children, err = folderStore.GetChildren(context.Background(), folder.GetChildrenQuery{ + OrgID: orgID, + SignedInUser: usr, + Page: 2, + Limit: 3, + }) + require.NoError(t, err) + require.Equal(t, 2, len(children)) + assert.EqualValues(t, []string{"root-2", "root-3"}, []string{children[0].UID, children[1].UID}) + }) } func TestIntegrationGetHeight(t *testing.T) { diff --git a/pkg/services/guardian/accesscontrol_guardian_test.go b/pkg/services/guardian/accesscontrol_guardian_test.go index 8d24b282395..ae6aaf279b7 100644 --- a/pkg/services/guardian/accesscontrol_guardian_test.go +++ b/pkg/services/guardian/accesscontrol_guardian_test.go @@ -11,6 +11,7 @@ import ( "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/dashboards" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/folder/foldertest" @@ -956,7 +957,7 @@ func setupAccessControlGuardianTest( fakeDashboardService := dashboards.NewFakeDashboardService(t) fakeDashboardService.On("GetDashboard", mock.Anything, mock.AnythingOfType("*dashboards.GetDashboardQuery")).Maybe().Return(d, nil) - ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) folderSvc := foldertest.NewFakeService() folderStore := foldertest.NewFakeFolderStore(t) diff --git a/pkg/services/ldap/api/service_test.go b/pkg/services/ldap/api/service_test.go index 0e8ddc3c3dc..c3147819d1f 100644 --- a/pkg/services/ldap/api/service_test.go +++ b/pkg/services/ldap/api/service_test.go @@ -17,6 +17,7 @@ import ( "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" "github.com/grafana/grafana/pkg/services/auth/authtest" "github.com/grafana/grafana/pkg/services/authn/authntest" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/ldap" "github.com/grafana/grafana/pkg/services/ldap/multildap" @@ -67,7 +68,7 @@ func setupAPITest(t *testing.T, opts ...func(a *Service)) (*Service, *webtest.Se a := ProvideService(cfg, router, - acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), + acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), usertest.NewUserServiceFake(), &authinfotest.FakeService{}, ldap.ProvideGroupsService(), diff --git a/pkg/services/ldap/ldap.go b/pkg/services/ldap/ldap.go index add301da595..ac63c00cf9a 100644 --- a/pkg/services/ldap/ldap.go +++ 
b/pkg/services/ldap/ldap.go @@ -117,8 +117,8 @@ func (server *Server) Dial() error { InsecureSkipVerify: server.Config.SkipVerifySSL, ServerName: host, RootCAs: certPool, - MinVersion: server.Config.minTLSVersion, - CipherSuites: server.Config.tlsCiphers, + MinVersion: server.Config.MinTLSVersionID, + CipherSuites: server.Config.TLSCipherIDs, } if len(clientCert.Certificate) > 0 { tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert) diff --git a/pkg/services/ldap/service/helpers.go b/pkg/services/ldap/service/helpers.go index d2b487c4289..c7413295542 100644 --- a/pkg/services/ldap/service/helpers.go +++ b/pkg/services/ldap/service/helpers.go @@ -3,91 +3,11 @@ package service import ( "encoding/json" "fmt" - "os" "strconv" - "github.com/BurntSushi/toml" - - "github.com/grafana/grafana/pkg/cmd/grafana-cli/logger" "github.com/grafana/grafana/pkg/services/ldap" - "github.com/grafana/grafana/pkg/setting" ) -const defaultTimeout = 10 - -func readConfig(configFile string) (*ldap.ServersConfig, error) { - result := &ldap.ServersConfig{} - - logger.Info("LDAP enabled, reading config file", "file", configFile) - - // nolint:gosec - // We can ignore the gosec G304 warning on this one because `filename` comes from grafana configuration file - fileBytes, err := os.ReadFile(configFile) - if err != nil { - return nil, fmt.Errorf("%v: %w", "Failed to load LDAP config file", err) - } - - // interpolate full toml string (it can contain ENV variables) - stringContent, err := setting.ExpandVar(string(fileBytes)) - if err != nil { - return nil, fmt.Errorf("%v: %w", "Failed to expand variables", err) - } - - _, err = toml.Decode(stringContent, result) - if err != nil { - return nil, fmt.Errorf("%v: %w", "Failed to load LDAP config file", err) - } - - if len(result.Servers) == 0 { - return nil, fmt.Errorf("LDAP enabled but no LDAP servers defined in config file") - } - - for _, server := range result.Servers { - // set default org id - err = assertNotEmptyCfg(server.SearchFilter, "search_filter") - if err != nil { - return nil, fmt.Errorf("%v: %w", "Failed to validate SearchFilter section", err) - } - err = assertNotEmptyCfg(server.SearchBaseDNs, "search_base_dns") - if err != nil { - return nil, fmt.Errorf("%v: %w", "Failed to validate SearchBaseDNs section", err) - } - - for _, groupMap := range server.Groups { - if groupMap.OrgRole == "" && groupMap.IsGrafanaAdmin == nil { - return nil, fmt.Errorf("LDAP group mapping: organization role or grafana admin status is required") - } - - if groupMap.OrgId == 0 { - groupMap.OrgId = 1 - } - } - - // set default timeout if unspecified - if server.Timeout == 0 { - server.Timeout = defaultTimeout - } - } - - return result, nil -} - -func assertNotEmptyCfg(val any, propName string) error { - switch v := val.(type) { - case string: - if v == "" { - return fmt.Errorf("LDAP config file is missing option: %q", propName) - } - case []string: - if len(v) == 0 { - return fmt.Errorf("LDAP config file is missing option: %q", propName) - } - default: - fmt.Println("unknown") - } - return nil -} - func resolveBool(input any, defaultValue bool) bool { strInput := fmt.Sprintf("%v", input) result, err := strconv.ParseBool(strInput) diff --git a/pkg/services/ldap/service/ldap.go b/pkg/services/ldap/service/ldap.go index d3d87e9b2fc..0a72f93721b 100644 --- a/pkg/services/ldap/service/ldap.go +++ b/pkg/services/ldap/service/ldap.go @@ -16,6 +16,7 @@ import ( "github.com/grafana/grafana/pkg/services/ssosettings" "github.com/grafana/grafana/pkg/services/ssosettings/models" 
"github.com/grafana/grafana/pkg/setting" + "github.com/grafana/grafana/pkg/util" ) var ( @@ -98,6 +99,34 @@ func (s *LDAPImpl) Reload(ctx context.Context, settings models.SSOSettings) erro return err } + // calculate MinTLSVersionID and TLSCipherIDs from input text values + // also initialize Timeout and OrgID from group mappings with default values if they are not configured + for _, server := range ldapCfg.Servers { + if server.MinTLSVersion != "" { + server.MinTLSVersionID, err = util.TlsNameToVersion(server.MinTLSVersion) + if err != nil { + s.log.Error("failed to set min TLS version, ignoring", "err", err, "server", server.Host) + } + } + + if len(server.TLSCiphers) > 0 { + server.TLSCipherIDs, err = util.TlsCiphersToIDs(server.TLSCiphers) + if err != nil { + s.log.Error("unrecognized TLS Cipher(s), ignoring", "err", err, "server", server.Host) + } + } + + for _, groupMap := range server.Groups { + if groupMap.OrgId == 0 { + groupMap.OrgId = 1 + } + } + + if server.Timeout == 0 { + server.Timeout = ldap.DefaultTimeout + } + } + s.loadingMutex.Lock() defer s.loadingMutex.Unlock() @@ -128,6 +157,34 @@ func (s *LDAPImpl) Validate(ctx context.Context, settings models.SSOSettings, ol if server.Host == "" { return fmt.Errorf("no host configured for server with index %d", i) } + + if server.SearchFilter == "" { + return fmt.Errorf("no search filter configured for server with index %d", i) + } + + if len(server.SearchBaseDNs) == 0 { + return fmt.Errorf("no search base DN configured for server with index %d", i) + } + + if server.MinTLSVersion != "" { + _, err = util.TlsNameToVersion(server.MinTLSVersion) + if err != nil { + return fmt.Errorf("invalid min TLS version configured for server with index %d", i) + } + } + + if len(server.TLSCiphers) > 0 { + _, err = util.TlsCiphersToIDs(server.TLSCiphers) + if err != nil { + return fmt.Errorf("invalid TLS ciphers configured for server with index %d", i) + } + } + + for _, groupMap := range server.Groups { + if groupMap.OrgRole == "" && groupMap.IsGrafanaAdmin == nil { + return fmt.Errorf("organization role or Grafana admin status is required in group mappings for server with index %d", i) + } + } } return nil @@ -141,7 +198,7 @@ func (s *LDAPImpl) ReloadConfig() error { s.loadingMutex.Lock() defer s.loadingMutex.Unlock() - config, err := readConfig(s.cfg.ConfigFilePath) + config, err := ldap.GetConfig(s.cfg) if err != nil { return err } diff --git a/pkg/services/ldap/service/ldap_test.go b/pkg/services/ldap/service/ldap_test.go index 6cf33fc1bff..d58efaf49aa 100644 --- a/pkg/services/ldap/service/ldap_test.go +++ b/pkg/services/ldap/service/ldap_test.go @@ -2,6 +2,7 @@ package service import ( "context" + "crypto/tls" "sync" "testing" @@ -71,6 +72,12 @@ func TestReload(t *testing.T) { "servers": []any{ map[string]any{ "host": "127.0.0.1", + "group_mappings": []any{ + map[string]any{ + "group_dn": "cn=admin,ou=groups,dc=ldap,dc=goauthentik,dc=io", + "grafana_admin": true, + }, + }, }, }, }, @@ -80,7 +87,15 @@ func TestReload(t *testing.T) { expectedServersConfig: &ldap.ServersConfig{ Servers: []*ldap.ServerConfig{ { - Host: "127.0.0.1", + Host: "127.0.0.1", + Timeout: 10, + Groups: []*ldap.GroupToOrgRole{ + { + GroupDN: "cn=admin,ou=groups,dc=ldap,dc=goauthentik,dc=io", + OrgId: 1, + IsGrafanaAdmin: &isAdmin, + }, + }, }, }, }, @@ -122,6 +137,7 @@ func TestReload(t *testing.T) { "group_search_base_dns": []string{"ou=groups,dc=grafana,dc=org"}, "tls_ciphers": []string{ "TLS_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", }, 
"attributes": map[string]string{ "email": "mail", @@ -147,7 +163,7 @@ func TestReload(t *testing.T) { }, map[string]any{ "group_dn": "cn=viewer,ou=groups,dc=ldap,dc=goauthentik,dc=io", - "org_id": 1, + "org_id": 2, "org_role": "Viewer", }, }, @@ -160,14 +176,20 @@ func TestReload(t *testing.T) { expectedServersConfig: &ldap.ServersConfig{ Servers: []*ldap.ServerConfig{ { - Host: "127.0.0.1", - Port: 3389, - UseSSL: true, - StartTLS: true, - SkipVerifySSL: false, - MinTLSVersion: "TLS1.3", + Host: "127.0.0.1", + Port: 3389, + UseSSL: true, + StartTLS: true, + SkipVerifySSL: false, + MinTLSVersion: "TLS1.3", + MinTLSVersionID: tls.VersionTLS13, TLSCiphers: []string{ "TLS_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + }, + TLSCipherIDs: []uint16{ + tls.TLS_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, }, RootCACert: "/path/to/certificate.crt", RootCACertValue: []string{validCert}, @@ -204,7 +226,7 @@ func TestReload(t *testing.T) { }, { GroupDN: "cn=viewer,ou=groups,dc=ldap,dc=goauthentik,dc=io", - OrgId: 1, + OrgId: 2, OrgRole: "Viewer", }, }, @@ -271,7 +293,8 @@ func TestReload(t *testing.T) { expectedServersConfig: &ldap.ServersConfig{ Servers: []*ldap.ServerConfig{ { - Host: "127.0.0.1", + Host: "127.0.0.1", + Timeout: 10, }, }, }, @@ -314,7 +337,21 @@ func TestValidate(t *testing.T) { "config": map[string]any{ "servers": []any{ map[string]any{ - "host": "127.0.0.1", + "host": "127.0.0.1", + "search_filter": "(cn=%s)", + "search_base_dns": []string{"dc=grafana,dc=org"}, + "min_tls_version": "TLS1.3", + "tls_ciphers": []string{"TLS_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"}, + "group_mappings": []any{ + map[string]any{ + "group_dn": "cn=admins,ou=groups,dc=grafana,dc=org", + "grafana_admin": true, + }, + map[string]any{ + "group_dn": "cn=users,ou=groups,dc=grafana,dc=org", + "org_role": "Editor", + }, + }, }, }, }, @@ -376,10 +413,14 @@ func TestValidate(t *testing.T) { "config": map[string]any{ "servers": []any{ map[string]any{ - "host": "127.0.0.1", + "host": "127.0.0.1", + "search_filter": "(cn=%s)", + "search_base_dns": []string{"dc=grafana,dc=org"}, }, map[string]any{ - "port": 123, + "port": 123, + "search_filter": "(cn=%s)", + "search_base_dns": []string{"dc=grafana,dc=org"}, }, }, }, @@ -388,6 +429,116 @@ func TestValidate(t *testing.T) { isValid: false, containsError: "no host configured", }, + { + description: "validation fails if search filter is not configured", + settings: models.SSOSettings{ + Provider: "ldap", + Settings: map[string]any{ + "enabled": true, + "config": map[string]any{ + "servers": []any{ + map[string]any{ + "host": "127.0.0.1", + "search_base_dns": []string{"dc=grafana,dc=org"}, + }, + }, + }, + }, + }, + isValid: false, + containsError: "no search filter", + }, + { + description: "validation fails if search base DN is not configured", + settings: models.SSOSettings{ + Provider: "ldap", + Settings: map[string]any{ + "enabled": true, + "config": map[string]any{ + "servers": []any{ + map[string]any{ + "host": "127.0.0.1", + "search_filter": "(cn=%s)", + }, + }, + }, + }, + }, + isValid: false, + containsError: "no search base DN", + }, + { + description: "validation fails if min TLS version is invalid", + settings: models.SSOSettings{ + Provider: "ldap", + Settings: map[string]any{ + "enabled": true, + "config": map[string]any{ + "servers": []any{ + map[string]any{ + "host": "127.0.0.1", + "search_filter": "(cn=%s)", + "search_base_dns": []string{"dc=grafana,dc=org"}, + "min_tls_version": "TLS5.18", 
+ }, + }, + }, + }, + }, + isValid: false, + containsError: "invalid min TLS version", + }, + { + description: "validation fails if TLS cyphers are invalid", + settings: models.SSOSettings{ + Provider: "ldap", + Settings: map[string]any{ + "enabled": true, + "config": map[string]any{ + "servers": []any{ + map[string]any{ + "host": "127.0.0.1", + "search_filter": "(cn=%s)", + "search_base_dns": []string{"dc=grafana,dc=org"}, + "tls_ciphers": []string{"TLS_AES_128_GCM_SHA256", "invalid-tls-cypher"}, + }, + }, + }, + }, + }, + isValid: false, + containsError: "invalid TLS ciphers", + }, + { + description: "validation fails if a group mapping contains no organization role", + settings: models.SSOSettings{ + Provider: "ldap", + Settings: map[string]any{ + "enabled": true, + "config": map[string]any{ + "servers": []any{ + map[string]any{ + "host": "127.0.0.1", + "search_filter": "(cn=%s)", + "search_base_dns": []string{"dc=grafana,dc=org"}, + "group_mappings": []any{ + map[string]any{ + "group_dn": "cn=admins,ou=groups,dc=grafana,dc=org", + "org_role": "Admin", + "grafana_admin": true, + }, + map[string]any{ + "group_dn": "cn=users,ou=groups,dc=grafana,dc=org", + }, + }, + }, + }, + }, + }, + }, + isValid: false, + containsError: "organization role", + }, } for _, tt := range testCases { diff --git a/pkg/services/ldap/settings.go b/pkg/services/ldap/settings.go index 90f3732b235..5b0a5b30d55 100644 --- a/pkg/services/ldap/settings.go +++ b/pkg/services/ldap/settings.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "sync" + "time" "github.com/BurntSushi/toml" @@ -13,7 +14,7 @@ import ( "github.com/grafana/grafana/pkg/util" ) -const defaultTimeout = 10 +const DefaultTimeout = 10 // Config holds parameters from the .ini config file type Config struct { @@ -35,13 +36,13 @@ type ServerConfig struct { Host string `toml:"host" json:"host"` Port int `toml:"port" json:"port,omitempty"` - UseSSL bool `toml:"use_ssl" json:"use_ssl,omitempty"` - StartTLS bool `toml:"start_tls" json:"start_tls,omitempty"` - SkipVerifySSL bool `toml:"ssl_skip_verify" json:"ssl_skip_verify,omitempty"` - MinTLSVersion string `toml:"min_tls_version" json:"min_tls_version,omitempty"` - minTLSVersion uint16 `toml:"-" json:"-"` - TLSCiphers []string `toml:"tls_ciphers" json:"tls_ciphers,omitempty"` - tlsCiphers []uint16 `toml:"-" json:"-"` + UseSSL bool `toml:"use_ssl" json:"use_ssl,omitempty"` + StartTLS bool `toml:"start_tls" json:"start_tls,omitempty"` + SkipVerifySSL bool `toml:"ssl_skip_verify" json:"ssl_skip_verify,omitempty"` + MinTLSVersion string `toml:"min_tls_version" json:"min_tls_version,omitempty"` + MinTLSVersionID uint16 `toml:"-" json:"-"` + TLSCiphers []string `toml:"tls_ciphers" json:"tls_ciphers,omitempty"` + TLSCipherIDs []uint16 `toml:"-" json:"-"` RootCACert string `toml:"root_ca_cert" json:"root_ca_cert,omitempty"` RootCACertValue []string `json:"root_ca_cert_value,omitempty"` @@ -93,7 +94,11 @@ var loadingMutex = &sync.Mutex{} // We need to define in this space so `GetConfig` fn // could be defined as singleton -var config *ServersConfig +var cachedConfig struct { + config *ServersConfig + filePath string + fileModified time.Time +} func GetLDAPConfig(cfg *setting.Cfg) *Config { return &Config{ @@ -117,15 +122,27 @@ func GetConfig(cfg *Config) (*ServersConfig, error) { return nil, nil } - // Make it a singleton - if config != nil { - return config, nil + configFileStats, err := os.Stat(cfg.ConfigFilePath) + if err != nil { + return nil, err + } + configFileModified := configFileStats.ModTime() + + // return the 
config from cache if the config file hasn't been modified + if cachedConfig.config != nil && cachedConfig.filePath == cfg.ConfigFilePath && cachedConfig.fileModified.Equal(configFileModified) { + return cachedConfig.config, nil } loadingMutex.Lock() defer loadingMutex.Unlock() - return readConfig(cfg.ConfigFilePath) + cachedConfig.config, err = readConfig(cfg.ConfigFilePath) + if err == nil { + cachedConfig.filePath = cfg.ConfigFilePath + cachedConfig.fileModified = configFileModified + } + + return cachedConfig.config, err } func readConfig(configFile string) (*ServersConfig, error) { @@ -167,14 +184,14 @@ func readConfig(configFile string) (*ServersConfig, error) { } if server.MinTLSVersion != "" { - server.minTLSVersion, err = util.TlsNameToVersion(server.MinTLSVersion) + server.MinTLSVersionID, err = util.TlsNameToVersion(server.MinTLSVersion) if err != nil { logger.Error("Failed to set min TLS version. Ignoring", "err", err) } } if len(server.TLSCiphers) > 0 { - server.tlsCiphers, err = util.TlsCiphersToIDs(server.TLSCiphers) + server.TLSCipherIDs, err = util.TlsCiphersToIDs(server.TLSCiphers) if err != nil { logger.Error("Unrecognized TLS Cipher(s). Ignoring", "err", err) } @@ -192,7 +209,7 @@ func readConfig(configFile string) (*ServersConfig, error) { // set default timeout if unspecified if server.Timeout == 0 { - server.Timeout = defaultTimeout + server.Timeout = DefaultTimeout } } diff --git a/pkg/services/ldap/settings_test.go b/pkg/services/ldap/settings_test.go index b1562afe513..6775b8e511a 100644 --- a/pkg/services/ldap/settings_test.go +++ b/pkg/services/ldap/settings_test.go @@ -13,9 +13,9 @@ func TestReadingLDAPSettings(t *testing.T) { assert.Nil(t, err, "No error when reading ldap config") assert.EqualValues(t, "127.0.0.1", config.Servers[0].Host) assert.EqualValues(t, "tls1.3", config.Servers[0].MinTLSVersion) - assert.EqualValues(t, uint16(tls.VersionTLS13), config.Servers[0].minTLSVersion) + assert.EqualValues(t, uint16(tls.VersionTLS13), config.Servers[0].MinTLSVersionID) assert.EqualValues(t, []string{"TLS_CHACHA20_POLY1305_SHA256", "TLS_AES_128_GCM_SHA256"}, config.Servers[0].TLSCiphers) - assert.ElementsMatch(t, []uint16{tls.TLS_CHACHA20_POLY1305_SHA256, tls.TLS_AES_128_GCM_SHA256}, config.Servers[0].tlsCiphers) + assert.ElementsMatch(t, []uint16{tls.TLS_CHACHA20_POLY1305_SHA256, tls.TLS_AES_128_GCM_SHA256}, config.Servers[0].TLSCipherIDs) } func TestReadingLDAPSettingsWithEnvVariable(t *testing.T) { @@ -25,3 +25,28 @@ func TestReadingLDAPSettingsWithEnvVariable(t *testing.T) { require.NoError(t, err) assert.EqualValues(t, "MySecret", config.Servers[0].BindPassword) } + +func TestReadingLDAPSettingsUsingCache(t *testing.T) { + cfg := &Config{ + Enabled: true, + ConfigFilePath: "testdata/ldap.toml", + } + + // cache is empty initially + assert.Nil(t, cachedConfig.config) + + firstConfig, err := GetConfig(cfg) + + // cache has been initialized + assert.NotNil(t, cachedConfig.config) + assert.EqualValues(t, *firstConfig, *cachedConfig.config) + assert.Nil(t, err) + assert.EqualValues(t, "127.0.0.1", cachedConfig.config.Servers[0].Host) + + // make sure the cached config is returned on subsequent calls + config := cachedConfig.config + secondConfig, err := GetConfig(cfg) + + assert.Equal(t, config, secondConfig) + assert.Nil(t, err) +} diff --git a/pkg/services/libraryelements/libraryelements_test.go b/pkg/services/libraryelements/libraryelements_test.go index 133443f7264..bf82e13b3c8 100644 --- a/pkg/services/libraryelements/libraryelements_test.go +++ 
b/pkg/services/libraryelements/libraryelements_test.go
@@ -25,6 +25,7 @@ import (
 	"github.com/grafana/grafana/pkg/services/accesscontrol/acimpl"
 	"github.com/grafana/grafana/pkg/services/accesscontrol/actest"
 	acmock "github.com/grafana/grafana/pkg/services/accesscontrol/mock"
+	"github.com/grafana/grafana/pkg/services/authz/zanzana"
 	contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model"
 	"github.com/grafana/grafana/pkg/services/dashboards"
 	"github.com/grafana/grafana/pkg/services/dashboards/database"
@@ -444,7 +445,7 @@ func testScenario(t *testing.T, desc string, fn func(t *testing.T, sc scenarioCo
 	quotaService := quotatest.New(false, nil)
 	dashboardStore, err := database.ProvideDashboardStore(sqlStore, cfg, features, tagimpl.ProvideService(sqlStore), quotaService)
 	require.NoError(t, err)
-	ac := acimpl.ProvideAccessControl(features)
+	ac := acimpl.ProvideAccessControl(features, zanzana.NewNoopClient())
 	folderPermissions := acmock.NewMockedPermissionsService()
 	folderPermissions.On("SetPermissions", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]accesscontrol.ResourcePermission{}, nil)
 	dashboardPermissions := acmock.NewMockedPermissionsService()
diff --git a/pkg/services/live/live_test.go b/pkg/services/live/live_test.go
index 447910e1b84..427a6abf2aa 100644
--- a/pkg/services/live/live_test.go
+++ b/pkg/services/live/live_test.go
@@ -14,6 +14,7 @@ import (
 	"github.com/grafana/grafana/pkg/infra/usagestats"
 	"github.com/grafana/grafana/pkg/services/accesscontrol/acimpl"
 	"github.com/grafana/grafana/pkg/services/annotations/annotationstest"
+	"github.com/grafana/grafana/pkg/services/authz/zanzana"
 	"github.com/grafana/grafana/pkg/services/dashboards"
 	"github.com/grafana/grafana/pkg/services/featuremgmt"
 	"github.com/grafana/grafana/pkg/setting"
@@ -36,7 +37,7 @@ func Test_provideLiveService_RedisUnavailable(t *testing.T) {
 		nil, &usagestats.UsageStatsMock{T: t}, nil,
-		featuremgmt.WithFeatures(), acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), &dashboards.FakeDashboardService{}, annotationstest.NewFakeAnnotationsRepo(), nil)
+		featuremgmt.WithFeatures(), acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), &dashboards.FakeDashboardService{}, annotationstest.NewFakeAnnotationsRepo(), nil)
 	// Proceeds without live HA if redis is unavailable
 	require.NoError(t, err)
diff --git a/pkg/services/navtree/navtreeimpl/applinks.go b/pkg/services/navtree/navtreeimpl/applinks.go
index 393af522b1c..fca821d2997 100644
--- a/pkg/services/navtree/navtreeimpl/applinks.go
+++ b/pkg/services/navtree/navtreeimpl/applinks.go
@@ -286,9 +286,9 @@ func (s *ServiceImpl) readNavigationSettings() {
 		"grafana-k8s-app": {SectionID: navtree.NavIDInfrastructure, SortWeight: 1, Text: "Kubernetes"},
 		"grafana-aws-app": {SectionID: navtree.NavIDInfrastructure, SortWeight: 2},
 		"grafana-app-observability-app": {SectionID: navtree.NavIDRoot, SortWeight: navtree.WeightApplication, Text: "Application", Icon: "graph-bar"},
-		"grafana-pyroscope-app": {SectionID: navtree.NavIDExplore, SortWeight: 1, Text: "Profiles"},
-		"grafana-lokiexplore-app": {SectionID: navtree.NavIDExplore, SortWeight: 2, Text: "Logs"},
-		"grafana-exploretraces-app": {SectionID: navtree.NavIDExplore, SortWeight: 3, Text: "Traces"},
+		"grafana-lokiexplore-app": {SectionID: navtree.NavIDExplore, SortWeight: 1, Text: "Logs"},
+		"grafana-exploretraces-app": {SectionID: navtree.NavIDExplore, SortWeight: 2, Text: "Traces"},
+		"grafana-pyroscope-app": {SectionID: navtree.NavIDExplore, SortWeight: 3,
Text: "Profiles"}, "grafana-kowalski-app": {SectionID: navtree.NavIDRoot, SortWeight: navtree.WeightFrontend, Text: "Frontend", Icon: "frontend-observability"}, "grafana-synthetic-monitoring-app": {SectionID: navtree.NavIDTestingAndSynthetics, SortWeight: 2, Text: "Synthetics"}, "grafana-oncall-app": {SectionID: navtree.NavIDAlertsAndIncidents, SortWeight: 1, Text: "OnCall"}, diff --git a/pkg/services/navtree/navtreeimpl/applinks_test.go b/pkg/services/navtree/navtreeimpl/applinks_test.go index 0b87ea27064..eeba3af0445 100644 --- a/pkg/services/navtree/navtreeimpl/applinks_test.go +++ b/pkg/services/navtree/navtreeimpl/applinks_test.go @@ -12,6 +12,7 @@ import ( ac "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" accesscontrolmock "github.com/grafana/grafana/pkg/services/accesscontrol/mock" + "github.com/grafana/grafana/pkg/services/authz/zanzana" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" "github.com/grafana/grafana/pkg/services/datasources" "github.com/grafana/grafana/pkg/services/featuremgmt" @@ -434,7 +435,7 @@ func TestAddAppLinksAccessControl(t *testing.T) { service := ServiceImpl{ log: log.New("navtree"), cfg: cfg, - accessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), + accessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), pluginSettings: &pluginSettings, features: featuremgmt.WithFeatures(), pluginStore: &pluginstore.FakePluginStore{ diff --git a/pkg/services/ngalert/api/api_alertmanager_test.go b/pkg/services/ngalert/api/api_alertmanager_test.go index 52def62fa93..d025bd86796 100644 --- a/pkg/services/ngalert/api/api_alertmanager_test.go +++ b/pkg/services/ngalert/api/api_alertmanager_test.go @@ -15,6 +15,8 @@ import ( "github.com/stretchr/testify/require" alertingNotify "github.com/grafana/alerting/notify" + + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/ngalert/accesscontrol" "github.com/grafana/grafana/pkg/api/response" @@ -631,9 +633,9 @@ func createSut(t *testing.T) AlertmanagerSrv { } mam := createMultiOrgAlertmanager(t, configs) log := log.NewNopLogger() - ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) ruleStore := ngfakes.NewRuleStore(t) - ruleAuthzService := accesscontrol.NewRuleService(acimpl.ProvideAccessControl(featuremgmt.WithFeatures())) + ruleAuthzService := accesscontrol.NewRuleService(acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient())) return AlertmanagerSrv{ mam: mam, crypto: mam.Crypto, diff --git a/pkg/services/ngalert/api/api_prometheus_test.go b/pkg/services/ngalert/api/api_prometheus_test.go index 1f28c685ef7..478253e5646 100644 --- a/pkg/services/ngalert/api/api_prometheus_test.go +++ b/pkg/services/ngalert/api/api_prometheus_test.go @@ -14,11 +14,13 @@ import ( "github.com/stretchr/testify/require" alertingModels "github.com/grafana/alerting/models" + "github.com/grafana/grafana-plugin-sdk-go/data" "github.com/grafana/grafana/pkg/expr" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" + "github.com/grafana/grafana/pkg/services/authz/zanzana" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" "github.com/grafana/grafana/pkg/services/datasources" "github.com/grafana/grafana/pkg/services/featuremgmt" @@ -46,7 +48,7 @@ func 
Test_FormatValues(t *testing.T) { name: "with no value, it renders the evaluation string", alertState: &state.State{ LastEvaluationString: "[ var='A' metric='vector(10) + time() % 50' labels={} value=1.1 ]", - LatestResult: &state.Evaluation{Condition: "A", Values: map[string]*float64{}}, + LatestResult: &state.Evaluation{Condition: "A", Values: map[string]float64{}}, }, expected: "[ var='A' metric='vector(10) + time() % 50' labels={} value=1.1 ]", }, @@ -54,7 +56,7 @@ func Test_FormatValues(t *testing.T) { name: "with one value, it renders the single value", alertState: &state.State{ LastEvaluationString: "[ var='A' metric='vector(10) + time() % 50' labels={} value=1.1 ]", - LatestResult: &state.Evaluation{Condition: "A", Values: map[string]*float64{"A": &val1}}, + LatestResult: &state.Evaluation{Condition: "A", Values: map[string]float64{"A": val1}}, }, expected: "1.1e+00", }, @@ -62,7 +64,7 @@ func Test_FormatValues(t *testing.T) { name: "with two values, it renders the value based on their refID and position", alertState: &state.State{ LastEvaluationString: "[ var='B0' metric='vector(10) + time() % 50' labels={} value=1.1 ], [ var='B1' metric='vector(10) + time() % 50' labels={} value=1.4 ]", - LatestResult: &state.Evaluation{Condition: "B", Values: map[string]*float64{"B0": &val1, "B1": &val2}}, + LatestResult: &state.Evaluation{Condition: "B", Values: map[string]float64{"B0": val1, "B1": val2}}, }, expected: "B0: 1.1e+00, B1: 1.4e+00", }, @@ -70,7 +72,7 @@ func Test_FormatValues(t *testing.T) { name: "with a high number of values, it renders the value based on their refID and position using a natural order", alertState: &state.State{ LastEvaluationString: "[ var='B0' metric='vector(10) + time() % 50' labels={} value=1.1 ], [ var='B1' metric='vector(10) + time() % 50' labels={} value=1.4 ]", - LatestResult: &state.Evaluation{Condition: "B", Values: map[string]*float64{"B0": &val1, "B1": &val2, "B2": &val1, "B10": &val2, "B11": &val1}}, + LatestResult: &state.Evaluation{Condition: "B", Values: map[string]float64{"B0": val1, "B1": val2, "B2": val1, "B10": val2, "B11": val1}}, }, expected: "B0: 1.1e+00, B10: 1.4e+00, B11: 1.1e+00, B1: 1.4e+00, B2: 1.1e+00", }, @@ -238,11 +240,10 @@ func TestRouteGetAlertStatuses(t *testing.T) { func withAlertingState() forEachState { return func(s *state.State) *state.State { s.State = eval.Alerting - value := float64(1.1) s.LatestResult = &state.Evaluation{ EvaluationState: eval.Alerting, EvaluationTime: timeNow(), - Values: map[string]*float64{"B": &value}, + Values: map[string]float64{"B": float64(1.1)}, Condition: "B", } return s @@ -558,7 +559,7 @@ func TestRouteGetRuleStatuses(t *testing.T) { log: log.NewNopLogger(), manager: fakeAIM, store: ruleStore, - authz: accesscontrol.NewRuleService(acimpl.ProvideAccessControl(featuremgmt.WithFeatures())), + authz: accesscontrol.NewRuleService(acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient())), } permissions := createPermissionsForRules(slices.Concat(rulesInGroup1, rulesInGroup2, rulesInGroup3), orgID) @@ -673,7 +674,7 @@ func TestRouteGetRuleStatuses(t *testing.T) { log: log.NewNopLogger(), manager: fakeAIM, store: ruleStore, - authz: accesscontrol.NewRuleService(acimpl.ProvideAccessControl(featuremgmt.WithFeatures())), + authz: accesscontrol.NewRuleService(acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient())), } c := &contextmodel.ReqContext{Context: &web.Context{Req: req}, SignedInUser: &user.SignedInUser{OrgID: orgID, Permissions: 
createPermissionsForRules(rules, orgID)}} diff --git a/pkg/services/ngalert/api/api_ruler_test.go b/pkg/services/ngalert/api/api_ruler_test.go index 2b675426b2b..e1ae7815bb2 100644 --- a/pkg/services/ngalert/api/api_ruler_test.go +++ b/pkg/services/ngalert/api/api_ruler_test.go @@ -20,6 +20,7 @@ import ( "github.com/grafana/grafana/pkg/infra/log" ac "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" + "github.com/grafana/grafana/pkg/services/authz/zanzana" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" "github.com/grafana/grafana/pkg/services/dashboards" "github.com/grafana/grafana/pkg/services/datasources" @@ -649,7 +650,7 @@ func createService(store *fakes.RuleStore) *RulerSrv { cfg: &setting.UnifiedAlertingSettings{ BaseInterval: 10 * time.Second, }, - authz: accesscontrol.NewRuleService(acimpl.ProvideAccessControl(featuremgmt.WithFeatures())), + authz: accesscontrol.NewRuleService(acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient())), amConfigStore: &fakeAMRefresher{}, amRefresher: &fakeAMRefresher{}, featureManager: featuremgmt.WithFeatures(), diff --git a/pkg/services/ngalert/api/testing.go b/pkg/services/ngalert/api/testing.go index 3a84fbd6952..8b85525de78 100644 --- a/pkg/services/ngalert/api/testing.go +++ b/pkg/services/ngalert/api/testing.go @@ -86,7 +86,7 @@ func (f *fakeAlertInstanceManager) GenerateAlertInstances(orgID int64, alertRule LatestResult: &state.Evaluation{ EvaluationTime: evaluationTime.Add(1 * time.Minute), EvaluationState: eval.Normal, - Values: make(map[string]*float64), + Values: make(map[string]float64), }, LastEvaluationTime: evaluationTime.Add(1 * time.Minute), EvaluationDuration: evaluationDuration, diff --git a/pkg/services/ngalert/eval/testing.go b/pkg/services/ngalert/eval/testing.go index b06d071011b..d3549f3e2dd 100644 --- a/pkg/services/ngalert/eval/testing.go +++ b/pkg/services/ngalert/eval/testing.go @@ -80,6 +80,12 @@ func WithLabels(labels data.Labels) ResultMutator { } } +func WithValues(values map[string]NumberValueCapture) ResultMutator { + return func(r *Result) { + r.Values = values + } +} + type FakeLoadedMetricsReader struct { fingerprints map[data.Fingerprint]struct{} } diff --git a/pkg/services/ngalert/notifier/receiver_svc_test.go b/pkg/services/ngalert/notifier/receiver_svc_test.go index 862bc75e060..da367d576f1 100644 --- a/pkg/services/ngalert/notifier/receiver_svc_test.go +++ b/pkg/services/ngalert/notifier/receiver_svc_test.go @@ -6,12 +6,15 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/require" + "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/infra/db" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" "github.com/grafana/grafana/pkg/services/accesscontrol/actest" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions" "github.com/grafana/grafana/pkg/services/ngalert/models" @@ -20,7 +23,6 @@ import ( "github.com/grafana/grafana/pkg/services/secrets/database" "github.com/grafana/grafana/pkg/services/secrets/manager" "github.com/grafana/grafana/pkg/services/user" - "github.com/stretchr/testify/require" ) func TestReceiverService_GetReceiver(t *testing.T) { @@ -72,7 +74,7 @@ func TestReceiverService_GetReceivers(t 
*testing.T) { func TestReceiverService_DecryptRedact(t *testing.T) { sqlStore := db.InitTestDB(t) secretsService := manager.SetupTestService(t, database.ProvideSecretsStore(sqlStore)) - ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) getMethods := []string{"single", "multi"} diff --git a/pkg/services/ngalert/provisioning/contactpoints.go b/pkg/services/ngalert/provisioning/contactpoints.go index 1a57046e526..c475f09ffcf 100644 --- a/pkg/services/ngalert/provisioning/contactpoints.go +++ b/pkg/services/ngalert/provisioning/contactpoints.go @@ -339,7 +339,7 @@ func (ecp *ContactPointService) DeleteContactPoint(ctx context.Context, orgID in } } if fullRemoval && isContactPointInUse(name, []*apimodels.Route{revision.cfg.AlertmanagerConfig.Route}) { - return ErrContactPointReferenced + return ErrContactPointReferenced.Errorf("") } return ecp.xact.InTransaction(ctx, func(ctx context.Context) error { @@ -354,7 +354,7 @@ func (ecp *ContactPointService) DeleteContactPoint(ctx context.Context, orgID in uids = append(uids, key.UID) } ecp.log.Error("Cannot delete contact point because it is used in rule's notification settings", "receiverName", name, "rulesUid", strings.Join(uids, ",")) - return fmt.Errorf("contact point '%s' is currently used in notification settings by one or many alert rules", name) + return ErrContactPointUsedInRule.Errorf("") } } diff --git a/pkg/services/ngalert/provisioning/contactpoints_test.go b/pkg/services/ngalert/provisioning/contactpoints_test.go index 016e2552acd..b07dcc80237 100644 --- a/pkg/services/ngalert/provisioning/contactpoints_test.go +++ b/pkg/services/ngalert/provisioning/contactpoints_test.go @@ -17,6 +17,7 @@ import ( "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" "github.com/grafana/grafana/pkg/services/accesscontrol/actest" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions" "github.com/grafana/grafana/pkg/services/ngalert/models" @@ -261,7 +262,7 @@ func TestContactPointServiceDecryptRedact(t *testing.T) { secretsService := manager.SetupTestService(t, database.ProvideSecretsStore(db.InitTestDB(t))) receiverServiceWithAC := func(ecp *ContactPointService) *notifier.ReceiverService { return notifier.NewReceiverService( - acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), + acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), // Get won't use the sut's config store, so we can use a different one here. fakes.NewFakeAlertmanagerConfigStore(createEncryptedConfig(t, secretsService)), ecp.provenanceStore, diff --git a/pkg/services/ngalert/provisioning/errors.go b/pkg/services/ngalert/provisioning/errors.go index 45d8e3eb533..4c164381816 100644 --- a/pkg/services/ngalert/provisioning/errors.go +++ b/pkg/services/ngalert/provisioning/errors.go @@ -17,8 +17,8 @@ var ( ErrBadAlertmanagerConfiguration = errutil.Internal("alerting.notification.configCorrupted").MustTemplate("Failed to unmarshal the Alertmanager configuration", errutil.WithPublic("Current Alertmanager configuration in the storage is corrupted. 
Reset the configuration or rollback to a recent valid one.")) ErrProvenanceChangeNotAllowed = errutil.Forbidden("alerting.notifications.invalidProvenance").MustTemplate( - "Resource with provenance status '{{ .Public.CurrentProvenance }}' cannot be managed via API that handles resources with provenance status '{{ .Public.TargetProvenance }}'", - errutil.WithPublic("Resource with provenance status '{{ .Public.CurrentProvenance }}' cannot be managed via API that handles resources with provenance status '{{ .Public.TargetProvenance }}'. You must use appropriate API to manage this resource"), + "Resource with provenance status '{{ .Public.SourceProvenance }}' cannot be managed via API that handles resources with provenance status '{{ .Public.TargetProvenance }}'", + errutil.WithPublic("Resource with provenance status '{{ .Public.SourceProvenance }}' cannot be managed via API that handles resources with provenance status '{{ .Public.TargetProvenance }}'. You must use appropriate API to manage this resource"), ) ErrVersionConflict = errutil.Conflict("alerting.notifications.conflict") @@ -28,7 +28,8 @@ var ( ErrTimeIntervalInvalid = errutil.BadRequest("alerting.notifications.time-intervals.invalidFormat").MustTemplate("Invalid format of the submitted time interval", errutil.WithPublic("Time interval is in invalid format. Correct the payload and try again.")) ErrTimeIntervalInUse = errutil.Conflict("alerting.notifications.time-intervals.used", errutil.WithPublicMessage("Time interval is used by one or many notification policies")) - ErrContactPointReferenced = errutil.BadRequest("alerting.notifications.contact-points.referenced", errutil.WithPublicMessage("Contact point is currently referenced by a notification policy.")) + ErrContactPointReferenced = errutil.Conflict("alerting.notifications.contact-points.referenced", errutil.WithPublicMessage("Contact point is currently referenced by a notification policy.")) + ErrContactPointUsedInRule = errutil.Conflict("alerting.notifications.contact-points.used-by-rule", errutil.WithPublicMessage("Contact point is currently used in the notification settings of one or many alert rules.")) ) func makeErrBadAlertmanagerConfiguration(err error) error { diff --git a/pkg/services/ngalert/state/cache.go b/pkg/services/ngalert/state/cache.go index d6fca25632c..4799e6ab127 100644 --- a/pkg/services/ngalert/state/cache.go +++ b/pkg/services/ngalert/state/cache.go @@ -3,7 +3,6 @@ package state import ( "context" "errors" - "math" "net/url" "strings" "sync" @@ -157,15 +156,6 @@ func calculateState(ctx context.Context, log log.Logger, alertRule *ngModels.Ale labels, _ := expand(ctx, log, alertRule.Title, alertRule.Labels, templateData, externalURL, result.EvaluatedAt) annotations, _ := expand(ctx, log, alertRule.Title, alertRule.Annotations, templateData, externalURL, result.EvaluatedAt) - values := make(map[string]float64) - for refID, v := range result.Values { - if v.Value != nil { - values[refID] = *v.Value - } else { - values[refID] = math.NaN() - } - } - lbs := make(data.Labels, len(extraLabels)+len(labels)+len(resultLabels)) dupes := make(data.Labels) for key, val := range extraLabels { @@ -210,7 +200,6 @@ func calculateState(ctx context.Context, log log.Logger, alertRule *ngModels.Ale Labels: lbs, Annotations: annotations, EvaluationDuration: result.EvaluationDuration, - Values: values, StartsAt: result.EvaluatedAt, EndsAt: result.EvaluatedAt, ResultFingerprint: result.Instance.Fingerprint(), // remember original result fingerprint diff --git 
a/pkg/services/ngalert/state/cache_test.go b/pkg/services/ngalert/state/cache_test.go index 23292714226..fc57ced138c 100644 --- a/pkg/services/ngalert/state/cache_test.go +++ b/pkg/services/ngalert/state/cache_test.go @@ -238,34 +238,6 @@ func Test_getOrCreate(t *testing.T) { } }) - t.Run("expected Reduce and Math expression values", func(t *testing.T) { - result := eval.Result{ - Instance: models.GenerateAlertLabels(5, "result-"), - Values: map[string]eval.NumberValueCapture{ - "A": {Var: "A", Value: util.Pointer(1.0)}, - "B": {Var: "B", Value: util.Pointer(2.0)}, - }, - } - rule := generateRule() - - state := c.getOrCreate(context.Background(), l, rule, result, nil, url) - assert.Equal(t, map[string]float64{"A": 1, "B": 2}, state.Values) - }) - - t.Run("expected Classic Condition values", func(t *testing.T) { - result := eval.Result{ - Instance: models.GenerateAlertLabels(5, "result-"), - Values: map[string]eval.NumberValueCapture{ - "B0": {Var: "B", Value: util.Pointer(1.0)}, - "B1": {Var: "B", Value: util.Pointer(2.0)}, - }, - } - rule := generateRule() - - state := c.getOrCreate(context.Background(), l, rule, result, nil, url) - assert.Equal(t, map[string]float64{"B0": 1, "B1": 2}, state.Values) - }) - t.Run("when result labels collide with system labels from LabelsUserCannotSpecify", func(t *testing.T) { result := eval.Result{ Instance: models.GenerateAlertLabels(5, "result-"), diff --git a/pkg/services/ngalert/state/manager.go b/pkg/services/ngalert/state/manager.go index d1ecc17828e..19c61e64e6b 100644 --- a/pkg/services/ngalert/state/manager.go +++ b/pkg/services/ngalert/state/manager.go @@ -405,10 +405,11 @@ func (st *Manager) setNextState(ctx context.Context, alertRule *ngModels.AlertRu currentState.LastEvaluationTime = result.EvaluatedAt currentState.EvaluationDuration = result.EvaluationDuration + currentState.SetNextValues(result) currentState.LatestResult = &Evaluation{ EvaluationTime: result.EvaluatedAt, EvaluationState: result.State, - Values: NewEvaluationValues(result.Values), + Values: currentState.Values, Condition: alertRule.Condition, } currentState.LastEvaluationString = result.EvaluationString diff --git a/pkg/services/ngalert/state/manager_private_test.go b/pkg/services/ngalert/state/manager_private_test.go index ed996bca211..e263d11155d 100644 --- a/pkg/services/ngalert/state/manager_private_test.go +++ b/pkg/services/ngalert/state/manager_private_test.go @@ -23,6 +23,7 @@ import ( "github.com/grafana/grafana/pkg/services/ngalert/eval" "github.com/grafana/grafana/pkg/services/ngalert/metrics" ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models" + "github.com/grafana/grafana/pkg/util" ) // Not for parallel tests. @@ -149,14 +150,18 @@ func TestProcessEvalResults_StateTransitions(t *testing.T) { return ngmodels.CopyRule(baseRule, mutators...) 
} - newEvaluation := func(evalTime time.Time, evalState eval.State) *Evaluation { + newEvaluationWithValues := func(evalTime time.Time, evalState eval.State, values map[string]float64) *Evaluation { return &Evaluation{ EvaluationTime: evalTime, EvaluationState: evalState, - Values: make(map[string]*float64), + Values: values, } } + newEvaluation := func(evalTime time.Time, evalState eval.State) *Evaluation { + return newEvaluationWithValues(evalTime, evalState, make(map[string]float64)) + } + newResult := func(mutators ...eval.ResultMutator) eval.Result { r := eval.Result{ State: eval.Normal, @@ -894,7 +899,11 @@ func TestProcessEvalResults_StateTransitions(t *testing.T) { desc: "t1[1:normal] t2[NoData] at t2", results: map[time.Time]eval.Results{ t1: { - newResult(eval.WithState(eval.Normal), eval.WithLabels(labels1)), + newResult( + eval.WithState(eval.Normal), + eval.WithLabels(labels1), + eval.WithValues(map[string]eval.NumberValueCapture{"A": {Var: "A", Value: util.Pointer(1.0)}}), + ), }, t2: { newResult(eval.WithState(eval.NoData), eval.WithLabels(noDataLabels)), @@ -913,6 +922,7 @@ func TestProcessEvalResults_StateTransitions(t *testing.T) { EndsAt: t2.Add(ResendDelay * 4), LastEvaluationTime: t2, LastSentAt: &t2, + Values: map[string]float64{}, }, }, }, @@ -925,11 +935,12 @@ func TestProcessEvalResults_StateTransitions(t *testing.T) { Labels: labels["system + rule + no-data"], State: eval.Alerting, StateReason: eval.NoData.String(), - LatestResult: newEvaluation(t2, eval.NoData), + LatestResult: newEvaluationWithValues(t2, eval.NoData, map[string]float64{}), StartsAt: t2, EndsAt: t2.Add(ResendDelay * 4), LastEvaluationTime: t2, LastSentAt: &t2, + Values: map[string]float64{}, }, }, }, @@ -942,10 +953,11 @@ func TestProcessEvalResults_StateTransitions(t *testing.T) { Labels: labels["system + rule + no-data"], State: eval.Normal, StateReason: eval.NoData.String(), - LatestResult: newEvaluation(t2, eval.NoData), + LatestResult: newEvaluationWithValues(t2, eval.NoData, map[string]float64{}), StartsAt: t2, EndsAt: t2, LastEvaluationTime: t2, + Values: map[string]float64{}, }, }, }, @@ -976,11 +988,12 @@ func TestProcessEvalResults_StateTransitions(t *testing.T) { Labels: labels["system + rule + labels1"], State: eval.Alerting, StateReason: eval.NoData.String(), - LatestResult: newEvaluation(t2, eval.NoData), + LatestResult: newEvaluationWithValues(t2, eval.NoData, map[string]float64{"A": float64(-1)}), StartsAt: t2, EndsAt: t2.Add(ResendDelay * 4), LastEvaluationTime: t2, LastSentAt: &t2, + Values: map[string]float64{"A": float64(-1)}, }, }, }, @@ -993,10 +1006,11 @@ func TestProcessEvalResults_StateTransitions(t *testing.T) { Labels: labels["system + rule + labels1"], State: eval.Normal, StateReason: eval.NoData.String(), - LatestResult: newEvaluation(t2, eval.NoData), + LatestResult: newEvaluationWithValues(t2, eval.NoData, map[string]float64{"A": float64(-1)}), StartsAt: t1, EndsAt: t1, LastEvaluationTime: t2, + Values: map[string]float64{"A": float64(-1)}, }, }, }, @@ -1009,10 +1023,11 @@ func TestProcessEvalResults_StateTransitions(t *testing.T) { Labels: labels["system + rule + labels1"], State: eval.Normal, StateReason: ngmodels.ConcatReasons(eval.NoData.String(), ngmodels.StateReasonKeepLast), - LatestResult: newEvaluation(t2, eval.NoData), + LatestResult: newEvaluationWithValues(t2, eval.NoData, map[string]float64{"A": float64(-1)}), StartsAt: t1, EndsAt: t1, LastEvaluationTime: t2, + Values: map[string]float64{"A": float64(-1)}, }, }, }, @@ -2807,7 +2822,7 @@ func 
TestProcessEvalResults_StateTransitions(t *testing.T) { ruleMutators: []ngmodels.AlertRuleMutator{ngmodels.RuleMuts.WithForNTimes(1)}, results: map[time.Time]eval.Results{ t1: { - newResult(eval.WithState(eval.Alerting), eval.WithLabels(labels1)), + newResult(eval.WithState(eval.Alerting), eval.WithLabels(labels1), eval.WithValues(map[string]eval.NumberValueCapture{"A": {Var: "A", Value: util.Pointer(1.0)}})), }, t2: { newResult(eval.WithError(datasourceError)), @@ -2895,11 +2910,12 @@ func TestProcessEvalResults_StateTransitions(t *testing.T) { State: eval.Alerting, StateReason: eval.Error.String(), Error: datasourceError, - LatestResult: newEvaluation(t2, eval.Error), + LatestResult: newEvaluationWithValues(t2, eval.Error, map[string]float64{"A": float64(-1)}), StartsAt: t2, EndsAt: t2.Add(ResendDelay * 4), LastEvaluationTime: t2, LastSentAt: &t2, + Values: map[string]float64{"A": float64(-1)}, }, }, }, @@ -2912,10 +2928,11 @@ func TestProcessEvalResults_StateTransitions(t *testing.T) { Labels: labels["system + rule + labels1"], State: eval.Normal, StateReason: eval.Error.String(), - LatestResult: newEvaluation(t2, eval.Error), + LatestResult: newEvaluationWithValues(t2, eval.Error, map[string]float64{"A": float64(-1)}), StartsAt: t2, EndsAt: t2, LastEvaluationTime: t2, + Values: map[string]float64{"A": float64(-1)}, }, }, }, @@ -2928,11 +2945,12 @@ func TestProcessEvalResults_StateTransitions(t *testing.T) { Labels: labels["system + rule + labels1"], State: eval.Alerting, StateReason: ngmodels.ConcatReasons(eval.Error.String(), ngmodels.StateReasonKeepLast), - LatestResult: newEvaluation(t2, eval.Error), + LatestResult: newEvaluationWithValues(t2, eval.Error, map[string]float64{"A": float64(-1)}), StartsAt: t2, EndsAt: t2.Add(ResendDelay * 4), LastEvaluationTime: t2, LastSentAt: &t2, + Values: map[string]float64{"A": float64(-1)}, }, }, }, @@ -2943,7 +2961,7 @@ func TestProcessEvalResults_StateTransitions(t *testing.T) { desc: "t1[1:normal] t2[QueryError] at t2", results: map[time.Time]eval.Results{ t1: { - newResult(eval.WithState(eval.Normal), eval.WithLabels(labels1)), + newResult(eval.WithState(eval.Normal), eval.WithLabels(labels1), eval.WithValues(map[string]eval.NumberValueCapture{"A": {Var: "A", Value: util.Pointer(1.0)}})), }, t2: { newResult(eval.WithError(datasourceError)), @@ -3032,11 +3050,12 @@ func TestProcessEvalResults_StateTransitions(t *testing.T) { State: eval.Alerting, StateReason: eval.Error.String(), Error: datasourceError, - LatestResult: newEvaluation(t2, eval.Error), + LatestResult: newEvaluationWithValues(t2, eval.Error, map[string]float64{"A": float64(-1)}), StartsAt: t2, EndsAt: t2.Add(ResendDelay * 4), LastEvaluationTime: t2, LastSentAt: &t2, + Values: map[string]float64{"A": float64(-1)}, }, }, }, @@ -3049,10 +3068,11 @@ func TestProcessEvalResults_StateTransitions(t *testing.T) { Labels: labels["system + rule + labels1"], State: eval.Normal, StateReason: eval.Error.String(), - LatestResult: newEvaluation(t2, eval.Error), + LatestResult: newEvaluationWithValues(t2, eval.Error, map[string]float64{"A": float64(-1)}), StartsAt: t1, EndsAt: t1, LastEvaluationTime: t2, + Values: map[string]float64{"A": float64(-1)}, }, }, }, @@ -3065,10 +3085,11 @@ func TestProcessEvalResults_StateTransitions(t *testing.T) { Labels: labels["system + rule + labels1"], State: eval.Normal, StateReason: ngmodels.ConcatReasons(eval.Error.String(), ngmodels.StateReasonKeepLast), - LatestResult: newEvaluation(t2, eval.Error), + LatestResult: newEvaluationWithValues(t2, eval.Error, 
map[string]float64{"A": float64(-1)}), StartsAt: t1, EndsAt: t1, LastEvaluationTime: t2, + Values: map[string]float64{"A": float64(-1)}, }, }, }, diff --git a/pkg/services/ngalert/state/manager_test.go b/pkg/services/ngalert/state/manager_test.go index a15a2a5da27..96f58326986 100644 --- a/pkg/services/ngalert/state/manager_test.go +++ b/pkg/services/ngalert/state/manager_test.go @@ -324,14 +324,18 @@ func TestProcessEvalResults(t *testing.T) { ExecErrState: models.ErrorErrState, } - newEvaluation := func(evalTime time.Time, evalState eval.State) *state.Evaluation { + newEvaluationWithValues := func(evalTime time.Time, evalState eval.State, values map[string]float64) *state.Evaluation { return &state.Evaluation{ EvaluationTime: evalTime, EvaluationState: evalState, - Values: make(map[string]*float64), + Values: values, } } + newEvaluation := func(evalTime time.Time, evalState eval.State) *state.Evaluation { + return newEvaluationWithValues(evalTime, evalState, make(map[string]float64)) + } + baseRuleWith := func(mutators ...models.AlertRuleMutator) *models.AlertRule { r := models.CopyRule(baseRule, mutators...) return r @@ -1378,6 +1382,76 @@ func TestProcessEvalResults(t *testing.T) { }, }, }, + { + desc: "expected Reduce and Math expression values", + alertRule: baseRuleWith(), + expectedAnnotations: 1, + evalResults: map[time.Time]eval.Results{ + t1: { + newResult( + eval.WithState(eval.Alerting), + eval.WithLabels(data.Labels{}), + eval.WithValues(map[string]eval.NumberValueCapture{ + "A": {Var: "A", Labels: data.Labels{}, Value: util.Pointer(1.0)}, + "B": {Var: "B", Labels: data.Labels{}, Value: util.Pointer(2.0)}, + })), + }, + }, + expectedStates: []*state.State{ + { + Labels: labels["system + rule"], + ResultFingerprint: data.Labels{}.Fingerprint(), + State: eval.Alerting, + LatestResult: newEvaluationWithValues(t1, eval.Alerting, map[string]float64{ + "A": 1.0, + "B": 2.0, + }), + StartsAt: t1, + EndsAt: t1.Add(state.ResendDelay * 4), + LastEvaluationTime: t1, + LastSentAt: &t1, + Values: map[string]float64{ + "A": 1.0, + "B": 2.0, + }, + }, + }, + }, + { + desc: "expected Classic Condition values", + alertRule: baseRuleWith(), + expectedAnnotations: 1, + evalResults: map[time.Time]eval.Results{ + t1: { + newResult( + eval.WithState(eval.Alerting), + eval.WithLabels(data.Labels{}), + eval.WithValues(map[string]eval.NumberValueCapture{ + "B0": {Var: "B", Labels: data.Labels{}, Value: util.Pointer(1.0)}, + "B1": {Var: "B", Labels: data.Labels{}, Value: util.Pointer(2.0)}, + })), + }, + }, + expectedStates: []*state.State{ + { + Labels: labels["system + rule"], + ResultFingerprint: data.Labels{}.Fingerprint(), + State: eval.Alerting, + LatestResult: newEvaluationWithValues(t1, eval.Alerting, map[string]float64{ + "B0": 1.0, + "B1": 2.0, + }), + StartsAt: t1, + EndsAt: t1.Add(state.ResendDelay * 4), + LastEvaluationTime: t1, + LastSentAt: &t1, + Values: map[string]float64{ + "B0": 1.0, + "B1": 2.0, + }, + }, + }, + }, } for _, tc := range testCases { @@ -1488,6 +1562,43 @@ func TestProcessEvalResults(t *testing.T) { }) } + t.Run("converts values to NaN if not defined", func(t *testing.T) { + // We set up our own special test for this, since we need special comparison logic - NaN != NaN + instanceStore := &state.FakeInstanceStore{} + clk := clock.NewMock() + cfg := state.ManagerCfg{ + Metrics: metrics.NewNGAlert(prometheus.NewPedanticRegistry()).GetStateMetrics(), + ExternalURL: nil, + InstanceStore: instanceStore, + Images: &state.NotAvailableImageService{}, + Clock: clk, + Historian: 
&state.FakeHistorian{}, + Tracer: tracing.InitializeTracerForTest(), + Log: log.New("ngalert.state.manager"), + MaxStateSaveConcurrency: 1, + } + st := state.NewManager(cfg, state.NewNoopPersister()) + rule := baseRuleWith() + time := t1 + res := eval.Results{newResult( + eval.WithState(eval.Alerting), + eval.WithLabels(data.Labels{}), + eval.WithEvaluatedAt(t1), + eval.WithValues(map[string]eval.NumberValueCapture{ + "A": {Var: "A", Labels: data.Labels{}, Value: nil}, + }), + )} + + _ = st.ProcessEvalResults(context.Background(), time, rule, res, systemLabels, state.NoopSender) + + states := st.GetStatesForRuleUID(rule.OrgID, rule.UID) + require.Len(t, states, 1) + state := states[0] + require.NotNil(t, state.Values) + require.Contains(t, state.Values, "A") + require.Truef(t, math.IsNaN(state.Values["A"]), "expected NaN but got %v", state.Values["A"]) + }) + t.Run("should save state to database", func(t *testing.T) { instanceStore := &state.FakeInstanceStore{} clk := clock.New() @@ -1623,7 +1734,7 @@ func TestStaleResultsHandler(t *testing.T) { LatestResult: &state.Evaluation{ EvaluationTime: evaluationTime, EvaluationState: eval.Normal, - Values: make(map[string]*float64), + Values: make(map[string]float64), Condition: "A", }, StartsAt: evaluationTime, diff --git a/pkg/services/ngalert/state/state.go b/pkg/services/ngalert/state/state.go index 8bdc883f0af..57e7f36649d 100644 --- a/pkg/services/ngalert/state/state.go +++ b/pkg/services/ngalert/state/state.go @@ -163,6 +163,32 @@ func (a *State) AddErrorAnnotations(err error, rule *models.AlertRule) { } } +func (a *State) SetNextValues(result eval.Result) { + const sentinel = float64(-1) + + // We try to provide a reasonable object for Values in the event of nodata/error. + // In order to not break templates that might refer to refIDs, + // we instead fill values with the latest known set of refIDs, but with a sentinel -1 to indicate that the value didn't exist. + if result.State == eval.NoData || result.State == eval.Error { + placeholder := make(map[string]float64, len(a.Values)) + for refID := range a.Values { + placeholder[refID] = sentinel + } + a.Values = placeholder + return + } + + newValues := make(map[string]float64, len(result.Values)) + for k, v := range result.Values { + if v.Value != nil { + newValues[k] = *v.Value + } else { + newValues[k] = math.NaN() + } + } + a.Values = newValues +} + // IsNormalStateWithNoReason returns true if the state is Normal and reason is empty func IsNormalStateWithNoReason(s *State) bool { return s.State == eval.Normal && s.StateReason == "" @@ -206,16 +232,20 @@ type Evaluation struct { // Values contains the RefID and value of reduce and math expressions. // Classic conditions can have different values for the same RefID as they can include multiple conditions. // For these, we use the index of the condition in addition RefID as the key e.g. "A0, A1, A2, etc.". - Values map[string]*float64 + Values map[string]float64 // Condition is the refID specified as the condition in the alerting rule at the time of the evaluation. Condition string } // NewEvaluationValues returns the labels and values for each RefID in the capture. 
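For reference, a minimal standalone sketch (not the Grafana code itself) of the value-handling rules the state.go hunks in this area introduce: captured refID values are stored as plain float64, a capture whose Value pointer is nil becomes math.NaN(), and a NoData/Error result keeps the previously known refIDs but fills them with the -1 sentinel. The capture type below is a simplified stand-in for eval.NumberValueCapture and the function name is illustrative only.

package main

import (
	"fmt"
	"math"
)

// capture is a simplified stand-in for eval.NumberValueCapture.
type capture struct {
	Var   string
	Value *float64
}

// nextValues mirrors the rules described above: nil captures become NaN,
// and a NoData/Error evaluation keeps the previous refIDs with a -1 sentinel.
func nextValues(prev map[string]float64, captures map[string]capture, noDataOrError bool) map[string]float64 {
	if noDataOrError {
		placeholder := make(map[string]float64, len(prev))
		for refID := range prev {
			placeholder[refID] = -1
		}
		return placeholder
	}
	next := make(map[string]float64, len(captures))
	for refID, c := range captures {
		if c.Value != nil {
			next[refID] = *c.Value
		} else {
			next[refID] = math.NaN()
		}
	}
	return next
}

func main() {
	v := 1.5
	fmt.Println(nextValues(nil, map[string]capture{"A": {Var: "A", Value: &v}, "B": {Var: "B"}}, false)) // A:1.5, B:NaN
	fmt.Println(nextValues(map[string]float64{"A": 1.5}, nil, true))                                     // A:-1
}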
-func NewEvaluationValues(m map[string]eval.NumberValueCapture) map[string]*float64 { - result := make(map[string]*float64, len(m)) +func NewEvaluationValues(m map[string]eval.NumberValueCapture) map[string]float64 { + result := make(map[string]float64, len(m)) for k, v := range m { - result[k] = v.Value + if v.Value != nil { + result[k] = *v.Value + } else { + result[k] = math.NaN() + } } return result } @@ -486,11 +516,7 @@ func (a *State) GetLastEvaluationValuesForCondition() map[string]float64 { for refID, value := range lastResult.Values { if strings.Contains(refID, lastResult.Condition) { - if value != nil { - r[refID] = *value - continue - } - r[refID] = math.NaN() + r[refID] = value } } diff --git a/pkg/services/ngalert/state/state_test.go b/pkg/services/ngalert/state/state_test.go index f079d90e391..152081c9cf1 100644 --- a/pkg/services/ngalert/state/state_test.go +++ b/pkg/services/ngalert/state/state_test.go @@ -528,9 +528,9 @@ func TestGetLastEvaluationValuesForCondition(t *testing.T) { eval := &Evaluation{ EvaluationTime: time.Time{}, EvaluationState: 0, - Values: map[string]*float64{ - "B": util.Pointer(rand.Float64()), - "A": util.Pointer(expected), + Values: map[string]float64{ + "B": rand.Float64(), + "A": expected, }, Condition: "A", } @@ -543,8 +543,8 @@ func TestGetLastEvaluationValuesForCondition(t *testing.T) { eval := &Evaluation{ EvaluationTime: time.Time{}, EvaluationState: 0, - Values: map[string]*float64{ - "C": util.Pointer(rand.Float64()), + Values: map[string]float64{ + "C": rand.Float64(), }, Condition: "A", } @@ -556,8 +556,8 @@ func TestGetLastEvaluationValuesForCondition(t *testing.T) { eval := &Evaluation{ EvaluationTime: time.Time{}, EvaluationState: 0, - Values: map[string]*float64{ - "A": nil, + Values: map[string]float64{ + "A": math.NaN(), }, Condition: "A", } diff --git a/pkg/services/oauthtoken/oauth_token.go b/pkg/services/oauthtoken/oauth_token.go index fffc81e9547..79820f391ff 100644 --- a/pkg/services/oauthtoken/oauth_token.go +++ b/pkg/services/oauthtoken/oauth_token.go @@ -100,21 +100,25 @@ func (o *Service) HasOAuthEntry(ctx context.Context, usr identity.Requester) (*l return nil, false, nil } + ctxLogger := logger.FromContext(ctx) + userID, err := identity.IntIdentifier(namespace, id) if err != nil { - logger.Error("Failed to convert user id to int", "namespace", namespace, "userId", id, "error", err) + ctxLogger.Error("Failed to convert user id to int", "namespace", namespace, "userID", id, "error", err) return nil, false, err } + ctxLogger = ctxLogger.New("userID", userID) + authInfoQuery := &login.GetAuthInfoQuery{UserId: userID} authInfo, err := o.AuthInfoService.GetAuthInfo(ctx, authInfoQuery) if err != nil { if errors.Is(err, user.ErrUserNotFound) { // Not necessarily an error. User may be logged in another way. 
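The oauth_token.go hunks around here switch from ad-hoc "userId" log fields to a request-scoped logger: derive a logger from the context once, bind the stable userID field with New, and drop that field from the individual calls. A minimal sketch of the pattern, assuming the package-level logger is created with infra/log's log.New as elsewhere in this diff; the package and function names are illustrative only.

package example

import (
	"context"

	"github.com/grafana/grafana/pkg/infra/log"
)

var logger = log.New("oauthtoken") // assumed package-level logger

func doSomething(ctx context.Context, userID int64, username string) {
	// Derive a contextual logger once and bind the stable key/value pair.
	ctxLogger := logger.FromContext(ctx).New("userID", userID)

	// Subsequent calls no longer need to repeat "userID".
	ctxLogger.Debug("No oauth token found for user", "username", username)
}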
- logger.Debug("No oauth token found for user", "userId", userID, "username", usr.GetLogin()) + ctxLogger.Debug("No oauth token found for user", "username", usr.GetLogin()) return nil, false, nil } - logger.Error("Failed to fetch oauth token for user", "userId", userID, "username", usr.GetLogin(), "error", err) + ctxLogger.Error("Failed to fetch oauth token for user", "username", usr.GetLogin(), "error", err) return nil, false, err } if !strings.Contains(authInfo.AuthModule, "oauth") { @@ -139,24 +143,28 @@ func (o *Service) TryTokenRefresh(ctx context.Context, usr identity.Requester) e return nil } + ctxLogger := logger.FromContext(ctx) + userID, err := identity.IntIdentifier(namespace, id) if err != nil { - logger.Warn("Failed to convert user id to int", "namespace", namespace, "userId", id, "error", err) + ctxLogger.Warn("Failed to convert user id to int", "namespace", namespace, "userId", id, "error", err) return nil } + ctxLogger = ctxLogger.New("userID", userID) + lockKey := fmt.Sprintf("oauth-refresh-token-%d", userID) if _, ok := o.cache.Get(lockKey); ok { - logger.Debug("Expiration check has been cached, no need to refresh", "userID", userID) + ctxLogger.Debug("Expiration check has been cached, no need to refresh") return nil } _, err, _ = o.singleFlightGroup.Do(lockKey, func() (any, error) { - logger.Debug("Singleflight request for getting a new access token", "key", lockKey) + ctxLogger.Debug("Singleflight request for getting a new access token", "key", lockKey) authInfo, exists, err := o.HasOAuthEntry(ctx, usr) if !exists { if err != nil { - logger.Debug("Failed to fetch oauth entry", "id", userID, "error", err) + ctxLogger.Debug("Failed to fetch oauth entry", "error", err) } else { // User is not logged in via OAuth no need to check o.cache.Set(lockKey, struct{}{}, maxOAuthTokenCacheTTL) @@ -174,13 +182,13 @@ func (o *Service) TryTokenRefresh(ctx context.Context, usr identity.Requester) e provider := strings.TrimPrefix(authInfo.AuthModule, "oauth_") currentOAuthInfo := o.SocialService.GetOAuthInfoProvider(provider) if currentOAuthInfo == nil { - logger.Warn("OAuth provider not found", "provider", provider) + ctxLogger.Warn("OAuth provider not found", "provider", provider) return nil, nil } // if refresh token handling is disabled for this provider, we can skip the refresh if !currentOAuthInfo.UseRefreshToken { - logger.Debug("Skipping token refresh", "provider", provider) + ctxLogger.Debug("Skipping token refresh", "provider", provider) return nil, nil } @@ -240,9 +248,11 @@ func (o *Service) InvalidateOAuthTokens(ctx context.Context, usr *login.UserAuth } func (o *Service) tryGetOrRefreshOAuthToken(ctx context.Context, usr *login.UserAuth) (*oauth2.Token, error) { + ctxLogger := logger.FromContext(ctx).New("userID", usr.UserId) + key := getCheckCacheKey(usr.UserId) if _, ok := o.cache.Get(key); ok { - logger.Debug("Expiration check has been cached", "userID", usr.UserId) + ctxLogger.Debug("Expiration check has been cached", "userID", usr.UserId) return buildOAuthTokenFromAuthInfo(usr), nil } @@ -259,13 +269,13 @@ func (o *Service) tryGetOrRefreshOAuthToken(ctx context.Context, usr *login.User authProvider := usr.AuthModule connect, err := o.SocialService.GetConnector(authProvider) if err != nil { - logger.Error("Failed to get oauth connector", "provider", authProvider, "error", err) + ctxLogger.Error("Failed to get oauth connector", "provider", authProvider, "error", err) return nil, err } client, err := o.SocialService.GetOAuthHttpClient(authProvider) if err != nil { - 
logger.Error("Failed to get oauth http client", "provider", authProvider, "error", err) + ctxLogger.Error("Failed to get oauth http client", "provider", authProvider, "error", err) return nil, err } ctx = context.WithValue(ctx, oauth2.HTTPClient, client) @@ -277,7 +287,7 @@ func (o *Service) tryGetOrRefreshOAuthToken(ctx context.Context, usr *login.User o.tokenRefreshDuration.WithLabelValues(authProvider, fmt.Sprintf("%t", err == nil)).Observe(duration.Seconds()) if err != nil { - logger.Error("Failed to retrieve oauth access token", + ctxLogger.Error("Failed to retrieve oauth access token", "provider", usr.AuthModule, "userId", usr.UserId, "error", err) return nil, err } @@ -292,8 +302,7 @@ func (o *Service) tryGetOrRefreshOAuthToken(ctx context.Context, usr *login.User } if o.Cfg.Env == setting.Dev { - logger.Debug("Oauth got token", - "user", usr.UserId, + ctxLogger.Debug("Oauth got token", "auth_module", usr.AuthModule, "expiry", fmt.Sprintf("%v", token.Expiry), "access_token", fmt.Sprintf("%v", token.AccessToken), @@ -302,10 +311,10 @@ func (o *Service) tryGetOrRefreshOAuthToken(ctx context.Context, usr *login.User } if err := o.AuthInfoService.UpdateAuthInfo(ctx, updateAuthCommand); err != nil { - logger.Error("Failed to update auth info during token refresh", "userId", usr.UserId, "error", err) + ctxLogger.Error("Failed to update auth info during token refresh", "userId", usr.UserId, "error", err) return nil, err } - logger.Debug("Updated oauth info for user", "userId", usr.UserId) + ctxLogger.Debug("Updated oauth info for user") } return token, nil @@ -359,7 +368,7 @@ func needTokenRefresh(usr *login.UserAuth) (*oauth2.Token, bool, time.Duration) idTokenExpires, hasIdTokenExpired = getExpiryWithSkew(idTokenExp) } if !hasAccessTokenExpired && !hasIdTokenExpired { - logger.Debug("Neither access nor id token have expired yet", "id", usr.Id) + logger.Debug("Neither access nor id token have expired yet", "userID", usr.UserId) return persistedToken, false, getOAuthTokenCacheTTL(accessTokenExpires, idTokenExpires) } if hasIdTokenExpired { diff --git a/pkg/services/pluginsintegration/clientmiddleware/caching_middleware.go b/pkg/services/pluginsintegration/clientmiddleware/caching_middleware.go index 95ad4d393fb..95756b169af 100644 --- a/pkg/services/pluginsintegration/clientmiddleware/caching_middleware.go +++ b/pkg/services/pluginsintegration/clientmiddleware/caching_middleware.go @@ -160,7 +160,7 @@ func (m *CachingMiddleware) CallResource(ctx context.Context, req *backend.CallR return m.next.CallResource(ctx, req, sender) } // Otherwise, intercept the responses in a wrapped sender so we can cache them first - cacheSender := callResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { + cacheSender := backend.CallResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { cr.UpdateCacheFn(ctx, res) return sender.Send(res) }) diff --git a/pkg/services/pluginsintegration/clientmiddleware/caching_middleware_test.go b/pkg/services/pluginsintegration/clientmiddleware/caching_middleware_test.go index ca8e771e415..5a54eafc775 100644 --- a/pkg/services/pluginsintegration/clientmiddleware/caching_middleware_test.go +++ b/pkg/services/pluginsintegration/clientmiddleware/caching_middleware_test.go @@ -221,7 +221,7 @@ func TestCachingMiddleware(t *testing.T) { } var sentResponse *backend.CallResourceResponse - var storeOneResponseCallResourceSender = callResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { + var 
storeOneResponseCallResourceSender = backend.CallResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { sentResponse = res return nil }) diff --git a/pkg/services/pluginsintegration/clientmiddleware/contextual_logger_middleware.go b/pkg/services/pluginsintegration/clientmiddleware/contextual_logger_middleware.go index 6846ce8d345..44603d9da4f 100644 --- a/pkg/services/pluginsintegration/clientmiddleware/contextual_logger_middleware.go +++ b/pkg/services/pluginsintegration/clientmiddleware/contextual_logger_middleware.go @@ -24,8 +24,15 @@ type ContextualLoggerMiddleware struct { } // instrumentContext adds a contextual logger with plugin and request details to the given context. -func instrumentContext(ctx context.Context, endpoint string, pCtx backend.PluginContext) context.Context { - p := []any{"endpoint", endpoint, "pluginId", pCtx.PluginID} +func instrumentContext(ctx context.Context, pCtx backend.PluginContext) context.Context { + p := []any{} + + if ep := backend.EndpointFromContext(ctx); !ep.IsEmpty() { + p = append(p, "endpoint", string(ep)) + } + + p = append(p, "pluginId", pCtx.PluginID) + if pCtx.DataSourceInstanceSettings != nil { p = append(p, "dsName", pCtx.DataSourceInstanceSettings.Name) p = append(p, "dsUID", pCtx.DataSourceInstanceSettings.UID) @@ -37,54 +44,54 @@ func instrumentContext(ctx context.Context, endpoint string, pCtx backend.Plugin } func (m *ContextualLoggerMiddleware) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { - ctx = instrumentContext(ctx, endpointQueryData, req.PluginContext) + ctx = instrumentContext(ctx, req.PluginContext) return m.next.QueryData(ctx, req) } func (m *ContextualLoggerMiddleware) CallResource(ctx context.Context, req *backend.CallResourceRequest, sender backend.CallResourceResponseSender) error { - ctx = instrumentContext(ctx, endpointCallResource, req.PluginContext) + ctx = instrumentContext(ctx, req.PluginContext) return m.next.CallResource(ctx, req, sender) } func (m *ContextualLoggerMiddleware) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { - ctx = instrumentContext(ctx, endpointCheckHealth, req.PluginContext) + ctx = instrumentContext(ctx, req.PluginContext) return m.next.CheckHealth(ctx, req) } func (m *ContextualLoggerMiddleware) CollectMetrics(ctx context.Context, req *backend.CollectMetricsRequest) (*backend.CollectMetricsResult, error) { - ctx = instrumentContext(ctx, endpointCollectMetrics, req.PluginContext) + ctx = instrumentContext(ctx, req.PluginContext) return m.next.CollectMetrics(ctx, req) } func (m *ContextualLoggerMiddleware) SubscribeStream(ctx context.Context, req *backend.SubscribeStreamRequest) (*backend.SubscribeStreamResponse, error) { - ctx = instrumentContext(ctx, endpointSubscribeStream, req.PluginContext) + ctx = instrumentContext(ctx, req.PluginContext) return m.next.SubscribeStream(ctx, req) } func (m *ContextualLoggerMiddleware) PublishStream(ctx context.Context, req *backend.PublishStreamRequest) (*backend.PublishStreamResponse, error) { - ctx = instrumentContext(ctx, endpointPublishStream, req.PluginContext) + ctx = instrumentContext(ctx, req.PluginContext) return m.next.PublishStream(ctx, req) } func (m *ContextualLoggerMiddleware) RunStream(ctx context.Context, req *backend.RunStreamRequest, sender *backend.StreamSender) error { - ctx = instrumentContext(ctx, endpointRunStream, req.PluginContext) + ctx = instrumentContext(ctx, req.PluginContext) return 
m.next.RunStream(ctx, req, sender) } // ValidateAdmission implements backend.AdmissionHandler. func (m *ContextualLoggerMiddleware) ValidateAdmission(ctx context.Context, req *backend.AdmissionRequest) (*backend.ValidationResponse, error) { - ctx = instrumentContext(ctx, endpointValidateAdmission, req.PluginContext) + ctx = instrumentContext(ctx, req.PluginContext) return m.next.ValidateAdmission(ctx, req) } // MutateAdmission implements backend.AdmissionHandler. func (m *ContextualLoggerMiddleware) MutateAdmission(ctx context.Context, req *backend.AdmissionRequest) (*backend.MutationResponse, error) { - ctx = instrumentContext(ctx, endpointMutateAdmission, req.PluginContext) + ctx = instrumentContext(ctx, req.PluginContext) return m.next.MutateAdmission(ctx, req) } // ConvertObject implements backend.AdmissionHandler. func (m *ContextualLoggerMiddleware) ConvertObject(ctx context.Context, req *backend.ConversionRequest) (*backend.ConversionResponse, error) { - ctx = instrumentContext(ctx, endpointConvertObject, req.PluginContext) + ctx = instrumentContext(ctx, req.PluginContext) return m.next.ConvertObject(ctx, req) } diff --git a/pkg/services/pluginsintegration/clientmiddleware/logger_middleware.go b/pkg/services/pluginsintegration/clientmiddleware/logger_middleware.go index 7c9e3d26ba9..8edc9719a51 100644 --- a/pkg/services/pluginsintegration/clientmiddleware/logger_middleware.go +++ b/pkg/services/pluginsintegration/clientmiddleware/logger_middleware.go @@ -8,6 +8,7 @@ import ( "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/plugins" + "github.com/grafana/grafana/pkg/plugins/instrumentationutils" plog "github.com/grafana/grafana/pkg/plugins/log" "github.com/grafana/grafana/pkg/plugins/pluginrequestmeta" ) @@ -30,13 +31,13 @@ type LoggerMiddleware struct { logger plog.Logger } -func (m *LoggerMiddleware) logRequest(ctx context.Context, fn func(ctx context.Context) (requestStatus, error)) error { +func (m *LoggerMiddleware) logRequest(ctx context.Context, fn func(ctx context.Context) (instrumentationutils.RequestStatus, error)) error { start := time.Now() timeBeforePluginRequest := log.TimeSinceStart(ctx, start) status, err := fn(ctx) logParams := []any{ - "status", status, + "status", status.String(), "duration", time.Since(start), "eventName", "grafana-data-egress", "time_before_plugin_request", timeBeforePluginRequest, @@ -48,7 +49,7 @@ func (m *LoggerMiddleware) logRequest(ctx context.Context, fn func(ctx context.C ctxLogger := m.logger.FromContext(ctx) logFunc := ctxLogger.Info - if status > requestStatusOK { + if status > instrumentationutils.RequestStatusOK { logFunc = ctxLogger.Error } @@ -63,11 +64,12 @@ func (m *LoggerMiddleware) QueryData(ctx context.Context, req *backend.QueryData } var resp *backend.QueryDataResponse - err := m.logRequest(ctx, func(ctx context.Context) (status requestStatus, innerErr error) { + err := m.logRequest(ctx, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error resp, innerErr = m.next.QueryData(ctx, req) if innerErr != nil { - return requestStatusFromError(innerErr), innerErr + return instrumentationutils.RequestStatusFromError(innerErr), innerErr } ctxLogger := m.logger.FromContext(ctx) @@ -83,7 +85,7 @@ func (m *LoggerMiddleware) QueryData(ctx context.Context, req *backend.QueryData } } - return requestStatusFromQueryDataResponse(resp, innerErr), innerErr + return instrumentationutils.RequestStatusFromQueryDataResponse(resp, innerErr), innerErr }) return resp, err @@ -94,9 +96,9 
@@ func (m *LoggerMiddleware) CallResource(ctx context.Context, req *backend.CallRe return m.next.CallResource(ctx, req, sender) } - err := m.logRequest(ctx, func(ctx context.Context) (status requestStatus, innerErr error) { - innerErr = m.next.CallResource(ctx, req, sender) - return requestStatusFromError(innerErr), innerErr + err := m.logRequest(ctx, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + innerErr := m.next.CallResource(ctx, req, sender) + return instrumentationutils.RequestStatusFromError(innerErr), innerErr }) return err @@ -108,9 +110,10 @@ func (m *LoggerMiddleware) CheckHealth(ctx context.Context, req *backend.CheckHe } var resp *backend.CheckHealthResult - err := m.logRequest(ctx, func(ctx context.Context) (status requestStatus, innerErr error) { + err := m.logRequest(ctx, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error resp, innerErr = m.next.CheckHealth(ctx, req) - return requestStatusFromError(innerErr), innerErr + return instrumentationutils.RequestStatusFromError(innerErr), innerErr }) return resp, err @@ -122,9 +125,98 @@ func (m *LoggerMiddleware) CollectMetrics(ctx context.Context, req *backend.Coll } var resp *backend.CollectMetricsResult - err := m.logRequest(ctx, func(ctx context.Context) (status requestStatus, innerErr error) { + err := m.logRequest(ctx, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error resp, innerErr = m.next.CollectMetrics(ctx, req) - return requestStatusFromError(innerErr), innerErr + return instrumentationutils.RequestStatusFromError(innerErr), innerErr + }) + + return resp, err +} + +func (m *LoggerMiddleware) SubscribeStream(ctx context.Context, req *backend.SubscribeStreamRequest) (*backend.SubscribeStreamResponse, error) { + if req == nil { + return m.next.SubscribeStream(ctx, req) + } + + var resp *backend.SubscribeStreamResponse + err := m.logRequest(ctx, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error + resp, innerErr = m.next.SubscribeStream(ctx, req) + return instrumentationutils.RequestStatusFromError(innerErr), innerErr + }) + + return resp, err +} + +func (m *LoggerMiddleware) PublishStream(ctx context.Context, req *backend.PublishStreamRequest) (*backend.PublishStreamResponse, error) { + if req == nil { + return m.next.PublishStream(ctx, req) + } + + var resp *backend.PublishStreamResponse + err := m.logRequest(ctx, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error + resp, innerErr = m.next.PublishStream(ctx, req) + return instrumentationutils.RequestStatusFromError(innerErr), innerErr + }) + + return resp, err +} + +func (m *LoggerMiddleware) RunStream(ctx context.Context, req *backend.RunStreamRequest, sender *backend.StreamSender) error { + if req == nil { + return m.next.RunStream(ctx, req, sender) + } + + err := m.logRequest(ctx, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + innerErr := m.next.RunStream(ctx, req, sender) + return instrumentationutils.RequestStatusFromError(innerErr), innerErr + }) + + return err +} + +func (m *LoggerMiddleware) ValidateAdmission(ctx context.Context, req *backend.AdmissionRequest) (*backend.ValidationResponse, error) { + if req == nil { + return m.next.ValidateAdmission(ctx, req) + } + + var resp *backend.ValidationResponse + err := m.logRequest(ctx, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error + resp, innerErr = 
m.next.ValidateAdmission(ctx, req) + return instrumentationutils.RequestStatusFromError(innerErr), innerErr + }) + + return resp, err +} + +func (m *LoggerMiddleware) MutateAdmission(ctx context.Context, req *backend.AdmissionRequest) (*backend.MutationResponse, error) { + if req == nil { + return m.next.MutateAdmission(ctx, req) + } + + var resp *backend.MutationResponse + err := m.logRequest(ctx, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error + resp, innerErr = m.next.MutateAdmission(ctx, req) + return instrumentationutils.RequestStatusFromError(innerErr), innerErr + }) + + return resp, err +} + +func (m *LoggerMiddleware) ConvertObject(ctx context.Context, req *backend.ConversionRequest) (*backend.ConversionResponse, error) { + if req == nil { + return m.next.ConvertObject(ctx, req) + } + + var resp *backend.ConversionResponse + err := m.logRequest(ctx, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error + resp, innerErr = m.next.ConvertObject(ctx, req) + return instrumentationutils.RequestStatusFromError(innerErr), innerErr }) return resp, err diff --git a/pkg/services/pluginsintegration/clientmiddleware/metrics_middleware.go b/pkg/services/pluginsintegration/clientmiddleware/metrics_middleware.go index 8f125c0b3ff..8a861ba0bf0 100644 --- a/pkg/services/pluginsintegration/clientmiddleware/metrics_middleware.go +++ b/pkg/services/pluginsintegration/clientmiddleware/metrics_middleware.go @@ -9,6 +9,7 @@ import ( "github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/plugins" + "github.com/grafana/grafana/pkg/plugins/instrumentationutils" "github.com/grafana/grafana/pkg/plugins/manager/registry" "github.com/grafana/grafana/pkg/plugins/pluginrequestmeta" ) @@ -94,17 +95,18 @@ func (m *MetricsMiddleware) pluginTarget(ctx context.Context, pluginID, pluginVe } // instrumentPluginRequestSize tracks the size of the given request in the m.pluginRequestSize metric. -func (m *MetricsMiddleware) instrumentPluginRequestSize(ctx context.Context, pluginCtx backend.PluginContext, endpoint string, requestSize float64) error { +func (m *MetricsMiddleware) instrumentPluginRequestSize(ctx context.Context, pluginCtx backend.PluginContext, requestSize float64) error { target, err := m.pluginTarget(ctx, pluginCtx.PluginID, pluginCtx.PluginVersion) if err != nil { return err } - m.pluginRequestSize.WithLabelValues("grafana-backend", pluginCtx.PluginID, endpoint, target).Observe(requestSize) + endpoint := backend.EndpointFromContext(ctx) + m.pluginRequestSize.WithLabelValues("grafana-backend", pluginCtx.PluginID, string(endpoint), target).Observe(requestSize) return nil } // instrumentPluginRequest increments the m.pluginRequestCounter metric and tracks the duration of the given request. 
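The middleware hunks below drop the explicit endpoint string argument and instead read the endpoint that the plugin SDK has already attached to the request context. A minimal sketch of deriving a metric label that way, using backend.EndpointFromContext and the Endpoint type from grafana-plugin-sdk-go as used in this diff; the package and function names are illustrative only.

package example

import (
	"context"

	"github.com/grafana/grafana-plugin-sdk-go/backend"
)

// endpointLabel returns the endpoint recorded on the context by the SDK,
// or "unknown" when none has been set.
func endpointLabel(ctx context.Context) string {
	ep := backend.EndpointFromContext(ctx)
	if ep.IsEmpty() {
		return "unknown"
	}
	return string(ep)
}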
-func (m *MetricsMiddleware) instrumentPluginRequest(ctx context.Context, pluginCtx backend.PluginContext, endpoint string, fn func(context.Context) (requestStatus, error)) error { +func (m *MetricsMiddleware) instrumentPluginRequest(ctx context.Context, pluginCtx backend.PluginContext, fn func(context.Context) (instrumentationutils.RequestStatus, error)) error { target, err := m.pluginTarget(ctx, pluginCtx.PluginID, pluginCtx.PluginVersion) if err != nil { return err @@ -116,10 +118,11 @@ func (m *MetricsMiddleware) instrumentPluginRequest(ctx context.Context, pluginC elapsed := time.Since(start) statusSource := pluginrequestmeta.StatusSourceFromContext(ctx) + endpoint := backend.EndpointFromContext(ctx) - pluginRequestDurationWithLabels := m.pluginRequestDuration.WithLabelValues(pluginCtx.PluginID, endpoint, target, string(statusSource)) - pluginRequestCounterWithLabels := m.pluginRequestCounter.WithLabelValues(pluginCtx.PluginID, endpoint, status.String(), target, string(statusSource)) - pluginRequestDurationSecondsWithLabels := m.pluginRequestDurationSeconds.WithLabelValues("grafana-backend", pluginCtx.PluginID, endpoint, status.String(), target, string(statusSource)) + pluginRequestDurationWithLabels := m.pluginRequestDuration.WithLabelValues(pluginCtx.PluginID, string(endpoint), target, string(statusSource)) + pluginRequestCounterWithLabels := m.pluginRequestCounter.WithLabelValues(pluginCtx.PluginID, string(endpoint), status.String(), target, string(statusSource)) + pluginRequestDurationSecondsWithLabels := m.pluginRequestDurationSeconds.WithLabelValues("grafana-backend", pluginCtx.PluginID, string(endpoint), status.String(), target, string(statusSource)) if traceID := tracing.TraceIDFromContext(ctx, true); traceID != "" { pluginRequestDurationWithLabels.(prometheus.ExemplarObserver).ObserveWithExemplar( @@ -144,74 +147,108 @@ func (m *MetricsMiddleware) QueryData(ctx context.Context, req *backend.QueryDat requestSize += float64(len(v.JSON)) } - if err := m.instrumentPluginRequestSize(ctx, req.PluginContext, endpointQueryData, requestSize); err != nil { + if err := m.instrumentPluginRequestSize(ctx, req.PluginContext, requestSize); err != nil { return nil, err } var resp *backend.QueryDataResponse - err := m.instrumentPluginRequest(ctx, req.PluginContext, endpointQueryData, func(ctx context.Context) (status requestStatus, innerErr error) { + err := m.instrumentPluginRequest(ctx, req.PluginContext, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error resp, innerErr = m.next.QueryData(ctx, req) - return requestStatusFromQueryDataResponse(resp, innerErr), innerErr + return instrumentationutils.RequestStatusFromQueryDataResponse(resp, innerErr), innerErr }) return resp, err } func (m *MetricsMiddleware) CallResource(ctx context.Context, req *backend.CallResourceRequest, sender backend.CallResourceResponseSender) error { - if err := m.instrumentPluginRequestSize(ctx, req.PluginContext, endpointCallResource, float64(len(req.Body))); err != nil { + if err := m.instrumentPluginRequestSize(ctx, req.PluginContext, float64(len(req.Body))); err != nil { return err } - return m.instrumentPluginRequest(ctx, req.PluginContext, endpointCallResource, func(ctx context.Context) (requestStatus, error) { + return m.instrumentPluginRequest(ctx, req.PluginContext, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { innerErr := m.next.CallResource(ctx, req, sender) - return requestStatusFromError(innerErr), innerErr + return 
instrumentationutils.RequestStatusFromError(innerErr), innerErr }) } func (m *MetricsMiddleware) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { - var result *backend.CheckHealthResult - err := m.instrumentPluginRequest(ctx, req.PluginContext, endpointCheckHealth, func(ctx context.Context) (status requestStatus, innerErr error) { - result, innerErr = m.next.CheckHealth(ctx, req) - return requestStatusFromError(innerErr), innerErr + var resp *backend.CheckHealthResult + err := m.instrumentPluginRequest(ctx, req.PluginContext, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error + resp, innerErr = m.next.CheckHealth(ctx, req) + return instrumentationutils.RequestStatusFromError(innerErr), innerErr }) - return result, err + return resp, err } func (m *MetricsMiddleware) CollectMetrics(ctx context.Context, req *backend.CollectMetricsRequest) (*backend.CollectMetricsResult, error) { - var result *backend.CollectMetricsResult - err := m.instrumentPluginRequest(ctx, req.PluginContext, endpointCollectMetrics, func(ctx context.Context) (status requestStatus, innerErr error) { - result, innerErr = m.next.CollectMetrics(ctx, req) - return requestStatusFromError(innerErr), innerErr + var resp *backend.CollectMetricsResult + err := m.instrumentPluginRequest(ctx, req.PluginContext, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error + resp, innerErr = m.next.CollectMetrics(ctx, req) + return instrumentationutils.RequestStatusFromError(innerErr), innerErr }) - return result, err + return resp, err +} + +func (m *MetricsMiddleware) SubscribeStream(ctx context.Context, req *backend.SubscribeStreamRequest) (*backend.SubscribeStreamResponse, error) { + var resp *backend.SubscribeStreamResponse + err := m.instrumentPluginRequest(ctx, req.PluginContext, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error + resp, innerErr = m.next.SubscribeStream(ctx, req) + return instrumentationutils.RequestStatusFromError(innerErr), innerErr + }) + return resp, err +} + +func (m *MetricsMiddleware) PublishStream(ctx context.Context, req *backend.PublishStreamRequest) (*backend.PublishStreamResponse, error) { + var resp *backend.PublishStreamResponse + err := m.instrumentPluginRequest(ctx, req.PluginContext, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error + resp, innerErr = m.next.PublishStream(ctx, req) + return instrumentationutils.RequestStatusFromError(innerErr), innerErr + }) + return resp, err +} + +func (m *MetricsMiddleware) RunStream(ctx context.Context, req *backend.RunStreamRequest, sender *backend.StreamSender) error { + err := m.instrumentPluginRequest(ctx, req.PluginContext, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + innerErr := m.next.RunStream(ctx, req, sender) + return instrumentationutils.RequestStatusFromError(innerErr), innerErr + }) + return err } func (m *MetricsMiddleware) ValidateAdmission(ctx context.Context, req *backend.AdmissionRequest) (*backend.ValidationResponse, error) { - var result *backend.ValidationResponse - err := m.instrumentPluginRequest(ctx, req.PluginContext, endpointMutateAdmission, func(ctx context.Context) (status requestStatus, innerErr error) { - result, innerErr = m.next.ValidateAdmission(ctx, req) - return requestStatusFromError(innerErr), innerErr + var resp *backend.ValidationResponse + err := m.instrumentPluginRequest(ctx, 
req.PluginContext, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error + resp, innerErr = m.next.ValidateAdmission(ctx, req) + return instrumentationutils.RequestStatusFromError(innerErr), innerErr }) - return result, err + return resp, err } func (m *MetricsMiddleware) MutateAdmission(ctx context.Context, req *backend.AdmissionRequest) (*backend.MutationResponse, error) { - var result *backend.MutationResponse - err := m.instrumentPluginRequest(ctx, req.PluginContext, endpointMutateAdmission, func(ctx context.Context) (status requestStatus, innerErr error) { - result, innerErr = m.next.MutateAdmission(ctx, req) - return requestStatusFromError(innerErr), innerErr + var resp *backend.MutationResponse + err := m.instrumentPluginRequest(ctx, req.PluginContext, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error + resp, innerErr = m.next.MutateAdmission(ctx, req) + return instrumentationutils.RequestStatusFromError(innerErr), innerErr }) - return result, err + return resp, err } func (m *MetricsMiddleware) ConvertObject(ctx context.Context, req *backend.ConversionRequest) (*backend.ConversionResponse, error) { - var result *backend.ConversionResponse - err := m.instrumentPluginRequest(ctx, req.PluginContext, endpointMutateAdmission, func(ctx context.Context) (status requestStatus, innerErr error) { - result, innerErr = m.next.ConvertObject(ctx, req) - return requestStatusFromError(innerErr), innerErr + var resp *backend.ConversionResponse + err := m.instrumentPluginRequest(ctx, req.PluginContext, func(ctx context.Context) (instrumentationutils.RequestStatus, error) { + var innerErr error + resp, innerErr = m.next.ConvertObject(ctx, req) + return instrumentationutils.RequestStatusFromError(innerErr), innerErr }) - return result, err + return resp, err } diff --git a/pkg/services/pluginsintegration/clientmiddleware/metrics_middleware_test.go b/pkg/services/pluginsintegration/clientmiddleware/metrics_middleware_test.go index 6fe03743b12..40c26cef09a 100644 --- a/pkg/services/pluginsintegration/clientmiddleware/metrics_middleware_test.go +++ b/pkg/services/pluginsintegration/clientmiddleware/metrics_middleware_test.go @@ -14,6 +14,7 @@ import ( "github.com/grafana/grafana/pkg/plugins" "github.com/grafana/grafana/pkg/plugins/backendplugin" + "github.com/grafana/grafana/pkg/plugins/instrumentationutils" "github.com/grafana/grafana/pkg/plugins/manager/client/clienttest" "github.com/grafana/grafana/pkg/plugins/manager/fakes" "github.com/grafana/grafana/pkg/plugins/pluginrequestmeta" @@ -32,12 +33,12 @@ func TestInstrumentationMiddleware(t *testing.T) { pCtx := backend.PluginContext{PluginID: pluginID} t.Run("should instrument requests", func(t *testing.T) { for _, tc := range []struct { - expEndpoint string + expEndpoint backend.Endpoint fn func(cdt *clienttest.ClientDecoratorTest) error shouldInstrumentRequestSize bool }{ { - expEndpoint: endpointCheckHealth, + expEndpoint: backend.EndpointCheckHealth, fn: func(cdt *clienttest.ClientDecoratorTest) error { _, err := cdt.Decorator.CheckHealth(context.Background(), &backend.CheckHealthRequest{PluginContext: pCtx}) return err @@ -45,14 +46,14 @@ func TestInstrumentationMiddleware(t *testing.T) { shouldInstrumentRequestSize: false, }, { - expEndpoint: endpointCallResource, + expEndpoint: backend.EndpointCallResource, fn: func(cdt *clienttest.ClientDecoratorTest) error { return cdt.Decorator.CallResource(context.Background(), &backend.CallResourceRequest{PluginContext: pCtx}, 
nopCallResourceSender) }, shouldInstrumentRequestSize: true, }, { - expEndpoint: endpointQueryData, + expEndpoint: backend.EndpointQueryData, fn: func(cdt *clienttest.ClientDecoratorTest) error { _, err := cdt.Decorator.QueryData(context.Background(), &backend.QueryDataRequest{PluginContext: pCtx}) return err @@ -60,7 +61,7 @@ func TestInstrumentationMiddleware(t *testing.T) { shouldInstrumentRequestSize: true, }, { - expEndpoint: endpointCollectMetrics, + expEndpoint: backend.EndpointCollectMetrics, fn: func(cdt *clienttest.ClientDecoratorTest) error { _, err := cdt.Decorator.CollectMetrics(context.Background(), &backend.CollectMetricsRequest{PluginContext: pCtx}) return err @@ -68,7 +69,7 @@ func TestInstrumentationMiddleware(t *testing.T) { shouldInstrumentRequestSize: false, }, } { - t.Run(tc.expEndpoint, func(t *testing.T) { + t.Run(string(tc.expEndpoint), func(t *testing.T) { promRegistry := prometheus.NewRegistry() pluginsRegistry := fakes.NewFakePluginRegistry() require.NoError(t, pluginsRegistry.Add(context.Background(), &plugins.Plugin{ @@ -89,12 +90,12 @@ func TestInstrumentationMiddleware(t *testing.T) { require.Equal(t, 1, testutil.CollectAndCount(promRegistry, metricRequestDurationMs)) require.Equal(t, 1, testutil.CollectAndCount(promRegistry, metricRequestDurationS)) - counter := mw.pluginMetrics.pluginRequestCounter.WithLabelValues(pluginID, tc.expEndpoint, requestStatusOK.String(), string(backendplugin.TargetUnknown), string(pluginrequestmeta.DefaultStatusSource)) + counter := mw.pluginMetrics.pluginRequestCounter.WithLabelValues(pluginID, string(tc.expEndpoint), instrumentationutils.RequestStatusOK.String(), string(backendplugin.TargetUnknown), string(pluginrequestmeta.DefaultStatusSource)) require.Equal(t, 1.0, testutil.ToFloat64(counter)) for _, m := range []string{metricRequestDurationMs, metricRequestDurationS} { require.NoError(t, checkHistogram(promRegistry, m, map[string]string{ "plugin_id": pluginID, - "endpoint": tc.expEndpoint, + "endpoint": string(tc.expEndpoint), "target": string(backendplugin.TargetUnknown), })) } @@ -102,7 +103,7 @@ func TestInstrumentationMiddleware(t *testing.T) { require.Equal(t, 1, testutil.CollectAndCount(promRegistry, metricRequestSize), "request size should have been instrumented") require.NoError(t, checkHistogram(promRegistry, metricRequestSize, map[string]string{ "plugin_id": pluginID, - "endpoint": tc.expEndpoint, + "endpoint": string(tc.expEndpoint), "target": string(backendplugin.TargetUnknown), "source": "grafana-backend", }), "request size should have been instrumented") @@ -116,8 +117,8 @@ func TestInstrumentationMiddlewareStatusSource(t *testing.T) { const labelStatusSource = "status_source" queryDataErrorCounterLabels := prometheus.Labels{ "plugin_id": pluginID, - "endpoint": endpointQueryData, - "status": requestStatusError.String(), + "endpoint": string(backend.EndpointQueryData), + "status": instrumentationutils.RequestStatusError.String(), "target": string(backendplugin.TargetUnknown), } downstreamErrorResponse := backend.DataResponse{ diff --git a/pkg/services/pluginsintegration/clientmiddleware/resource_response_middleware.go b/pkg/services/pluginsintegration/clientmiddleware/resource_response_middleware.go index c7f0252e28b..b000a549656 100644 --- a/pkg/services/pluginsintegration/clientmiddleware/resource_response_middleware.go +++ b/pkg/services/pluginsintegration/clientmiddleware/resource_response_middleware.go @@ -35,7 +35,7 @@ func (m *ResourceResponseMiddleware) CallResource(ctx context.Context, req *back } 
processedStreams := 0 - wrappedSender := callResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { + wrappedSender := backend.CallResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { if processedStreams == 0 { if res.Headers == nil { res.Headers = map[string][]string{} diff --git a/pkg/services/pluginsintegration/clientmiddleware/resource_response_middleware_test.go b/pkg/services/pluginsintegration/clientmiddleware/resource_response_middleware_test.go index 2e0fcf60f4b..905977b32a4 100644 --- a/pkg/services/pluginsintegration/clientmiddleware/resource_response_middleware_test.go +++ b/pkg/services/pluginsintegration/clientmiddleware/resource_response_middleware_test.go @@ -24,7 +24,7 @@ func TestResourceResponseMiddleware(t *testing.T) { ) var sentResponse *backend.CallResourceResponse - sender := callResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { + sender := backend.CallResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { sentResponse = res return nil }) diff --git a/pkg/services/pluginsintegration/clientmiddleware/testing.go b/pkg/services/pluginsintegration/clientmiddleware/testing.go index 6f5f06806e0..391783cc0ef 100644 --- a/pkg/services/pluginsintegration/clientmiddleware/testing.go +++ b/pkg/services/pluginsintegration/clientmiddleware/testing.go @@ -2,6 +2,6 @@ package clientmiddleware import "github.com/grafana/grafana-plugin-sdk-go/backend" -var nopCallResourceSender = callResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { +var nopCallResourceSender = backend.CallResourceResponseSenderFunc(func(res *backend.CallResourceResponse) error { return nil }) diff --git a/pkg/services/pluginsintegration/clientmiddleware/tracing_middleware.go b/pkg/services/pluginsintegration/clientmiddleware/tracing_middleware.go index 8eff99b9bae..fa4167647fd 100644 --- a/pkg/services/pluginsintegration/clientmiddleware/tracing_middleware.go +++ b/pkg/services/pluginsintegration/clientmiddleware/tracing_middleware.go @@ -44,10 +44,10 @@ func setSpanAttributeFromHTTPHeader(headers http.Header, span trace.Span, attrib // plugin id, org id, user login, ds, dashboard and panel info. The second function returned is a cleanup function, // which should be called by the caller (deferred) and will set the span status/error and end the span. 
func (m *TracingMiddleware) traceWrap( - ctx context.Context, pluginContext backend.PluginContext, opName string, + ctx context.Context, pluginContext backend.PluginContext, ) (context.Context, func(error)) { - // Start span - ctx, span := m.tracer.Start(ctx, "PluginClient."+opName, trace.WithAttributes( + endpoint := backend.EndpointFromContext(ctx) + ctx, span := m.tracer.Start(ctx, "PluginClient."+string(endpoint), trace.WithAttributes( // Attach some plugin context information to span attribute.String("plugin_id", pluginContext.PluginID), attribute.Int64("org_id", pluginContext.OrgID), @@ -82,7 +82,7 @@ func (m *TracingMiddleware) traceWrap( func (m *TracingMiddleware) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { var err error - ctx, end := m.traceWrap(ctx, req.PluginContext, "queryData") + ctx, end := m.traceWrap(ctx, req.PluginContext) defer func() { end(err) }() resp, err := m.next.QueryData(ctx, req) return resp, err @@ -90,7 +90,7 @@ func (m *TracingMiddleware) QueryData(ctx context.Context, req *backend.QueryDat func (m *TracingMiddleware) CallResource(ctx context.Context, req *backend.CallResourceRequest, sender backend.CallResourceResponseSender) error { var err error - ctx, end := m.traceWrap(ctx, req.PluginContext, "callResource") + ctx, end := m.traceWrap(ctx, req.PluginContext) defer func() { end(err) }() err = m.next.CallResource(ctx, req, sender) return err @@ -98,7 +98,7 @@ func (m *TracingMiddleware) CallResource(ctx context.Context, req *backend.CallR func (m *TracingMiddleware) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { var err error - ctx, end := m.traceWrap(ctx, req.PluginContext, "checkHealth") + ctx, end := m.traceWrap(ctx, req.PluginContext) defer func() { end(err) }() resp, err := m.next.CheckHealth(ctx, req) return resp, err @@ -106,7 +106,7 @@ func (m *TracingMiddleware) CheckHealth(ctx context.Context, req *backend.CheckH func (m *TracingMiddleware) CollectMetrics(ctx context.Context, req *backend.CollectMetricsRequest) (*backend.CollectMetricsResult, error) { var err error - ctx, end := m.traceWrap(ctx, req.PluginContext, "collectMetrics") + ctx, end := m.traceWrap(ctx, req.PluginContext) defer func() { end(err) }() resp, err := m.next.CollectMetrics(ctx, req) return resp, err @@ -114,7 +114,7 @@ func (m *TracingMiddleware) CollectMetrics(ctx context.Context, req *backend.Col func (m *TracingMiddleware) SubscribeStream(ctx context.Context, req *backend.SubscribeStreamRequest) (*backend.SubscribeStreamResponse, error) { var err error - ctx, end := m.traceWrap(ctx, req.PluginContext, "subscribeStream") + ctx, end := m.traceWrap(ctx, req.PluginContext) defer func() { end(err) }() resp, err := m.next.SubscribeStream(ctx, req) return resp, err @@ -122,7 +122,7 @@ func (m *TracingMiddleware) SubscribeStream(ctx context.Context, req *backend.Su func (m *TracingMiddleware) PublishStream(ctx context.Context, req *backend.PublishStreamRequest) (*backend.PublishStreamResponse, error) { var err error - ctx, end := m.traceWrap(ctx, req.PluginContext, "publishStream") + ctx, end := m.traceWrap(ctx, req.PluginContext) defer func() { end(err) }() resp, err := m.next.PublishStream(ctx, req) return resp, err @@ -130,7 +130,7 @@ func (m *TracingMiddleware) PublishStream(ctx context.Context, req *backend.Publ func (m *TracingMiddleware) RunStream(ctx context.Context, req *backend.RunStreamRequest, sender *backend.StreamSender) error { var err error - ctx, end := 
m.traceWrap(ctx, req.PluginContext, "runStream") + ctx, end := m.traceWrap(ctx, req.PluginContext) defer func() { end(err) }() err = m.next.RunStream(ctx, req, sender) return err @@ -139,7 +139,7 @@ func (m *TracingMiddleware) RunStream(ctx context.Context, req *backend.RunStrea // ValidateAdmission implements backend.AdmissionHandler. func (m *TracingMiddleware) ValidateAdmission(ctx context.Context, req *backend.AdmissionRequest) (*backend.ValidationResponse, error) { var err error - ctx, end := m.traceWrap(ctx, req.PluginContext, endpointValidateAdmission) + ctx, end := m.traceWrap(ctx, req.PluginContext) defer func() { end(err) }() resp, err := m.next.ValidateAdmission(ctx, req) return resp, err @@ -148,7 +148,7 @@ func (m *TracingMiddleware) ValidateAdmission(ctx context.Context, req *backend. // MutateAdmission implements backend.AdmissionHandler. func (m *TracingMiddleware) MutateAdmission(ctx context.Context, req *backend.AdmissionRequest) (*backend.MutationResponse, error) { var err error - ctx, end := m.traceWrap(ctx, req.PluginContext, endpointMutateAdmission) + ctx, end := m.traceWrap(ctx, req.PluginContext) defer func() { end(err) }() resp, err := m.next.MutateAdmission(ctx, req) return resp, err @@ -157,7 +157,7 @@ func (m *TracingMiddleware) MutateAdmission(ctx context.Context, req *backend.Ad // ConvertObject implements backend.AdmissionHandler. func (m *TracingMiddleware) ConvertObject(ctx context.Context, req *backend.ConversionRequest) (*backend.ConversionResponse, error) { var err error - ctx, end := m.traceWrap(ctx, req.PluginContext, endpointConvertObject) + ctx, end := m.traceWrap(ctx, req.PluginContext) defer func() { end(err) }() resp, err := m.next.ConvertObject(ctx, req) return resp, err diff --git a/pkg/services/pluginsintegration/clientmiddleware/utils.go b/pkg/services/pluginsintegration/clientmiddleware/utils.go deleted file mode 100644 index 77b8128c9ed..00000000000 --- a/pkg/services/pluginsintegration/clientmiddleware/utils.go +++ /dev/null @@ -1,81 +0,0 @@ -package clientmiddleware - -import ( - "context" - "errors" - - "github.com/grafana/grafana-plugin-sdk-go/backend" -) - -type requestStatus int - -const ( - requestStatusOK requestStatus = iota - requestStatusCancelled - requestStatusError -) - -func (status requestStatus) String() string { - names := [...]string{"ok", "cancelled", "error"} - if status < requestStatusOK || status > requestStatusError { - return "" - } - - return names[status] -} - -const ( - endpointCallResource = "callResource" - endpointCheckHealth = "checkHealth" - endpointCollectMetrics = "collectMetrics" - endpointQueryData = "queryData" - endpointSubscribeStream = "subscribeStream" - endpointPublishStream = "publishStream" - endpointRunStream = "runStream" - endpointValidateAdmission = "validateAdmission" - endpointMutateAdmission = "mutateAdmission" - endpointConvertObject = "convertObject" -) - -type callResourceResponseSenderFunc func(res *backend.CallResourceResponse) error - -func (fn callResourceResponseSenderFunc) Send(res *backend.CallResourceResponse) error { - return fn(res) -} - -func requestStatusFromError(err error) requestStatus { - status := requestStatusOK - if err != nil { - status = requestStatusError - if errors.Is(err, context.Canceled) { - status = requestStatusCancelled - } - } - - return status -} - -func requestStatusFromQueryDataResponse(res *backend.QueryDataResponse, err error) requestStatus { - if err != nil { - return requestStatusFromError(err) - } - - status := requestStatusOK - - if res != nil { - 
for _, dr := range res.Responses { - if dr.Error != nil { - s := requestStatusFromError(dr.Error) - if s > status { - status = s - } - - if status == requestStatusError { - break - } - } - } - } - - return status -} diff --git a/pkg/services/publicdashboards/api/common_test.go b/pkg/services/publicdashboards/api/common_test.go index bc70087a0ff..4c02be71c97 100644 --- a/pkg/services/publicdashboards/api/common_test.go +++ b/pkg/services/publicdashboards/api/common_test.go @@ -7,9 +7,10 @@ import ( "net/http/httptest" "testing" + "github.com/stretchr/testify/require" + "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/grafana-plugin-sdk-go/data" - "github.com/stretchr/testify/require" "github.com/grafana/grafana/pkg/api/routing" "github.com/grafana/grafana/pkg/infra/db" @@ -17,6 +18,7 @@ import ( "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/plugins" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/contexthandler/ctxkey" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" "github.com/grafana/grafana/pkg/services/datasources" @@ -55,7 +57,7 @@ func setupTestServer( // build router to register routes rr := routing.NewRouteRegister() - ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) // build mux m := web.New() diff --git a/pkg/services/publicdashboards/service/service_test.go b/pkg/services/publicdashboards/service/service_test.go index 30c662ed04a..fa28eed134c 100644 --- a/pkg/services/publicdashboards/service/service_test.go +++ b/pkg/services/publicdashboards/service/service_test.go @@ -18,6 +18,7 @@ import ( "github.com/grafana/grafana/pkg/apimachinery/errutil" "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/dashboards" dashboardsDB "github.com/grafana/grafana/pkg/services/dashboards/database" "github.com/grafana/grafana/pkg/services/featuremgmt" @@ -1482,7 +1483,7 @@ func TestPublicDashboardServiceImpl_ListPublicDashboards(t *testing.T) { }, } - ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/services/quota/quotaimpl/quota_test.go b/pkg/services/quota/quotaimpl/quota_test.go index e24082e5ffb..e74347c1ed9 100644 --- a/pkg/services/quota/quotaimpl/quota_test.go +++ b/pkg/services/quota/quotaimpl/quota_test.go @@ -22,6 +22,7 @@ import ( "github.com/grafana/grafana/pkg/services/apikey/apikeyimpl" "github.com/grafana/grafana/pkg/services/auth" "github.com/grafana/grafana/pkg/services/auth/authimpl" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/dashboards" dashboardStore "github.com/grafana/grafana/pkg/services/dashboards/database" "github.com/grafana/grafana/pkg/services/datasources" @@ -494,7 +495,7 @@ func setupEnv(t *testing.T, sqlStore db.DB, cfg *setting.Cfg, b bus.Bus, quotaSe require.NoError(t, err) m := metrics.NewNGAlert(prometheus.NewRegistry()) - ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) 
ruleStore, err := ngstore.ProvideDBStore(cfg, featuremgmt.WithFeatures(), sqlStore, &foldertest.FakeService{}, &dashboards.FakeDashboardService{}, ac) require.NoError(t, err) _, err = ngalert.ProvideService( diff --git a/pkg/services/searchV2/index.go b/pkg/services/searchV2/index.go index 27a3c5a8102..627b21903e0 100644 --- a/pkg/services/searchV2/index.go +++ b/pkg/services/searchV2/index.go @@ -849,7 +849,8 @@ func (l sqlDashboardLoader) loadAllDashboards(ctx context.Context, limit int, or rows := make([]*dashboardQueryResult, 0) err := l.sql.WithDbSession(dashboardQueryCtx, func(sess *db.Session) error { sess.Table("dashboard"). - Where("org_id = ?", orgID) + Where("org_id = ?", orgID). + Where("deleted IS NULL") // don't index soft delete files if lastID > 0 { sess.Where("id > ?", lastID) diff --git a/pkg/services/searchV2/index_test.go b/pkg/services/searchV2/index_test.go index f0fa100dc91..b4cb38506f1 100644 --- a/pkg/services/searchV2/index_test.go +++ b/pkg/services/searchV2/index_test.go @@ -12,11 +12,21 @@ import ( "github.com/grafana/grafana-plugin-sdk-go/experimental" "github.com/stretchr/testify/require" + "github.com/grafana/grafana/pkg/infra/db" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/infra/tracing" + "github.com/grafana/grafana/pkg/services/accesscontrol/actest" + "github.com/grafana/grafana/pkg/services/dashboards/database" "github.com/grafana/grafana/pkg/services/featuremgmt" + "github.com/grafana/grafana/pkg/services/folder/foldertest" + "github.com/grafana/grafana/pkg/services/org" + "github.com/grafana/grafana/pkg/services/org/orgtest" + "github.com/grafana/grafana/pkg/services/quota/quotatest" + "github.com/grafana/grafana/pkg/services/sqlstore" "github.com/grafana/grafana/pkg/services/store" "github.com/grafana/grafana/pkg/services/store/entity" + "github.com/grafana/grafana/pkg/services/tag/tagimpl" + "github.com/grafana/grafana/pkg/services/user" "github.com/grafana/grafana/pkg/setting" ) @@ -731,3 +741,75 @@ func TestDashboardIndex_MultiTermPrefixMatch(t *testing.T) { }) } } + +func setupIntegrationEnv(t *testing.T, folderCount, dashboardsPerFolder int, sqlStore *sqlstore.SQLStore) (*StandardSearchService, *user.SignedInUser, error) { + err := populateDB(folderCount, dashboardsPerFolder, sqlStore) + require.NoError(t, err, "error when populating the database for integration test") + + // load all dashboards and folders + dbLoadingBatchSize := (dashboardsPerFolder + 1) * folderCount + cfg := &setting.Cfg{Search: setting.SearchSettings{DashboardLoadingBatchSize: dbLoadingBatchSize}} + features := featuremgmt.WithFeatures() + orgSvc := &orgtest.FakeOrgService{ + ExpectedOrgs: []*org.OrgDTO{{ID: 1}}, + } + searchService, ok := ProvideService(cfg, sqlStore, store.NewDummyEntityEventsService(), actest.FakeService{}, + tracing.InitializeTracerForTest(), features, orgSvc, nil, foldertest.NewFakeService()).(*StandardSearchService) + require.True(t, ok) + + err = runSearchService(searchService) + require.NoError(t, err, "error when running search service for integration test") + + user := getSignedInUser(folderCount, dashboardsPerFolder) + + return searchService, user, nil +} + +func TestIntegrationSoftDeletion(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + + // Set up search v2. 
+ folderCount := 1 + dashboardsPerFolder := 1 + sqlStore, cfg := db.InitTestDBWithCfg(t) + searchService, testUser, err := setupIntegrationEnv(t, folderCount, dashboardsPerFolder, sqlStore) + require.NoError(t, err) + + // Query search v2 to ensure "dashboard2" is present. + result := searchService.doDashboardQuery(context.Background(), testUser, 1, DashboardQuery{Kind: []string{string(entityKindDashboard)}}) + require.NoError(t, result.Error) + require.NotZero(t, len(result.Frames)) + for _, field := range result.Frames[0].Fields { + if field.Name == "uid" { + require.Equal(t, dashboardsPerFolder, field.Len()) + break + } + } + + // Set up dashboard store. + quotaService := quotatest.New(false, nil) + featureToggles := featuremgmt.WithFeatures(featuremgmt.FlagPanelTitleSearch, featuremgmt.FlagDashboardRestore) + dashboardStore, err := database.ProvideDashboardStore(sqlStore, cfg, featureToggles, tagimpl.ProvideService(sqlStore), quotaService) + require.NoError(t, err) + + // Soft delete "dashboard2". + err = dashboardStore.SoftDeleteDashboard(context.Background(), 1, "dashboard2") + require.NoError(t, err) + + // Reindex to ensure "dashboard2" is excluded from the index. + searchService.dashboardIndex.reIndexFromScratch(context.Background()) + + // Query search v2 to ensure "dashboard2" is no longer present. + expectedResultCount := dashboardsPerFolder - 1 + result2 := searchService.doDashboardQuery(context.Background(), testUser, 1, DashboardQuery{Kind: []string{string(entityKindDashboard)}}) + require.NoError(t, result2.Error) + require.NotZero(t, len(result2.Frames)) + for _, field := range result2.Frames[0].Fields { + if field.Name == "uid" { + require.Equal(t, expectedResultCount, field.Len()) + break + } + } +} diff --git a/pkg/services/searchV2/service_bench_test.go b/pkg/services/searchV2/service_bench_test.go index 575fbf36f17..778e7bd2bf8 100644 --- a/pkg/services/searchV2/service_bench_test.go +++ b/pkg/services/searchV2/service_bench_test.go @@ -13,6 +13,7 @@ import ( "github.com/grafana/grafana/pkg/services/accesscontrol/actest" "github.com/grafana/grafana/pkg/services/dashboards" "github.com/grafana/grafana/pkg/services/featuremgmt" + "github.com/grafana/grafana/pkg/services/folder/foldertest" "github.com/grafana/grafana/pkg/services/org" "github.com/grafana/grafana/pkg/services/org/orgtest" "github.com/grafana/grafana/pkg/services/store" @@ -41,7 +42,7 @@ func setupBenchEnv(b *testing.B, folderCount, dashboardsPerFolder int) (*Standar ExpectedOrgs: []*org.OrgDTO{{ID: 1}}, } searchService, ok := ProvideService(cfg, sqlStore, store.NewDummyEntityEventsService(), actest.FakeService{}, - tracing.InitializeTracerForTest(), features, orgSvc, nil, nil).(*StandardSearchService) + tracing.InitializeTracerForTest(), features, orgSvc, nil, foldertest.NewFakeService()).(*StandardSearchService) require.True(b, ok) err = runSearchService(searchService) diff --git a/pkg/services/serviceaccounts/api/api_test.go b/pkg/services/serviceaccounts/api/api_test.go index 144869b390a..03713c57e2a 100644 --- a/pkg/services/serviceaccounts/api/api_test.go +++ b/pkg/services/serviceaccounts/api/api_test.go @@ -16,6 +16,7 @@ import ( "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" "github.com/grafana/grafana/pkg/services/accesscontrol/actest" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/org" 
"github.com/grafana/grafana/pkg/services/serviceaccounts" @@ -306,7 +307,7 @@ func setupTests(t *testing.T, opts ...func(a *ServiceAccountsAPI)) *webtest.Serv cfg: cfg, service: &satests.FakeServiceAccountService{}, accesscontrolService: &actest.FakeService{}, - accesscontrol: acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), + accesscontrol: acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), RouterRegister: routing.NewRouteRegister(), log: log.NewNopLogger(), permissionService: &actest.FakePermissionsService{}, diff --git a/pkg/services/sqlstore/replstore.go b/pkg/services/sqlstore/replstore.go index 050341a3606..5cc1bda9ccf 100644 --- a/pkg/services/sqlstore/replstore.go +++ b/pkg/services/sqlstore/replstore.go @@ -192,3 +192,15 @@ func InitTestReplDB(t sqlutil.ITestDB, opts ...InitTestDBOpt) (*ReplStore, *sett } return &ReplStore{ss, ss}, cfg } + +// InitTestReplDBWithMigration initializes the test DB given custom migrations. +func InitTestReplDBWithMigration(t sqlutil.ITestDB, migration registry.DatabaseMigrator, opts ...InitTestDBOpt) *ReplStore { + t.Helper() + features := getFeaturesForTesting(opts...) + cfg := getCfgForTesting(opts...) + ss, err := initTestDB(t, cfg, features, migration, opts...) + if err != nil { + t.Fatalf("failed to initialize sql store: %s", err) + } + return &ReplStore{ss, ss} +} diff --git a/pkg/services/sqlstore/searchstore/filters.go b/pkg/services/sqlstore/searchstore/filters.go index 825585641fb..432ac9e4a4d 100644 --- a/pkg/services/sqlstore/searchstore/filters.go +++ b/pkg/services/sqlstore/searchstore/filters.go @@ -4,6 +4,7 @@ import ( "fmt" "strings" + "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/folder" "github.com/grafana/grafana/pkg/services/search/model" "github.com/grafana/grafana/pkg/services/sqlstore/migrator" @@ -127,6 +128,14 @@ func (f DashboardFilter) Where() (string, []any) { return sqlUIDin("dashboard.uid", f.UIDs) } +type K6FolderFilter struct{} + +func (f K6FolderFilter) Where() (string, []any) { + filter := "dashboard.uid != ? AND (dashboard.folder_uid != ? 
OR dashboard.folder_uid IS NULL)" + params := []any{accesscontrol.K6FolderUID, accesscontrol.K6FolderUID} + return filter, params +} + type TagsFilter struct { Tags []string } diff --git a/pkg/services/ssosettings/api/api_test.go b/pkg/services/ssosettings/api/api_test.go index 9084d4d3155..c70dbd36165 100644 --- a/pkg/services/ssosettings/api/api_test.go +++ b/pkg/services/ssosettings/api/api_test.go @@ -19,6 +19,7 @@ import ( "github.com/grafana/grafana/pkg/login/social" "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/org" "github.com/grafana/grafana/pkg/services/ssosettings" @@ -564,7 +565,7 @@ func setupTests(t *testing.T, service ssosettings.Service) *webtest.Server { api := &Api{ Log: logger, RouteRegister: routing.NewRouteRegister(), - AccessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), + AccessControl: acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), SSOSettingsService: service, } diff --git a/pkg/services/ssosettings/ssosettingsimpl/service_test.go b/pkg/services/ssosettings/ssosettingsimpl/service_test.go index 7aa812f3f2a..cabf5dfe719 100644 --- a/pkg/services/ssosettings/ssosettingsimpl/service_test.go +++ b/pkg/services/ssosettings/ssosettingsimpl/service_test.go @@ -21,6 +21,7 @@ import ( "github.com/grafana/grafana/pkg/login/social" "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/licensing/licensingtest" secretsFakes "github.com/grafana/grafana/pkg/services/secrets/fakes" @@ -1858,7 +1859,7 @@ func setupTestEnv(t *testing.T, isLicensingEnabled, keepFallbackStratergies, sam store := ssosettingstests.NewFakeStore() fallbackStrategy := ssosettingstests.NewFakeFallbackStrategy() secrets := secretsFakes.NewMockService(t) - accessControl := acimpl.ProvideAccessControl(featuremgmt.WithFeatures()) + accessControl := acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()) reloadables := make(map[string]ssosettings.Reloadable) fallbackStrategy.ExpectedIsMatch = true diff --git a/pkg/services/ssosettings/validation/oauth_validators.go b/pkg/services/ssosettings/validation/oauth_validators.go index 56fdf53f459..7899bce8706 100644 --- a/pkg/services/ssosettings/validation/oauth_validators.go +++ b/pkg/services/ssosettings/validation/oauth_validators.go @@ -3,6 +3,7 @@ package validation import ( "fmt" "net/url" + "slices" "strings" "github.com/grafana/grafana/pkg/apimachinery/identity" @@ -10,11 +11,34 @@ import ( "github.com/grafana/grafana/pkg/services/ssosettings" ) -func AllowAssignGrafanaAdminValidator(info *social.OAuthInfo, requester identity.Requester) error { - if info.AllowAssignGrafanaAdmin && !requester.GetIsGrafanaAdmin() { - return ssosettings.ErrInvalidOAuthConfig("Allow assign Grafana Admin can only be updated by Grafana Server Admins.") +func AllowAssignGrafanaAdminValidator(info *social.OAuthInfo, oldInfo *social.OAuthInfo, requester identity.Requester) ssosettings.ValidateFunc[social.OAuthInfo] { + return func(info *social.OAuthInfo, requester identity.Requester) error { + hasChanged := info.AllowAssignGrafanaAdmin != oldInfo.AllowAssignGrafanaAdmin + 
if hasChanged && !requester.GetIsGrafanaAdmin() { + return ssosettings.ErrInvalidOAuthConfig("Allow assign Grafana Admin can only be updated by Grafana Server Admins.") + } + return nil + } +} + +func OrgMappingValidator(info *social.OAuthInfo, oldInfo *social.OAuthInfo, requester identity.Requester) ssosettings.ValidateFunc[social.OAuthInfo] { + return func(info *social.OAuthInfo, requester identity.Requester) error { + hasChanged := !slices.Equal(oldInfo.OrgMapping, info.OrgMapping) + if hasChanged && !requester.GetIsGrafanaAdmin() { + return ssosettings.ErrInvalidOAuthConfig("Organization mapping can only be updated by Grafana Server Admins.") + } + return nil + } +} + +func OrgAttributePathValidator(info *social.OAuthInfo, oldInfo *social.OAuthInfo, requester identity.Requester) ssosettings.ValidateFunc[social.OAuthInfo] { + return func(info *social.OAuthInfo, requester identity.Requester) error { + hasChanged := info.OrgAttributePath != oldInfo.OrgAttributePath + if hasChanged && !requester.GetIsGrafanaAdmin() { + return ssosettings.ErrInvalidOAuthConfig("Organization attribute path can only be updated by Grafana Server Admins.") + } + return nil + } - return nil } func SkipOrgRoleSyncAllowAssignGrafanaAdminValidator(info *social.OAuthInfo, requester identity.Requester) error { diff --git a/pkg/services/ssosettings/validation/oauth_validators_test.go b/pkg/services/ssosettings/validation/oauth_validators_test.go index 6cdea2a067d..9bb17f3eb79 100644 --- a/pkg/services/ssosettings/validation/oauth_validators_test.go +++ b/pkg/services/ssosettings/validation/oauth_validators_test.go @@ -12,10 +12,11 @@ import ( ) type testCase struct { - name string - input *social.OAuthInfo - requester identity.Requester - wantErr error + name string + input *social.OAuthInfo + oldSettings *social.OAuthInfo + requester identity.Requester + wantErr error } func TestUrlValidator(t *testing.T) { @@ -81,20 +82,39 @@ func TestRequiredValidator(t *testing.T) { func TestAllowAssignGrafanaAdminValidator(t *testing.T) { tc := []testCase{ { - name: "passes when user is grafana admin and allow assign grafana admin is true", + name: "passes when user is Grafana Admin and Allow assign Grafana Admin was changed", input: &social.OAuthInfo{ AllowAssignGrafanaAdmin: true, }, + oldSettings: &social.OAuthInfo{ + AllowAssignGrafanaAdmin: false, + }, requester: &user.SignedInUser{ IsGrafanaAdmin: true, }, wantErr: nil, }, { - name: "fails when user is not grafana admin and allow assign grafana admin is true", + name: "passes when user is not Grafana Admin and Allow assign Grafana Admin was not changed", input: &social.OAuthInfo{ AllowAssignGrafanaAdmin: true, }, + oldSettings: &social.OAuthInfo{ + AllowAssignGrafanaAdmin: true, + }, + requester: &user.SignedInUser{ + IsGrafanaAdmin: false, + }, + wantErr: nil, + }, + { + name: "fails when user is not Grafana Admin and Allow assign Grafana Admin was changed", + input: &social.OAuthInfo{ + AllowAssignGrafanaAdmin: true, + }, + oldSettings: &social.OAuthInfo{ + AllowAssignGrafanaAdmin: false, + }, requester: &user.SignedInUser{ IsGrafanaAdmin: false, }, @@ -104,7 +124,7 @@ for _, tt := range tc { t.Run(tt.name, func(t *testing.T) { - err := AllowAssignGrafanaAdminValidator(tt.input, tt.requester) + err := AllowAssignGrafanaAdminValidator(tt.input, tt.oldSettings, tt.requester)(tt.input, tt.requester) if tt.wantErr != nil { require.ErrorIs(t, err, tt.wantErr) return @@ -117,7 +137,7 @@ func 
TestAllowAssignGrafanaAdminValidator(t *testing.T) { func TestSkipOrgRoleSyncAllowAssignGrafanaAdminValidator(t *testing.T) { tc := []testCase{ { - name: "passes when allow assign grafana admin is set, but skip org role sync is not set", + name: "passes when allow assign Grafana Admin is set, but skip org role sync is not set", input: &social.OAuthInfo{ AllowAssignGrafanaAdmin: true, SkipOrgRoleSync: false, @@ -125,7 +145,7 @@ func TestSkipOrgRoleSyncAllowAssignGrafanaAdminValidator(t *testing.T) { wantErr: nil, }, { - name: "passes when allow assign grafana admin is not set, but skip org role sync is set", + name: "passes when allow assign Grafana Admin is not set, but skip org role sync is set", input: &social.OAuthInfo{ AllowAssignGrafanaAdmin: false, SkipOrgRoleSync: true, @@ -133,7 +153,7 @@ func TestSkipOrgRoleSyncAllowAssignGrafanaAdminValidator(t *testing.T) { wantErr: nil, }, { - name: "fails when both allow assign grafana admin and skip org role sync is set", + name: "fails when both allow assign Grafana Admin and skip org role sync is set", input: &social.OAuthInfo{ AllowAssignGrafanaAdmin: true, SkipOrgRoleSync: true, @@ -153,3 +173,126 @@ func TestSkipOrgRoleSyncAllowAssignGrafanaAdminValidator(t *testing.T) { }) } } + +func TestOrgMappingValidator(t *testing.T) { + tc := []testCase{ + { + name: "passes when user is Grafana Admin and Org mapping was changed", + input: &social.OAuthInfo{ + OrgMapping: []string{"group1:1:Viewer"}, + }, + oldSettings: &social.OAuthInfo{ + OrgMapping: []string{"group1:2:Viewer"}, + }, + requester: &user.SignedInUser{ + IsGrafanaAdmin: true, + }, + wantErr: nil, + }, + { + name: "passes when user is not Grafana Admin and Org mapping was not changed", + input: &social.OAuthInfo{ + OrgMapping: []string{"group1:1:Viewer"}, + }, + oldSettings: &social.OAuthInfo{ + OrgMapping: []string{"group1:1:Viewer"}, + }, + requester: &user.SignedInUser{ + IsGrafanaAdmin: false, + }, + wantErr: nil, + }, + { + name: "fails when user is not Grafana Admin and Org mapping was changed", + input: &social.OAuthInfo{ + OrgMapping: []string{"group1:1:Viewer"}, + }, + oldSettings: &social.OAuthInfo{ + OrgMapping: []string{}, + }, + requester: &user.SignedInUser{ + IsGrafanaAdmin: false, + }, + wantErr: ssosettings.ErrInvalidOAuthConfig("Organization mapping can only be updated by Grafana Server Admins."), + }, + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + err := OrgMappingValidator(tt.input, tt.oldSettings, tt.requester)(tt.input, tt.requester) + if tt.wantErr != nil { + require.ErrorIs(t, err, tt.wantErr) + return + } + require.NoError(t, err) + }) + } +} + +func TestOrgAttributePathValidator(t *testing.T) { + tc := []testCase{ + { + name: "passes when user is Grafana Admin and Org attribute path was changed", + input: &social.OAuthInfo{ + OrgAttributePath: "path", + }, + oldSettings: &social.OAuthInfo{ + OrgAttributePath: "old-path", + }, + requester: &user.SignedInUser{ + IsGrafanaAdmin: true, + }, + wantErr: nil, + }, + { + name: "passes when user is not Grafana Admin and Org attribute path was not changed", + input: &social.OAuthInfo{ + OrgAttributePath: "path", + }, + oldSettings: &social.OAuthInfo{ + OrgAttributePath: "path", + }, + requester: &user.SignedInUser{ + IsGrafanaAdmin: false, + }, + wantErr: nil, + }, + { + name: "fails when user is not Grafana Admin and Org attribute path casing was changed", + input: &social.OAuthInfo{ + OrgAttributePath: "path", + }, + oldSettings: &social.OAuthInfo{ + OrgAttributePath: "Path", + }, + requester: 
&user.SignedInUser{ + IsGrafanaAdmin: false, + }, + wantErr: ssosettings.ErrInvalidOAuthConfig("Organization attribute path can only be updated by Grafana Server Admins."), + }, + { + name: "fails when user is not Grafana Admin and Org attribute path was changed", + input: &social.OAuthInfo{ + OrgAttributePath: "path", + }, + oldSettings: &social.OAuthInfo{ + OrgAttributePath: "old-path", + }, + requester: &user.SignedInUser{ + IsGrafanaAdmin: false, + }, + wantErr: ssosettings.ErrInvalidOAuthConfig("Organization attribute path can only be updated by Grafana Server Admins."), + }, + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + err := OrgAttributePathValidator(tt.input, tt.oldSettings, tt.requester)(tt.input, tt.requester) + if tt.wantErr != nil { + require.ErrorIs(t, err, tt.wantErr) + return + } + require.NoError(t, err) + }) + } +} diff --git a/pkg/services/store/testdata/public_testdata.golden.jsonc b/pkg/services/store/testdata/public_testdata.golden.jsonc index 88d0760e029..a37239596c2 100644 --- a/pkg/services/store/testdata/public_testdata.golden.jsonc +++ b/pkg/services/store/testdata/public_testdata.golden.jsonc @@ -1,5 +1,5 @@ // 🌟 This was machine generated. Do not edit. 🌟 -// +// // Frame[0] { // "type": "directory-listing", // "typeVersion": [ @@ -10,7 +10,7 @@ // "HasMore": false // } // } -// Name: +// Name: // Dimensions: 3 Fields by 3 Rows // +----------------------------+----------------------+---------------+ // | Name: name | Name: mediaType | Name: size | @@ -21,8 +21,8 @@ // | example-with-style.geojson | application/geo+json | 3332 | // | usa-states.geojson | application/geo+json | 89263 | // +----------------------------+----------------------+---------------+ -// -// +// +// // 🌟 This was machine generated. Do not edit. 
🌟 { "status": 200, diff --git a/pkg/services/team/teamapi/team_members_test.go b/pkg/services/team/teamapi/team_members_test.go index 9fba37925fa..c3eb3d2f639 100644 --- a/pkg/services/team/teamapi/team_members_test.go +++ b/pkg/services/team/teamapi/team_members_test.go @@ -14,6 +14,7 @@ import ( "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" "github.com/grafana/grafana/pkg/services/accesscontrol/actest" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/dashboards" "github.com/grafana/grafana/pkg/services/dashboards/dashboardaccess" "github.com/grafana/grafana/pkg/services/featuremgmt" @@ -37,7 +38,7 @@ func SetupAPITestServer(t *testing.T, opts ...func(a *TeamAPI)) *webtest.Server a := ProvideTeamAPI(router, teamtest.NewFakeService(), actest.FakeService{}, - acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), + acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), &actest.FakePermissionsService{}, &usertest.FakeUserService{}, &licensing.OSSLicensingService{}, @@ -259,7 +260,7 @@ func Test_getTeamMembershipUpdates(t *testing.T) { tapi := ProvideTeamAPI(routing.NewRouteRegister(), teamSvc, actest.FakeService{}, - acimpl.ProvideAccessControl(featuremgmt.WithFeatures()), + acimpl.ProvideAccessControl(featuremgmt.WithFeatures(), zanzana.NewNoopClient()), &actest.FakePermissionsService{}, userService, &licensing.OSSLicensingService{}, diff --git a/pkg/services/user/identity.go b/pkg/services/user/identity.go index bc3d8e273ef..acb12b59a6c 100644 --- a/pkg/services/user/identity.go +++ b/pkg/services/user/identity.go @@ -27,7 +27,9 @@ type SignedInUser struct { // AuthID will be set if user signed in using external method AuthID string // AuthenticatedBy be set if user signed in using external method - AuthenticatedBy string + AuthenticatedBy string + AllowedKubernetesNamespace string + ApiKeyID int64 `xorm:"api_key_id"` IsServiceAccount bool `xorm:"is_service_account"` IsGrafanaAdmin bool @@ -89,6 +91,10 @@ func (u *SignedInUser) HasUniqueId() bool { return u.IsRealUser() || u.IsApiKeyUser() || u.IsServiceAccountUser() } +func (u *SignedInUser) GetAllowedKubernetesNamespace() string { + return u.AllowedKubernetesNamespace +} + // GetCacheKey returns a unique key for the entity. 
// Add an extra prefix to avoid collisions with other caches func (u *SignedInUser) GetCacheKey() string { diff --git a/pkg/setting/setting.go b/pkg/setting/setting.go index 336a27b7e28..aa37813cc6e 100644 --- a/pkg/setting/setting.go +++ b/pkg/setting/setting.go @@ -366,6 +366,7 @@ type Cfg struct { ApplicationInsightsConnectionString string ApplicationInsightsEndpointUrl string FeedbackLinksEnabled bool + ReportingStaticContext map[string]string // Frontend analytics GoogleAnalyticsID string @@ -1156,6 +1157,15 @@ func (cfg *Cfg) parseINIFile(iniFile *ini.File) error { cfg.ApplicationInsightsEndpointUrl = analytics.Key("application_insights_endpoint_url").String() cfg.FeedbackLinksEnabled = analytics.Key("feedback_links_enabled").MustBool(true) + // parse reporting static context string of key=value, key=value pairs into an object + cfg.ReportingStaticContext = make(map[string]string) + for _, pair := range strings.Split(analytics.Key("reporting_static_context").String(), ",") { + kv := strings.Split(pair, "=") + if len(kv) == 2 { + cfg.ReportingStaticContext[strings.TrimSpace("_static_context_"+kv[0])] = strings.TrimSpace(kv[1]) + } + } + if err := cfg.readAlertingSettings(iniFile); err != nil { return err } diff --git a/pkg/setting/setting_jwt.go b/pkg/setting/setting_jwt.go index bbc7e3612e6..42b8009de4b 100644 --- a/pkg/setting/setting_jwt.go +++ b/pkg/setting/setting_jwt.go @@ -2,6 +2,10 @@ package setting import "time" +const ( + extJWTAccessTokenExpectAudience = "grafana" +) + type AuthJWTSettings struct { // JWT Auth Enabled bool @@ -29,6 +33,7 @@ type ExtJWTSettings struct { Enabled bool ExpectIssuer string JWKSUrl string + Audiences []string } func (cfg *Cfg) readAuthExtJWTSettings() { @@ -36,6 +41,9 @@ func (cfg *Cfg) readAuthExtJWTSettings() { jwtSettings := ExtJWTSettings{} jwtSettings.Enabled = authExtendedJWT.Key("enabled").MustBool(false) jwtSettings.JWKSUrl = authExtendedJWT.Key("jwks_url").MustString("") + // for Grafana, this is hard coded, but we leave it as a configurable param for other use-cases + jwtSettings.Audiences = []string{extJWTAccessTokenExpectAudience} + cfg.ExtJWTAuth = jwtSettings } diff --git a/pkg/storage/unified/apistore/test/watch_test.go b/pkg/storage/unified/apistore/test/watch_test.go deleted file mode 100644 index cc525c3a62d..00000000000 --- a/pkg/storage/unified/apistore/test/watch_test.go +++ /dev/null @@ -1,371 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// Provenance-includes-location: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher_test.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: The Kubernetes Authors. 
- -package test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/api/apitesting" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apiserver/pkg/apis/example" - examplev1 "k8s.io/apiserver/pkg/apis/example/v1" - genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/apiserver/pkg/storage" - "k8s.io/apiserver/pkg/storage/storagebackend" - "k8s.io/apiserver/pkg/storage/storagebackend/factory" - storagetesting "k8s.io/apiserver/pkg/storage/testing" - - grafanaregistry "github.com/grafana/grafana/pkg/apiserver/registry/generic" - "github.com/grafana/grafana/pkg/infra/tracing" - "github.com/grafana/grafana/pkg/services/apiserver/storage/entity" - "github.com/grafana/grafana/pkg/services/featuremgmt" - "github.com/grafana/grafana/pkg/services/sqlstore" - entityStore "github.com/grafana/grafana/pkg/services/store/entity" - "github.com/grafana/grafana/pkg/services/store/entity/db/dbimpl" - "github.com/grafana/grafana/pkg/services/store/entity/sqlstash" - "github.com/grafana/grafana/pkg/setting" - "github.com/grafana/grafana/pkg/tests/testinfra" - "github.com/grafana/grafana/pkg/tests/testsuite" -) - -var scheme = runtime.NewScheme() -var codecs = serializer.NewCodecFactory(scheme) - -func TestMain(m *testing.M) { - testsuite.Run(m) -} - -func createTestContext(t *testing.T) (entityStore.EntityStoreClient, factory.DestroyFunc) { - t.Helper() - - grafDir, cfgPath := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{ - EnableFeatureToggles: []string{ - featuremgmt.FlagGrpcServer, - featuremgmt.FlagUnifiedStorage, - }, - AppModeProduction: false, // required for migrations to run - GRPCServerAddress: "127.0.0.1:0", // :0 for choosing the port automatically - }) - - cfg, err := setting.NewCfgFromArgs(setting.CommandLineArgs{Config: cfgPath, HomePath: grafDir}) - assert.NoError(t, err) - - featureManager, err := featuremgmt.ProvideManagerService(cfg) - assert.NoError(t, err) - - featureToggles := featuremgmt.ProvideToggles(featureManager) - - db := sqlstore.InitTestDBWithMigration(t, nil, sqlstore.InitTestDBOpt{EnsureDefaultOrgAndUser: false}) - require.NoError(t, err) - - eDB, err := dbimpl.ProvideEntityDB(db, cfg, featureToggles, nil) - require.NoError(t, err) - - err = eDB.Init() - require.NoError(t, err) - - traceConfig, err := tracing.ParseTracingConfig(cfg) - require.NoError(t, err) - tracer, err := tracing.ProvideService(traceConfig) - require.NoError(t, err) - store, err := sqlstash.ProvideSQLEntityServer(eDB, tracer) - require.NoError(t, err) - - client := entityStore.NewEntityStoreClientLocal(store) - - return client, func() { store.Stop() } -} - -func init() { - metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion) - utilruntime.Must(example.AddToScheme(scheme)) - utilruntime.Must(examplev1.AddToScheme(scheme)) -} - -type setupOptions struct { - codec runtime.Codec - newFunc func() runtime.Object - newListFunc func() runtime.Object - prefix string - resourcePrefix string - groupResource schema.GroupResource -} - -type setupOption func(*setupOptions, *testing.T) - -func withDefaults(options *setupOptions, t *testing.T) { - options.codec = apitesting.TestCodec(codecs, examplev1.SchemeGroupVersion) - options.newFunc = newPod - options.newListFunc = newPodList - 
options.prefix = t.TempDir() - options.resourcePrefix = "/pods" - options.groupResource = schema.GroupResource{Resource: "pods"} -} - -var _ setupOption = withDefaults - -func testSetup(t *testing.T, opts ...setupOption) (context.Context, storage.Interface, factory.DestroyFunc, error) { - setupOpts := setupOptions{} - opts = append([]setupOption{withDefaults}, opts...) - for _, opt := range opts { - opt(&setupOpts, t) - } - - config := storagebackend.NewDefaultConfig(setupOpts.prefix, setupOpts.codec) - - client, destroyFunc := createTestContext(t) - - store, _, err := entity.NewStorage( - config.ForResource(setupOpts.groupResource), - setupOpts.groupResource, - client, - setupOpts.codec, - func(obj runtime.Object) (string, error) { - accessor, err := meta.Accessor(obj) - if err != nil { - return "", err - } - keyFn := grafanaregistry.NamespaceKeyFunc(setupOpts.groupResource) - return keyFn(genericapirequest.WithNamespace(genericapirequest.NewContext(), accessor.GetNamespace()), accessor.GetName()) - }, - setupOpts.newFunc, - setupOpts.newListFunc, - storage.DefaultNamespaceScopedAttr, - ) - if err != nil { - return nil, nil, nil, err - } - - ctx := context.Background() - - return ctx, store, destroyFunc, nil -} - -func TestIntegrationWatch(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - t.Skip("In maintenance") - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunTestWatch(ctx, t, store) -} - -func TestIntegrationClusterScopedWatch(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - t.Skip("In maintenance") - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunTestClusterScopedWatch(ctx, t, store) -} - -func TestIntegrationNamespaceScopedWatch(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - t.Skip("In maintenance") - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunTestNamespaceScopedWatch(ctx, t, store) -} - -func TestIntegrationDeleteTriggerWatch(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - t.Skip("In maintenance") - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunTestDeleteTriggerWatch(ctx, t, store) -} - -func TestIntegrationWatchFromZero(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - t.Skip("In maintenance") - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunTestWatchFromZero(ctx, t, store, nil) -} - -// TestWatchFromNonZero tests that -// - watch from non-0 should just watch changes after given version -func TestIntegrationWatchFromNonZero(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - t.Skip("In maintenance") - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunTestWatchFromNonZero(ctx, t, store) -} - -/* -// TODO this times out, we need to buffer events -func TestIntegrationDelayedWatchDelivery(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunTestDelayedWatchDelivery(ctx, t, store) -} -*/ - -/* func TestIntegrationWatchError(t *testing.T) { - if 
testing.Short() { - t.Skip("skipping integration test") - } - - ctx, store, _ := testSetup(t) - storagetesting.RunTestWatchError(ctx, t, &storeWithPrefixTransformer{store}) -} */ - -func TestIntegrationWatchContextCancel(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - t.Skip("In maintenance") - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunTestWatchContextCancel(ctx, t, store) -} - -func TestIntegrationWatcherTimeout(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - t.Skip("In maintenance") - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunTestWatcherTimeout(ctx, t, store) -} - -func TestIntegrationWatchDeleteEventObjectHaveLatestRV(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - t.Skip("In maintenance") - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunTestWatchDeleteEventObjectHaveLatestRV(ctx, t, store) -} - -// TODO: enable when we support flow control and priority fairness -/* func TestIntegrationWatchInitializationSignal(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunTestWatchInitializationSignal(ctx, t, store) -} */ - -/* func TestIntegrationProgressNotify(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunOptionalTestProgressNotify(ctx, t, store) -} */ - -// TestWatchDispatchBookmarkEvents makes sure that -// setting allowWatchBookmarks query param against -// etcd implementation doesn't have any effect. 
-func TestIntegrationWatchDispatchBookmarkEvents(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - t.Skip("In maintenance") - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunTestWatchDispatchBookmarkEvents(ctx, t, store, false) -} - -func TestIntegrationSendInitialEventsBackwardCompatibility(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - t.Skip("In maintenance") - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunSendInitialEventsBackwardCompatibility(ctx, t, store) -} - -// TODO this test times out -func TestIntegrationEtcdWatchSemantics(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - t.Skip("In maintenance") - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunWatchSemantics(ctx, t, store) -} - -/* -// TODO this test times out -func TestIntegrationEtcdWatchSemanticInitialEventsExtended(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - - ctx, store, destroyFunc, err := testSetup(t) - defer destroyFunc() - assert.NoError(t, err) - storagetesting.RunWatchSemanticInitialEventsExtended(ctx, t, store) -} -*/ - -func newPod() runtime.Object { - return &example.Pod{} -} - -func newPodList() runtime.Object { - return &example.PodList{} -} diff --git a/pkg/tests/api/alerting/api_provisioning_test.go b/pkg/tests/api/alerting/api_provisioning_test.go index f56b1df0cb6..c16eff9ce74 100644 --- a/pkg/tests/api/alerting/api_provisioning_test.go +++ b/pkg/tests/api/alerting/api_provisioning_test.go @@ -10,13 +10,16 @@ import ( "sort" "strings" "testing" + "time" "github.com/prometheus/alertmanager/config" "github.com/prometheus/alertmanager/timeinterval" + "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/grafana/grafana/pkg/apimachinery/errutil" + "github.com/grafana/grafana/pkg/expr" "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions" "github.com/grafana/grafana/pkg/services/ngalert/models" "github.com/grafana/grafana/pkg/services/org" @@ -24,6 +27,50 @@ import ( "github.com/grafana/grafana/pkg/tests/testinfra" ) +func createRuleWithNotificationSettings(t *testing.T, client apiClient, folder string, nfSettings *definitions.AlertRuleNotificationSettings) (definitions.PostableRuleGroupConfig, string) { + t.Helper() + + interval, err := model.ParseDuration("1m") + require.NoError(t, err) + doubleInterval := 2 * interval + rules := definitions.PostableRuleGroupConfig{ + Name: "arulegroup", + Interval: interval, + Rules: []definitions.PostableExtendedRuleNode{ + { + ApiRuleNode: &definitions.ApiRuleNode{ + For: &doubleInterval, + Labels: map[string]string{"label1": "val1"}, + Annotations: map[string]string{"annotation1": "val1"}, + }, + GrafanaManagedAlert: &definitions.PostableGrafanaRule{ + Title: fmt.Sprintf("rule under folder %s", folder), + Condition: "A", + Data: []definitions.AlertQuery{ + { + RefID: "A", + RelativeTimeRange: definitions.RelativeTimeRange{ + From: definitions.Duration(time.Duration(5) * time.Hour), + To: definitions.Duration(time.Duration(3) * time.Hour), + }, + DatasourceUID: expr.DatasourceUID, + Model: json.RawMessage(`{ + "type": "math", + "expression": "2 + 3 > 1" + }`), + }, + }, + NotificationSettings: nfSettings, + }, + }, + }, + } + resp, status, _ := 
client.PostRulesGroupWithStatus(t, folder, &rules) + assert.Equal(t, http.StatusAccepted, status) + require.Len(t, resp.Created, 1) + return rules, resp.Created[0] +} + func TestIntegrationProvisioning(t *testing.T) { testinfra.SQLiteIntegrationTest(t) @@ -258,6 +305,113 @@ func TestIntegrationProvisioning(t *testing.T) { require.Equal(t, 202, resp.StatusCode) }) + + createContactPoint := func(t *testing.T, name string) definitions.EmbeddedContactPoint { + cpBody := fmt.Sprintf(` + { + "name": "%s", + "type": "slack", + "settings": { + "recipient": "value_recipient", + "token": "value_token" + } + }`, name) + + req := createTestRequest("POST", url, "admin", cpBody) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + require.Equal(t, 202, resp.StatusCode) + + ecp := definitions.EmbeddedContactPoint{} + require.NoError(t, json.NewDecoder(resp.Body).Decode(&ecp)) + require.NoError(t, resp.Body.Close()) + + return ecp + } + + createPolicyForContactPoint := func(t *testing.T, receiver string) { + url := fmt.Sprintf("http://%s/api/v1/provisioning/policies", grafanaListedAddr) + body := fmt.Sprintf(` + { + "receiver": "%s", + "group_by": [ + "..." + ], + "routes": [] + }`, receiver) + + req := createTestRequest("PUT", url, "admin", body) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.Equal(t, 202, resp.StatusCode) + } + + t.Run("viewer DELETE should 403", func(t *testing.T) { + ecp := createContactPoint(t, "my-contact-point") + + deleteURL := fmt.Sprintf("http://%s/api/v1/provisioning/contact-points/%s", grafanaListedAddr, ecp.UID) + req := createTestRequest("DELETE", deleteURL, "viewer", body) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.Equal(t, 403, resp.StatusCode) + }) + + t.Run("admin DELETE should succeed", func(t *testing.T) { + ecp := createContactPoint(t, "my-contact-point") + + deleteURL := fmt.Sprintf("http://%s/api/v1/provisioning/contact-points/%s", grafanaListedAddr, ecp.UID) + req := createTestRequest("DELETE", deleteURL, "admin", "") + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.Equal(t, 202, resp.StatusCode) + }) + + t.Run("admin DELETE should 409 when contact point used by notification policy", func(t *testing.T) { + ecp := createContactPoint(t, "my-cp-used-by-policy") + + createPolicyForContactPoint(t, "my-cp-used-by-policy") + + deleteURL := fmt.Sprintf("http://%s/api/v1/provisioning/contact-points/%s", grafanaListedAddr, ecp.UID) + deleteReq := createTestRequest("DELETE", deleteURL, "admin", "") + + resp, err := http.DefaultClient.Do(deleteReq) + require.NoError(t, err) + require.Equal(t, 409, resp.StatusCode) + var validationError errutil.PublicError + assert.NoError(t, json.NewDecoder(resp.Body).Decode(&validationError)) + require.NoError(t, resp.Body.Close()) + assert.NotEmpty(t, validationError, validationError.Message) + assert.Equal(t, "alerting.notifications.contact-points.referenced", validationError.MessageID) + }) + + t.Run("admin DELETE should 409 when contact point used by rule", func(t *testing.T) { + ecp := createContactPoint(t, "my-cp-used-by-rule") + + nfSettings := &definitions.AlertRuleNotificationSettings{ + Receiver: "my-cp-used-by-rule", + } + apiClient := newAlertingApiClient(grafanaListedAddr, "admin", "admin") + createRuleWithNotificationSettings(t, apiClient, namespaceUID, nfSettings) + + deleteURL := 
fmt.Sprintf("http://%s/api/v1/provisioning/contact-points/%s", grafanaListedAddr, ecp.UID) + deleteReq := createTestRequest("DELETE", deleteURL, "admin", "") + + resp, err := http.DefaultClient.Do(deleteReq) + require.NoError(t, err) + require.Equal(t, 409, resp.StatusCode) + var validationError errutil.PublicError + assert.NoError(t, json.NewDecoder(resp.Body).Decode(&validationError)) + require.NoError(t, resp.Body.Close()) + assert.NotEmpty(t, validationError, validationError.Message) + assert.Equal(t, "alerting.notifications.contact-points.used-by-rule", validationError.MessageID) + }) }) t.Run("when provisioning templates", func(t *testing.T) { diff --git a/pkg/tests/apis/alerting/notifications/timeinterval/timeinterval_test.go b/pkg/tests/apis/alerting/notifications/timeinterval/timeinterval_test.go index c8141cbcdee..ae3cf8bc6b1 100644 --- a/pkg/tests/apis/alerting/notifications/timeinterval/timeinterval_test.go +++ b/pkg/tests/apis/alerting/notifications/timeinterval/timeinterval_test.go @@ -17,6 +17,7 @@ import ( "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/accesscontrol/acimpl" "github.com/grafana/grafana/pkg/services/accesscontrol/resourcepermissions" + "github.com/grafana/grafana/pkg/services/authz/zanzana" "github.com/grafana/grafana/pkg/services/dashboards" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/folder/foldertest" @@ -191,15 +192,13 @@ func TestIntegrationTimeIntervalAccessControl(t *testing.T) { var expected = &v0alpha1.TimeInterval{ ObjectMeta: v1.ObjectMeta{ Namespace: "default", - Annotations: map[string]string{ - "grafana.com/provenance": "", - }, }, Spec: v0alpha1.TimeIntervalSpec{ Name: fmt.Sprintf("time-interval-1-%s", tc.user.Identity.GetLogin()), TimeIntervals: v0alpha1.IntervalGenerator{}.GenerateMany(2), }, } + expected.SetProvenanceStatus("") d, err := json.Marshal(expected) require.NoError(t, err) @@ -348,7 +347,7 @@ func TestIntegrationTimeIntervalProvisioning(t *testing.T) { adminClient := adminK8sClient.NotificationsV0alpha1().TimeIntervals("default") env := helper.GetEnv() - ac := acimpl.ProvideAccessControl(env.FeatureToggles) + ac := acimpl.ProvideAccessControl(env.FeatureToggles, zanzana.NewNoopClient()) db, err := store.ProvideDBStore(env.Cfg, env.FeatureToggles, env.SQLStore, &foldertest.FakeService{}, &dashboards.FakeDashboardService{}, ac) require.NoError(t, err) @@ -362,7 +361,7 @@ func TestIntegrationTimeIntervalProvisioning(t *testing.T) { }, }, v1.CreateOptions{}) require.NoError(t, err) - require.Equal(t, "", created.Annotations["grafana.com/provenance"]) + require.Equal(t, "none", created.GetProvenanceStatus()) t.Run("should provide provenance status", func(t *testing.T) { require.NoError(t, db.SetProvenance(ctx, &definitions.MuteTimeInterval{ @@ -373,7 +372,7 @@ func TestIntegrationTimeIntervalProvisioning(t *testing.T) { got, err := adminClient.Get(ctx, created.Name, v1.GetOptions{}) require.NoError(t, err) - require.Equal(t, "API", got.Annotations["grafana.com/provenance"]) + require.Equal(t, "API", got.GetProvenanceStatus()) }) t.Run("should not let update if provisioned", func(t *testing.T) { updated := created.DeepCopy() @@ -539,3 +538,91 @@ func TestIntegrationTimeIntervalPatch(t *testing.T) { current = result }) } + +func TestIntegrationTimeIntervalListSelector(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + + ctx := context.Background() + helper := getTestHelper(t) + + adminK8sClient, err := 
versioned.NewForConfig(helper.Org1.Admin.NewRestConfig()) + require.NoError(t, err) + adminClient := adminK8sClient.NotificationsV0alpha1().TimeIntervals("default") + + interval1 := &v0alpha1.TimeInterval{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "default", + }, + Spec: v0alpha1.TimeIntervalSpec{ + Name: "test1", + TimeIntervals: v0alpha1.IntervalGenerator{}.GenerateMany(2), + }, + } + interval1, err = adminClient.Create(ctx, interval1, v1.CreateOptions{}) + require.NoError(t, err) + + interval2 := &v0alpha1.TimeInterval{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "default", + }, + Spec: v0alpha1.TimeIntervalSpec{ + Name: "test2", + TimeIntervals: v0alpha1.IntervalGenerator{}.GenerateMany(2), + }, + } + interval2, err = adminClient.Create(ctx, interval2, v1.CreateOptions{}) + require.NoError(t, err) + env := helper.GetEnv() + ac := acimpl.ProvideAccessControl(env.FeatureToggles, zanzana.NewNoopClient()) + db, err := store.ProvideDBStore(env.Cfg, env.FeatureToggles, env.SQLStore, &foldertest.FakeService{}, &dashboards.FakeDashboardService{}, ac) + require.NoError(t, err) + require.NoError(t, db.SetProvenance(ctx, &definitions.MuteTimeInterval{ + MuteTimeInterval: config.MuteTimeInterval{ + Name: interval2.Spec.Name, + }, + }, helper.Org1.Admin.Identity.GetOrgID(), "API")) + interval2, err = adminClient.Get(ctx, interval2.Name, v1.GetOptions{}) + + require.NoError(t, err) + + intervals, err := adminClient.List(ctx, v1.ListOptions{}) + require.NoError(t, err) + require.Len(t, intervals.Items, 2) + + t.Run("should filter by interval name", func(t *testing.T) { + list, err := adminClient.List(ctx, v1.ListOptions{ + FieldSelector: "spec.name=" + interval1.Spec.Name, + }) + require.NoError(t, err) + require.Len(t, list.Items, 1) + require.Equal(t, interval1.Name, list.Items[0].Name) + }) + + t.Run("should filter by interval metadata name", func(t *testing.T) { + list, err := adminClient.List(ctx, v1.ListOptions{ + FieldSelector: "metadata.name=" + interval2.Name, + }) + require.NoError(t, err) + require.Len(t, list.Items, 1) + require.Equal(t, interval2.Name, list.Items[0].Name) + }) + + t.Run("should filter by multiple filters", func(t *testing.T) { + list, err := adminClient.List(ctx, v1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.name=%s,metadata.provenance=%s", interval2.Name, "API"), + }) + require.NoError(t, err) + require.Len(t, list.Items, 1) + require.Equal(t, interval2.Name, list.Items[0].Name) + }) + + t.Run("should be empty when filter does not match", func(t *testing.T) { + list, err := adminClient.List(ctx, v1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.name=%s,metadata.provenance=%s", interval2.Name, "unknown"), + }) + require.NoError(t, err) + require.Empty(t, list.Items) + }) +} diff --git a/pkg/tests/apis/helper.go b/pkg/tests/apis/helper.go index 5fa3c0633cb..a19ca52eede 100644 --- a/pkg/tests/apis/helper.go +++ b/pkg/tests/apis/helper.go @@ -25,6 +25,7 @@ import ( "k8s.io/client-go/rest" "github.com/grafana/grafana/pkg/apimachinery/identity" + "github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/infra/localcache" "github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/server" @@ -158,14 +159,11 @@ func (c *K8sResourceClient) SanitizeJSON(v *unstructured.Unstructured) string { if anno["grafana.app/originHash"] != "" { anno["grafana.app/originHash"] = "${originHash}" } - if anno["grafana.app/updatedTimestamp"] != "" { - anno["grafana.app/updatedTimestamp"] = "${updatedTimestamp}" - } // Remove annotations that 
are not added by legacy storage - delete(anno, "grafana.app/originTimestamp") - delete(anno, "grafana.app/createdBy") - delete(anno, "grafana.app/updatedBy") - delete(anno, "grafana.app/action") + delete(anno, utils.AnnoKeyOriginTimestamp) + delete(anno, utils.AnnoKeyCreatedBy) + delete(anno, utils.AnnoKeyUpdatedBy) + delete(anno, utils.AnnoKeyUpdatedTimestamp) deep.SetAnnotations(anno) copy := deep.Object diff --git a/pkg/tests/apis/playlist/playlist_test.go b/pkg/tests/apis/playlist/playlist_test.go index 1083dde313b..440f08ff474 100644 --- a/pkg/tests/apis/playlist/playlist_test.go +++ b/pkg/tests/apis/playlist/playlist_test.go @@ -361,8 +361,7 @@ func doPlaylistTests(t *testing.T, helper *apis.K8sTestHelper) *apis.K8sTestHelp "metadata": { "annotations": { "grafana.app/originPath": "${originPath}", - "grafana.app/originName": "SQL", - "grafana.app/updatedTimestamp": "${updatedTimestamp}" + "grafana.app/originName": "SQL" }, "creationTimestamp": "${creationTimestamp}", "name": "` + uid + `", diff --git a/pkg/tsdb/azuremonitor/azuremonitor-resource-handler.go b/pkg/tsdb/azuremonitor/azuremonitor-resource-handler.go index 13d4c411b87..c52aa3a1670 100644 --- a/pkg/tsdb/azuremonitor/azuremonitor-resource-handler.go +++ b/pkg/tsdb/azuremonitor/azuremonitor-resource-handler.go @@ -9,7 +9,6 @@ import ( "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/grafana-plugin-sdk-go/backend/log" - "github.com/grafana/grafana-plugin-sdk-go/backend/resource/httpadapter" "github.com/grafana/grafana/pkg/tsdb/azuremonitor/types" ) @@ -71,7 +70,7 @@ func (s *httpServiceProxy) Do(rw http.ResponseWriter, req *http.Request, cli *ht func (s *Service) getDataSourceFromHTTPReq(req *http.Request) (types.DatasourceInfo, error) { ctx := req.Context() - pluginContext := httpadapter.PluginConfigFromContext(ctx) + pluginContext := backend.PluginConfigFromContext(ctx) i, err := s.im.Get(ctx, pluginContext) if err != nil { return types.DatasourceInfo{}, err diff --git a/pkg/tsdb/cloud-monitoring/resource_handler.go b/pkg/tsdb/cloud-monitoring/resource_handler.go index 5347b9a9499..afcce954784 100644 --- a/pkg/tsdb/cloud-monitoring/resource_handler.go +++ b/pkg/tsdb/cloud-monitoring/resource_handler.go @@ -16,7 +16,6 @@ import ( "github.com/andybalholm/brotli" "github.com/grafana/grafana-plugin-sdk-go/backend" - "github.com/grafana/grafana-plugin-sdk-go/backend/resource/httpadapter" ) // nameExp matches the part after the last '/' symbol @@ -397,7 +396,7 @@ func writeResponse(rw http.ResponseWriter, code int, msg string) { func (s *Service) getDataSourceFromHTTPReq(req *http.Request) (*datasourceInfo, error) { ctx := req.Context() - pluginContext := httpadapter.PluginConfigFromContext(ctx) + pluginContext := backend.PluginConfigFromContext(ctx) i, err := s.im.Get(ctx, pluginContext) if err != nil { return nil, nil diff --git a/pkg/tsdb/cloudwatch/cloudwatch.go b/pkg/tsdb/cloudwatch/cloudwatch.go index 9772e11de9d..b7d33be7a62 100644 --- a/pkg/tsdb/cloudwatch/cloudwatch.go +++ b/pkg/tsdb/cloudwatch/cloudwatch.go @@ -192,12 +192,12 @@ func (e *cloudWatchExecutor) getRequestContextOnlySettings(ctx context.Context, } func (e *cloudWatchExecutor) CallResource(ctx context.Context, req *backend.CallResourceRequest, sender backend.CallResourceResponseSender) error { - ctx = instrumentContext(ctx, "callResource", req.PluginContext) + ctx = instrumentContext(ctx, string(backend.EndpointCallResource), req.PluginContext) return e.resourceHandler.CallResource(ctx, req, sender) } func (e *cloudWatchExecutor) 
QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { - ctx = instrumentContext(ctx, "queryData", req.PluginContext) + ctx = instrumentContext(ctx, string(backend.EndpointQueryData), req.PluginContext) q := req.Queries[0] var model DataQueryJson err := json.Unmarshal(q.JSON, &model) @@ -236,7 +236,7 @@ func (e *cloudWatchExecutor) QueryData(ctx context.Context, req *backend.QueryDa } func (e *cloudWatchExecutor) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { - ctx = instrumentContext(ctx, "checkHealth", req.PluginContext) + ctx = instrumentContext(ctx, string(backend.EndpointCheckHealth), req.PluginContext) status := backend.HealthStatusOk metricsTest := "Successfully queried the CloudWatch metrics API." logsTest := "Successfully queried the CloudWatch logs API." diff --git a/pkg/tsdb/cloudwatch/kinds/dataquery/types_dataquery_gen.go b/pkg/tsdb/cloudwatch/kinds/dataquery/types_dataquery_gen.go index a41f2d27536..93a14021487 100644 --- a/pkg/tsdb/cloudwatch/kinds/dataquery/types_dataquery_gen.go +++ b/pkg/tsdb/cloudwatch/kinds/dataquery/types_dataquery_gen.go @@ -271,7 +271,7 @@ type CloudWatchMetricsQuery struct { Region *string `json:"region,omitempty"` Sql *SQLExpression `json:"sql,omitempty"` - // When the metric query type is `metricQueryType` is set to `Query`, this field is used to specify the query string. + // When the metric query type is set to `Insights`, this field is used to specify the query string. SqlExpression *string `json:"sqlExpression,omitempty"` // Metric data aggregations over specified periods of time. For detailed definitions of the statistics supported by CloudWatch, see https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html. 
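Taken together with the Elasticsearch and Loki instrumentation hunks later in this patch, the CloudWatch changes above replace hand-written endpoint strings ("queryData", "checkHealth", "callResource") with the plugin SDK's typed backend.Endpoint constants. A minimal sketch of that pattern, assuming grafana-plugin-sdk-go and prometheus/client_golang; the metric and function names here are illustrative and not part of the patch:

package instrumentation

import (
	"time"

	"github.com/grafana/grafana-plugin-sdk-go/backend"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// requestDuration is a hypothetical histogram labelled the same way as the
// instrumentation touched by this patch: status plus endpoint.
var requestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
	Namespace: "grafana",
	Name:      "plugin_request_duration_seconds",
	Help:      "Duration of plugin requests by endpoint and status.",
}, []string{"status", "endpoint"})

// observeRequest records one request under the SDK's typed endpoint constant.
// backend.EndpointQueryData stringifies to "queryData", so the label values
// stay compatible with the literals this patch removes.
func observeRequest(endpoint backend.Endpoint, status string, started time.Time) {
	requestDuration.WithLabelValues(status, string(endpoint)).Observe(time.Since(started).Seconds())
}

A QueryData implementation would call observeRequest(backend.EndpointQueryData, "ok", start) on success, mirroring the instrumentContext(ctx, string(backend.EndpointQueryData), ...) calls in the hunks above.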
diff --git a/pkg/tsdb/cloudwatch/resource_handler.go b/pkg/tsdb/cloudwatch/resource_handler.go index 9107a76cb5c..2d4a9e839af 100644 --- a/pkg/tsdb/cloudwatch/resource_handler.go +++ b/pkg/tsdb/cloudwatch/resource_handler.go @@ -9,7 +9,6 @@ import ( "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/grafana-plugin-sdk-go/backend/log" - "github.com/grafana/grafana-plugin-sdk-go/backend/resource/httpadapter" "github.com/grafana/grafana/pkg/tsdb/cloudwatch/routes" ) @@ -38,7 +37,7 @@ type handleFn func(ctx context.Context, pluginCtx backend.PluginContext, paramet func handleResourceReq(handleFunc handleFn, logger log.Logger) func(rw http.ResponseWriter, req *http.Request) { return func(rw http.ResponseWriter, req *http.Request) { ctx := req.Context() - pluginContext := httpadapter.PluginConfigFromContext(ctx) + pluginContext := backend.PluginConfigFromContext(ctx) err := req.ParseForm() if err != nil { writeResponse(rw, http.StatusBadRequest, fmt.Sprintf("unexpected error %v", err), logger.FromContext(ctx)) diff --git a/pkg/tsdb/cloudwatch/routes/middleware.go b/pkg/tsdb/cloudwatch/routes/middleware.go index cf75a9cc3f3..3039c665787 100644 --- a/pkg/tsdb/cloudwatch/routes/middleware.go +++ b/pkg/tsdb/cloudwatch/routes/middleware.go @@ -3,8 +3,8 @@ package routes import ( "net/http" + "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/grafana-plugin-sdk-go/backend/log" - "github.com/grafana/grafana-plugin-sdk-go/backend/resource/httpadapter" "github.com/grafana/grafana/pkg/tsdb/cloudwatch/models" ) @@ -17,7 +17,7 @@ func ResourceRequestMiddleware(handleFunc models.RouteHandlerFunc, logger log.Lo } ctx := req.Context() - pluginContext := httpadapter.PluginConfigFromContext(ctx) + pluginContext := backend.PluginConfigFromContext(ctx) json, httpError := handleFunc(ctx, pluginContext, reqCtxFactory, req.URL.Query()) if httpError != nil { logger.FromContext(ctx).Error("Error handling resource request", "error", httpError.Message) diff --git a/pkg/tsdb/elasticsearch/instrumentation/instrumentation.go b/pkg/tsdb/elasticsearch/instrumentation/instrumentation.go index 7b6f5ae6d29..e64e340b64a 100644 --- a/pkg/tsdb/elasticsearch/instrumentation/instrumentation.go +++ b/pkg/tsdb/elasticsearch/instrumentation/instrumentation.go @@ -4,6 +4,7 @@ import ( "context" "time" + "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/grafana/pkg/infra/tracing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -18,12 +19,8 @@ var ( }, []string{"status", "endpoint"}) ) -const ( - EndpointQueryData = "queryData" -) - func UpdatePluginParsingResponseDurationSeconds(ctx context.Context, duration time.Duration, status string) { - histogram := pluginParsingResponseDurationSeconds.WithLabelValues(status, EndpointQueryData) + histogram := pluginParsingResponseDurationSeconds.WithLabelValues(status, string(backend.EndpointQueryData)) if traceID := tracing.TraceIDFromContext(ctx, true); traceID != "" { histogram.(prometheus.ExemplarObserver).ObserveWithExemplar(duration.Seconds(), prometheus.Labels{"traceID": traceID}) diff --git a/pkg/tsdb/grafana-testdata-datasource/scenarios.go b/pkg/tsdb/grafana-testdata-datasource/scenarios.go index 9114c3dc765..de265313f54 100644 --- a/pkg/tsdb/grafana-testdata-datasource/scenarios.go +++ b/pkg/tsdb/grafana-testdata-datasource/scenarios.go @@ -215,7 +215,7 @@ func instrumentScenarioHandler(logger log.Logger, scenario kinds.TestDataQueryTy defer span.End() ctxLogger := 
logger.FromContext(ctx) - ctxLogger.Debug("queryData", "scenario", scenario) + ctxLogger.Debug(string(backend.EndpointQueryData), "scenario", scenario) return fn(ctx, req) }) diff --git a/pkg/tsdb/graphite/admission_handler.go b/pkg/tsdb/graphite/admission_handler.go index 829324d844b..8580c6ed1d3 100644 --- a/pkg/tsdb/graphite/admission_handler.go +++ b/pkg/tsdb/graphite/admission_handler.go @@ -48,8 +48,8 @@ func (s *Service) MutateAdmission(ctx context.Context, req *backend.AdmissionReq default: return getBadRequest(fmt.Sprintf("expected apiVersion: v0alpha1, found: %s", settings.APIVersion)), nil } - if settings.URL != "" { - return getBadRequest("unsupported URL value"), nil + if settings.URL == "" { + return getBadRequest("missing URL value"), nil } pb, err := backend.DataSourceInstanceSettingsToProtoBytes(settings) diff --git a/pkg/tsdb/influxdb/influxql/buffered/response_parser.go b/pkg/tsdb/influxdb/influxql/buffered/response_parser.go index 91fc8b9967f..b6b840e6dc5 100644 --- a/pkg/tsdb/influxdb/influxql/buffered/response_parser.go +++ b/pkg/tsdb/influxdb/influxql/buffered/response_parser.go @@ -23,6 +23,14 @@ func ResponseParse(buf io.ReadCloser, statusCode int, query *models.Query) *back func parse(buf io.Reader, statusCode int, query *models.Query) *backend.DataResponse { response, jsonErr := parseJSON(buf) + if statusCode/100 != 2 { + errorStr := response.Error + if errorStr == "" { + errorStr = response.Message + } + return &backend.DataResponse{Error: fmt.Errorf("InfluxDB returned error: %s", errorStr)} + } + if jsonErr != nil { return &backend.DataResponse{Error: jsonErr} } diff --git a/pkg/tsdb/influxdb/influxql/buffered/response_parser_test.go b/pkg/tsdb/influxdb/influxql/buffered/response_parser_test.go index 3dab1ff1603..b1ec590c2d5 100644 --- a/pkg/tsdb/influxdb/influxql/buffered/response_parser_test.go +++ b/pkg/tsdb/influxdb/influxql/buffered/response_parser_test.go @@ -338,9 +338,15 @@ func TestInfluxdbResponseParser(t *testing.T) { }) t.Run("Influxdb response parser with top-level error", func(t *testing.T) { - result := ResponseParse(readJsonFile("error_on_top_level_response"), 200, generateQuery("Test raw query", "time_series", "")) + result := ResponseParse(readJsonFile("error_on_top_level_response"), 400, generateQuery("Test raw query", "time_series", "")) require.Nil(t, result.Frames) - require.EqualError(t, result.Error, "error parsing query: found THING") + require.EqualError(t, result.Error, "InfluxDB returned error: error parsing query: found THING") + }) + + t.Run("Influxdb response parser with error message", func(t *testing.T) { + result := ResponseParse(readJsonFile("invalid_response"), 400, generateQuery("Test raw query", "time_series", "")) + require.Nil(t, result.Frames) + require.EqualError(t, result.Error, "InfluxDB returned error: failed to parse query: found WERE, expected ; at line 1, char 38") }) t.Run("Influxdb response parser parseNumber nil", func(t *testing.T) { diff --git a/pkg/tsdb/influxdb/influxql/converter/converter.go b/pkg/tsdb/influxdb/influxql/converter/converter.go index 0cdbc010b73..337b24014b1 100644 --- a/pkg/tsdb/influxdb/influxql/converter/converter.go +++ b/pkg/tsdb/influxdb/influxql/converter/converter.go @@ -34,6 +34,26 @@ l1Fields: if rsp.Error != nil { return rsp } + case "error": + v, err := iter.ReadString() + if err != nil { + rsp.Error = err + } else { + rsp.Error = fmt.Errorf(v) + } + return rsp + case "code": + // we only care of the message + _, err := iter.Read() + if err != nil { + return rspErr(err) + } + 
case "message": + v, err := iter.Read() + if err != nil { + return rspErr(err) + } + return rspErr(fmt.Errorf("%s", v)) case "": if err != nil { return rspErr(err) @@ -41,11 +61,15 @@ l1Fields: break l1Fields default: v, err := iter.Read() - if err != nil { - rsp.Error = err - return rsp - } fmt.Printf("[ROOT] unsupported key: %s / %v\n\n", l1Field, v) + if err != nil { + if rsp != nil { + rsp.Error = err + return rsp + } else { + return rspErr(err) + } + } } } diff --git a/pkg/tsdb/influxdb/influxql/influxql.go b/pkg/tsdb/influxdb/influxql/influxql.go index 660e245a940..61879579f0e 100644 --- a/pkg/tsdb/influxdb/influxql/influxql.go +++ b/pkg/tsdb/influxdb/influxql/influxql.go @@ -177,11 +177,6 @@ func execute(ctx context.Context, tracer trace.Tracer, dsInfo *models.Datasource if err != nil { return backend.DataResponse{}, err } - - if res.StatusCode/100 != 2 { - return backend.DataResponse{Error: fmt.Errorf("InfluxDB returned error: %v", res.Body)}, nil - } - defer func() { if err := res.Body.Close(); err != nil { logger.Warn("Failed to close response body", "err", err) diff --git a/pkg/tsdb/influxdb/influxql/querydata/stream_parser.go b/pkg/tsdb/influxdb/influxql/querydata/stream_parser.go index 2efded14ed9..dde413c8fa4 100644 --- a/pkg/tsdb/influxdb/influxql/querydata/stream_parser.go +++ b/pkg/tsdb/influxdb/influxql/querydata/stream_parser.go @@ -23,6 +23,10 @@ func ResponseParse(buf io.ReadCloser, statusCode int, query *models.Query) *back iter := jsoniter.Parse(jsoniter.ConfigDefault, buf, 1024) r := converter.ReadInfluxQLStyleResult(iter, query) + if statusCode/100 != 2 { + return &backend.DataResponse{Error: fmt.Errorf("InfluxDB returned error: %s", r.Error)} + } + // The ExecutedQueryString can be viewed in QueryInspector in UI for i, frame := range r.Frames { if i == 0 { diff --git a/pkg/tsdb/influxdb/influxql/querydata/stream_parser_test.go b/pkg/tsdb/influxdb/influxql/querydata/stream_parser_test.go index 7e2f8951bbe..3c6222ea824 100644 --- a/pkg/tsdb/influxdb/influxql/querydata/stream_parser_test.go +++ b/pkg/tsdb/influxdb/influxql/querydata/stream_parser_test.go @@ -105,3 +105,11 @@ func TestParsingAsTimeSeriesWithoutTimeColumn(t *testing.T) { runQuery(t, f, "cardinality", "time_series", query) }) } + +func TestInfluxDBStreamingParser(t *testing.T) { + t.Run("Influxdb response parser with error message", func(t *testing.T) { + result := ResponseParse(readJsonFile("invalid_response"), 400, generateQuery("Test raw query", "time_series", "")) + require.Nil(t, result.Frames) + require.EqualError(t, result.Error, "InfluxDB returned error: failed to parse query: found WERE, expected ; at line 1, char 38") + }) +} diff --git a/pkg/tsdb/influxdb/influxql/testdata/invalid_response.json b/pkg/tsdb/influxdb/influxql/testdata/invalid_response.json new file mode 100644 index 00000000000..1d8d62eb9c5 --- /dev/null +++ b/pkg/tsdb/influxdb/influxql/testdata/invalid_response.json @@ -0,0 +1,4 @@ +{ + "code": "invalid", + "message": "failed to parse query: found WERE, expected ; at line 1, char 38" +} diff --git a/pkg/tsdb/influxdb/models/models.go b/pkg/tsdb/influxdb/models/models.go index 020b90742bd..2afab2f848b 100644 --- a/pkg/tsdb/influxdb/models/models.go +++ b/pkg/tsdb/influxdb/models/models.go @@ -32,6 +32,8 @@ type Select []QueryPart type Response struct { Results []Result Error string + Code string + Message string } type Result struct { diff --git a/pkg/tsdb/loki/instrumentation/instrumentation.go b/pkg/tsdb/loki/instrumentation/instrumentation.go index 8fadd73bcdf..342378eaf58 
100644 --- a/pkg/tsdb/loki/instrumentation/instrumentation.go +++ b/pkg/tsdb/loki/instrumentation/instrumentation.go @@ -4,6 +4,7 @@ import ( "context" "time" + "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/grafana/pkg/infra/tracing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -18,12 +19,8 @@ var ( }, []string{"status", "endpoint"}) ) -const ( - EndpointQueryData = "queryData" -) - func UpdatePluginParsingResponseDurationSeconds(ctx context.Context, duration time.Duration, status string) { - histogram := pluginParsingResponseDurationSeconds.WithLabelValues(status, EndpointQueryData) + histogram := pluginParsingResponseDurationSeconds.WithLabelValues(status, string(backend.EndpointQueryData)) if traceID := tracing.TraceIDFromContext(ctx, true); traceID != "" { histogram.(prometheus.ExemplarObserver).ObserveWithExemplar(duration.Seconds(), prometheus.Labels{"traceID": traceID}) diff --git a/pkg/tsdb/prometheus/prometheus.go b/pkg/tsdb/prometheus/prometheus.go index ef4e1df3105..123e0689103 100644 --- a/pkg/tsdb/prometheus/prometheus.go +++ b/pkg/tsdb/prometheus/prometheus.go @@ -45,6 +45,17 @@ func (s *Service) CheckHealth(ctx context.Context, req *backend.CheckHealthReque return s.lib.CheckHealth(ctx, req) } +func (s *Service) ValidateAdmission(ctx context.Context, req *backend.AdmissionRequest) (*backend.ValidationResponse, error) { + return s.lib.ValidateAdmission(ctx, req) +} + +func (s *Service) MutateAdmission(ctx context.Context, req *backend.AdmissionRequest) (*backend.MutationResponse, error) { + return s.lib.MutateAdmission(ctx, req) +} +func (s *Service) ConvertObject(ctx context.Context, req *backend.ConversionRequest) (*backend.ConversionResponse, error) { + return s.lib.ConvertObject(ctx, req) +} + func extendClientOpts(ctx context.Context, settings backend.DataSourceInstanceSettings, clientOpts *sdkhttpclient.Options) error { // Set SigV4 service namespace if clientOpts.SigV4 != nil { diff --git a/pkg/tsdb/tempo/grpc.go b/pkg/tsdb/tempo/grpc.go index 67da1725f86..4fc68e750b8 100644 --- a/pkg/tsdb/tempo/grpc.go +++ b/pkg/tsdb/tempo/grpc.go @@ -48,7 +48,28 @@ func newGrpcClient(ctx context.Context, settings backend.DataSourceInstanceSetti if err != nil { return nil, fmt.Errorf("error getting dial options: %w", err) } - clientConn, err := grpc.NewClient(onlyHost, dialOpts...) + + // grpc.Dial() is deprecated in favor of grpc.NewClient(), but grpc.NewClient() changed the default resolver to dns from passthrough. + // This is a problem because the getDialOpts() function appends a custom dialer to the dial options to support Grafana Cloud PDC. + // + // See the following quote from the grpc package documentation: + // One subtle difference between NewClient and Dial and DialContext is that the + // former uses "dns" as the default name resolver, while the latter use + // "passthrough" for backward compatibility. This distinction should not matter + // to most users, but could matter to legacy users that specify a custom dialer + // and expect it to receive the target string directly. + // https://github.com/grpc/grpc-go/blob/fa274d77904729c2893111ac292048d56dcf0bb1/clientconn.go#L209 + // + // Unfortunately, the passthrough resolver isn't exported by the grpc package, so we can't use it. + // The options are to continue using grpc.Dial() or implement a custom resolver. 
+ // Since the go-grpc package maintainers intend to continue supporting grpc.Dial() through the 1.x series, + // we'll continue using grpc.Dial() until we have a compelling reason or bandwidth to implement the custom resolver. + // Reference: https://github.com/grpc/grpc-go/blob/f199062ef31ddda54152e1ca5e3d15fb63903dc3/clientconn.go#L204 + // + // See this issue for more information: https://github.com/grpc/grpc-go/issues/7091 + // Ignore the lint check as this fails the build and for the reasons above. + // nolint:staticcheck + clientConn, err := grpc.Dial(onlyHost, dialOpts...) if err != nil { logger.Error("Error dialing gRPC client", "error", err, "URL", settings.URL, "function", logEntrypoint()) return nil, err diff --git a/pkg/web/macaron.go b/pkg/web/macaron.go index ceecf6dc08b..17afc06657a 100644 --- a/pkg/web/macaron.go +++ b/pkg/web/macaron.go @@ -139,6 +139,17 @@ func mwFromHandler(handler Handler) Middleware { } } +// a convenience function that is provided for users of contexthandler package (standalone apiservers) +// who have an implicit dependency on Macron in context but don't want to take a dependency on +// router additionally +func EmptyMacronMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + m := New() + c := m.createContext(writer, request) + next.ServeHTTP(writer, c.Req) // since c.Req has the newer context attached + }) +} + func (m *Macaron) createContext(rw http.ResponseWriter, req *http.Request) *Context { // NOTE: we have to explicitly copy the middleware chain here to avoid // passing a shared slice to the *Context, which leads to racy behavior in diff --git a/public/api-enterprise-spec.json b/public/api-enterprise-spec.json index a2c5df56352..a33e984f9e1 100644 --- a/public/api-enterprise-spec.json +++ b/public/api-enterprise-spec.json @@ -1556,6 +1556,34 @@ } } }, + "/reports/images/:image": { + "get": { + "description": "Available to org admins only and with a valid or expired license.\n\nYou need to have a permission with action `reports.settings:read`.", + "tags": [ + "reports", + "enterprise" + ], + "summary": "Get custom branding report image.", + "operationId": "getSettingsImage", + "responses": { + "200": { + "$ref": "#/responses/contentResponse" + }, + "401": { + "$ref": "#/responses/unauthorisedError" + }, + "403": { + "$ref": "#/responses/forbiddenError" + }, + "404": { + "$ref": "#/responses/notFoundError" + }, + "500": { + "$ref": "#/responses/internalServerError" + } + } + } + }, "/reports/render/csvs": { "get": { "description": "Available to all users and with a valid license.", @@ -1666,7 +1694,7 @@ "reports", "enterprise" ], - "summary": "Get settings.", + "summary": "Get report settings.", "operationId": "getReportSettings", "responses": { "200": { @@ -5455,6 +5483,18 @@ } } }, + "NavbarPreference": { + "type": "object", + "title": "NavbarPreference defines model for NavbarPreference.", + "properties": { + "savedItemIds": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, "NewApiKeyResult": { "type": "object", "properties": { @@ -5697,6 +5737,9 @@ "language": { "type": "string" }, + "navbar": { + "$ref": "#/definitions/NavbarPreference" + }, "queryHistory": { "$ref": "#/definitions/QueryHistoryPreference" }, @@ -5946,6 +5989,9 @@ "description": "Selected language (beta)", "type": "string" }, + "navbar": { + "$ref": "#/definitions/NavbarPreference" + }, "queryHistory": { "$ref": "#/definitions/QueryHistoryPreference" }, @@ -7935,6 +7981,9 @@ 
"language": { "type": "string" }, + "navbar": { + "$ref": "#/definitions/NavbarPreference" + }, "queryHistory": { "$ref": "#/definitions/QueryHistoryPreference" }, diff --git a/public/api-merged.json b/public/api-merged.json index 2fe5f57a88f..f30ece0f754 100644 --- a/public/api-merged.json +++ b/public/api-merged.json @@ -8051,6 +8051,34 @@ } } }, + "/reports/images/:image": { + "get": { + "description": "Available to org admins only and with a valid or expired license.\n\nYou need to have a permission with action `reports.settings:read`.", + "tags": [ + "reports", + "enterprise" + ], + "summary": "Get custom branding report image.", + "operationId": "getSettingsImage", + "responses": { + "200": { + "$ref": "#/responses/contentResponse" + }, + "401": { + "$ref": "#/responses/unauthorisedError" + }, + "403": { + "$ref": "#/responses/forbiddenError" + }, + "404": { + "$ref": "#/responses/notFoundError" + }, + "500": { + "$ref": "#/responses/internalServerError" + } + } + } + }, "/reports/render/csvs": { "get": { "description": "Available to all users and with a valid license.", @@ -8161,7 +8189,7 @@ "reports", "enterprise" ], - "summary": "Get settings.", + "summary": "Get report settings.", "operationId": "getReportSettings", "responses": { "200": { diff --git a/public/app/features/migrate-to-cloud/cloud/InfoPane.tsx b/public/app/features/migrate-to-cloud/cloud/InfoPane.tsx deleted file mode 100644 index 7f203730bbc..00000000000 --- a/public/app/features/migrate-to-cloud/cloud/InfoPane.tsx +++ /dev/null @@ -1,74 +0,0 @@ -import { css } from '@emotion/css'; - -import { GrafanaTheme2 } from '@grafana/data'; -import { Box, Stack, TextLink, useStyles2 } from '@grafana/ui'; -import { t, Trans } from 'app/core/internationalization'; - -import { InfoItem } from '../shared/InfoItem'; - -export const InfoPane = () => { - const styles = useStyles2(getStyles); - - return ( - - - - Some configuration from your self-managed Grafana instance can be automatically copied to this cloud stack. - - - - - - The migration process must be started from your self-managed Grafana instance. - -
- Log in to your self-managed instance and navigate to Administration, General, Migrate to Grafana Cloud.
- Select "Migrate this instance to Cloud".
- You'll be prompted for a migration token. Generate one from this screen.
- In your self-managed instance, select "Upload everything" to upload data sources and
- dashboards to this cloud stack.
- If some of your data sources will not work over the public internet, you’ll need to install Private Data
- Source Connect in your self-managed environment.
- - {t('migrate-to-cloud.get-started.configure-pdc-link', 'Configure PDC for this stack')} - -
- ); -}; - -const getStyles = (theme: GrafanaTheme2) => ({ - list: css({ - padding: 'revert', - }), -}); diff --git a/public/locales/en-US/grafana.json b/public/locales/en-US/grafana.json index b8fbb6294ff..a9f224fae5b 100644 --- a/public/locales/en-US/grafana.json +++ b/public/locales/en-US/grafana.json @@ -56,6 +56,39 @@ "pause": "Pause evaluation" }, "alerting": { + "central-alert-history": { + "details": { + "annotations": "Annotations", + "error": "Error loading rule for this event.", + "loading": "Loading...", + "no-annotations": "No annotations", + "no-recognized-state": "No recognized state", + "no-values": "No values", + "not-found": "Rule not found for this event.", + "not-grafana-rule": "Rule is not a Grafana rule", + "number-transitions": "State transitions for selected period", + "state": { + "alerting": "Alerting", + "error": "Error", + "no-data": "No data", + "normal": "Normal", + "pending": "Pending" + }, + "state-transitions": "State transition", + "unknown-event-state": "Unknown", + "unknown-rule": "Unknown", + "value-in-transition": "Value in transition" + }, + "error": "Something went wrong loading the alert state history", + "filter": { + "info": { + "label1": "Filter events using label querying without spaces, ex:", + "label2": "Invalid use of spaces:", + "label3": "Valid use of spaces:", + "label4": "Filter alerts using label querying without braces, ex:" + } + } + }, "contact-points": { "telegram": { "parse-mode-warning-body": "If you use a <1>parse_mode option other than <3>None, truncation may result in an invalid message, causing the notification to fail. For longer messages, we recommend using an alternative contact method.", @@ -173,17 +206,6 @@ "text": "No results found for your query" } }, - "central-alert-history": { - "error": "Something went wrong loading the alert state history", - "filter": { - "info": { - "label1": "Filter events using label querying without spaces, ex:", - "label2": "Invalid use of spaces:", - "label3": "Valid use of spaces:", - "label4": "Filter alerts using label querying without braces, ex:" - } - } - }, "clipboard-button": { "inline-toast": { "success": "Copied" @@ -879,6 +901,16 @@ } }, "migrate-to-cloud": { + "build-snapshot": { + "description": "This tool can migrate some resources from this installation to your cloud stack. To get started, you'll need to create a snapshot of this installation. Creating a snapshot typically takes less than two minutes. The snapshot is stored alongside this Grafana installation.", + "title": "No snapshot exists", + "when-complete": "Once the snapshot is complete, you will be able to upload it to your cloud stack." + }, + "building-snapshot": { + "description": "We're creating a point-in-time snapshot of the current state of this installation. Once the snapshot is complete. you'll be able to upload it to Grafana Cloud.", + "description-eta": "Creating a snapshot typically takes less than two minutes.", + "title": "Building installation snapshot" + }, "can-i-move": { "body": "Once you connect this installation to a cloud stack, you'll be able to upload data sources and dashboards.", "link-title": "Learn about migrating other settings", @@ -922,7 +954,7 @@ "step-3": "You'll be prompted for a migration token. 
Generate one from this screen.", "step-4": "In your self-managed instance, select \"Upload everything\" to upload data sources and dashboards to this cloud stack.", "step-5": "If some of your data sources will not work over the public internet, you’ll need to install Private Data Source Connect in your self-managed environment.", - "title": "How to get started" + "title": "Performing a migration" }, "is-it-secure": { "body": "Grafana Labs is committed to maintaining the highest standards of data privacy and security. By implementing industry-standard security technologies and procedures, we help protect our customers' data from unauthorized access, use, or disclosure.", @@ -930,12 +962,11 @@ "title": "Is it secure?" }, "migrate-to-this-stack": { - "body": "Some configuration from your self-managed Grafana instance can be automatically copied to this cloud stack.", + "body": "You can migrate some resources from your self-managed Grafana installation to this cloud stack. To do this securely, you'll need to generate a migration token. Your self-managed instance will use the token to authenticate with this cloud stack.", "link-title": "View the full migration guide", - "title": "Migrate configuration to this stack" + "title": "Let us help you migrate to this stack" }, "migration-token": { - "body": "Your self-managed Grafana instance will require a special authentication token to securely connect to this cloud stack.", "delete-modal-body": "If you've already used this token with a self-managed installation, that installation will no longer be able to upload content.", "delete-modal-cancel": "Cancel", "delete-modal-confirm": "Delete", @@ -951,8 +982,7 @@ "modal-field-description": "Copy the token now as you will not be able to see it again. Losing a token requires creating a new one.", "modal-field-label": "Token", "modal-title": "Migration token created", - "status": "Current status: <1>", - "title": "Migration token" + "status": "Current status: <1>" }, "pdc": { "body": "Exposing your data sources to the internet can raise security concerns. 
Private data source connect (PDC) allows Grafana Cloud to access your existing data sources over a secure network tunnel.", @@ -983,14 +1013,20 @@ "unknown": "Unknown" }, "summary": { + "cancel-snapshot": "Cancel snapshot", "disconnect": "Disconnect", "disconnect-error-description": "See the Grafana server logs for more details", "disconnect-error-title": "There was an error disconnecting", + "errored-resource-count": "Errors", "run-migration-error-description": "See the Grafana server logs for more details", "run-migration-error-title": "There was an error migrating your resources", + "snapshot-date": "Snapshot timestamp", + "snapshot-not-created": "Not yet created", "start-migration": "Build snapshot", + "successful-resource-count": "Successfully migrated", "target-stack-title": "Uploading to", - "upload-migration": "Upload & migrate snapshot" + "total-resource-count": "Total resources", + "upload-migration": "Upload snapshot" }, "token-status": { "active": "Token created and active", diff --git a/public/locales/pseudo-LOCALE/grafana.json b/public/locales/pseudo-LOCALE/grafana.json index da9480c587c..480a53ebd9b 100644 --- a/public/locales/pseudo-LOCALE/grafana.json +++ b/public/locales/pseudo-LOCALE/grafana.json @@ -56,6 +56,39 @@ "pause": "Päūşę ęväľūäŧįőʼn" }, "alerting": { + "central-alert-history": { + "details": { + "annotations": "Åʼnʼnőŧäŧįőʼnş", + "error": "Ēřřőř ľőäđįʼnģ řūľę ƒőř ŧĥįş ęvęʼnŧ.", + "loading": "Ŀőäđįʼnģ...", + "no-annotations": "Ńő äʼnʼnőŧäŧįőʼnş", + "no-recognized-state": "Ńő řęčőģʼnįžęđ şŧäŧę", + "no-values": "Ńő väľūęş", + "not-found": "Ŗūľę ʼnőŧ ƒőūʼnđ ƒőř ŧĥįş ęvęʼnŧ.", + "not-grafana-rule": "Ŗūľę įş ʼnőŧ ä Ğřäƒäʼnä řūľę", + "number-transitions": "Ŝŧäŧę ŧřäʼnşįŧįőʼnş ƒőř şęľęčŧęđ pęřįőđ", + "state": { + "alerting": "Åľęřŧįʼnģ", + "error": "Ēřřőř", + "no-data": "Ńő đäŧä", + "normal": "Ńőřmäľ", + "pending": "Pęʼnđįʼnģ" + }, + "state-transitions": "Ŝŧäŧę ŧřäʼnşįŧįőʼn", + "unknown-event-state": "Ůʼnĸʼnőŵʼn", + "unknown-rule": "Ůʼnĸʼnőŵʼn", + "value-in-transition": "Väľūę įʼn ŧřäʼnşįŧįőʼn" + }, + "error": "Ŝőmęŧĥįʼnģ ŵęʼnŧ ŵřőʼnģ ľőäđįʼnģ ŧĥę äľęřŧ şŧäŧę ĥįşŧőřy", + "filter": { + "info": { + "label1": "Fįľŧęř ęvęʼnŧş ūşįʼnģ ľäþęľ qūęřyįʼnģ ŵįŧĥőūŧ şpäčęş, ęχ:", + "label2": "Ĩʼnväľįđ ūşę őƒ şpäčęş:", + "label3": "Väľįđ ūşę őƒ şpäčęş:", + "label4": "Fįľŧęř äľęřŧş ūşįʼnģ ľäþęľ qūęřyįʼnģ ŵįŧĥőūŧ þřäčęş, ęχ:" + } + } + }, "contact-points": { "telegram": { "parse-mode-warning-body": "Ĩƒ yőū ūşę ä <1>päřşę_mőđę őpŧįőʼn őŧĥęř ŧĥäʼn <3>Ńőʼnę, ŧřūʼnčäŧįőʼn mäy řęşūľŧ įʼn äʼn įʼnväľįđ męşşäģę, čäūşįʼnģ ŧĥę ʼnőŧįƒįčäŧįőʼn ŧő ƒäįľ. Főř ľőʼnģęř męşşäģęş, ŵę řęčőmmęʼnđ ūşįʼnģ äʼn äľŧęřʼnäŧįvę čőʼnŧäčŧ męŧĥőđ.", @@ -173,17 +206,6 @@ "text": "Ńő řęşūľŧş ƒőūʼnđ ƒőř yőūř qūęřy" } }, - "central-alert-history": { - "error": "Ŝőmęŧĥįʼnģ ŵęʼnŧ ŵřőʼnģ ľőäđįʼnģ ŧĥę äľęřŧ şŧäŧę ĥįşŧőřy", - "filter": { - "info": { - "label1": "Fįľŧęř ęvęʼnŧş ūşįʼnģ ľäþęľ qūęřyįʼnģ ŵįŧĥőūŧ şpäčęş, ęχ:", - "label2": "Ĩʼnväľįđ ūşę őƒ şpäčęş:", - "label3": "Väľįđ ūşę őƒ şpäčęş:", - "label4": "Fįľŧęř äľęřŧş ūşįʼnģ ľäþęľ qūęřyįʼnģ ŵįŧĥőūŧ þřäčęş, ęχ:" - } - } - }, "clipboard-button": { "inline-toast": { "success": "Cőpįęđ" @@ -879,6 +901,16 @@ } }, "migrate-to-cloud": { + "build-snapshot": { + "description": "Ŧĥįş ŧőőľ čäʼn mįģřäŧę şőmę řęşőūřčęş ƒřőm ŧĥįş įʼnşŧäľľäŧįőʼn ŧő yőūř čľőūđ şŧäčĸ. Ŧő ģęŧ şŧäřŧęđ, yőū'ľľ ʼnęęđ ŧő čřęäŧę ä şʼnäpşĥőŧ őƒ ŧĥįş įʼnşŧäľľäŧįőʼn. Cřęäŧįʼnģ ä şʼnäpşĥőŧ ŧypįčäľľy ŧäĸęş ľęşş ŧĥäʼn ŧŵő mįʼnūŧęş. 
Ŧĥę şʼnäpşĥőŧ įş şŧőřęđ äľőʼnģşįđę ŧĥįş Ğřäƒäʼnä įʼnşŧäľľäŧįőʼn.", + "title": "Ńő şʼnäpşĥőŧ ęχįşŧş", + "when-complete": "Øʼnčę ŧĥę şʼnäpşĥőŧ įş čőmpľęŧę, yőū ŵįľľ þę äþľę ŧő ūpľőäđ įŧ ŧő yőūř čľőūđ şŧäčĸ." + }, + "building-snapshot": { + "description": "Ŵę'řę čřęäŧįʼnģ ä pőįʼnŧ-įʼn-ŧįmę şʼnäpşĥőŧ őƒ ŧĥę čūřřęʼnŧ şŧäŧę őƒ ŧĥįş įʼnşŧäľľäŧįőʼn. Øʼnčę ŧĥę şʼnäpşĥőŧ įş čőmpľęŧę. yőū'ľľ þę äþľę ŧő ūpľőäđ įŧ ŧő Ğřäƒäʼnä Cľőūđ.", + "description-eta": "Cřęäŧįʼnģ ä şʼnäpşĥőŧ ŧypįčäľľy ŧäĸęş ľęşş ŧĥäʼn ŧŵő mįʼnūŧęş.", + "title": "ßūįľđįʼnģ įʼnşŧäľľäŧįőʼn şʼnäpşĥőŧ" + }, "can-i-move": { "body": "Øʼnčę yőū čőʼnʼnęčŧ ŧĥįş įʼnşŧäľľäŧįőʼn ŧő ä čľőūđ şŧäčĸ, yőū'ľľ þę äþľę ŧő ūpľőäđ đäŧä şőūřčęş äʼnđ đäşĥþőäřđş.", "link-title": "Ŀęäřʼn äþőūŧ mįģřäŧįʼnģ őŧĥęř şęŧŧįʼnģş", @@ -922,7 +954,7 @@ "step-3": "Ÿőū'ľľ þę přőmpŧęđ ƒőř ä mįģřäŧįőʼn ŧőĸęʼn. Ğęʼnęřäŧę őʼnę ƒřőm ŧĥįş şčřęęʼn.", "step-4": "Ĩʼn yőūř şęľƒ-mäʼnäģęđ įʼnşŧäʼnčę, şęľęčŧ \"Ůpľőäđ ęvęřyŧĥįʼnģ\" ŧő ūpľőäđ đäŧä şőūřčęş äʼnđ đäşĥþőäřđş ŧő ŧĥįş čľőūđ şŧäčĸ.", "step-5": "Ĩƒ şőmę őƒ yőūř đäŧä şőūřčęş ŵįľľ ʼnőŧ ŵőřĸ ővęř ŧĥę pūþľįč įʼnŧęřʼnęŧ, yőū’ľľ ʼnęęđ ŧő įʼnşŧäľľ Přįväŧę Đäŧä Ŝőūřčę Cőʼnʼnęčŧ įʼn yőūř şęľƒ-mäʼnäģęđ ęʼnvįřőʼnmęʼnŧ.", - "title": "Ħőŵ ŧő ģęŧ şŧäřŧęđ" + "title": "Pęřƒőřmįʼnģ ä mįģřäŧįőʼn" }, "is-it-secure": { "body": "Ğřäƒäʼnä Ŀäþş įş čőmmįŧŧęđ ŧő mäįʼnŧäįʼnįʼnģ ŧĥę ĥįģĥęşŧ şŧäʼnđäřđş őƒ đäŧä přįväčy äʼnđ şęčūřįŧy. ßy įmpľęmęʼnŧįʼnģ įʼnđūşŧřy-şŧäʼnđäřđ şęčūřįŧy ŧęčĥʼnőľőģįęş äʼnđ přőčęđūřęş, ŵę ĥęľp přőŧęčŧ őūř čūşŧőmęřş' đäŧä ƒřőm ūʼnäūŧĥőřįžęđ äččęşş, ūşę, őř đįşčľőşūřę.", @@ -930,12 +962,11 @@ "title": "Ĩş įŧ şęčūřę?" }, "migrate-to-this-stack": { - "body": "Ŝőmę čőʼnƒįģūřäŧįőʼn ƒřőm yőūř şęľƒ-mäʼnäģęđ Ğřäƒäʼnä įʼnşŧäʼnčę čäʼn þę äūŧőmäŧįčäľľy čőpįęđ ŧő ŧĥįş čľőūđ şŧäčĸ.", + "body": "Ÿőū čäʼn mįģřäŧę şőmę řęşőūřčęş ƒřőm yőūř şęľƒ-mäʼnäģęđ Ğřäƒäʼnä įʼnşŧäľľäŧįőʼn ŧő ŧĥįş čľőūđ şŧäčĸ. Ŧő đő ŧĥįş şęčūřęľy, yőū'ľľ ʼnęęđ ŧő ģęʼnęřäŧę ä mįģřäŧįőʼn ŧőĸęʼn. Ÿőūř şęľƒ-mäʼnäģęđ įʼnşŧäʼnčę ŵįľľ ūşę ŧĥę ŧőĸęʼn ŧő äūŧĥęʼnŧįčäŧę ŵįŧĥ ŧĥįş čľőūđ şŧäčĸ.", "link-title": "Vįęŵ ŧĥę ƒūľľ mįģřäŧįőʼn ģūįđę", - "title": "Mįģřäŧę čőʼnƒįģūřäŧįőʼn ŧő ŧĥįş şŧäčĸ" + "title": "Ŀęŧ ūş ĥęľp yőū mįģřäŧę ŧő ŧĥįş şŧäčĸ" }, "migration-token": { - "body": "Ÿőūř şęľƒ-mäʼnäģęđ Ğřäƒäʼnä įʼnşŧäʼnčę ŵįľľ řęqūįřę ä şpęčįäľ äūŧĥęʼnŧįčäŧįőʼn ŧőĸęʼn ŧő şęčūřęľy čőʼnʼnęčŧ ŧő ŧĥįş čľőūđ şŧäčĸ.", "delete-modal-body": "Ĩƒ yőū'vę äľřęäđy ūşęđ ŧĥįş ŧőĸęʼn ŵįŧĥ ä şęľƒ-mäʼnäģęđ įʼnşŧäľľäŧįőʼn, ŧĥäŧ įʼnşŧäľľäŧįőʼn ŵįľľ ʼnő ľőʼnģęř þę äþľę ŧő ūpľőäđ čőʼnŧęʼnŧ.", "delete-modal-cancel": "Cäʼnčęľ", "delete-modal-confirm": "Đęľęŧę", @@ -951,8 +982,7 @@ "modal-field-description": "Cőpy ŧĥę ŧőĸęʼn ʼnőŵ äş yőū ŵįľľ ʼnőŧ þę äþľę ŧő şęę įŧ äģäįʼn. Ŀőşįʼnģ ä ŧőĸęʼn řęqūįřęş čřęäŧįʼnģ ä ʼnęŵ őʼnę.", "modal-field-label": "Ŧőĸęʼn", "modal-title": "Mįģřäŧįőʼn ŧőĸęʼn čřęäŧęđ", - "status": "Cūřřęʼnŧ şŧäŧūş: <1>", - "title": "Mįģřäŧįőʼn ŧőĸęʼn" + "status": "Cūřřęʼnŧ şŧäŧūş: <1>" }, "pdc": { "body": "Ēχpőşįʼnģ yőūř đäŧä şőūřčęş ŧő ŧĥę įʼnŧęřʼnęŧ čäʼn řäįşę şęčūřįŧy čőʼnčęřʼnş. 
Přįväŧę đäŧä şőūřčę čőʼnʼnęčŧ (PĐC) äľľőŵş Ğřäƒäʼnä Cľőūđ ŧő äččęşş yőūř ęχįşŧįʼnģ đäŧä şőūřčęş ővęř ä şęčūřę ʼnęŧŵőřĸ ŧūʼnʼnęľ.", @@ -983,14 +1013,20 @@ "unknown": "Ůʼnĸʼnőŵʼn" }, "summary": { + "cancel-snapshot": "Cäʼnčęľ şʼnäpşĥőŧ", "disconnect": "Đįşčőʼnʼnęčŧ", "disconnect-error-description": "Ŝęę ŧĥę Ğřäƒäʼnä şęřvęř ľőģş ƒőř mőřę đęŧäįľş", "disconnect-error-title": "Ŧĥęřę ŵäş äʼn ęřřőř đįşčőʼnʼnęčŧįʼnģ", + "errored-resource-count": "Ēřřőřş", "run-migration-error-description": "Ŝęę ŧĥę Ğřäƒäʼnä şęřvęř ľőģş ƒőř mőřę đęŧäįľş", "run-migration-error-title": "Ŧĥęřę ŵäş äʼn ęřřőř mįģřäŧįʼnģ yőūř řęşőūřčęş", + "snapshot-date": "Ŝʼnäpşĥőŧ ŧįmęşŧämp", + "snapshot-not-created": "Ńőŧ yęŧ čřęäŧęđ", "start-migration": "ßūįľđ şʼnäpşĥőŧ", + "successful-resource-count": "Ŝūččęşşƒūľľy mįģřäŧęđ", "target-stack-title": "Ůpľőäđįʼnģ ŧő", - "upload-migration": "Ůpľőäđ & mįģřäŧę şʼnäpşĥőŧ" + "total-resource-count": "Ŧőŧäľ řęşőūřčęş", + "upload-migration": "Ůpľőäđ şʼnäpşĥőŧ" }, "token-status": { "active": "Ŧőĸęʼn čřęäŧęđ äʼnđ äčŧįvę", diff --git a/public/openapi3.json b/public/openapi3.json index 46d1e24a1c3..01b0937f681 100644 --- a/public/openapi3.json +++ b/public/openapi3.json @@ -21461,6 +21461,34 @@ ] } }, + "/reports/images/:image": { + "get": { + "description": "Available to org admins only and with a valid or expired license.\n\nYou need to have a permission with action `reports.settings:read`.", + "operationId": "getSettingsImage", + "responses": { + "200": { + "$ref": "#/components/responses/contentResponse" + }, + "401": { + "$ref": "#/components/responses/unauthorisedError" + }, + "403": { + "$ref": "#/components/responses/forbiddenError" + }, + "404": { + "$ref": "#/components/responses/notFoundError" + }, + "500": { + "$ref": "#/components/responses/internalServerError" + } + }, + "summary": "Get custom branding report image.", + "tags": [ + "reports", + "enterprise" + ] + } + }, "/reports/render/csvs": { "get": { "description": "Available to all users and with a valid license.", @@ -21592,7 +21620,7 @@ "$ref": "#/components/responses/internalServerError" } }, - "summary": "Get settings.", + "summary": "Get report settings.", "tags": [ "reports", "enterprise" diff --git a/public/sass/_angular.scss b/public/sass/_angular.scss index 7e409b8a435..080cacd5866 100644 --- a/public/sass/_angular.scss +++ b/public/sass/_angular.scss @@ -2,6 +2,7 @@ // once angular is disabled, this file can be deleted @use 'sass:map'; +@use 'sass:color'; .edit-tab-content { flex-grow: 1; @@ -1935,3 +1936,96 @@ $easing: cubic-bezier(0, 0, 0.265, 1); width: 100%; } } + +// Base classes +.label, +.badge { + display: inline-block; + padding: 2px 4px; + font-size: $font-size-base * 0.846; + font-weight: $font-weight-semi-bold; + line-height: 14px; // ensure proper line-height if floated + color: $white; + vertical-align: baseline; + white-space: nowrap; + text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); + background-color: $gray-1; +} + +// Labels & Badges +.label-tag { + background-color: $purple; + color: color.adjust($white, $lightness: -5%); + white-space: nowrap; + border-radius: 3px; + text-shadow: none; + font-size: 12px; + padding: 0px 6px; + line-height: 20px; + height: 20px; + + svg { + margin-bottom: 0; + } + + &:hover { + opacity: 0.85; + background-color: color.adjust($purple, $lightness: -10%); + } +} + +.query-part__link { + cursor: pointer; + + &--no-value { + color: $text-muted; + } +} + +.grafana-info-box { + position: relative; + padding: $space-lg; + background-color: $empty-list-cta-bg; + border-left: 3px 
solid $info-box-border-color; + margin-bottom: $space-md; + margin-right: $space-xs; + box-shadow: $card-shadow; + flex-grow: 1; + + h5 { + margin-bottom: $spacer; + } + ul { + padding-left: $spacer * 1.5; + } + + code { + @include font-family-monospace(); + font-size: $font-size-base - 2; + background-color: $code-tag-bg; + color: $text-color; + border: 1px solid $code-tag-border; + border-radius: 4px; + } + + p:last-child { + margin-bottom: 0; + } + + a { + @extend .external-link; + } + + &--max-lg { + max-width: map.get($grid-breakpoints, 'lg'); + } +} + +.grafana-info-box__close { + text-align: center; + display: block; + color: $link-color !important; + height: 0; + position: relative; + top: -9px; +} diff --git a/public/sass/_grafana.scss b/public/sass/_grafana.scss index e4108aa327f..ffff5a0b999 100644 --- a/public/sass/_grafana.scss +++ b/public/sass/_grafana.scss @@ -16,14 +16,10 @@ // COMPONENTS @import 'components/buttons'; @import 'components/alerts'; -@import 'components/tags'; @import 'components/gf-form'; -@import 'components/filter-table'; @import 'components/modals'; @import 'components/dropdown'; -@import 'components/infobox'; @import 'components/query_editor'; -@import 'components/query_part'; @import 'components/dashboard_grid'; // PAGES diff --git a/public/sass/components/_filter-table.scss b/public/sass/components/_filter-table.scss deleted file mode 100644 index 8800835b994..00000000000 --- a/public/sass/components/_filter-table.scss +++ /dev/null @@ -1,92 +0,0 @@ -// ========================================================================== -// FILTER TABLE -// ========================================================================== - -// Table -// -------------------------------------------------------------------------- - -.filter-table * { - box-sizing: border-box; -} - -.filter-table { - width: 100%; - border-collapse: separate; - - tbody { - tr:nth-child(odd) { - background: $table-bg-odd; - } - } - - th { - width: auto; - padding: $space-inset-squish-md; - text-align: left; - line-height: 30px; - height: 30px; - white-space: nowrap; - } - - td { - padding: $space-inset-squish-md; - line-height: 30px; - height: 30px; - white-space: nowrap; - - &.filter-table__switch-cell { - padding: 0; - border-right: 3px solid $page-bg; - } - } - - .link-td { - padding: 0; - line-height: 30px; - height: 30px; - white-space: nowrap; - - &.filter-table__switch-cell { - padding: 0; - border-right: 3px solid $page-bg; - } - - a { - display: block; - padding: 0px $space-sm; - height: 30px; - } - } - - .ellipsis { - display: block; - width: 100%; - white-space: nowrap; - overflow: hidden; - text-overflow: ellipsis; - } - - .expanded { - border-color: $panel-bg; - } - - .expanded > td { - padding-bottom: 0; - } - - .filter-table__avatar { - width: 25px; - height: 25px; - border-radius: 50%; - } - - &--hover { - tbody tr:hover { - background: $table-bg-hover; - } - } -} -.filter-table__weak-italic { - font-style: italic; - color: $text-color-weak; -} diff --git a/public/sass/components/_infobox.scss b/public/sass/components/_infobox.scss deleted file mode 100644 index bae8c24fa13..00000000000 --- a/public/sass/components/_infobox.scss +++ /dev/null @@ -1,48 +0,0 @@ -@use 'sass:map'; -.grafana-info-box { - position: relative; - padding: $space-lg; - background-color: $empty-list-cta-bg; - border-left: 3px solid $info-box-border-color; - margin-bottom: $space-md; - margin-right: $space-xs; - box-shadow: $card-shadow; - flex-grow: 1; - - h5 { - margin-bottom: $spacer; - } - ul { - 
padding-left: $spacer * 1.5;
-  }
-
-  code {
-    @include font-family-monospace();
-    font-size: $font-size-base - 2;
-    background-color: $code-tag-bg;
-    color: $text-color;
-    border: 1px solid $code-tag-border;
-    border-radius: 4px;
-  }
-
-  p:last-child {
-    margin-bottom: 0;
-  }
-
-  a {
-    @extend .external-link;
-  }
-
-  &--max-lg {
-    max-width: map.get($grid-breakpoints, 'lg');
-  }
-}
-
-.grafana-info-box__close {
-  text-align: center;
-  display: block;
-  color: $link-color !important;
-  height: 0;
-  position: relative;
-  top: -9px;
-}
diff --git a/public/sass/components/_query_editor.scss b/public/sass/components/_query_editor.scss
index 99d1229bfe0..56f678e5dbe 100644
--- a/public/sass/components/_query_editor.scss
+++ b/public/sass/components/_query_editor.scss
@@ -13,12 +13,6 @@
 .tight-form-func {
   background: $tight-form-func-bg;
-
-  &.show-function-controls {
-    padding-top: 5px;
-    min-width: 100px;
-    text-align: center;
-  }
 }
 
 input[type='text'].tight-form-func-param {
@@ -53,12 +47,6 @@ input[type='text'].tight-form-func-param {
 .tight-form-func {
   background: $tight-form-func-bg;
-
-  &.show-function-controls {
-    padding-top: 5px;
-    min-width: 100px;
-    text-align: center;
-  }
 }
 
 .rst-text::before {
diff --git a/public/sass/components/_query_part.scss b/public/sass/components/_query_part.scss
deleted file mode 100644
index 6ec484e841a..00000000000
--- a/public/sass/components/_query_part.scss
+++ /dev/null
@@ -1,29 +0,0 @@
-.query-part {
-  background-color: $tight-form-func-bg;
-
-  &.show-function-controls {
-    padding-top: 5px;
-    min-width: 100px;
-    text-align: center;
-  }
-
-  .query-part__last {
-    display: none;
-  }
-
-  &:hover .query-part__last {
-    display: inline;
-  }
-
-  &:hover {
-    background: $tight-form-func-highlight-bg;
-  }
-}
-
-.query-part__link {
-  cursor: pointer;
-
-  &--no-value {
-    color: $text-muted;
-  }
-}
diff --git a/public/sass/components/_tags.scss b/public/sass/components/_tags.scss
deleted file mode 100644
index 0848dca0dfd..00000000000
--- a/public/sass/components/_tags.scss
+++ /dev/null
@@ -1,59 +0,0 @@
-@use 'sass:color';
-// Base classes
-.label,
-.badge {
-  display: inline-block;
-  padding: 2px 4px;
-  font-size: $font-size-base * 0.846;
-  font-weight: $font-weight-semi-bold;
-  line-height: 14px; // ensure proper line-height if floated
-  color: $white;
-  vertical-align: baseline;
-  white-space: nowrap;
-  text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
-  background-color: $gray-1;
-}
-
-// Labels & Badges
-.label-tag {
-  background-color: $purple;
-  color: color.adjust($white, $lightness: -5%);
-  white-space: nowrap;
-  border-radius: 3px;
-  text-shadow: none;
-  font-size: 12px;
-  padding: 0px 6px;
-  line-height: 20px;
-  height: 20px;
-
-  svg {
-    margin-bottom: 0;
-  }
-
-  .icon-tag {
-    position: relative;
-    top: 1px;
-    padding-right: 4px;
-  }
-
-  &.muted {
-    opacity: 0.85;
-    background-color: color.adjust($purple, $lightness: -10%);
-    color: $text-muted;
-  }
-
-  &:hover {
-    opacity: 0.85;
-    background-color: color.adjust($purple, $lightness: -10%);
-  }
-
-  &--gray {
-    opacity: 0.85;
-    background-color: $gray-1;
-    border-color: $gray-2;
-
-    &:hover {
-      background-color: $gray-1;
-    }
-  }
-}
diff --git a/scripts/drone/events/pr.star b/scripts/drone/events/pr.star
index a67b0956edc..9a57ec4cc53 100644
--- a/scripts/drone/events/pr.star
+++ b/scripts/drone/events/pr.star
@@ -87,7 +87,7 @@ def pr_pipelines():
         ),
         verify_storybook(
             get_pr_trigger(
-                exclude_paths = ["pkg/**", "packaging/**", "go.sum", "go.mod"],
+                include_paths = ["packages/grafana-ui/**"],
             ),
             ver_mode,
         ),
diff --git a/scripts/drone/steps/lib.star b/scripts/drone/steps/lib.star
index 734bb553e20..02894677e15 100644
--- a/scripts/drone/steps/lib.star
+++ b/scripts/drone/steps/lib.star
@@ -99,7 +99,7 @@ def clone_enterprise_step_pr(source = "${DRONE_COMMIT}", target = "main", canFai
         check = []
     else:
         check = [
-            'is_fork=$(curl "https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" | jq .head.repo.fork)',
+            'is_fork=$(curl --retry 5 "https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" | jq .head.repo.fork)',
             'if [ "$is_fork" != false ]; then return 1; fi',  # Only clone if we're confident that 'fork' is 'false'. Fail if it's also empty.
         ]
@@ -218,8 +218,8 @@ def validate_openapi_spec_step():
         ],
     }
 
-def dockerize_step(name, hostname, port):
-    return {
+def dockerize_step(name, hostname, port, canFail = False):
+    step = {
         "name": name,
         "image": images["dockerize"],
         "commands": [
@@ -227,6 +227,11 @@ def dockerize_step(name, hostname, port):
         ],
     }
 
+    if canFail:
+        step["failure"] = "ignore"
+
+    return step
+
 def build_storybook_step(ver_mode):
     return {
         "name": "build-storybook",
@@ -799,7 +804,7 @@ def e2e_storybook_step():
             "PORT": "9001",
         },
         "commands": [
-            "npx wait-on@7.0.1 http://$HOST:$PORT",
+            "npx wait-on@7.2.0 -t 1m http://$HOST:$PORT",
             "yarn e2e:storybook",
         ],
     }
@@ -959,7 +964,7 @@ def publish_images_step(ver_mode, docker_repo, trigger = None):
 
     return step
 
-def integration_tests_steps(name, cmds, hostname = None, port = None, environment = None):
+def integration_tests_steps(name, cmds, hostname = None, port = None, environment = None, canFail = False):
     """Integration test steps
 
     Args:
@@ -968,6 +973,7 @@ def integration_tests_steps(name, cmds, hostname = None, port = None, environmen
         hostname: the hostname where the remote server is available.
         port: the port where the remote server is available.
         environment: Any extra environment variables needed to run the integration tests.
+        canFail: controls whether the step can fail.
 
     Returns:
         A list of drone steps. If a hostname / port were provided, then a step to wait for the remove server to be
@@ -988,6 +994,9 @@ def integration_tests_steps(name, cmds, hostname = None, port = None, environmen
         ] + cmds,
     }
 
+    if canFail:
+        step["failure"] = "ignore"
+
     if environment:
         step["environment"] = environment
 
@@ -1064,7 +1073,7 @@ def remote_alertmanager_integration_tests_steps():
         "AM_URL": "http://mimir_backend:8080",
     }
 
-    return integration_tests_steps("remote-alertmanager", cmds, "mimir_backend", "8080", environment = environment)
+    return integration_tests_steps("remote-alertmanager", cmds, "mimir_backend", "8080", environment = environment, canFail = True)
 
 def memcached_integration_tests_steps():
     cmds = [
diff --git a/scripts/generate-rtk-apis.ts b/scripts/generate-rtk-apis.ts
index 72724c99a84..f965b2c044d 100644
--- a/scripts/generate-rtk-apis.ts
+++ b/scripts/generate-rtk-apis.ts
@@ -30,6 +30,11 @@ const config: ConfigFile = {
        'getDashboardByUid',
      ],
    },
+    '../public/app/features/preferences/api/user/endpoints.gen.ts': {
+      apiFile: '../public/app/features/preferences/api/user/baseAPI.ts',
+      apiImport: 'baseAPI',
+      filterEndpoints: ['getUserPreferences', 'updateUserPreferences', 'patchUserPreferences'],
+    },
   },
 };
diff --git a/scripts/webpack/webpack.dev.js b/scripts/webpack/webpack.dev.js
index aea468e7959..df92c8d811c 100644
--- a/scripts/webpack/webpack.dev.js
+++ b/scripts/webpack/webpack.dev.js
@@ -52,6 +52,10 @@ module.exports = (env = {}) => {
         // the same singletons
         '@grafana/runtime': path.resolve(__dirname, '../../packages/grafana-runtime'),
         '@grafana/data': path.resolve(__dirname, '../../packages/grafana-data'),
+
+        // This is required to correctly resolve react-router-dom when linking with
+        // local version of @grafana/scenes
+        'react-router-dom': path.resolve('./node_modules/react-router-dom'),
       },
     },
diff --git a/yarn.lock b/yarn.lock
index 7b2268ecc40..2bb91a3ab8a 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -15920,9 +15920,9 @@ __metadata:
   linkType: hard
 
 "fast-loops@npm:^1.1.3":
-  version: 1.1.3
-  resolution: "fast-loops@npm:1.1.3"
-  checksum: 10/1bf9f102d8ed48a8c8304e2b27fd32afa65d370498db9b49d5762696ac4aa8c55593d505c142c2b7e25ca79f45207c4b25f778afd80f35df98cb2caaaf9609b7
+  version: 1.1.4
+  resolution: "fast-loops@npm:1.1.4"
+  checksum: 10/52516fc8bb95a60e512271e731c4dc7b7672af90c5e54681004ee2f509d6ccc8e62d5222e731377dafd48a31218f915fd6d0d02efe602b1b822e1ff93994d2a6
   languageName: node
   linkType: hard