Merge branch 'main' into jd/add-forgot-output

Signed-off-by: Christian Mesh <christianmesh1@gmail.com>
Christian Mesh 2025-02-24 14:46:00 -05:00 committed by GitHub
commit bd988cf2f8
GPG Key ID: B5690EEEBB952194
149 changed files with 6066 additions and 1389 deletions

View File

@ -1,6 +1,6 @@
{
"name": "Go",
"image": "mcr.microsoft.com/devcontainers/go:1-1.21-bullseye",
"image": "mcr.microsoft.com/devcontainers/go:1-1.22-bullseye",
"customizations": {
"vscode": {
"extensions": [

View File

@ -23,7 +23,14 @@ body:
* Set defaults on (or omit) any variables. The person reproducing it should not need to invent variable settings
* If multiple steps are required, such as running tofu twice, consider scripting it in a simple shell script. Providing a script can be easier than explaining what changes to make to the config between runs.
* Omit any unneeded complexity: remove variables, conditional statements, functions, modules, providers, and resources that are not needed to trigger the bug
- type: textarea
id: community-note
attributes:
label: Community note
description: Please leave this note unchanged.
value: |
> [!TIP]
> 👋 Hi there, OpenTofu community! The OpenTofu team prioritizes issues based on upvotes. Please make sure to upvote this issue and describe how it affects you in detail in the comments to show your support.
- type: textarea
id: tf-version
attributes:

View File

@ -11,7 +11,20 @@ body:
attributes:
value: |
# Thank you for opening a feature request.
In order to make your feature request a success, here are some simple tips to follow:
1. Try to describe what you need to achieve rather than how you would like OpenTofu to change.
2. Be as specific as possible. Overarching large changes to OpenTofu have a lower chance of getting accepted than specific changes.
3. Describe how it affects your current project specifically. Try to support it with specific code and describe why the current situation is unsatisfactory.
- type: textarea
id: community-note
attributes:
label: Community note
description: Please leave this note unchanged.
value: |
> [!TIP]
> 👋 Hi there, OpenTofu community! The OpenTofu team prioritizes issues based on upvotes. Please make sure to upvote this issue and describe how it affects you in detail in the comments to show your support.
- type: textarea
id: tf-version
attributes:
@ -60,6 +73,16 @@ body:
validations:
required: false
- type: textarea
id: tf-workarounds
attributes:
label: Workarounds and Alternatives
description: |
What workarounds and alternatives have you tried? What worked and what didn't? How would this proposal make life easier compared to these solutions?
placeholder:
value:
validations:
required: true
- type: textarea
id: tf-references
attributes:

View File

@ -15,7 +15,7 @@ jobs:
pull-requests: write
steps:
- name: Checkout
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Check if backport label exists in any of the labels on the pull request
id: check_backport_label
run: |

View File

@ -41,8 +41,8 @@ jobs:
runs-on: ${{ inputs.runson }}
name: OpenTofu ${{ inputs.goos }} ${{ inputs.goarch }} v${{ inputs.product-version }}
steps:
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version: ${{ inputs.go-version }}
- name: Determine artifact basename
@ -54,7 +54,7 @@ jobs:
GO_LDFLAGS: ${{ inputs.ld-flags }}
ACTIONSOS: ${{ inputs.runson }}
CGO_ENABLED: ${{ inputs.cgo-enabled }}
uses: hashicorp/actions-go-build@v0.1.7
uses: hashicorp/actions-go-build@37358f6098ef21b09542d84a9814ebb843aa4e3e # v1.0.0
with:
bin_name: ${{ inputs.bin-name }}
product_name: ${{ inputs.product-name }}
@ -68,13 +68,13 @@ jobs:
set -x
go build -ldflags "${{ inputs.ld-flags }}" -o dist/ ./cmd/tofu
zip -r -j out/${{ env.ARTIFACT_BASENAME }} dist/
- uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
- uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
with:
name: ${{ env.ARTIFACT_BASENAME }}
path: out/${{ env.ARTIFACT_BASENAME }}
if-no-files-found: error
- if: ${{ inputs.goos == 'linux' }}
uses: hashicorp/actions-packaging-linux@v1
uses: hashicorp/actions-packaging-linux@514d75d0961adeddf1f928fb93b82f41735fc488 # v1.6.0
with:
name: "opentofu"
description: "OpenTofu enables you to safely and predictably create, change, and improve infrastructure. It is an open source tool that codifies APIs into declarative configuration files that can be shared amongst team members, treated as code, edited, reviewed, and versioned."
@ -92,13 +92,13 @@ jobs:
echo "RPM_PACKAGE=$(basename out/*.rpm)" >> $GITHUB_ENV
echo "DEB_PACKAGE=$(basename out/*.deb)" >> $GITHUB_ENV
- if: ${{ inputs.goos == 'linux' }}
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
with:
name: ${{ env.RPM_PACKAGE }}
path: out/${{ env.RPM_PACKAGE }}
if-no-files-found: error
- if: ${{ inputs.goos == 'linux' }}
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
with:
name: ${{ env.DEB_PACKAGE }}
path: out/${{ env.DEB_PACKAGE }}

View File

@ -35,7 +35,7 @@ jobs:
pkg-name: ${{ steps.get-pkg-name.outputs.pkg-name }}
steps:
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Get Package Name
id: get-pkg-name
run: |
@ -43,7 +43,7 @@ jobs:
echo "pkg-name=${pkg_name}" | tee -a "${GITHUB_OUTPUT}"
- name: Decide version number
id: get-product-version
uses: hashicorp/actions-set-product-version@v1
uses: hashicorp/actions-set-product-version@b426ea77cad0389738a8a81d89e933146a3ba97f # unreleased, untagged (upgrade of actions/checkout)
- name: Determine experiments
id: get-ldflags
env:
@ -62,7 +62,7 @@ jobs:
go-version: ${{ steps.get-go-version.outputs.version }}
steps:
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Determine Go version
id: get-go-version
uses: ./.github/actions/go-version
@ -75,15 +75,15 @@ jobs:
filepath: ${{ steps.generate-metadata-file.outputs.filepath }}
steps:
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Generate package metadata
id: generate-metadata-file
uses: hashicorp/actions-generate-metadata@v1
uses: hashicorp/actions-generate-metadata@f6f1ca9cededa05d841a58d171064faf3de8ec74 # unreleased, untagged (upgrade of multiple gha)
with:
version: ${{ needs.get-product-version.outputs.product-version }}
product: ${{ env.PKG_NAME }}
- uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
- uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
with:
name: metadata.json
path: ${{ steps.generate-metadata-file.outputs.filepath }}
@ -137,9 +137,9 @@ jobs:
repo: "opentofu"
version: ${{needs.get-product-version.outputs.product-version}}
steps:
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Build Docker images
uses: hashicorp/actions-docker-build@v1
uses: hashicorp/actions-docker-build@0635aed766f1f4e4519845ca22a4d024c96fb55d # unreleased, untagged (upgrade of multiple gha)
with:
pkg_name: "opentofu_${{env.version}}"
version: ${{env.version}}
@ -185,10 +185,10 @@ jobs:
cache_path=internal/command/e2etest/build
echo "e2e-cache-key=${cache_key}" | tee -a "${GITHUB_OUTPUT}"
echo "e2e-cache-path=${cache_path}" | tee -a "${GITHUB_OUTPUT}"
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Go toolchain
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version: ${{ needs.get-go-version.outputs.go-version }}
@ -205,7 +205,7 @@ jobs:
bash ./internal/command/e2etest/make-archive.sh
- name: Save test harness to cache
uses: actions/cache/save@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ${{ steps.set-cache-values.outputs.e2e-cache-path }}
key: ${{ steps.set-cache-values.outputs.e2e-cache-key }}_${{ matrix.goos }}_${{ matrix.goarch }}
@ -243,9 +243,9 @@ jobs:
# fresh build from source.)
- name: Checkout repo
if: ${{ (matrix.goos == 'linux') || (matrix.goos == 'darwin') }}
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: "Restore cache"
uses: actions/cache/restore@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
uses: actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
id: e2etestpkg
with:
path: ${{ needs.e2etest-build.outputs.e2e-cache-path }}
@ -253,7 +253,7 @@ jobs:
fail-on-cache-miss: true
enableCrossOsArchive: true
- name: "Download OpenTofu CLI package"
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
id: clipkg
with:
name: tofu_${{env.version}}_${{ env.os }}_${{ env.arch }}.zip
@ -264,7 +264,7 @@ jobs:
unzip "${{ needs.e2etest-build.outputs.e2e-cache-path }}/tofu-e2etest_${{ env.os }}_${{ env.arch }}.zip"
unzip "./tofu_${{env.version}}_${{ env.os }}_${{ env.arch }}.zip"
- name: Set up QEMU
uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0
uses: docker/setup-qemu-action@53851d14592bedcffcf25ea515637cff71ef929a # v3.3.0
if: ${{ contains(matrix.goarch, 'arm') }}
with:
platforms: all
@ -298,17 +298,17 @@ jobs:
steps:
- name: Install Go toolchain
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version: ${{ needs.get-go-version.outputs.go-version }}
- name: Download OpenTofu CLI package
uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
id: clipkg
with:
name: tofu_${{ env.version }}_linux_amd64.zip
path: .
- name: Checkout tofu-exec repo
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
repository: opentofu/tofu-exec
path: tofu-exec

View File

@ -42,7 +42,7 @@ jobs:
strategy:
matrix:
include:
- { runson: ubuntu-latest, goos: linux, goarch: "arm64" }
- { runson: ubuntu-24.04-arm, goos: linux, goarch: "arm64" }
- { runson: ubuntu-latest, goos: linux, goarch: "amd64" }
- { runson: ubuntu-latest, goos: linux, goarch: "386" }
- { runson: ubuntu-latest, goos: linux, goarch: "arm" }
@ -55,18 +55,18 @@ jobs:
# 👇🏾 GitHub Actions natively supports only the AMD64 arch, so we are using this action
# for testing on non-amd64 envs like 386, arm64, etc...
- name: "Set up QEMU"
if: matrix.goos == 'linux' && matrix.goarch != 'amd64'
uses: docker/setup-qemu-action@v3
if: matrix.goos == 'linux' && matrix.goarch != 'amd64' && matrix.goarch != 'arm64'
uses: docker/setup-qemu-action@53851d14592bedcffcf25ea515637cff71ef929a # v3.3.0
- name: "Fetch source code"
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Determine Go version
id: go
uses: ./.github/actions/go-version
- name: Install Go toolchain
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version: ${{ steps.go.outputs.version }}
@ -74,7 +74,7 @@ jobs:
# identical across the unit-tests, e2e-tests, and consistency-checks
# jobs, or else weird things could happen.
- name: Cache Go modules
uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: "~/go/pkg"
key: go-mod-${{ hashFiles('go.sum') }}
@ -91,14 +91,14 @@ jobs:
steps:
- name: "Fetch source code"
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Determine Go version
id: go
uses: ./.github/actions/go-version
- name: Install Go toolchain
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version: ${{ steps.go.outputs.version }}
@ -106,7 +106,7 @@ jobs:
# identical across the unit-tests, e2e-tests, and consistency-checks
# jobs, or else weird things could happen.
- name: Cache Go modules
uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: "~/go/pkg"
key: go-mod-${{ hashFiles('go.sum') }}
@ -129,14 +129,14 @@ jobs:
steps:
- name: "Fetch source code"
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Determine Go version
id: go
uses: ./.github/actions/go-version
- name: Install Go toolchain
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version: ${{ steps.go.outputs.version }}
@ -144,7 +144,7 @@ jobs:
# identical across the unit-tests, e2e-tests, and consistency-checks
# jobs, or else weird things could happen.
- name: Cache Go modules
uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: "~/go/pkg"
key: go-mod-${{ hashFiles('go.sum') }}
@ -161,7 +161,7 @@ jobs:
steps:
- name: "Fetch source code"
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0 # We need to do comparisons against the main branch.
@ -170,7 +170,7 @@ jobs:
uses: ./.github/actions/go-version
- name: Install Go toolchain
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version: ${{ steps.go.outputs.version }}
@ -178,7 +178,7 @@ jobs:
# identical across the unit-tests, e2e-tests, and consistency-checks
# jobs, or else weird things could happen.
- name: Cache Go modules
uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: "~/go/pkg"
key: go-mod-${{ hashFiles('go.sum') }}
@ -194,7 +194,7 @@ jobs:
fi
- name: Cache protobuf tools
uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: "tools/protobuf-compile/.workdir"
key: protobuf-tools-${{ hashFiles('tools/protobuf-compile/protobuf-compile.go') }}
@ -210,7 +210,7 @@ jobs:
exit 1
fi
- name: "Code linting"
uses: golangci/golangci-lint-action@v6
uses: golangci/golangci-lint-action@e60da84bfae8c7920a47be973d75e15710aa8bd7 # v6.3.0
with:
version: v1.62
# Only compare with changes from when we introduced linting. Eventually we can get rid of this and lint the whole project
@ -230,14 +230,14 @@ jobs:
steps:
- name: "Fetch source code"
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install licensei
run: |
make deps
- name: Restore cache license information of dependencies
uses: actions/cache/restore@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
uses: actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ".licensei.cache"
key: licensei-cache-${{ hashFiles('go.sum') }}
@ -249,7 +249,7 @@ jobs:
uses: ./.github/actions/go-version
- name: Install Go toolchain
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version: ${{ steps.go.outputs.version }}
@ -261,7 +261,7 @@ jobs:
if: env.LICENSE_CHECK != 'false'
- name: Save cache license information of dependencies
uses: actions/cache/save@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
if: always()
with:
path: ".licensei.cache"
@ -273,7 +273,7 @@ jobs:
steps:
- name: "Fetch source code"
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: "Run Installation Instructions Test"
run: make test-linux-install-instructions

View File

@ -16,14 +16,14 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Determine Go version
id: go
uses: ./.github/actions/go-version
- name: Set up Go
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version: ${{ steps.go.outputs.version }}

View File

@ -11,7 +11,7 @@ jobs:
pr_open_job:
runs-on: ubuntu-latest
steps:
- uses: actions/github-script@d7906e4ad0b1822421a7e6a35d5ca353c962f410 # v6.4.1
- uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
with:
script: |
github.rest.issues.createComment({

View File

@ -27,12 +27,12 @@ jobs:
steps:
- name: Set up QEMU cross build support
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@53851d14592bedcffcf25ea515637cff71ef929a # v3.3.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 # v3.8.0
- name: Login to Github Container Registry
uses: docker/login-action@v2
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
if: startsWith(inputs.tag, 'v')
with:
registry: ghcr.io
@ -40,7 +40,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
- name: Checkout
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
ref: ${{ inputs.tag }}
@ -75,12 +75,12 @@ jobs:
uses: ./.github/actions/go-version
- name: Set up Go
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version: ${{ steps.go.outputs.version }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 # v3.8.0
- name: Install cosign
uses: sigstore/cosign-installer@main
@ -112,7 +112,7 @@ jobs:
GPG_TTY: /dev/ttys000 # Set the GPG_TTY to avoid issues with pinentry
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v4
uses: goreleaser/goreleaser-action@5742e2a039330cbb23ebf35f046f814d4c6ff811 # v5.1.0
with:
version: v1.21.2
args: release --clean --timeout=60m --snapshot=${{ !startsWith(inputs.tag, 'v') }}
@ -133,7 +133,7 @@ jobs:
fi
- name: Upload artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
with:
name: dist
path: dist

View File

@ -14,9 +14,9 @@ jobs:
issues: write
steps:
- name: Checkout
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setup Go
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version: 1.22
- name: Update top issues ranking

View File

@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: Prepare website container

.gitignore
View File

@ -27,6 +27,8 @@ vendor/
.vscode/launch.json
# Coverage
coverage.html
coverage.out
coverage.txt
# GoReleaser build directory

View File

@ -1 +1 @@
1.22.8
1.22.11

View File

@ -89,6 +89,25 @@ dockers:
image_templates:
- "ghcr.io/opentofu/opentofu:{{ .Version }}-amd64"
- use: buildx
goarch: amd64
dockerfile: Dockerfile.minimal
build_flag_templates:
- "--pull"
- "--platform=linux/amd64"
- "--label=org.opencontainers.image.title={{ .ProjectName }}"
- "--label=org.opencontainers.image.vendor=OpenTofu"
- "--label=org.opencontainers.image.description=OpenTofu {{ .Version }}"
- "--label=org.opencontainers.image.url=https://github.com/opentofu/opentofu"
- "--label=org.opencontainers.image.documentation=https://github.com/opentofu/opentofu/blob/main/README.md"
- "--label=org.opencontainers.image.source=https://github.com/opentofu/opentofu"
- "--label=org.opencontainers.image.licenses=MPL-2.0"
- "--label=org.opencontainers.image.version={{ .Version }}"
- "--label=org.opencontainers.image.revision={{ .FullCommit }}"
- "--label=org.opencontainers.image.created={{ time \"2006-01-02T15:04:05Z07:00\" }}"
image_templates:
- "ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-amd64"
- use: buildx
goarch: arm64
build_flag_templates:
@ -107,6 +126,25 @@ dockers:
image_templates:
- "ghcr.io/opentofu/opentofu:{{ .Version }}-arm64"
- use: buildx
goarch: arm64
dockerfile: Dockerfile.minimal
build_flag_templates:
- "--pull"
- "--platform=linux/arm64"
- "--label=org.opencontainers.image.title={{ .ProjectName }}"
- "--label=org.opencontainers.image.vendor=OpenTofu"
- "--label=org.opencontainers.image.description=OpenTofu {{ .Version }}"
- "--label=org.opencontainers.image.url=https://github.com/opentofu/opentofu"
- "--label=org.opencontainers.image.documentation=https://github.com/opentofu/opentofu/blob/main/README.md"
- "--label=org.opencontainers.image.source=https://github.com/opentofu/opentofu"
- "--label=org.opencontainers.image.licenses=MPL-2.0"
- "--label=org.opencontainers.image.version={{ .Version }}"
- "--label=org.opencontainers.image.revision={{ .FullCommit }}"
- "--label=org.opencontainers.image.created={{ time \"2006-01-02T15:04:05Z07:00\" }}"
image_templates:
- "ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-arm64"
- use: buildx
goarch: arm
build_flag_templates:
@ -125,6 +163,25 @@ dockers:
image_templates:
- "ghcr.io/opentofu/opentofu:{{ .Version }}-arm"
- use: buildx
goarch: arm
dockerfile: Dockerfile.minimal
build_flag_templates:
- "--pull"
- "--platform=linux/arm"
- "--label=org.opencontainers.image.title={{ .ProjectName }}"
- "--label=org.opencontainers.image.vendor=OpenTofu"
- "--label=org.opencontainers.image.description=OpenTofu {{ .Version }}"
- "--label=org.opencontainers.image.url=https://github.com/opentofu/opentofu"
- "--label=org.opencontainers.image.documentation=https://github.com/opentofu/opentofu/blob/main/README.md"
- "--label=org.opencontainers.image.source=https://github.com/opentofu/opentofu"
- "--label=org.opencontainers.image.licenses=MPL-2.0"
- "--label=org.opencontainers.image.version={{ .Version }}"
- "--label=org.opencontainers.image.revision={{ .FullCommit }}"
- "--label=org.opencontainers.image.created={{ time \"2006-01-02T15:04:05Z07:00\" }}"
image_templates:
- "ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-arm"
- use: buildx
goarch: "386"
build_flag_templates:
@ -143,6 +200,25 @@ dockers:
image_templates:
- "ghcr.io/opentofu/opentofu:{{ .Version }}-386"
- use: buildx
goarch: "386"
dockerfile: Dockerfile.minimal
build_flag_templates:
- "--pull"
- "--platform=linux/386"
- "--label=org.opencontainers.image.title={{ .ProjectName }}"
- "--label=org.opencontainers.image.vendor=OpenTofu"
- "--label=org.opencontainers.image.description=OpenTofu {{ .Version }}"
- "--label=org.opencontainers.image.url=https://github.com/opentofu/opentofu"
- "--label=org.opencontainers.image.documentation=https://github.com/opentofu/opentofu/blob/main/README.md"
- "--label=org.opencontainers.image.source=https://github.com/opentofu/opentofu"
- "--label=org.opencontainers.image.licenses=MPL-2.0"
- "--label=org.opencontainers.image.version={{ .Version }}"
- "--label=org.opencontainers.image.revision={{ .FullCommit }}"
- "--label=org.opencontainers.image.created={{ time \"2006-01-02T15:04:05Z07:00\" }}"
image_templates:
- "ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-386"
docker_manifests:
- name_template: ghcr.io/opentofu/opentofu:{{ .Version }}
image_templates:
@ -151,6 +227,13 @@ docker_manifests:
- ghcr.io/opentofu/opentofu:{{ .Version }}-arm
- ghcr.io/opentofu/opentofu:{{ .Version }}-386
- name_template: ghcr.io/opentofu/opentofu:{{ .Version }}-minimal
image_templates:
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-amd64
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-arm64
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-arm
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-386
- name_template: ghcr.io/opentofu/opentofu:{{ .Major }}.{{ .Minor }}
image_templates:
- ghcr.io/opentofu/opentofu:{{ .Version }}-amd64
@ -159,6 +242,14 @@ docker_manifests:
- ghcr.io/opentofu/opentofu:{{ .Version }}-386
skip_push: auto # Skips the push on pre-release versions, like 1.6.1-alpha1. See https://goreleaser.com/customization/docker_manifest/#customization
- name_template: ghcr.io/opentofu/opentofu:{{ .Major }}.{{ .Minor }}-minimal
image_templates:
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-amd64
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-arm64
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-arm
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-386
skip_push: auto # Skips the push on pre-release versions, like 1.6.1-alpha1. See https://goreleaser.com/customization/docker_manifest/#customization
- name_template: ghcr.io/opentofu/opentofu:{{ .Major }}
image_templates:
- ghcr.io/opentofu/opentofu:{{ .Version }}-amd64
@ -167,6 +258,14 @@ docker_manifests:
- ghcr.io/opentofu/opentofu:{{ .Version }}-386
skip_push: auto # Skips the push on pre-release versions, like 1.6.1-alpha1. See https://goreleaser.com/customization/docker_manifest/#customization
- name_template: ghcr.io/opentofu/opentofu:{{ .Major }}-minimal
image_templates:
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-amd64
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-arm64
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-arm
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-386
skip_push: auto # Skips the push on pre-release versions, like 1.6.1-alpha1. See https://goreleaser.com/customization/docker_manifest/#customization
- name_template: ghcr.io/opentofu/opentofu:latest
image_templates:
- ghcr.io/opentofu/opentofu:{{ .Version }}-amd64
@ -175,6 +274,14 @@ docker_manifests:
- ghcr.io/opentofu/opentofu:{{ .Version }}-386
skip_push: auto # Skips the push on pre-release versions, like 1.6.1-alpha1. See https://goreleaser.com/customization/docker_manifest/#customization
- name_template: ghcr.io/opentofu/opentofu:minimal
image_templates:
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-amd64
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-arm64
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-arm
- ghcr.io/opentofu/opentofu:{{ .Version }}-minimal-386
skip_push: auto # Skips the push on pre-release versions, like 1.6.1-alpha1. See https://goreleaser.com/customization/docker_manifest/#customization
nfpms:
- file_name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Arch }}'
maintainer: 'https://github.com/orgs/opentofu/teams/opentofu-core-team'

View File

@ -3,6 +3,7 @@
UPGRADE NOTES:
* Using the `ghcr.io/opentofu/opentofu` image as a base image for custom images is no longer supported. Please see https://opentofu.org/docs/intro/install/docker/ for instructions on building your own image.
* OpenTofu 1.10 with the `pg` backend must not be used in parallel with older versions; doing so may lead to unsafe state writes when the database is shared across multiple projects.
NEW FEATURES:
@ -15,10 +16,25 @@ ENHANCEMENTS:
* OpenTofu will now recommend using `-exclude` instead of `-target`, when possible, in the error messages about unknown values in `count` and `for_each` arguments, thereby providing a more definitive workaround. ([#2154](https://github.com/opentofu/opentofu/pull/2154))
* State encryption now supports using external programs as key providers. Additionally, the PBKDF2 key provider now supports chaining via the `chain` parameter. ([#2023](https://github.com/opentofu/opentofu/pull/2023))
* Added count of forgotten resources to plan and apply outputs. ([#1956](https://github.com/opentofu/opentofu/issues/1956))
* The `element` function now accepts negative indices, which extends the existing "wrapping" model into the negative direction. In particular, choosing element `-1` selects the final element in the sequence. ([#2371](https://github.com/opentofu/opentofu/pull/2371))
* The `moved` block now supports moving between different resource types. ([#2370](https://github.com/opentofu/opentofu/pull/2370))
* `moved` block can now be used to migrate from the `null_resource` to the `terraform_data` resource. ([#2481](https://github.com/opentofu/opentofu/pull/2481))
* Warn on implicit references to providers that have no `required_providers` entry (see the sketch after this list). ([#2084](https://github.com/opentofu/opentofu/issues/2084))
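A minimal sketch, with hypothetical resource and local names, of how the negative `element` index, cross-type `moved`, and `required_providers` items above look in configuration:

```hcl
terraform {
  required_providers {
    # Declaring providers explicitly avoids the new warning about
    # implicit provider references.
    random = {
      source = "hashicorp/random"
    }
  }
}

locals {
  servers = ["alpha", "beta", "gamma"]
  # Negative indices wrap from the end, so index -1 selects "gamma".
  last_server = element(local.servers, -1)
}

resource "terraform_data" "bootstrap" {
  input = local.last_server
}

# Moves existing state from the old null_resource address to the
# terraform_data resource above instead of destroying and recreating it.
moved {
  from = null_resource.bootstrap
  to   = terraform_data.bootstrap
}
```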
BUG FIXES:
- Fixed an issue where an invalid provider name in the `provider_meta` block would crash OpenTofu rather than report an error ([#2347](https://github.com/opentofu/opentofu/pull/2347))
- When assigning an empty map to a variable that is declared as a map of an object type with at least one optional attribute, OpenTofu will no longer create a subtly-broken value (see the sketch after this list). ([#2371](https://github.com/opentofu/opentofu/pull/2371))
- The `format` and `formatlist` functions can now accept `null` as one of the arguments without causing problems during the apply phase. Previously these functions would incorrectly return an unknown value when given `null` and so could cause a failure during the apply phase where no unknown values are allowed. ([#2371](https://github.com/opentofu/opentofu/pull/2371))
- The provider used during import is now correctly identified. ([#2336](https://github.com/opentofu/opentofu/pull/2336))
- `plantimestamp()` now returns an unknown value during validation. ([#2397](https://github.com/opentofu/opentofu/issues/2397))
- A syntax error in the `required_providers` block no longer causes a panic and is reported as a syntax error instead. ([#2344](https://github.com/opentofu/opentofu/issues/2344))
- Changing Go version to 1.22.11 in order to fix [CVE-2024-45336](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-45336) and [CVE-2024-45341](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-45341) ([#2438](https://github.com/opentofu/opentofu/pull/2438))
- Fixed the error message shown when the default value of a complex variable contains a value of the wrong type. ([#2394](https://github.com/opentofu/opentofu/issues/2394))
- Fixed the download of modules sourced from a GitHub branch whose name contains slashes. ([#2396](https://github.com/opentofu/opentofu/issues/2396))
- The `pg` backend no longer fails on workspace creation during parallel runs when the database is shared across multiple projects. ([#2411](https://github.com/opentofu/opentofu/pull/2411))
- Generating an OpenTofu configuration from an `import` block that is referencing a resource with nested attributes now works correctly, instead of giving an error that the nested computed attribute is required. ([#2372](https://github.com/opentofu/opentofu/issues/2372))
- `base64gunzip` now doesn't expose sensitive values if it fails during the base64 decoding. ([#2503](https://github.com/opentofu/opentofu/pull/2503))
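For reference, a minimal sketch (with hypothetical variable and resource names) of the configuration shapes involved in the optional-attribute and `import` config-generation fixes above:

```hcl
# Assigning an empty map (rules = {}) to this variable previously produced
# a subtly-broken value because of the optional attribute.
variable "rules" {
  type = map(object({
    port     = number
    protocol = optional(string, "tcp")
  }))
  default = {}
}

# Generating configuration for an imported resource with nested attributes,
# e.g. `tofu plan -generate-config-out=generated.tf`, no longer reports
# nested computed attributes as required. Assumes the hashicorp/aws provider
# is declared and configured.
import {
  to = aws_s3_bucket.logs
  id = "my-logs-bucket"
}
```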
## Previous Releases

View File

@ -17,6 +17,9 @@ release.
- **Stable** is a release that has no new features and bug fixes over an RC. This is versioned `X.Y.0`, where `X` and `Y` are numbers, such as `1.2.0`.
- **Point release** is a release that contains bugfixes only on top of a stable release. This is versioned `X.Y.Z` where `X`, `Y` and `Z` are numbers, such as `1.2.3`.
> [!WARNING]
> Many tools depend on the release order on GitHub to determine the latest version. When creating a point release, make sure to release the oldest version first, followed by the newer versions. Do not release an older point release without also releasing the newer versions or tooling _will_ break.
---
## Gathering the team for a release

Dockerfile.minimal
View File

@ -0,0 +1,12 @@
# Copyright (c) The OpenTofu Authors
# SPDX-License-Identifier: MPL-2.0
# Copyright (c) 2023 HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
FROM scratch
LABEL maintainer="OpenTofu Core Team <core@opentofu.org>"
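# The tofu binary is supplied by the Docker build context; GoReleaser copies the
# per-platform build artifact there (see the dockers section of .goreleaser.yaml).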
COPY tofu /usr/local/bin/tofu
ENTRYPOINT ["/usr/local/bin/tofu"]

View File

@ -1,7 +1,9 @@
Andrei Ciobanu <andrei.ciobanu@opentofu.org> @yottta
Andy Hayes <andrew.hayes@harness.io> @Andrew-Hayes
Arel Rabinowitz <arel.rabinowitz@env0.com> @RLRabinowitz
Arthur Bonic <arthurb@opentofu.org> @abstractionfactory
Christian Mesh <christianm@opentofu.org> @cam72cam
Diógenes Fernandes <diofeher@opentofu.org> @diofeher
Ilia Gogotchuri <gogotchuri@opentofu.org> @Gogotchuri
James Humphries <jamesh@spacelift.io> @Yantrio
Martin Atkins <apparentlymart@opentofu.org> @apparentlymart

View File

@ -8,6 +8,59 @@ The Technical Steering Committee is a group comprised of people from companies a
- Wojciech Barczynski ([@wojciech12](https://github.com/wojciech12)) representing Spacelift Inc.
- Zach Goldberg ([@ZachGoldberg](https://github.com/ZachGoldberg)) representing Gruntwork, Inc.
## 2025-01-28
- Christian Mesh ([@cam72cam](https://github.com/cam72cam)) (OpenTofu Tech Lead)
- Roger Simms ([@allofthesepeople](https://github.com/allofthesepeople))
- Zach Goldberg ([@ZachGoldberg](https://github.com/ZachGoldberg))
- Igor Savchenko ([@DiscyDel](https://github.com/DicsyDel))
- Roni Frantchi ([@roni-frantchi](https://github.com/roni-frantchi))
### Agenda
- Discuss OpenTofu Charter updates. Vote: All present members voted yes to submitting the updated charter to the Linux Foundation
- CNCF Application Review - Continuing to find ways to communicate with them
### Discussion
- Actively planning OpenTofu day at CNCF London - Roger, James, Christian are planning to attend in person
- Moving forward with interviews for a candidate core team member sponsored by Gruntwork
- OCI Survey was published and has ~100 results so far
- [Make the Switch to OpenTofu](https://blog.gruntwork.io/make-the-switch-to-opentofu-6904ba95e799) published by Gruntwork
- Discussed the status of stacks, discussed the need to gather requirements from the community on how/if OpenTofu should be doing anything here
## 2025-01-14
- Christian Mesh ([@cam72cam](https://github.com/cam72cam)) (OpenTofu Tech Lead)
- Roger Simms ([@allofthesepeople](https://github.com/allofthesepeople))
- Zach Goldberg ([@ZachGoldberg](https://github.com/ZachGoldberg))
- Igor Savchenko ([@DiscyDel](https://github.com/DicsyDel))
### Agenda
- Release process discussion. Guiding principle decided: "The policy should be guided by the need to balance the desire to provide assurance to adopters with the resourcing required to maintain older versions; we're open to feedback."
- No formal votes.
### Discussion
- Discussion of release process, and that we think it's important for enterprise support that we provide patches for every major version going back at least 1 year. Christian agreed to discuss with the core team.
- Discussion that regardless of final policy, we want to be explicit about what versions are supported, with e.g. an actual table on opentofu.org
- Discussion of how much traction we're seeing, especially post-releases. Set up a download-tracker spreadsheet to track GitHub release download counts; we don't do much else on Reddit, LinkedIn, etc.
- Discussion of getting feedback from TACOs for OpenTofu - "Make it Faster" - Add OpenTelemetry.
- Discussion of CNCF application and what steps are needed to continue to advance the application and gain an exception to the apache license policy
## 2025-01-07
- Christian Mesh ([@cam72cam](https://github.com/cam72cam)) (OpenTofu Tech Lead)
- Roger Simms ([@allofthesepeople](https://github.com/allofthesepeople))
- Wojciech Barczynski ([@wojciech12](https://github.com/wojciech12))
- Zach Goldberg ([@ZachGoldberg](https://github.com/ZachGoldberg))
- Roni Frantchi ([@roni-frantchi](https://github.com/roni-frantchi))
### Agenda
- Vote on ephemeral values. Result: Roger, Roni, Oleksandr, Zach, and Woj voted for pushing ephemeral values to after 1.10.
### Discussion
- Timing of 1.9 release, confirming it's this week
## 2024-12-10
### Attendees

go.mod
View File

@ -17,24 +17,24 @@ require (
github.com/apparentlymart/go-userdirs v0.0.0-20200915174352-b0c018a67c13
github.com/apparentlymart/go-versions v1.0.2
github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2
github.com/aws/aws-sdk-go-v2 v1.23.2
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.6
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.25.5
github.com/aws/aws-sdk-go-v2/service/kms v1.26.5
github.com/aws/aws-sdk-go-v2/service/s3 v1.46.0
github.com/aws/smithy-go v1.17.0
github.com/aws/aws-sdk-go-v2 v1.32.7
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.39.1
github.com/aws/aws-sdk-go-v2/service/kms v1.37.6
github.com/aws/aws-sdk-go-v2/service/s3 v1.72.1
github.com/aws/smithy-go v1.22.1
github.com/bgentry/speakeasy v0.1.0
github.com/bmatcuk/doublestar/v4 v4.6.0
github.com/chzyer/readline v1.5.1
github.com/cli/browser v1.3.0
github.com/davecgh/go-spew v1.1.1
github.com/dylanmei/winrmtest v0.0.0-20210303004826-fbc9ae56efb6
github.com/go-test/deep v1.0.3
github.com/go-test/deep v1.1.0
github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1
github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.6.0
github.com/googleapis/gax-go/v2 v2.12.0
github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.43
github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.60
github.com/hashicorp/consul/api v1.13.0
github.com/hashicorp/consul/sdk v0.8.0
github.com/hashicorp/copywrite v0.16.3
@ -69,7 +69,7 @@ require (
github.com/mitchellh/gox v1.0.1
github.com/mitchellh/reflectwalk v1.0.2
github.com/nishanths/exhaustive v0.7.11
github.com/openbao/openbao/api v0.0.0-20240326035453-c075f0ef2c7e
github.com/openbao/openbao/api/v2 v2.1.0
github.com/opentofu/registry-address v0.0.0-20230920144404-f1e51167f633
github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db
github.com/pkg/errors v0.9.1
@ -82,21 +82,21 @@ require (
github.com/tombuildsstuff/giovanni v0.15.1
github.com/xanzy/ssh-agent v0.3.1
github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557
github.com/zclconf/go-cty v1.14.4
github.com/zclconf/go-cty v1.16.1
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940
github.com/zclconf/go-cty-yaml v1.1.0
go.opentelemetry.io/contrib/exporters/autoexport v0.0.0-20230703072336-9a582bd098a2
go.opentelemetry.io/otel v1.21.0
go.opentelemetry.io/otel/sdk v1.21.0
go.opentelemetry.io/otel/trace v1.21.0
go.opentelemetry.io/otel v1.33.0
go.opentelemetry.io/otel/sdk v1.33.0
go.opentelemetry.io/otel/trace v1.33.0
go.uber.org/mock v0.4.0
golang.org/x/crypto v0.31.0
golang.org/x/crypto v0.32.0
golang.org/x/exp v0.0.0-20230905200255-921286631fa9
golang.org/x/mod v0.17.0
golang.org/x/net v0.33.0
golang.org/x/net v0.34.0
golang.org/x/oauth2 v0.16.0
golang.org/x/sys v0.28.0
golang.org/x/term v0.27.0
golang.org/x/sys v0.29.0
golang.org/x/term v0.28.0
golang.org/x/text v0.21.0
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d
google.golang.org/api v0.155.0
@ -139,27 +139,27 @@ require (
github.com/armon/go-radix v1.0.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect
github.com/aws/aws-sdk-go v1.44.122 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1 // indirect
github.com/aws/aws-sdk-go-v2/config v1.25.8 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.16.6 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.5 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.5 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.5 // indirect
github.com/aws/aws-sdk-go-v2/service/iam v1.27.5 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.5 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.5 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.5 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.28.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.17.5 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.25.6 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect
github.com/aws/aws-sdk-go-v2/config v1.28.8 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.49 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.26 // indirect
github.com/aws/aws-sdk-go-v2/service/iam v1.38.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.7 // indirect
github.com/aws/aws-sdk-go-v2/service/sns v1.33.7 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.37.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.24.8 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.4 // indirect
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
github.com/bradleyfalzon/ghinstallation/v2 v2.1.0 // indirect
github.com/cenkalti/backoff/v3 v3.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cli/go-gh v1.0.0 // indirect
github.com/cli/safeexec v1.0.0 // indirect
github.com/cli/shurcooL-graphql v0.0.2 // indirect
@ -167,11 +167,11 @@ require (
github.com/creack/pty v1.1.18 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/dylanmei/iso8601 v0.1.0 // indirect
github.com/fatih/color v1.16.0 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/errors v0.20.2 // indirect
github.com/go-openapi/strfmt v0.21.3 // indirect
@ -191,10 +191,10 @@ require (
github.com/hashicorp/go-msgpack v0.5.4 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-safetemp v1.0.0 // indirect
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.9 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/hashicorp/go-slug v0.15.0 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/go-slug v0.16.3 // indirect
github.com/hashicorp/go-sockaddr v1.0.6 // indirect
github.com/hashicorp/golang-lru v0.5.1 // indirect
github.com/hashicorp/serf v0.9.6 // indirect
github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect
@ -241,17 +241,18 @@ require (
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
go.mongodb.org/mongo-driver v1.11.6 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.46.1 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.58.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect
go.opentelemetry.io/otel/metric v1.33.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/time v0.9.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
@ -266,6 +267,8 @@ require (
sigs.k8s.io/yaml v1.2.0 // indirect
)
go 1.22
go 1.22.0
toolchain go1.22.8
replace github.com/hashicorp/hcl/v2 v2.20.1 => github.com/opentofu/hcl/v2 v2.0.0-20240814143621-8048794c5c52

go.sum
View File

@ -302,61 +302,63 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:W
github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo=
github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
github.com/aws/aws-sdk-go-v2 v1.23.2 h1:UoTll1Y5b88x8h53OlsJGgOHwpggdMr7UVnLjMb3XYg=
github.com/aws/aws-sdk-go-v2 v1.23.2/go.mod h1:i1XDttT4rnf6vxc9AuskLc6s7XBee8rlLilKlc03uAA=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1 h1:ZY3108YtBNq96jNZTICHxN1gSBSbnvIdYwwqnvCV4Mc=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1/go.mod h1:t8PYl/6LzdAqsU4/9tz28V/kU+asFePvpOMkdul0gEQ=
github.com/aws/aws-sdk-go-v2 v1.32.7 h1:ky5o35oENWi0JYWUZkB7WYvVPP+bcRF5/Iq7JWSb5Rw=
github.com/aws/aws-sdk-go-v2 v1.32.7/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc=
github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw=
github.com/aws/aws-sdk-go-v2/config v1.25.8 h1:CHr7PIzyfevjNiqL9rU6xoqHZKCO2ldY6LmvRDfpRuI=
github.com/aws/aws-sdk-go-v2/config v1.25.8/go.mod h1:zefIy117FDPOVU0xSOFG8mx9kJunuVopzI639tjYXc0=
github.com/aws/aws-sdk-go-v2/config v1.28.8 h1:4nUeC9TsZoHm9GHlQ5tnoIklNZgISXXVGPKP5/CS0fk=
github.com/aws/aws-sdk-go-v2/config v1.28.8/go.mod h1:2C+fhFxnx1ymomFjj5NBUc/vbjyIUR7mZ/iNRhhb7BU=
github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ=
github.com/aws/aws-sdk-go-v2/credentials v1.16.6 h1:TimIpn1p4v44i0sJMKsnpby1P9sP1ByKLsdm7bvOmwM=
github.com/aws/aws-sdk-go-v2/credentials v1.16.6/go.mod h1:+CLPlYf9FQLeXD8etOYiZxpLQqc3GL4EikxjkFFp1KA=
github.com/aws/aws-sdk-go-v2/credentials v1.17.49 h1:+7u6eC8K6LLGQwWMYKHSsHAPQl+CGACQmnzd/EPMW0k=
github.com/aws/aws-sdk-go-v2/credentials v1.17.49/go.mod h1:0SgZcTAEIlKoYw9g+kuYUwbtUUVjfxnR03YkCOhMbQ0=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.6 h1:pPs23/JLSOlwnmSRNkdbt3upmBeF6QL/3MHEb6KzTyo=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.6/go.mod h1:jsoDHV44SxWv00wlbx0yA5M7n5rmE5rGk+OGA0suXSw=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.5 h1:16Z1XuMUv63fcyW5bIUno6AFcX4drsrE0gof+xue6g4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.5/go.mod h1:pRvFacV2qbRKy34ZFptHZW4wpauJA445bqFbvA6ikSo=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.5 h1:RxpMuBgzP3Dj1n5CZY6droLFcsn5gc7QsrIcaGQoeCs=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.5/go.mod h1:dO8Js7ym4Jzg/wcjTgCRVln/jFn3nI82XNhsG2lWbDI=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22 h1:kqOrpojG71DxJm/KDPO+Z/y1phm1JlC8/iT+5XRmAn8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.22/go.mod h1:NtSFajXVVL8TA2QNngagVZmUtXciyrHOt7xgz4faS/M=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26 h1:I/5wmGMffY4happ8NOCuIUEWGUvvFp5NSeQcXl9RHcI=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.26/go.mod h1:FR8f4turZtNy6baO0KJ5FJUmXH/cSkI9fOngs0yl6mA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26 h1:zXFLuEuMMUOvEARXFUVJdfqZ4bvvSgdGRq/ATcrQxzM=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26/go.mod h1:3o2Wpy0bogG1kyOPrgkXA8pgIfEEv0+m19O9D5+W8y8=
github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.5 h1:CesTZ0o3+/7N7pDHyoEuS/zL0mD652uRsYCelV08ABU=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.5/go.mod h1:Srr966fyoo72fJ/Hkz3ij6WQiZBX0RMO7w0jyzEwDyo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.26 h1:GeNJsIFHB+WW5ap2Tec4K6dzcVTsRbsT1Lra46Hv9ME=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.26/go.mod h1:zfgMpwHDXX2WGoG84xG2H+ZlPTkJUU4YUvx2svLQYWo=
github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.25.5 h1:NfKXRrQTesomlTgmum5kTrd5ywuU4XRmA3bNrXnJ5yk=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.25.5/go.mod h1:k4O1PkdCW+6ZUQGZjEZUkCT+8jmDmneKgLQ0mmmeT8s=
github.com/aws/aws-sdk-go-v2/service/iam v1.27.5 h1:4v1TyMBPGMOeagieS9TFnPaHaqs0pZFu1DXgFecsvwo=
github.com/aws/aws-sdk-go-v2/service/iam v1.27.5/go.mod h1:2Q4GJi6OAgj3bLPGUbA4VkKseAlvnICEtCnKAN6hSQo=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 h1:rpkF4n0CyFcrJUG/rNNohoTmhtWlFTRI4BsZOh9PvLs=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1/go.mod h1:l9ymW25HOqymeU2m1gbUQ3rUIsTwKs8gYHXkqDQUhiI=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.5 h1:OK4q/3E4Kr1bWgcTqSaxmCE5x463TFtSQrF6mQTqMrw=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.5/go.mod h1:T4RMdi6FqSEFaUMLe/YKTD+tj0l+Uz+mxfT7QxljEIA=
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.5 h1:nt18vYu0XdigeMdoDHJnOQxcCLcAPEeMat18LZUe68I=
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.5/go.mod h1:6a+eoGEovMG1U+gJ9IkjSCSHg2lIaBsr39auD9kW1xA=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.39.1 h1:SOJ3xkgrw8W0VQgyBUeep74yuf8kWALToFxNNwlHFvg=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.39.1/go.mod h1:J8xqRbx7HIc8ids2P8JbrKx9irONPEYq7Z1FpLDpi3I=
github.com/aws/aws-sdk-go-v2/service/iam v1.38.3 h1:2sFIoFzU1IEL9epJWubJm9Dhrn45aTNEJuwsesaCGnk=
github.com/aws/aws-sdk-go-v2/service/iam v1.38.3/go.mod h1:KzlNINwfr/47tKkEhgk0r10/OZq3rjtyWy0txL3lM+I=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.7 h1:tB4tNw83KcajNAzaIMhkhVI2Nt8fAZd5A5ro113FEMY=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.7/go.mod h1:lvpyBGkZ3tZ9iSsUIcC2EWp+0ywa7aK3BLT+FwZi+mQ=
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.7 h1:EqGlayejoCRXmnVC6lXl6phCm9R2+k35e0gWsO9G5DI=
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.7/go.mod h1:BTw+t+/E5F3ZnDai/wSOYM54WUVjSdewE7Jvwtb7o+w=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.5 h1:F+XafeiK7Uf4YwTZfe/JLt+3cB6je9sI7l0TY4f2CkY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.5/go.mod h1:NlZuvlkyu6l/F3+qIBsGGtYLL2Z71tCf5NFoNAaG1NY=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.5 h1:ow5dalHqYM8IbzXFCL86gQY9UJUtZsLyBHUd6OKep9M=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.5/go.mod h1:AcvGHLN2pTXdx1oVFSzcclBvfY2VbBg0AfOE/XjA7oo=
github.com/aws/aws-sdk-go-v2/service/kms v1.26.5 h1:MRNoQVbEtjzhYFeKVMifHae4K5q4FuK9B7tTDskIF/g=
github.com/aws/aws-sdk-go-v2/service/kms v1.26.5/go.mod h1:gfe6e+rOxaiz/gr5Myk83ruBD6F9WvM7TZbLjcTNsDM=
github.com/aws/aws-sdk-go-v2/service/s3 v1.46.0 h1:RaXPp86CLxTKDwCwSTmTW7FvTfaLPXhN48mPtQ881bA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.46.0/go.mod h1:x7gN1BRfTWXdPr/cFGM/iz+c87gRtJ+JMYinObt/0LI=
github.com/aws/aws-sdk-go-v2/service/sqs v1.28.4 h1:Hy1cUZGuZRHe3HPxw7nfA9BFUqdWbyI0JLLiqENgucc=
github.com/aws/aws-sdk-go-v2/service/sqs v1.28.4/go.mod h1:xlxN+2XHAmoRFFkGFZcrmVYQfXSlNpEuqEpN0GZMmaI=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.7 h1:8eUsivBQzZHqe/3FE+cqwfH+0p5Jo8PFM/QYQSmeZ+M=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.7/go.mod h1:kLPQvGUmxn/fqiCrDeohwG33bq2pQpGeY62yRO6Nrh0=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.7 h1:Hi0KGbrnr57bEHWM0bJ1QcBzxLrL/k2DHvGYhb8+W1w=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.7/go.mod h1:wKNgWgExdjjrm4qvfbTorkvocEstaoDl4WCvGfeCy9c=
github.com/aws/aws-sdk-go-v2/service/kms v1.37.6 h1:CZImQdb1QbU9sGgJ9IswhVkxAcjkkD1eQTMA1KHWk+E=
github.com/aws/aws-sdk-go-v2/service/kms v1.37.6/go.mod h1:YJDdlK0zsyxVBxGU48AR/Mi8DMrGdc1E3Yij4fNrONA=
github.com/aws/aws-sdk-go-v2/service/s3 v1.72.1 h1:+IrM0EXV6ozLqJs3Kq2iwQGJBWmgRiYBXWETQQUMZRY=
github.com/aws/aws-sdk-go-v2/service/s3 v1.72.1/go.mod h1:r+xl5yzMk9083rMR+sJ5TYj9Tihvf/l1oxzZXDgGj2Q=
github.com/aws/aws-sdk-go-v2/service/sns v1.33.7 h1:N3o8mXK6/MP24BtD9sb51omEO9J9cgPM3Ughc293dZc=
github.com/aws/aws-sdk-go-v2/service/sns v1.33.7/go.mod h1:AAHZydTB8/V2zn3WNwjLXBK1RAcSEpDNmFfrmjvrJQg=
github.com/aws/aws-sdk-go-v2/service/sqs v1.37.4 h1:WpoMCoS4+qOkkuWQommvDRboKYzK91En6eXO/k5dXr0=
github.com/aws/aws-sdk-go-v2/service/sqs v1.37.4/go.mod h1:171mrsbgz6DahPMnLJzQiH3bXXrdsWhpE9USZiM19Lk=
github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk=
github.com/aws/aws-sdk-go-v2/service/sso v1.17.5 h1:kuK22ZsITfzaZEkxEl5H/lhy2k3G4clBtcQBI93RbIc=
github.com/aws/aws-sdk-go-v2/service/sso v1.17.5/go.mod h1:/tLqstwPfJLHYGBB5/c8P1ITI82pcGs7cJQuXku2pOg=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.3 h1:l5d5nrTFMhiUWNoLnV7QNI4m42/3WVSXqSyqVy+elGk=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.3/go.mod h1:30gKZp2pHQJq3yTmVy+hJKDFynSoYzVqYaxe4yPi+xI=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.8 h1:CvuUmnXI7ebaUAhbJcDy9YQx8wHR69eZ9I7q5hszt/g=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.8/go.mod h1:XDeGv1opzwm8ubxddF0cgqkZWsyOtw4lr6dxwmb6YQg=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7 h1:F2rBfNAL5UyswqoeWv9zs74N/NanhK16ydHW1pahX6E=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.7/go.mod h1:JfyQ0g2JG8+Krq0EuZNnRwX0mU0HrwY/tG6JNfcqh4k=
github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g=
github.com/aws/aws-sdk-go-v2/service/sts v1.25.6 h1:39dJNBt35p8dFSnQdoy+QbDaPenTxFqqDQFOb1GDYpE=
github.com/aws/aws-sdk-go-v2/service/sts v1.25.6/go.mod h1:6DKEi+8OnUrqEEh6OCam16AYQHWAOyNgRiUGnHoh7Cg=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.4 h1:EzofOvWNMtG9ELt9mPOJjLYh1hz6kN4f5hNCyTtS7Hg=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.4/go.mod h1:5Gn+d+VaaRgsjewpMvGazt0WfcFO+Md4wLOuBfGR9Bc=
github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/aws/smithy-go v1.17.0 h1:wWJD7LX6PBV6etBUwO0zElG0nWN9rUhp0WdYeHSHAaI=
github.com/aws/smithy-go v1.17.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro=
github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
@ -369,10 +371,8 @@ github.com/bmatcuk/doublestar/v4 v4.6.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTS
github.com/bradleyfalzon/ghinstallation/v2 v2.1.0 h1:5+NghM1Zred9Z078QEZtm28G/kfDfZN/92gkDlLwGVA=
github.com/bradleyfalzon/ghinstallation/v2 v2.1.0/go.mod h1:Xg3xPRN5Mcq6GDqeUVhFbjEWMb4JHCyWEeeBGEYQoTU=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c=
github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@ -447,8 +447,8 @@ github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQL
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
@ -478,8 +478,8 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8=
@ -493,8 +493,8 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c=
github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@ -637,8 +637,8 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rH
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.43 h1:IHnW2UNo8CnKJCKN90Osq+ViH/RzfxeRUBRLzZOA4C0=
github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.43/go.mod h1:vahmnnIdr7LCswcRr+9z5YCTiytyV5qYIYmw7b4QyUE=
github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.60 h1:zh3v/n0DillXuE9iMXqFsZjfMicNCVNB1+leYCjZrQw=
github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.60/go.mod h1:npXAOu74D/9TTX1no1ooctXrq6hyWNRIwHrEu2zeVUo=
github.com/hashicorp/consul/api v1.13.0 h1:2hnLQ0GjQvw7f3O61jMO8gbasZviZTrt9R8WzgiirHc=
github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ=
github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU=
@ -685,16 +685,16 @@ github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5O
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.9 h1:FW0YttEnUNDJ2WL9XcrrfteS1xW8u+sh4ggM8pN5isQ=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.9/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-slug v0.15.0 h1:AhMnE6JIyW0KoDJlmRDwv4xd52a5ZK3VdioQ7SMmZhI=
github.com/hashicorp/go-slug v0.15.0/go.mod h1:THWVTAXwJEinbsp4/bBRcmbaO5EYNLTqxbG4tZ3gCYQ=
github.com/hashicorp/go-slug v0.16.3 h1:pe0PMwz2UWN1168QksdW/d7u057itB2gY568iF0E2Ns=
github.com/hashicorp/go-slug v0.16.3/go.mod h1:THWVTAXwJEinbsp4/bBRcmbaO5EYNLTqxbG4tZ3gCYQ=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hashicorp/go-sockaddr v1.0.6 h1:RSG8rKU28VTUTvEKghe5gIhIQpv8evvNpnDEyqO4u9I=
github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-tfe v1.53.0 h1:FlmR+45MB9rTosra2ZGPp0XtdxiRTmdTAJAgX94WK9k=
github.com/hashicorp/go-tfe v1.53.0/go.mod h1:XnTtBj3tVQ4uFkcFsv8Grn+O1CVcIcceL1uc2AgUcaU=
@ -873,7 +873,6 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
@ -920,8 +919,8 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/openbao/openbao/api v0.0.0-20240326035453-c075f0ef2c7e h1:LIQFfqW6BA5E2ycx8NNDgyKh0exFubHePM5pF3knogo=
github.com/openbao/openbao/api v0.0.0-20240326035453-c075f0ef2c7e/go.mod h1:NUvBdXCNlmAGQ9TbYV7vS1Y9awHAjrq3QLiBWV+4Glk=
github.com/openbao/openbao/api/v2 v2.1.0 h1:x1I03dGuFfXGofO7Ix8bJ991c6A/cXBV+5bQbBv1UyQ=
github.com/openbao/openbao/api/v2 v2.1.0/go.mod h1:fit0FZr/2diblykkbid4vh0MkT3Iwkhza5IindPKJ70=
github.com/opentofu/hcl/v2 v2.0.0-20240814143621-8048794c5c52 h1:5O3LYVJxOHh7wfp/f3Em9EVMAK22dv7+bPj/k1hRamg=
github.com/opentofu/hcl/v2 v2.0.0-20240814143621-8048794c5c52/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA=
github.com/opentofu/registry-address v0.0.0-20230920144404-f1e51167f633 h1:81TBkM/XGIFlVvyabp0CJl00UHeVUiQjz0fddLMi848=
@ -967,8 +966,8 @@ github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@ -1013,8 +1012,8 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F
github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.194/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.588 h1:DYtBXB7sVc3EOW5horg8j55cLZynhsLYhHrvQ/jXKKM=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.588/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y=
@ -1054,8 +1053,8 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8=
github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
github.com/zclconf/go-cty v1.16.1 h1:a5TZEPzBFFR53udlIKApXzj8JIF4ZNQ6abH79z5R1S0=
github.com/zclconf/go-cty v1.16.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo=
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM=
github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0=
@ -1075,16 +1074,18 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/exporters/autoexport v0.0.0-20230703072336-9a582bd098a2 h1:RRYaicUVPzisz2POp/snLfPetL3eBCHlMqtiiNXPnLY=
go.opentelemetry.io/contrib/exporters/autoexport v0.0.0-20230703072336-9a582bd098a2/go.mod h1:mYbddca6uQGV5E5Xzd5LWxzqnNG0SmplGiOKYMBL/S8=
go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.46.1 h1:PGmSzEMllKQwBQHe9SERAsCytvgLhsb8OrRLeW+40xw=
go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.46.1/go.mod h1:h0dNRrQsnnlMonPE/+FXrXtDYZEyZSTaIOfs+n8P/RQ=
go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.58.0 h1:g2rorZw2f1qnyfLOC7FP99argIWsN708Fjs2Zwz6SOk=
go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.58.0/go.mod h1:QzTypGPlQn4NselMPALVKGwm/p3XKLVCB/UG2Dq3PxQ=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
@ -1093,12 +1094,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkE
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0 h1:+XWJd3jf75RXJq29mxbuXhCXFDG3S3R4vBUeSI2P7tE=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0/go.mod h1:hqgzBPTf4yONMFgdZvL/bK42R/iinTyVQtiWihs3SZc=
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
@ -1131,8 +1132,8 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -1241,8 +1242,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1390,8 +1391,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -1402,8 +1403,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1426,8 +1427,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=

View File

@ -9,6 +9,7 @@ import (
"testing"
)
//nolint:cyclop // The complexity of this test naturally scales by the number of test conditions, and would be less readable/maintainable if broken into smaller parts.
func TestMap(t *testing.T) {
variableName := InputVariable{Name: "name"}
localHello := LocalValue{Name: "hello"}

View File

@ -138,6 +138,27 @@ func TestParseModuleSource(t *testing.T) {
Subdir: "example/foo",
},
},
"github.com with branch and subdir": {
input: "github.com/hashicorp/terraform-cidr-subnets//example/foo?ref=bar",
want: ModuleSourceRemote{
Package: ModulePackage("git::https://github.com/hashicorp/terraform-cidr-subnets.git?ref=bar"),
Subdir: "example/foo",
},
},
"github.com with subdir and malformed query params": {
input: "github.com/hashicorp/terraform-cidr-subnets//example/foo?",
want: ModuleSourceRemote{
Package: ModulePackage("git::https://github.com/hashicorp/terraform-cidr-subnets.git"),
Subdir: "example/foo",
},
},
"github.com subdir from a branch containing slash in the name": {
input: "github.com/hashicorp/terraform-cidr-subnets//example/foo?ref=bar/baz",
want: ModuleSourceRemote{
Package: ModulePackage("git::https://github.com/hashicorp/terraform-cidr-subnets.git?ref=bar/baz"),
Subdir: "example/foo",
},
},
"git protocol, URL-style": {
input: "git://example.com/code/baz.git",
want: ModuleSourceRemote{

View File

@ -98,32 +98,22 @@ func ParseRefFromTestingScope(traversal hcl.Traversal) (*Reference, tfdiags.Diag
switch root {
case "output":
name, rng, remain, outputDiags := parseSingleAttrRef(traversal)
reference = &Reference{
Subject: OutputValue{Name: name},
SourceRange: tfdiags.SourceRangeFromHCL(rng),
Remaining: remain,
}
diags = outputDiags
reference, diags = parseSingleAttrRef(traversal, func(name string) Referenceable {
return OutputValue{Name: name}
})
case "check":
name, rng, remain, checkDiags := parseSingleAttrRef(traversal)
reference = &Reference{
Subject: Check{Name: name},
SourceRange: tfdiags.SourceRangeFromHCL(rng),
Remaining: remain,
}
diags = checkDiags
reference, diags = parseSingleAttrRef(traversal, func(name string) Referenceable {
return Check{Name: name}
})
default:
// If it's not an output or a check block, then just parse it as normal.
return ParseRef(traversal)
}
if reference != nil {
if len(reference.Remaining) == 0 {
reference.Remaining = nil
}
return reference, diags
if reference != nil && len(reference.Remaining) == 0 {
reference.Remaining = nil
}
// If it's not an output or a check block, then just parse it as normal.
return ParseRef(traversal)
return reference, diags
}
// ParseRefStr is a helper wrapper around ParseRef that takes a string
@ -178,23 +168,14 @@ func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
rootRange := traversal[0].SourceRange()
switch root {
case "count":
name, rng, remain, diags := parseSingleAttrRef(traversal)
return &Reference{
Subject: CountAttr{Name: name},
SourceRange: tfdiags.SourceRangeFromHCL(rng),
Remaining: remain,
}, diags
return parseSingleAttrRef(traversal, func(name string) Referenceable {
return CountAttr{Name: name}
})
case "each":
name, rng, remain, diags := parseSingleAttrRef(traversal)
return &Reference{
Subject: ForEachAttr{Name: name},
SourceRange: tfdiags.SourceRangeFromHCL(rng),
Remaining: remain,
}, diags
return parseSingleAttrRef(traversal, func(name string) Referenceable {
return ForEachAttr{Name: name}
})
case "data":
if len(traversal) < 3 {
diags = diags.Append(&hcl.Diagnostic{
@ -207,7 +188,6 @@ func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
}
remain := traversal[1:] // trim off "data" so we can use our shared resource reference parser
return parseResourceRef(DataResourceMode, rootRange, remain)
case "resource":
// This is an alias for the normal case of just using a managed resource
// type as a top-level symbol, which will serve as an escape mechanism
@ -228,126 +208,34 @@ func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
}
remain := traversal[1:] // trim off "resource" so we can use our shared resource reference parser
return parseResourceRef(ManagedResourceMode, rootRange, remain)
case "local":
name, rng, remain, diags := parseSingleAttrRef(traversal)
return &Reference{
Subject: LocalValue{Name: name},
SourceRange: tfdiags.SourceRangeFromHCL(rng),
Remaining: remain,
}, diags
case "module":
callName, callRange, remain, diags := parseSingleAttrRef(traversal)
if diags.HasErrors() {
return nil, diags
}
// A traversal starting with "module" can either be a reference to an
// entire module, or to a single output from a module instance,
// depending on what we find after this introducer.
callInstance := ModuleCallInstance{
Call: ModuleCall{
Name: callName,
},
Key: NoKey,
}
if len(remain) == 0 {
// Reference to an entire module. Might alternatively be a
// reference to a single instance of a particular module, but the
// caller will need to deal with that ambiguity since we don't have
// enough context here.
return &Reference{
Subject: callInstance.Call,
SourceRange: tfdiags.SourceRangeFromHCL(callRange),
Remaining: remain,
}, diags
}
if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok {
var err error
callInstance.Key, err = ParseInstanceKey(idxTrav.Key)
if err != nil {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid index key",
Detail: fmt.Sprintf("Invalid index for module instance: %s.", err),
Subject: &idxTrav.SrcRange,
})
return nil, diags
}
remain = remain[1:]
if len(remain) == 0 {
// Also a reference to an entire module instance, but we have a key
// now.
return &Reference{
Subject: callInstance,
SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, idxTrav.SrcRange)),
Remaining: remain,
}, diags
}
}
if attrTrav, ok := remain[0].(hcl.TraverseAttr); ok {
remain = remain[1:]
return &Reference{
Subject: ModuleCallInstanceOutput{
Name: attrTrav.Name,
Call: callInstance,
},
SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, attrTrav.SrcRange)),
Remaining: remain,
}, diags
}
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid reference",
Detail: "Module instance objects do not support this operation.",
Subject: remain[0].SourceRange().Ptr(),
return parseSingleAttrRef(traversal, func(name string) Referenceable {
return LocalValue{Name: name}
})
return nil, diags
case "module":
return parseModuleCallRef(traversal)
case "path":
name, rng, remain, diags := parseSingleAttrRef(traversal)
return &Reference{
Subject: PathAttr{Name: name},
SourceRange: tfdiags.SourceRangeFromHCL(rng),
Remaining: remain,
}, diags
return parseSingleAttrRef(traversal, func(name string) Referenceable {
return PathAttr{Name: name}
})
case "self":
return &Reference{
Subject: Self,
SourceRange: tfdiags.SourceRangeFromHCL(rootRange),
Remaining: traversal[1:],
}, diags
case "terraform":
name, rng, remain, diags := parseSingleAttrRef(traversal)
return &Reference{
Subject: NewTerraformAttr(IdentTerraform, name),
SourceRange: tfdiags.SourceRangeFromHCL(rng),
Remaining: remain,
}, diags
return parseSingleAttrRef(traversal, func(name string) Referenceable {
return NewTerraformAttr(IdentTerraform, name)
})
case "tofu":
name, rng, remain, parsedDiags := parseSingleAttrRef(traversal)
return &Reference{
Subject: NewTerraformAttr(IdentTofu, name),
SourceRange: tfdiags.SourceRangeFromHCL(rng),
Remaining: remain,
}, parsedDiags
return parseSingleAttrRef(traversal, func(name string) Referenceable {
return NewTerraformAttr(IdentTofu, name)
})
case "var":
name, rng, remain, diags := parseSingleAttrRef(traversal)
return &Reference{
Subject: InputVariable{Name: name},
SourceRange: tfdiags.SourceRangeFromHCL(rng),
Remaining: remain,
}, diags
return parseSingleAttrRef(traversal, func(name string) Referenceable {
return InputVariable{Name: name}
})
case "template", "lazy", "arg":
// These names are all pre-emptively reserved in the hope of landing
// some version of "template values" or "lazy expressions" feature
@ -359,7 +247,6 @@ func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
Subject: rootRange.Ptr(),
})
return nil, diags
default:
function := ParseFunction(root)
if function.IsNamespace(FunctionNamespaceProvider) {
@ -475,12 +362,102 @@ func parseResourceRef(mode ResourceMode, startRange hcl.Range, traversal hcl.Tra
}, diags
}
func parseSingleAttrRef(traversal hcl.Traversal) (string, hcl.Range, hcl.Traversal, tfdiags.Diagnostics) {
func parseModuleCallRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
// The following is a little circuitous just so we can reuse parseSingleAttrRef
// for this slightly-odd case while keeping it relatively simple for all of the
// other cases that use it: we first get the information we need wrapped up
// in a *Reference and then unpack it to perform further work below.
callRef, diags := parseSingleAttrRef(traversal, func(name string) Referenceable {
return ModuleCallInstance{
Call: ModuleCall{
Name: name,
},
Key: NoKey,
}
})
if diags.HasErrors() {
return nil, diags
}
// A traversal starting with "module" can either be a reference to an
// entire module, or to a single output from a module instance,
// depending on what we find after this introducer.
callInstance := callRef.Subject.(ModuleCallInstance) //nolint:errcheck // This was constructed directly above by call to parseSingleAttrRef
callRange := callRef.SourceRange
remain := callRef.Remaining
if len(remain) == 0 {
// Reference to an entire module. Might alternatively be a
// reference to a single instance of a particular module, but the
// caller will need to deal with that ambiguity since we don't have
// enough context here.
return &Reference{
Subject: callInstance.Call,
SourceRange: callRange,
Remaining: remain,
}, diags
}
if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok {
var err error
callInstance.Key, err = ParseInstanceKey(idxTrav.Key)
if err != nil {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid index key",
Detail: fmt.Sprintf("Invalid index for module instance: %s.", err),
Subject: &idxTrav.SrcRange,
})
return nil, diags
}
remain = remain[1:]
if len(remain) == 0 {
// Also a reference to an entire module instance, but we have a key
// now.
return &Reference{
Subject: callInstance,
SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange.ToHCL(), idxTrav.SrcRange)),
Remaining: remain,
}, diags
}
}
if attrTrav, ok := remain[0].(hcl.TraverseAttr); ok {
remain = remain[1:]
return &Reference{
Subject: ModuleCallInstanceOutput{
Name: attrTrav.Name,
Call: callInstance,
},
SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange.ToHCL(), attrTrav.SrcRange)),
Remaining: remain,
}, diags
}
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid reference",
Detail: "Module instance objects do not support this operation.",
Subject: remain[0].SourceRange().Ptr(),
})
return nil, diags
}
func parseSingleAttrRef(traversal hcl.Traversal, makeAddr func(name string) Referenceable) (*Reference, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
root := traversal.RootName()
rootRange := traversal[0].SourceRange()
// NOTE: In a previous version of this file parseSingleAttrRef only returned the component parts
// of a *Reference and then the callers assembled them, which caused the main parseRef function
// to return a non-nil result (with mostly-garbage field values) even in the error cases.
// We've preserved that oddity for now because our code complexity refactoring efforts should
// not change the externally-observable behavior, but to guarantee that we'd need to review
// all uses of parseRef to make sure that they aren't depending on getting a non-nil *Reference
// along with error diagnostics. :(
if len(traversal) < 2 {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
@ -488,10 +465,15 @@ func parseSingleAttrRef(traversal hcl.Traversal) (string, hcl.Range, hcl.Travers
Detail: fmt.Sprintf("The %q object cannot be accessed directly. Instead, access one of its attributes.", root),
Subject: &rootRange,
})
return "", hcl.Range{}, nil, diags
return &Reference{Subject: makeAddr("")}, diags
}
if attrTrav, ok := traversal[1].(hcl.TraverseAttr); ok {
return attrTrav.Name, hcl.RangeBetween(rootRange, attrTrav.SrcRange), traversal[2:], diags
subjectAddr := makeAddr(attrTrav.Name)
return &Reference{
Subject: subjectAddr,
SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(rootRange, attrTrav.SrcRange)),
Remaining: traversal[2:],
}, diags
}
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
@ -499,5 +481,5 @@ func parseSingleAttrRef(traversal hcl.Traversal) (string, hcl.Range, hcl.Travers
Detail: fmt.Sprintf("The %q object does not support this operation.", root),
Subject: traversal[1].SourceRange().Ptr(),
})
return "", hcl.Range{}, nil, diags
return &Reference{Subject: makeAddr("")}, diags
}
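The refactor above replaces per-case assembly of *Reference with a constructor callback handed to parseSingleAttrRef, so each switch case shrinks to a one-line closure. The following is a self-contained toy sketch of that callback-constructor shape; the type and function names are illustrative stand-ins, not OpenTofu's real addrs types.

package main

import "fmt"

// Addr stands in for addrs.Referenceable; Ref stands in for addrs.Reference.
type Addr interface{ String() string }

type Var struct{ Name string }

func (v Var) String() string { return "var." + v.Name }

type Ref struct {
	Subject Addr
	Rest    []string
}

// parseSingleAttr mirrors the shape of parseSingleAttrRef after the refactor:
// the caller supplies a constructor and the helper assembles the full Ref.
func parseSingleAttr(parts []string, makeAddr func(name string) Addr) (*Ref, error) {
	if len(parts) < 2 {
		return nil, fmt.Errorf("%q must be followed by an attribute name", parts[0])
	}
	return &Ref{Subject: makeAddr(parts[1]), Rest: parts[2:]}, nil
}

func main() {
	ref, err := parseSingleAttr([]string{"var", "region"}, func(name string) Addr {
		return Var{Name: name}
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.Subject, ref.Rest) // var.region []
}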

View File

@ -144,7 +144,25 @@ func (c *httpClient) Unlock(id string) error {
return nil
}
resp, err := c.httpRequest(c.UnlockMethod, c.UnlockURL, c.jsonLockInfo, "unlock")
var lockInfo statemgr.LockInfo
// The force-unlock command does not instantiate statemgr.LockInfo,
// which means that c.jsonLockInfo will be nil
if c.jsonLockInfo != nil {
if err := json.Unmarshal(c.jsonLockInfo, &lockInfo); err != nil { //nolint:musttag // for now add musttag until we fully adopt the linting rules
return fmt.Errorf("failed to unmarshal jsonLockInfo: %w", err)
}
if lockInfo.ID != id {
return &statemgr.LockError{
Info: &lockInfo,
Err: fmt.Errorf("lock id %q does not match existing lock", id),
}
}
}
lockInfo.ID = id
resp, err := c.httpRequest(c.UnlockMethod, c.UnlockURL, lockInfo.Marshal(), "unlock")
if err != nil {
return err
}
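The guard added above only runs when a lock was taken in this session (c.jsonLockInfo is non-nil); the force-unlock path skips the comparison and sends a payload carrying just the ID. Below is a minimal, runnable sketch of that guard, assuming a stripped-down stand-in struct for statemgr.LockInfo (only the ID field is shown); it is an illustration of the logic, not the backend's actual code.

package main

import (
	"encoding/json"
	"fmt"
)

// lockInfo is an illustrative stand-in for statemgr.LockInfo.
type lockInfo struct {
	ID string `json:"ID"`
}

// checkUnlockID mirrors the guard added to Unlock: when lock info was recorded
// at Lock time, the ID passed to Unlock must match it.
func checkUnlockID(jsonLockInfo []byte, id string) error {
	if jsonLockInfo == nil {
		// Force-unlock path: no recorded lock info to compare against.
		return nil
	}
	var info lockInfo
	if err := json.Unmarshal(jsonLockInfo, &info); err != nil {
		return fmt.Errorf("failed to unmarshal jsonLockInfo: %w", err)
	}
	if info.ID != id {
		return fmt.Errorf("lock id %q does not match existing lock", id)
	}
	return nil
}

func main() {
	recorded := []byte(`{"ID":"bjarne-stroustrup-state-lock-id"}`)
	fmt.Println(checkUnlockID(recorded, "edsger-dijkstra-state-lock-id"))
	fmt.Println(checkUnlockID(nil, "anything")) // force unlock: <nil>
}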

View File

@ -254,6 +254,130 @@ func TestHttpClient_IsLockingEnabled(t *testing.T) {
}
}
// Tests the Unlock method for the HTTP client.
func TestHttpClient_Unlock(t *testing.T) {
stateLockInfoA := statemgr.LockInfo{
ID: "bjarne-stroustrup-state-lock-id",
Who: "BjarneStroustrup",
Operation: "TestTypePlan",
Created: time.Date(2023, time.August, 21, 15, 9, 26, 0, time.UTC),
}
stateLockInfoB := statemgr.LockInfo{
ID: "edsger-dijkstra-state-lock-id",
}
testCases := []struct {
name string
lockID string
jsonLockInfo []byte
lockResponseStatus int
lockResponseBody []byte
expectedErrorMsg error
expectedPayload []byte
}{
{
// Successful unlocking HTTP remote state
name: "Successfully unlocked",
lockID: stateLockInfoA.ID,
jsonLockInfo: stateLockInfoA.Marshal(),
lockResponseStatus: http.StatusOK,
lockResponseBody: nil,
expectedErrorMsg: nil,
expectedPayload: stateLockInfoA.Marshal(),
},
{
// Lock ID parameter does not match with LockInfo object Lock ID
name: "Lock ID's don't match",
lockID: stateLockInfoB.ID,
jsonLockInfo: stateLockInfoA.Marshal(),
lockResponseStatus: 0,
lockResponseBody: nil,
expectedErrorMsg: &statemgr.LockError{
Info: &stateLockInfoA,
Err: fmt.Errorf("lock id %q does not match existing lock", stateLockInfoB.ID),
},
expectedPayload: nil,
},
{
// Failed unmarshal jsonLockInfo into LockInfo object
name: "Failed to unmarshal jsonLockInfo",
lockID: stateLockInfoA.ID,
jsonLockInfo: []byte("Simplicity is prerequisite for reliability."),
lockResponseStatus: 0,
lockResponseBody: nil,
expectedErrorMsg: fmt.Errorf("failed to unmarshal jsonLockInfo: invalid character 'S' looking for beginning of value"),
expectedPayload: nil,
},
{
// Force unlock command being executed
name: "Successful force unlock",
lockID: stateLockInfoB.ID,
jsonLockInfo: nil,
lockResponseStatus: http.StatusOK,
lockResponseBody: nil,
expectedErrorMsg: nil,
expectedPayload: stateLockInfoB.Marshal(),
},
{
// Force unlock command being executed
name: "Unsuccessful force unlock",
lockID: stateLockInfoB.ID,
jsonLockInfo: nil,
lockResponseStatus: http.StatusNotFound,
lockResponseBody: nil,
expectedErrorMsg: fmt.Errorf("Unexpected HTTP response code %d", http.StatusNotFound),
expectedPayload: stateLockInfoB.Marshal(),
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
var receivedPayload []byte
handler := func(w http.ResponseWriter, r *http.Request) {
receivedPayload, _ = io.ReadAll(r.Body)
w.WriteHeader(tt.lockResponseStatus)
_, err := w.Write(tt.lockResponseBody)
if err != nil {
t.Fatalf("Failed to write response body: %v", err)
}
}
ts := httptest.NewServer(http.HandlerFunc(handler))
defer ts.Close()
unlockURL, err := url.Parse(ts.URL)
if err != nil {
t.Fatalf("Failed to parse lockURL: %v", err)
}
client := &httpClient{
UnlockURL: unlockURL,
LockMethod: "UNLOCK",
Client: retryablehttp.NewClient(),
jsonLockInfo: tt.jsonLockInfo,
}
err = client.Unlock(tt.lockID)
if tt.expectedErrorMsg != nil && err == nil {
// expected an error but none occurred
t.Errorf("UnLock() no expected error = %v", tt.expectedErrorMsg)
}
if tt.expectedErrorMsg == nil && err != nil {
// unexpected error
t.Errorf("UnLock() unexpected error = %v", err)
}
if tt.expectedErrorMsg != nil && err.Error() != tt.expectedErrorMsg.Error() {
// mismatched errors
t.Errorf("UnLock() error = %v, want %v", err, tt.expectedErrorMsg)
}
if !bytes.Equal(receivedPayload, tt.expectedPayload) {
t.Errorf("UnLock() payload = %v, want %v", receivedPayload, tt.expectedPayload)
}
})
}
}
// Tests the Lock method for the HTTP client.
// Test that the correct lock info is returned
func TestHttpClient_lock(t *testing.T) {

View File

@ -160,7 +160,7 @@ func TestBackendConfig(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName))
defer dropSchemaByQuotedName(t, dbCleaner, schemaName)
var diags tfdiags.Diagnostics
b := New(encryption.StateEncryptionDisabled()).(*Backend)
@ -324,7 +324,7 @@ func TestBackendConfigSkipOptions(t *testing.T) {
if tc.Setup != nil {
tc.Setup(t, db, schemaName)
}
defer db.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName))
defer dropSchemaByQuotedName(t, db, schemaName)
b := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), config).(*Backend)
@ -393,7 +393,7 @@ func TestBackendStates(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", pq.QuoteIdentifier(schemaName)))
defer dropSchema(t, dbCleaner, schemaName)
config := backend.TestWrapConfig(map[string]interface{}{
"conn_str": connStr,
@ -418,7 +418,7 @@ func TestBackendStateLocks(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName))
defer dropSchema(t, dbCleaner, schemaName)
config := backend.TestWrapConfig(map[string]interface{}{
"conn_str": connStr,
@ -448,7 +448,6 @@ func TestBackendConcurrentLock(t *testing.T) {
}
getStateMgr := func(schemaName string) (statemgr.Full, *statemgr.LockInfo) {
defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName))
config := backend.TestWrapConfig(map[string]interface{}{
"conn_str": connStr,
"schema_name": schemaName,
@ -470,8 +469,12 @@ func TestBackendConcurrentLock(t *testing.T) {
return stateMgr, info
}
s1, i1 := getStateMgr(fmt.Sprintf("terraform_%s_1", t.Name()))
s2, i2 := getStateMgr(fmt.Sprintf("terraform_%s_2", t.Name()))
firstSchema, secondSchema := fmt.Sprintf("terraform_%s_1", t.Name()), fmt.Sprintf("terraform_%s_2", t.Name())
defer dropSchema(t, dbCleaner, firstSchema)
defer dropSchema(t, dbCleaner, secondSchema)
s1, i1 := getStateMgr(firstSchema)
s2, i2 := getStateMgr(secondSchema)
// First we need to create the workspace as the lock for creating them is
// global
@ -524,3 +527,18 @@ func TestBackendConcurrentLock(t *testing.T) {
func getDatabaseUrl() string {
return os.Getenv("DATABASE_URL")
}
func dropSchema(t *testing.T, db *sql.DB, schemaName string) {
dropSchemaByQuotedName(t, db, pq.QuoteIdentifier(schemaName))
}
func dropSchemaByQuotedName(t *testing.T, db *sql.DB, quotedSchemaName string) {
rows, err := db.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", quotedSchemaName))
if err != nil {
t.Fatal(err)
}
defer rows.Close()
if err = rows.Err(); err != nil {
t.Fatal(err)
}
}
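The helpers above quote the schema name with pq.QuoteIdentifier before interpolating it into DROP SCHEMA, so names that are not valid bare identifiers (for example, a hypothetical subtest-style name containing a slash) are still dropped safely. A minimal sketch, assuming github.com/lib/pq as the diff already does:

package main

import (
	"fmt"

	"github.com/lib/pq"
)

func main() {
	// Hypothetical schema name that would not be valid as a bare identifier.
	schema := "terraform_TestBackendStates/subtest_1"
	// QuoteIdentifier wraps the name in double quotes so the DROP SCHEMA
	// statement remains well-formed.
	fmt.Println(pq.QuoteIdentifier(schema))
}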

View File

@ -9,6 +9,7 @@ import (
"crypto/md5"
"database/sql"
"fmt"
"hash/fnv"
uuid "github.com/hashicorp/go-uuid"
_ "github.com/lib/pq"
@ -90,15 +91,20 @@ func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) {
return nil
}
// Try to acquire locks for the existing row `id` and the creation lock `-1`.
query := `SELECT %s.id, pg_try_advisory_lock(%s.id), pg_try_advisory_lock(-1) FROM %s.%s WHERE %s.name = $1`
row := c.Client.QueryRow(fmt.Sprintf(query, statesTableName, statesTableName, c.SchemaName, statesTableName, statesTableName), c.Name)
creationLockID := c.composeCreationLockID()
// Try to acquire locks for the existing row `id` and the creation lock.
//nolint:gosec // we only parameterize user passed values
query := fmt.Sprintf(`SELECT %s.id, pg_try_advisory_lock(%s.id), pg_try_advisory_lock(%s) FROM %s.%s WHERE %s.name = $1`,
statesTableName, statesTableName, creationLockID, c.SchemaName, statesTableName, statesTableName)
row := c.Client.QueryRow(query, c.Name)
var pgLockId, didLock, didLockForCreate []byte
err = row.Scan(&pgLockId, &didLock, &didLockForCreate)
switch {
case err == sql.ErrNoRows:
// No rows means we're creating the workspace. Take the creation lock.
innerRow := c.Client.QueryRow(`SELECT pg_try_advisory_lock(-1)`)
innerRow := c.Client.QueryRow(fmt.Sprintf(`SELECT pg_try_advisory_lock(%s)`, creationLockID))
var innerDidLock []byte
err := innerRow.Scan(&innerDidLock)
if err != nil {
@ -107,20 +113,20 @@ func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) {
if string(innerDidLock) == "false" {
return "", &statemgr.LockError{Info: info, Err: fmt.Errorf("Already locked for workspace creation: %s", c.Name)}
}
info.Path = "-1"
info.Path = creationLockID
case err != nil:
return "", &statemgr.LockError{Info: info, Err: err}
case string(didLock) == "false":
// Existing workspace is already locked. Release the attempted creation lock.
lockUnlock("-1")
_ = lockUnlock(creationLockID)
return "", &statemgr.LockError{Info: info, Err: fmt.Errorf("Workspace is already locked: %s", c.Name)}
case string(didLockForCreate) == "false":
// Someone has the creation lock already. Release the existing workspace because it might not be safe to touch.
lockUnlock(string(pgLockId))
_ = lockUnlock(string(pgLockId))
return "", &statemgr.LockError{Info: info, Err: fmt.Errorf("Cannot lock workspace; already locked for workspace creation: %s", c.Name)}
default:
// Existing workspace is now locked. Release the attempted creation lock.
lockUnlock("-1")
_ = lockUnlock(creationLockID)
info.Path = string(pgLockId)
}
c.info = info
@ -128,10 +134,6 @@ func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) {
return info.ID, nil
}
func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) {
return c.info, nil
}
func (c *RemoteClient) Unlock(id string) error {
if c.info != nil && c.info.Path != "" {
query := `SELECT pg_advisory_unlock(%s)`
@ -145,3 +147,9 @@ func (c *RemoteClient) Unlock(id string) error {
}
return nil
}
func (c *RemoteClient) composeCreationLockID() string {
hash := fnv.New32()
hash.Write([]byte(c.SchemaName))
return fmt.Sprintf("%d", int64(hash.Sum32())*-1)
}
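composeCreationLockID derives a per-schema advisory lock key by hashing the schema name with FNV-32 and negating the result, replacing the hard-coded -1 that all schemas previously shared. A minimal, runnable sketch of that computation, using hypothetical schema names like the ones the tests generate:

package main

import (
	"fmt"
	"hash/fnv"
)

// creationLockID mirrors composeCreationLockID above: the schema name is
// hashed with FNV-32 and negated, so each schema gets its own negative
// advisory lock key.
func creationLockID(schemaName string) int64 {
	h := fnv.New32()
	h.Write([]byte(schemaName))
	return int64(h.Sum32()) * -1
}

func main() {
	// Two different schemas yield two different creation lock keys.
	fmt.Println(creationLockID("terraform_TestBackendStates_1"))
	fmt.Println(creationLockID("terraform_TestBackendStates_2"))
}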

View File

@ -12,10 +12,12 @@ import (
"database/sql"
"fmt"
"testing"
"time"
"github.com/opentofu/opentofu/internal/backend"
"github.com/opentofu/opentofu/internal/encryption"
"github.com/opentofu/opentofu/internal/states/remote"
"github.com/opentofu/opentofu/internal/states/statemgr"
)
func TestRemoteClient_impl(t *testing.T) {
@ -31,7 +33,7 @@ func TestRemoteClient(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName))
defer dropSchema(t, dbCleaner, schemaName)
config := backend.TestWrapConfig(map[string]interface{}{
"conn_str": connStr,
@ -59,7 +61,7 @@ func TestRemoteLocks(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer dbCleaner.Query(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName))
defer dropSchema(t, dbCleaner, schemaName)
config := backend.TestWrapConfig(map[string]interface{}{
"conn_str": connStr,
@ -80,3 +82,91 @@ func TestRemoteLocks(t *testing.T) {
remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client)
}
// TestConcurrentCreationLocksInDifferentSchemas tests whether backends with different schemas
// affect each other while taking global workspace creation locks.
func TestConcurrentCreationLocksInDifferentSchemas(t *testing.T) {
testACC(t)
connStr := getDatabaseUrl()
dbCleaner, err := sql.Open("postgres", connStr)
if err != nil {
t.Fatal(err)
}
firstSchema := fmt.Sprintf("terraform_%s_1", t.Name())
secondSchema := fmt.Sprintf("terraform_%s_2", t.Name())
defer dropSchema(t, dbCleaner, firstSchema)
defer dropSchema(t, dbCleaner, secondSchema)
firstConfig := backend.TestWrapConfig(map[string]interface{}{
"conn_str": connStr,
"schema_name": firstSchema,
})
secondConfig := backend.TestWrapConfig(map[string]interface{}{
"conn_str": connStr,
"schema_name": secondSchema,
})
//nolint:errcheck // this is a test, I am fine with panic here
firstBackend := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), firstConfig).(*Backend)
//nolint:errcheck // this is a test, I am fine with panic here
secondBackend := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), secondConfig).(*Backend)
//nolint:errcheck // this is a test, I am fine with panic here
thirdBackend := backend.TestBackendConfig(t, New(encryption.StateEncryptionDisabled()), secondConfig).(*Backend)
// We operate on remote clients instead of state managers to simulate the
// first call to backend.StateMgr(), which creates an empty state in the default
// workspace.
firstClient := &RemoteClient{
Client: firstBackend.db,
Name: backend.DefaultStateName,
SchemaName: firstBackend.schemaName,
}
secondClient := &RemoteClient{
Client: secondBackend.db,
Name: backend.DefaultStateName,
SchemaName: secondBackend.schemaName,
}
thirdClient := &RemoteClient{
Client: thirdBackend.db,
Name: backend.DefaultStateName,
SchemaName: thirdBackend.schemaName,
}
// It doesn't matter what lock info we supply for workspace creation.
lock := &statemgr.LockInfo{
ID: "1",
Operation: "test",
Info: "This needs to lock for workspace creation",
Who: "me",
Version: "1",
Created: time.Date(1999, 8, 19, 0, 0, 0, 0, time.UTC),
}
// These calls against an empty database must think they are locking
// for workspace creation; both of them must succeed since they
// are operating on different schemas.
if _, err = firstClient.Lock(lock); err != nil {
t.Fatal(err)
}
if _, err = secondClient.Lock(lock); err != nil {
t.Fatal(err)
}
// This call must fail since we are trying to acquire the same
// lock as the second client. We need to make this call from a
// separate session, since advisory locks can be re-acquired
// within the same session.
if _, err = thirdClient.Lock(lock); err == nil {
t.Fatal("Expected an error to be thrown on a second lock attempt")
} else if lockErr := err.(*statemgr.LockError); lockErr.Info != lock && //nolint:errcheck,errorlint // this is a test, I am fine with panic here
lockErr.Err.Error() != "Already locked for workspace creation: default" {
t.Fatalf("Unexpected error thrown on a second lock attempt: %v", err)
}
}

View File

@ -772,9 +772,9 @@ func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics {
}
if value := obj.GetAttr("assume_role"); !value.IsNull() {
cfg.AssumeRole = configureNestedAssumeRole(obj)
cfg.AssumeRole = []awsbase.AssumeRole{configureNestedAssumeRole(obj)}
} else if value := obj.GetAttr("role_arn"); !value.IsNull() {
cfg.AssumeRole = configureAssumeRole(obj)
cfg.AssumeRole = []awsbase.AssumeRole{configureAssumeRole(obj)}
}
if val := obj.GetAttr("assume_role_with_web_identity"); !val.IsNull() {
@ -885,7 +885,7 @@ func getS3Config(obj cty.Value) func(options *s3.Options) {
}
}
func configureNestedAssumeRole(obj cty.Value) *awsbase.AssumeRole {
func configureNestedAssumeRole(obj cty.Value) awsbase.AssumeRole {
assumeRole := awsbase.AssumeRole{}
obj = obj.GetAttr("assume_role")
@ -922,10 +922,10 @@ func configureNestedAssumeRole(obj cty.Value) *awsbase.AssumeRole {
assumeRole.TransitiveTagKeys = val
}
return &assumeRole
return assumeRole
}
func configureAssumeRole(obj cty.Value) *awsbase.AssumeRole {
func configureAssumeRole(obj cty.Value) awsbase.AssumeRole {
assumeRole := awsbase.AssumeRole{}
assumeRole.RoleARN = stringAttr(obj, "role_arn")
@ -944,7 +944,7 @@ func configureAssumeRole(obj cty.Value) *awsbase.AssumeRole {
assumeRole.TransitiveTagKeys = val
}
return &assumeRole
return assumeRole
}
func configureAssumeRoleWithWebIdentity(obj cty.Value) *awsbase.AssumeRoleWithWebIdentity {

View File

@ -116,7 +116,7 @@ func TestBackendConfig_InvalidRegion(t *testing.T) {
tfdiags.AttributeValue(
tfdiags.Error,
"Invalid region value",
`Invalid AWS Region: nonesuch`,
`invalid AWS Region: nonesuch`,
cty.Path{cty.GetAttrStep{Name: "region"}},
),
},
@ -1284,7 +1284,7 @@ func TestBackendExtraPaths(t *testing.T) {
}
// Write the first state
stateMgr := &remote.State{Client: client}
stateMgr := remote.NewState(client, encryption.StateEncryptionDisabled())
if err := stateMgr.WriteState(s1); err != nil {
t.Fatal(err)
}
@ -1296,7 +1296,7 @@ func TestBackendExtraPaths(t *testing.T) {
// Note a new state manager - otherwise, because these
// states are equal, the state will not Put to the remote
client.path = b.path("s2")
stateMgr2 := &remote.State{Client: client}
stateMgr2 := remote.NewState(client, encryption.StateEncryptionDisabled())
if err := stateMgr2.WriteState(s2); err != nil {
t.Fatal(err)
}

View File

@ -406,6 +406,10 @@ func (c *RemoteClient) getLockInfo(ctx context.Context) (*statemgr.LockInfo, err
return nil, err
}
if len(resp.Item) == 0 {
return nil, fmt.Errorf("no lock info found for: %q within the DynamoDB table: %s", c.lockPath(), c.ddbTable)
}
var infoData string
if v, ok := resp.Item["Info"]; ok {
if v, ok := v.(*dtypes.AttributeValueMemberS); ok {
@ -430,9 +434,6 @@ func (c *RemoteClient) Unlock(id string) error {
lockErr := &statemgr.LockError{}
ctx := context.TODO()
// TODO: store the path and lock ID in separate fields, and have proper
// projection expression only delete the lock if both match, rather than
// checking the ID from the info field first.
lockInfo, err := c.getLockInfo(ctx)
if err != nil {
lockErr.Err = fmt.Errorf("failed to retrieve lock info: %w", err)
@ -445,11 +446,16 @@ func (c *RemoteClient) Unlock(id string) error {
return lockErr
}
// Use a condition expression to ensure both the lock info and lock ID match
params := &dynamodb.DeleteItemInput{
Key: map[string]dtypes.AttributeValue{
"LockID": &dtypes.AttributeValueMemberS{Value: c.lockPath()},
},
TableName: aws.String(c.ddbTable),
TableName: aws.String(c.ddbTable),
ConditionExpression: aws.String("Info = :info"),
ExpressionAttributeValues: map[string]dtypes.AttributeValue{
":info": &dtypes.AttributeValueMemberS{Value: string(lockInfo.Marshal())},
},
}
_, err = c.dynClient.DeleteItem(ctx, params)
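The DeleteItem call above now carries a condition expression so the lock row is only removed when its Info attribute still matches the lock info that was just read. The following is a rough, hypothetical sketch (not part of this change) of how a caller could tell the resulting conditional-check failure apart from other DynamoDB errors, assuming aws-sdk-go-v2's dynamodb types package:

package main

import (
	"errors"
	"fmt"

	dtypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// classifyUnlockErr is a hypothetical helper: when the conditional DeleteItem
// fails because the stored Info no longer matches, DynamoDB returns a
// ConditionalCheckFailedException, which can be distinguished from other errors.
func classifyUnlockErr(err error) string {
	var ccf *dtypes.ConditionalCheckFailedException
	if errors.As(err, &ccf) {
		return "lock info changed since it was read; item left in place"
	}
	return fmt.Sprintf("unexpected DynamoDB error: %v", err)
}

func main() {
	fmt.Println(classifyUnlockErr(&dtypes.ConditionalCheckFailedException{}))
}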

View File

@ -163,6 +163,21 @@ func TestForceUnlock(t *testing.T) {
if err = s2.Unlock(lockID); err != nil {
t.Fatal("failed to force-unlock named state")
}
// No state lock information is found for the new workspace, so the client should return the appropriate error message.
secondWorkspace := "new-workspace"
s2, err = b2.StateMgr(secondWorkspace)
if err != nil {
t.Fatal(err)
}
err = s2.Unlock(lockID)
if err == nil {
t.Fatal("expected an error to occur:", err)
}
expectedErrorMsg := fmt.Errorf("failed to retrieve lock info: no lock info found for: \"%s/env:/%s/%s\" within the DynamoDB table: %s", bucketName, secondWorkspace, keyName, bucketName)
if err.Error() != expectedErrorMsg.Error() {
t.Errorf("Unlock() error = %v, want: %v", err, expectedErrorMsg)
}
}
func TestRemoteClient_clientMD5(t *testing.T) {

View File

@ -169,7 +169,14 @@ func (p *Provider) ImportResourceState(req providers.ImportResourceStateRequest)
panic("unimplemented - terraform_remote_state has no resources")
}
// ValidateResourceConfig is used to to validate the resource configuration values.
// MoveResourceState is called when the state loader encounters an instance state
// that has been moved to a new type, and the state should be updated to reflect the change.
// This is used to move the old state to the new schema.
func (p *Provider) MoveResourceState(r providers.MoveResourceStateRequest) (resp providers.MoveResourceStateResponse) {
return moveDataStoreResourceState(r)
}
// ValidateResourceConfig is used to validate the resource configuration values.
func (p *Provider) ValidateResourceConfig(req providers.ValidateResourceConfigRequest) providers.ValidateResourceConfigResponse {
return validateDataStoreResourceConfig(req)
}

View File

@ -56,6 +56,54 @@ func upgradeDataStoreResourceState(req providers.UpgradeResourceStateRequest) (r
return resp
}
// nullResourceSchema returns a schema for a null_resource with relevant attributes for type migration.
func nullResourceSchema() providers.Schema {
return providers.Schema{
Block: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"triggers": {Type: cty.Map(cty.String), Optional: true},
"id": {Type: cty.String, Computed: true},
},
},
}
}
func moveDataStoreResourceState(req providers.MoveResourceStateRequest) providers.MoveResourceStateResponse {
var resp providers.MoveResourceStateResponse
if req.SourceTypeName != "null_resource" || req.TargetTypeName != "terraform_data" {
resp.Diagnostics = resp.Diagnostics.Append(
fmt.Errorf("unsupported move: %s -> %s; only move from null_resource to terraform_data is supported",
req.SourceTypeName, req.TargetTypeName))
return resp
}
nullTy := nullResourceSchema().Block.ImpliedType()
oldState, err := ctyjson.Unmarshal(req.SourceStateJSON, nullTy)
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
oldStateMap := oldState.AsValueMap()
newStateMap := map[string]cty.Value{}
if trigger, ok := oldStateMap["triggers"]; ok && !trigger.IsNull() {
newStateMap["triggers_replace"] = cty.ObjectVal(trigger.AsValueMap())
}
if id, ok := oldStateMap["id"]; ok && !id.IsNull() {
newStateMap["id"] = id
}
currentSchema := dataStoreResourceSchema()
newState, err := currentSchema.Block.CoerceValue(cty.ObjectVal(newStateMap))
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
resp.TargetState = newState
resp.TargetPrivate = req.SourcePrivate
return resp
}
func readDataStoreResourceState(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
resp.NewState = req.PriorState
return resp

View File

@ -6,6 +6,7 @@
package tf
import (
"bytes"
"strings"
"testing"
@ -82,6 +83,57 @@ func TestManagedDataUpgradeState(t *testing.T) {
}
}
func TestManagedDataMovedState(t *testing.T) {
nullSchema := nullResourceSchema()
nullTy := nullSchema.Block.ImpliedType()
state := cty.ObjectVal(map[string]cty.Value{
"triggers": cty.MapVal(map[string]cty.Value{
"examplekey": cty.StringVal("value"),
}),
"id": cty.StringVal("not-quite-unique"),
})
jsState, err := ctyjson.Marshal(state, nullTy)
if err != nil {
t.Fatal(err)
}
// empty request should fail
req := providers.MoveResourceStateRequest{}
resp := moveDataStoreResourceState(req)
if !resp.Diagnostics.HasErrors() {
t.Fatalf("expected error, got %#v", resp)
}
// valid request
req = providers.MoveResourceStateRequest{
TargetTypeName: "terraform_data",
SourceTypeName: "null_resource",
SourcePrivate: []byte("PRIVATE"),
SourceStateJSON: jsState,
}
resp = moveDataStoreResourceState(req)
expectedState := cty.ObjectVal(map[string]cty.Value{
"triggers_replace": cty.ObjectVal(map[string]cty.Value{
"examplekey": cty.StringVal("value"),
}),
"id": cty.StringVal("not-quite-unique"),
"input": cty.NullVal(cty.DynamicPseudoType),
"output": cty.NullVal(cty.DynamicPseudoType),
})
if !resp.TargetState.RawEquals(expectedState) {
t.Errorf("prior state was:\n%#v\nmoved state is:\n%#v\n", expectedState, resp.TargetState)
}
if !bytes.Equal(resp.TargetPrivate, req.SourcePrivate) {
t.Error("expected private data to be copied")
}
}
func TestManagedDataRead(t *testing.T) {
req := providers.ReadResourceRequest{
TypeName: "terraform_data",

View File

@ -502,6 +502,68 @@ func TestInitProviderNotFound(t *testing.T) {
t.Errorf("wrong output:\n%s", cmp.Diff(stripAnsi(stderr), expectedErr))
}
})
t.Run("implicit provider resource and data not found", func(t *testing.T) {
implicitFixturePath := filepath.Join("testdata", "provider-implicit-ref-not-found/implicit-by-resource-and-data")
tf := e2e.NewBinary(t, tofuBin, implicitFixturePath)
stdout, _, err := tf.Run("init")
if err == nil {
t.Fatal("expected error, got success")
}
// Verify that the warning written to the user contains the resource address from which the provider
// was registered to be downloaded
expectedContentInOutput := []string{
`(and one more similar warning elsewhere)`,
`
Warning: Automatically-inferred provider dependency
on main.tf line 2:
2: resource "nonexistingProv_res" "test1" {
Due to the prefix of the resource type name OpenTofu guessed that you
intended to associate nonexistingProv_res.test1 with a provider whose local
name is "nonexistingprov", but that name is not declared in this module's
required_providers block. OpenTofu therefore guessed that you intended to
use hashicorp/nonexistingprov, but that provider does not exist.
Make at least one of the following changes to tell OpenTofu which provider
to use:
- Add a declaration for local name "nonexistingprov" to this module's
required_providers block, specifying the full source address for the
provider you intended to use.
- Verify that "nonexistingProv_res" is the correct resource type name to
use. Did you omit a prefix which would imply the correct provider?
- Use a "provider" argument within this resource block to override
OpenTofu's automatic selection of the local name "nonexistingprov".
`}
for _, expectedOutput := range expectedContentInOutput {
if cleanOut := strings.TrimSpace(stripAnsi(stdout)); !strings.Contains(cleanOut, expectedOutput) {
t.Errorf("wrong output.\n\toutput:\n%s\n\n\tdoes not contain:\n%s", cleanOut, expectedOutput)
}
}
})
t.Run("resource pointing to a not configured provider does not warn on implicit reference", func(t *testing.T) {
implicitFixturePath := filepath.Join("testdata", "provider-implicit-ref-not-found/resource-with-provider-attribute")
tf := e2e.NewBinary(t, tofuBin, implicitFixturePath)
stdout, _, err := tf.Run("init")
if err == nil {
t.Fatal("expected error, got success")
}
// Ensure that the output does not contain the warning, since the resource is already pointing to a specific
// provider (even though it is misspelled)
expectedOutput := `Initializing the backend...
Initializing provider plugins...
- Finding latest version of hashicorp/asw...`
if cleanOut := strings.TrimSpace(stripAnsi(stdout)); cleanOut != expectedOutput {
t.Errorf("wrong output:\n%s", cmp.Diff(cleanOut, expectedOutput))
}
})
}
// The following test is temporarily removed until the OpenTofu registry returns a deprecation warning

View File

@ -0,0 +1,10 @@
# This tests that implicitly defined providers cannot be fetched and that the user is informed of the root cause
resource "nonexistingProv_res" "test1" {
}
data "nonexistingProv2_data" "test2" {
}
module "testmod" {
source = "./mod"
}

View File

@ -0,0 +1,2 @@
resource "nonexistingProv_res" "test2" {
}

View File

@ -0,0 +1,6 @@
// When a resource points to a provider that is missing a required_providers definition, tofu does not show the warning
// about an implicit provider reference
resource "aws_iam_role" "test" {
assume_role_policy = "test"
provider = asw.test
}

View File

@ -566,7 +566,7 @@ func (c *InitCommand) getProviders(ctx context.Context, config *configs.Config,
// First we'll collect all the provider dependencies we can see in the
// configuration and the state.
reqs, hclDiags := config.ProviderRequirements()
reqs, qualifs, hclDiags := config.ProviderRequirements()
diags = diags.Append(hclDiags)
if hclDiags.HasErrors() {
return false, true, diags
@ -712,6 +712,9 @@ func (c *InitCommand) getProviders(ctx context.Context, config *configs.Config,
suggestion += "\n\nIf you believe this provider is missing from the registry, please submit an issue on the OpenTofu Registry https://github.com/opentofu/registry/issues/new/choose"
}
warnDiags := warnOnFailedImplicitProvReference(provider, qualifs)
diags = diags.Append(warnDiags)
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to query available provider packages",
@ -1039,6 +1042,43 @@ version control system if they represent changes you intended to make.`))
return true, false, diags
}
// warnOnFailedImplicitProvReference returns a warning diagnostic when the downloader fails to fetch a provider that is implicitly referenced.
// In other words, when the provider that failed to download has no required_providers entry, this function gives the user
// more information on the source of the issue, along with instructions on how to fix it.
func warnOnFailedImplicitProvReference(provider addrs.Provider, qualifs *getproviders.ProvidersQualification) tfdiags.Diagnostics {
if _, ok := qualifs.Explicit[provider]; ok {
return nil
}
refs, ok := qualifs.Implicit[provider]
if !ok || len(refs) == 0 {
// If there is no implicit reference for that provider, do not emit the warning; just let the error be returned.
return nil
}
// NOTE: if needed, in the future we can use the rest of the "refs" to print all the culprits or at least to give
// a hint on how many resources are causing this
ref := refs[0]
if ref.ProviderAttribute {
return nil
}
details := fmt.Sprintf(
implicitProviderReferenceBody,
ref.CfgRes.String(),
provider.Type,
provider.ForDisplay(),
provider.Type,
ref.CfgRes.Resource.Type,
provider.Type,
)
return tfdiags.Diagnostics{}.Append(
&hcl.Diagnostic{
Severity: hcl.DiagWarning,
Subject: ref.Ref.ToHCL().Ptr(),
Summary: implicitProviderReferenceHead,
Detail: details,
})
}
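// Hedged sketch, not part of this hunk: the shape of getproviders.ProvidersQualification
// implied by the usage above and by the expectations in the configs tests later in this
// diff. The real definition (and its AddExplicitProvider/AddImplicitProvider methods,
// which populate these maps from config.go) lives in internal/getproviders.
type sketchProvidersQualification struct {
Explicit map[addrs.Provider]struct{}
Implicit map[addrs.Provider][]sketchResourceRef
}
type sketchResourceRef struct {
CfgRes addrs.ConfigResource
Ref tfdiags.SourceRange
ProviderAttribute bool
}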
// backendConfigOverrideBody interprets the raw values of -backend-config
// arguments into a hcl Body that should override the backend settings given
// in the configuration.
@ -1379,3 +1419,14 @@ The current .terraform.lock.hcl file only includes checksums for %s, so OpenTofu
To calculate additional checksums for another platform, run:
tofu providers lock -platform=linux_amd64
(where linux_amd64 is the platform to generate)`
const implicitProviderReferenceHead = `Automatically-inferred provider dependency`
const implicitProviderReferenceBody = `Due to the prefix of the resource type name OpenTofu guessed that you intended to associate %s with a provider whose local name is "%s", but that name is not declared in this module's required_providers block. OpenTofu therefore guessed that you intended to use %s, but that provider does not exist.
Make at least one of the following changes to tell OpenTofu which provider to use:
- Add a declaration for local name "%s" to this module's required_providers block, specifying the full source address for the provider you intended to use.
- Verify that "%s" is the correct resource type name to use. Did you omit a prefix which would imply the correct provider?
- Use a "provider" argument within this resource block to override OpenTofu's automatic selection of the local name "%s".
`
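// Hedged illustration, not part of this change: how the body template above expands for
// the e2e fixture shown earlier in this diff, where resource "nonexistingProv_res" "test1"
// implies the nonexistent provider hashicorp/nonexistingprov.
var _ = fmt.Sprintf(
implicitProviderReferenceBody,
"nonexistingProv_res.test1", // ref.CfgRes.String()
"nonexistingprov", // provider.Type, the guessed local name
"hashicorp/nonexistingprov", // provider.ForDisplay()
"nonexistingprov",
"nonexistingProv_res", // ref.CfgRes.Resource.Type
"nonexistingprov",
)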

View File

@ -124,7 +124,7 @@ func (c *ProvidersLockCommand) Run(args []string) int {
config, confDiags := c.loadConfig(".")
diags = diags.Append(confDiags)
reqs, hclDiags := config.ProviderRequirements()
reqs, _, hclDiags := config.ProviderRequirements()
diags = diags.Append(hclDiags)
// If we have explicit provider selections on the command line then

View File

@ -83,7 +83,7 @@ func (c *ProvidersMirrorCommand) Run(args []string) int {
config, confDiags := c.loadConfig(".")
diags = diags.Append(confDiags)
reqs, moreDiags := config.ProviderRequirements()
reqs, _, moreDiags := config.ProviderRequirements()
diags = diags.Append(moreDiags)
// Read lock file

View File

@ -7,7 +7,6 @@ package json
import (
"bufio"
"bytes"
"fmt"
"sort"
"strings"
@ -16,9 +15,10 @@ import (
"github.com/hashicorp/hcl/v2/hcled"
"github.com/hashicorp/hcl/v2/hclparse"
"github.com/hashicorp/hcl/v2/hclsyntax"
"github.com/zclconf/go-cty/cty"
"github.com/opentofu/opentofu/internal/lang/marks"
"github.com/opentofu/opentofu/internal/tfdiags"
"github.com/zclconf/go-cty/cty"
)
// These severities map to the tfdiags.Severity values, plus an explicit
@ -132,7 +132,8 @@ type DiagnosticFunctionCall struct {
}
// NewDiagnostic takes a tfdiags.Diagnostic and a map of configuration sources,
// and returns a Diagnostic struct.
// and returns a [Diagnostic] object as a "UI-flavored" representation of the
// diagnostic.
func NewDiagnostic(diag tfdiags.Diagnostic, sources map[string]*hcl.File) *Diagnostic {
var sev string
switch diag.Severity() {
@ -144,269 +145,340 @@ func NewDiagnostic(diag tfdiags.Diagnostic, sources map[string]*hcl.File) *Diagn
sev = DiagnosticSeverityUnknown
}
desc := diag.Description()
sourceRefs := diag.Source()
highlightRange, snippetRange := prepareDiagnosticRanges(sourceRefs.Subject, sourceRefs.Context)
diagnostic := &Diagnostic{
// If the diagnostic has source location information then we will try to construct a snippet
// showing a relevant portion of the source code.
snippet := newDiagnosticSnippet(snippetRange, highlightRange, sources)
if snippet != nil {
// We might be able to annotate the snippet with some dynamic-expression-related information,
// if this is a suitably-enriched diagnostic. These are not strictly part of the "snippet",
// but we return them all together because the human-readable UI presents this information
// all together as one UI element.
snippet.Values = newDiagnosticExpressionValues(diag)
snippet.FunctionCall = newDiagnosticSnippetFunctionCall(diag)
}
desc := diag.Description()
return &Diagnostic{
Severity: sev,
Summary: desc.Summary,
Detail: desc.Detail,
Address: desc.Address,
Range: newDiagnosticRange(highlightRange),
Snippet: snippet,
}
sourceRefs := diag.Source()
if sourceRefs.Subject != nil {
// We'll borrow HCL's range implementation here, because it has some
// handy features to help us produce a nice source code snippet.
highlightRange := sourceRefs.Subject.ToHCL()
// Some diagnostic sources fail to set the end of the subject range.
if highlightRange.End == (hcl.Pos{}) {
highlightRange.End = highlightRange.Start
}
snippetRange := highlightRange
if sourceRefs.Context != nil {
snippetRange = sourceRefs.Context.ToHCL()
}
// Make sure the snippet includes the highlight. This should be true
// for any reasonable diagnostic, but we'll make sure.
snippetRange = hcl.RangeOver(snippetRange, highlightRange)
// Empty ranges result in odd diagnostic output, so extend the end to
// ensure there's at least one byte in the snippet or highlight.
if snippetRange.Empty() {
snippetRange.End.Byte++
snippetRange.End.Column++
}
if highlightRange.Empty() {
highlightRange.End.Byte++
highlightRange.End.Column++
}
diagnostic.Range = &DiagnosticRange{
Filename: highlightRange.Filename,
Start: Pos{
Line: highlightRange.Start.Line,
Column: highlightRange.Start.Column,
Byte: highlightRange.Start.Byte,
},
End: Pos{
Line: highlightRange.End.Line,
Column: highlightRange.End.Column,
Byte: highlightRange.End.Byte,
},
}
var src []byte
if sources != nil {
if f, ok := sources[highlightRange.Filename]; ok {
src = f.Bytes
}
}
// If we have a source file for the diagnostic, we can emit a code
// snippet.
if src != nil {
diagnostic.Snippet = &DiagnosticSnippet{
StartLine: snippetRange.Start.Line,
// Ensure that the default Values struct is an empty array, as this
// makes consuming the JSON structure easier in most languages.
Values: []DiagnosticExpressionValue{},
}
file, offset := parseRange(src, highlightRange)
// Some diagnostics may have a useful top-level context to add to
// the code snippet output.
contextStr := hcled.ContextString(file, offset-1)
if contextStr != "" {
diagnostic.Snippet.Context = &contextStr
}
// Build the string of the code snippet, tracking at which byte of
// the file the snippet starts.
var codeStartByte int
sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines)
var code strings.Builder
for sc.Scan() {
lineRange := sc.Range()
if lineRange.Overlaps(snippetRange) {
if codeStartByte == 0 && code.Len() == 0 {
codeStartByte = lineRange.Start.Byte
}
code.Write(lineRange.SliceBytes(src))
code.WriteRune('\n')
}
}
codeStr := strings.TrimSuffix(code.String(), "\n")
diagnostic.Snippet.Code = codeStr
// Calculate the start and end byte of the highlight range relative
// to the code snippet string.
start := highlightRange.Start.Byte - codeStartByte
end := start + (highlightRange.End.Byte - highlightRange.Start.Byte)
// We can end up with some quirky results here in edge cases like
// when a source range starts or ends at a newline character,
// so we'll cap the results at the bounds of the highlight range
// so that consumers of this data don't need to contend with
// out-of-bounds errors themselves.
if start < 0 {
start = 0
} else if start > len(codeStr) {
start = len(codeStr)
}
if end < 0 {
end = 0
} else if end > len(codeStr) {
end = len(codeStr)
}
diagnostic.Snippet.HighlightStartOffset = start
diagnostic.Snippet.HighlightEndOffset = end
if fromExpr := diag.FromExpr(); fromExpr != nil {
// We may also be able to generate information about the dynamic
// values of relevant variables at the point of evaluation, then.
// This is particularly useful for expressions that get evaluated
// multiple times with different values, such as blocks using
// "count" and "for_each", or within "for" expressions.
expr := fromExpr.Expression
ctx := fromExpr.EvalContext
vars := expr.Variables()
values := make([]DiagnosticExpressionValue, 0, len(vars))
seen := make(map[string]struct{}, len(vars))
includeUnknown := tfdiags.DiagnosticCausedByUnknown(diag)
includeSensitive := tfdiags.DiagnosticCausedBySensitive(diag)
Traversals:
for _, traversal := range vars {
for len(traversal) > 1 {
val, diags := traversal.TraverseAbs(ctx)
if diags.HasErrors() {
// Skip anything that generates errors, since we probably
// already have the same error in our diagnostics set
// already.
traversal = traversal[:len(traversal)-1]
continue
}
traversalStr := traversalStr(traversal)
if _, exists := seen[traversalStr]; exists {
continue Traversals // don't show duplicates when the same variable is referenced multiple times
}
value := DiagnosticExpressionValue{
Traversal: traversalStr,
}
switch {
case val.HasMark(marks.Sensitive):
// We only mention a sensitive value if the diagnostic
// we're rendering is explicitly marked as being
// caused by sensitive values, because otherwise
// readers tend to be misled into thinking the error
// is caused by the sensitive value even when it isn't.
if !includeSensitive {
continue Traversals
}
// Even when we do mention one, we keep it vague
// in order to minimize the chance of giving away
// whatever was sensitive about it.
value.Statement = "has a sensitive value"
case !val.IsKnown():
// We'll avoid saying anything about unknown or
// "known after apply" unless the diagnostic is
// explicitly marked as being caused by unknown
// values, because otherwise readers tend to be
// misled into thinking the error is caused by the
// unknown value even when it isn't.
if ty := val.Type(); ty != cty.DynamicPseudoType {
if includeUnknown {
switch {
case ty.IsCollectionType():
valRng := val.Range()
minLen := valRng.LengthLowerBound()
maxLen := valRng.LengthUpperBound()
const maxLimit = 1024 // (upper limit is just an arbitrary value to avoid showing distracting large numbers in the UI)
switch {
case minLen == maxLen:
value.Statement = fmt.Sprintf("is a %s of length %d, known only after apply", ty.FriendlyName(), minLen)
case minLen != 0 && maxLen <= maxLimit:
value.Statement = fmt.Sprintf("is a %s with between %d and %d elements, known only after apply", ty.FriendlyName(), minLen, maxLen)
case minLen != 0:
value.Statement = fmt.Sprintf("is a %s with at least %d elements, known only after apply", ty.FriendlyName(), minLen)
case maxLen <= maxLimit:
value.Statement = fmt.Sprintf("is a %s with up to %d elements, known only after apply", ty.FriendlyName(), maxLen)
default:
value.Statement = fmt.Sprintf("is a %s, known only after apply", ty.FriendlyName())
}
default:
value.Statement = fmt.Sprintf("is a %s, known only after apply", ty.FriendlyName())
}
} else {
value.Statement = fmt.Sprintf("is a %s", ty.FriendlyName())
}
} else {
if !includeUnknown {
continue Traversals
}
value.Statement = "will be known only after apply"
}
default:
value.Statement = fmt.Sprintf("is %s", compactValueStr(val))
}
values = append(values, value)
seen[traversalStr] = struct{}{}
}
}
sort.Slice(values, func(i, j int) bool {
return values[i].Traversal < values[j].Traversal
})
diagnostic.Snippet.Values = values
if callInfo := tfdiags.ExtraInfo[hclsyntax.FunctionCallDiagExtra](diag); callInfo != nil && callInfo.CalledFunctionName() != "" {
calledAs := callInfo.CalledFunctionName()
baseName := calledAs
if idx := strings.LastIndex(baseName, "::"); idx >= 0 {
baseName = baseName[idx+2:]
}
callInfo := &DiagnosticFunctionCall{
CalledAs: calledAs,
}
if f, ok := ctx.Functions[calledAs]; ok {
callInfo.Signature = DescribeFunction(baseName, f)
}
diagnostic.Snippet.FunctionCall = callInfo
}
}
}
}
return diagnostic
}
func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) {
filename := rng.Filename
offset := rng.Start.Byte
// We need to re-parse here to get a *hcl.File we can interrogate. This
// is not awesome since we presumably already parsed the file earlier too,
// but this re-parsing is architecturally simpler than retaining all of
// the hcl.File objects and we only do this in the case of an error anyway
// so the overhead here is not a big problem.
parser := hclparse.NewParser()
var file *hcl.File
// Ignore diagnostics here as there is nothing we can do with them.
if strings.HasSuffix(filename, ".json") {
file, _ = parser.ParseJSON(src, filename)
} else {
file, _ = parser.ParseHCL(src, filename)
// prepareDiagnosticRanges takes the raw subject and context source ranges from a
// diagnostic message and returns the more UI-oriented "highlight" and "snippet"
// ranges.
//
// The "highlight" range describes the characters that are considered to be the
// direct cause of the problem, and which are typically presented as underlined
// when producing human-readable diagnostics in a terminal that can support that.
//
// The "snippet" range describes a potentially-larger range of characters that
// should all be included in the source code snippet included in the diagnostic
// message. The highlight range is guaranteed to be contained within the
// snippet range. Some of our diagnostic messages use this, for example, to
// ensure that the whole of an expression gets included in the snippet even if
// the problem is just one operand of the expression and the expression is wrapped
// over multiple lines.
//
//nolint:nonamedreturns // These names are for documentation purposes, to differentiate two results that have the same type
func prepareDiagnosticRanges(subject, context *tfdiags.SourceRange) (highlight, snippet *tfdiags.SourceRange) {
if subject == nil {
// If we don't even have a "subject" then we have no ranges to report at all.
return nil, nil
}
return file, offset
// We'll borrow HCL's range implementation here, because it has some
// handy features to help us produce a nice source code snippet.
highlightRange := subject.ToHCL()
// Some diagnostic sources fail to set the end of the subject range.
if highlightRange.End == (hcl.Pos{}) {
highlightRange.End = highlightRange.Start
}
snippetRange := highlightRange
if context != nil {
snippetRange = context.ToHCL()
}
// Make sure the snippet includes the highlight. This should be true
// for any reasonable diagnostic, but we'll make sure.
snippetRange = hcl.RangeOver(snippetRange, highlightRange)
// Empty ranges result in odd diagnostic output, so extend the end to
// ensure there's at least one byte in the snippet or highlight.
if highlightRange.Empty() {
highlightRange.End.Byte++
highlightRange.End.Column++
}
if snippetRange.Empty() {
snippetRange.End.Byte++
snippetRange.End.Column++
}
retHighlight := tfdiags.SourceRangeFromHCL(highlightRange)
retSnippet := tfdiags.SourceRangeFromHCL(snippetRange)
return &retHighlight, &retSnippet
}
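// Hedged illustration, not part of this change: with a hypothetical subject range covering
// one operand and a context range covering the enclosing two-line expression, the returned
// highlight equals the subject while the snippet is widened to span the whole context.
var _, _ = prepareDiagnosticRanges(
&tfdiags.SourceRange{
Filename: "main.tf",
Start: tfdiags.SourcePos{Line: 3, Column: 20, Byte: 45},
End: tfdiags.SourcePos{Line: 3, Column: 28, Byte: 53},
},
&tfdiags.SourceRange{
Filename: "main.tf",
Start: tfdiags.SourcePos{Line: 3, Column: 11, Byte: 36},
End: tfdiags.SourcePos{Line: 4, Column: 12, Byte: 80},
},
)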
func newDiagnosticRange(highlightRange *tfdiags.SourceRange) *DiagnosticRange {
if highlightRange == nil {
// No particular range to report, then.
return nil
}
return &DiagnosticRange{
Filename: highlightRange.Filename,
Start: Pos{
Line: highlightRange.Start.Line,
Column: highlightRange.Start.Column,
Byte: highlightRange.Start.Byte,
},
End: Pos{
Line: highlightRange.End.Line,
Column: highlightRange.End.Column,
Byte: highlightRange.End.Byte,
},
}
}
func newDiagnosticSnippet(snippetRange, highlightRange *tfdiags.SourceRange, sources map[string]*hcl.File) *DiagnosticSnippet {
if snippetRange == nil || highlightRange == nil {
// There is no code that is relevant to show in a snippet for this diagnostic.
return nil
}
file, ok := sources[snippetRange.Filename]
if !ok {
// If we don't have the source code for the file that the snippet is supposed
// to come from then we can't produce a snippet. (This tends to happen when
// we're rendering a diagnostic from an unusual location that isn't actually
// a source file, like an expression entered into the "tofu console" prompt.)
return nil
}
src := file.Bytes
if src == nil {
// A file without any source bytes? Weird, but perhaps constructed artificially
// for testing or for other unusual reasons.
return nil
}
// If we get this far then we're going to do our best to return at least a minimal
// snippet, though the level of detail depends on what other information we have
// available.
ret := &DiagnosticSnippet{
StartLine: snippetRange.Start.Line,
// Ensure that the default Values struct is an empty array, as this
// makes consuming the JSON structure easier in most languages.
Values: []DiagnosticExpressionValue{},
}
// Some callers pass us *hcl.File objects they directly constructed rather than
// using the HCL parser, in which case they lack the "navigation metadata"
// that HCL's parsers would generate. We need that metadata to extract the
// context string below, so we'll make a best effort to obtain that metadata.
file = tryHCLFileWithNavMetadata(file, snippetRange.Filename)
// Some diagnostics may have a useful top-level context to add to
// the code snippet output. This function needs a file with nav metadata
// to return a useful result, but it will happily return an empty string
// if given a file without that metadata.
contextStr := hcled.ContextString(file, highlightRange.Start.Byte-1)
if contextStr != "" {
ret.Context = &contextStr
}
// Build the string of the code snippet, tracking at which byte of
// the file the snippet starts.
var codeStartByte int
sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines)
var code strings.Builder
for sc.Scan() {
lineRange := sc.Range()
if lineRange.Overlaps(snippetRange.ToHCL()) {
if codeStartByte == 0 && code.Len() == 0 {
codeStartByte = lineRange.Start.Byte
}
code.Write(lineRange.SliceBytes(src))
code.WriteRune('\n')
}
}
codeStr := strings.TrimSuffix(code.String(), "\n")
ret.Code = codeStr
// Calculate the start and end byte of the highlight range relative
// to the code snippet string.
start := highlightRange.Start.Byte - codeStartByte
end := start + (highlightRange.End.Byte - highlightRange.Start.Byte)
// We can end up with some quirky results here in edge cases like
// when a source range starts or ends at a newline character,
// so we'll cap the results at the bounds of the highlight range
// so that consumers of this data don't need to contend with
// out-of-bounds errors themselves.
if start < 0 {
start = 0
} else if start > len(codeStr) {
start = len(codeStr)
}
if end < 0 {
end = 0
} else if end > len(codeStr) {
end = len(codeStr)
}
ret.HighlightStartOffset = start
ret.HighlightEndOffset = end
return ret
}
func newDiagnosticExpressionValues(diag tfdiags.Diagnostic) []DiagnosticExpressionValue {
fromExpr := diag.FromExpr()
if fromExpr == nil {
// no expression-related information on this diagnostic, but our
// callers always want a non-nil slice in this case because that's
// friendlier for JSON serialization.
return make([]DiagnosticExpressionValue, 0)
}
// We may also be able to generate information about the dynamic
// values of relevant variables at the point of evaluation, then.
// This is particularly useful for expressions that get evaluated
// multiple times with different values, such as blocks using
// "count" and "for_each", or within "for" expressions.
expr := fromExpr.Expression
ctx := fromExpr.EvalContext
vars := expr.Variables()
values := make([]DiagnosticExpressionValue, 0, len(vars))
seen := make(map[string]struct{}, len(vars))
includeUnknown := tfdiags.DiagnosticCausedByUnknown(diag)
includeSensitive := tfdiags.DiagnosticCausedBySensitive(diag)
Traversals:
for _, traversal := range vars {
for len(traversal) > 1 {
val, diags := traversal.TraverseAbs(ctx)
if diags.HasErrors() {
// Skip anything that generates errors, since we probably
// already have the same error in our diagnostics set
// already.
traversal = traversal[:len(traversal)-1]
continue
}
traversalStr := traversalStr(traversal)
if _, exists := seen[traversalStr]; exists {
continue Traversals // don't show duplicates when the same variable is referenced multiple times
}
statement := newDiagnosticSnippetValueDescription(val, includeUnknown, includeSensitive)
if statement == "" {
// If we don't have anything to say about this value then we won't include
// an entry for it at all.
continue Traversals
}
values = append(values, DiagnosticExpressionValue{
Traversal: traversalStr,
Statement: statement,
})
seen[traversalStr] = struct{}{}
}
}
sort.Slice(values, func(i, j int) bool {
return values[i].Traversal < values[j].Traversal
})
return values
}
func newDiagnosticSnippetFunctionCall(diag tfdiags.Diagnostic) *DiagnosticFunctionCall {
fromExpr := diag.FromExpr()
if fromExpr == nil {
return nil // no expression-related information on this diagnostic
}
callInfo := tfdiags.ExtraInfo[hclsyntax.FunctionCallDiagExtra](diag)
if callInfo == nil || callInfo.CalledFunctionName() == "" {
return nil // no function call information
}
ctx := fromExpr.EvalContext
calledAs := callInfo.CalledFunctionName()
baseName := calledAs
if idx := strings.LastIndex(baseName, "::"); idx >= 0 {
baseName = baseName[idx+2:]
}
ret := &DiagnosticFunctionCall{
CalledAs: calledAs,
}
if f, ok := ctx.Functions[calledAs]; ok {
ret.Signature = DescribeFunction(baseName, f)
}
return ret
}
func newDiagnosticSnippetValueDescription(val cty.Value, includeUnknown, includeSensitive bool) string {
switch {
case val.HasMark(marks.Sensitive):
// We only mention a sensitive value if the diagnostic
// we're rendering is explicitly marked as being
// caused by sensitive values, because otherwise
// readers tend to be misled into thinking the error
// is caused by the sensitive value even when it isn't.
if !includeSensitive {
return ""
}
// Even when we do mention one, we keep it vague
// in order to minimize the chance of giving away
// whatever was sensitive about it.
return "has a sensitive value"
case !val.IsKnown():
ty := val.Type()
// We'll avoid saying anything about unknown or
// "known after apply" unless the diagnostic is
// explicitly marked as being caused by unknown
// values, because otherwise readers tend to be
// misled into thinking the error is caused by the
// unknown value even when it isn't.
if !includeUnknown {
if ty == cty.DynamicPseudoType {
return "" // if we can't even name the type then we'll say nothing at all
}
// We can at least say what the type is, without mentioning "known after apply" at all
return fmt.Sprintf("is a %s", ty.FriendlyName())
}
switch {
case ty == cty.DynamicPseudoType:
return "will be known only after apply" // we don't even know what the type will be
case ty.IsCollectionType():
// If the unknown value has collection length refinements then we might at least
// be able to give some hints about the expected length.
valRng := val.Range()
minLen := valRng.LengthLowerBound()
maxLen := valRng.LengthUpperBound()
const maxLimit = 1024 // (upper limit is just an arbitrary value to avoid showing distracting large numbers in the UI)
switch {
case minLen == maxLen:
return fmt.Sprintf("is a %s of length %d, known only after apply", ty.FriendlyName(), minLen)
case minLen != 0 && maxLen <= maxLimit:
return fmt.Sprintf("is a %s with between %d and %d elements, known only after apply", ty.FriendlyName(), minLen, maxLen)
case minLen != 0:
return fmt.Sprintf("is a %s with at least %d elements, known only after apply", ty.FriendlyName(), minLen)
case maxLen <= maxLimit:
return fmt.Sprintf("is a %s with up to %d elements, known only after apply", ty.FriendlyName(), maxLen)
default:
return fmt.Sprintf("is a %s, known only after apply", ty.FriendlyName())
}
default:
return fmt.Sprintf("is a %s, known only after apply", ty.FriendlyName())
}
default:
return fmt.Sprintf("is %s", compactValueStr(val))
}
}
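// Hedged sketch, not part of this change: how collection length refinements drive the
// "known only after apply" wording above, using go-cty's refinement builder. With
// includeUnknown set, an unknown list refined to between 1 and 3 elements is described
// as "is a list of string with between 1 and 3 elements, known only after apply".
var _ = newDiagnosticSnippetValueDescription(
cty.UnknownVal(cty.List(cty.String)).Refine().
NotNull().
CollectionLengthLowerBound(1).
CollectionLengthUpperBound(3).
NewValue(),
true,  // includeUnknown
false, // includeSensitive
)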
// compactValueStr produces a compact, single-line summary of a given value
@ -493,7 +565,7 @@ func traversalStr(traversal hcl.Traversal) string {
// producing helpful contextual messages in diagnostics. It is not
// comprehensive nor intended to be used for other purposes.
var buf bytes.Buffer
var buf strings.Builder
for _, step := range traversal {
switch tStep := step.(type) {
case hcl.TraverseRoot:
@ -515,3 +587,41 @@ func traversalStr(traversal hcl.Traversal) string {
}
return buf.String()
}
// tryHCLFileWithNavMetadata takes an hcl.File that might have been directly
// constructed rather than produced by an HCL parser, and tries to pass it
// through a suitable HCL parser if it lacks the metadata that an HCL parser
// would normally add.
//
// If parsing would be necessary to produce the metadata but parsing fails
// then this returns the given file verbatim, so the caller must still be
// prepared to deal with a file lacking navigation metadata.
func tryHCLFileWithNavMetadata(file *hcl.File, filename string) *hcl.File {
if file.Nav != nil {
// If there's _something_ in this field then we'll assume that
// an HCL parser put it there. The details of this field are
// HCL-parser-specific so we don't try to dig any deeper.
return file
}
// If we have a nil nav then we'll try to construct a fully-fledged
// file by parsing what we were given. This is best-effort, because
// the file might well have been lacking navigation metadata due to
// having been invalid in the first place.
// Re-parsing a file that might well have been parsed earlier already
// is a little wasteful, but we only get here when we're
// returning diagnostics and so we'd rather do a little extra work
// if it might allow us to return a better diagnostic.
parser := hclparse.NewParser()
var newFile *hcl.File
if strings.HasSuffix(filename, ".json") {
newFile, _ = parser.ParseJSON(file.Bytes, filename)
} else {
newFile, _ = parser.ParseHCL(file.Bytes, filename)
}
if newFile == nil {
// Our best efforts have failed, then. We'll just return what we had.
return file
}
return newFile
}

View File

@ -12,10 +12,10 @@ import (
version "github.com/hashicorp/go-version"
"github.com/hashicorp/hcl/v2"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/depsfile"
"github.com/opentofu/opentofu/internal/getproviders"
"github.com/opentofu/opentofu/internal/tfdiags"
)
// A Config is a node in the tree of modules within a configuration.
@ -230,7 +230,7 @@ func (c *Config) EntersNewPackage() bool {
func (c *Config) VerifyDependencySelections(depLocks *depsfile.Locks) []error {
var errs []error
reqs, diags := c.ProviderRequirements()
reqs, _, diags := c.ProviderRequirements()
if diags.HasErrors() {
// It should be very unusual to get here, but unfortunately we can
// end up here in some edge cases where the config loader doesn't
@ -301,11 +301,12 @@ func (c *Config) VerifyDependencySelections(depLocks *depsfile.Locks) []error {
//
// If the returned diagnostics includes errors then the resulting Requirements
// may be incomplete.
func (c *Config) ProviderRequirements() (getproviders.Requirements, hcl.Diagnostics) {
func (c *Config) ProviderRequirements() (getproviders.Requirements, *getproviders.ProvidersQualification, hcl.Diagnostics) {
reqs := make(getproviders.Requirements)
diags := c.addProviderRequirements(reqs, true, true)
qualifs := new(getproviders.ProvidersQualification)
diags := c.addProviderRequirements(reqs, qualifs, true, true)
return reqs, diags
return reqs, qualifs, diags
}
// ProviderRequirementsShallow searches only the direct receiver for explicit
@ -315,7 +316,8 @@ func (c *Config) ProviderRequirements() (getproviders.Requirements, hcl.Diagnost
// may be incomplete.
func (c *Config) ProviderRequirementsShallow() (getproviders.Requirements, hcl.Diagnostics) {
reqs := make(getproviders.Requirements)
diags := c.addProviderRequirements(reqs, false, true)
qualifs := new(getproviders.ProvidersQualification)
diags := c.addProviderRequirements(reqs, qualifs, false, true)
return reqs, diags
}
@ -328,7 +330,8 @@ func (c *Config) ProviderRequirementsShallow() (getproviders.Requirements, hcl.D
// may be incomplete.
func (c *Config) ProviderRequirementsByModule() (*ModuleRequirements, hcl.Diagnostics) {
reqs := make(getproviders.Requirements)
diags := c.addProviderRequirements(reqs, false, false)
qualifs := new(getproviders.ProvidersQualification)
diags := c.addProviderRequirements(reqs, qualifs, false, false)
children := make(map[string]*ModuleRequirements)
for name, child := range c.Children {
@ -378,7 +381,7 @@ func (c *Config) ProviderRequirementsByModule() (*ModuleRequirements, hcl.Diagno
// implementation, gradually mutating a shared requirements object to
// eventually return. If the recurse argument is true, the requirements will
// include all descendant modules; otherwise, only the specified module.
func (c *Config) addProviderRequirements(reqs getproviders.Requirements, recurse, tests bool) hcl.Diagnostics {
func (c *Config) addProviderRequirements(reqs getproviders.Requirements, qualifs *getproviders.ProvidersQualification, recurse, tests bool) hcl.Diagnostics {
var diags hcl.Diagnostics
// First we'll deal with the requirements directly in _our_ module...
@ -409,6 +412,7 @@ func (c *Config) addProviderRequirements(reqs getproviders.Requirements, recurse
})
}
reqs[fqn] = append(reqs[fqn], constraints...)
qualifs.AddExplicitProvider(providerReqs.Type)
}
}
@ -418,32 +422,44 @@ func (c *Config) addProviderRequirements(reqs getproviders.Requirements, recurse
for _, rc := range c.Module.ManagedResources {
fqn := rc.Provider
if _, exists := reqs[fqn]; exists {
// If this is called for a child module and the provider was added from another implicit reference rather than
// from a top-level required_providers entry, we need to collect this resource's reference as an implicit provider as well.
qualifs.AddImplicitProvider(fqn, getproviders.ResourceRef{
CfgRes: rc.Addr().InModule(c.Path),
Ref: tfdiags.SourceRangeFromHCL(rc.DeclRange),
ProviderAttribute: rc.ProviderConfigRef != nil,
})
// Explicit dependency already present
continue
}
qualifs.AddImplicitProvider(fqn, getproviders.ResourceRef{
CfgRes: rc.Addr().InModule(c.Path),
Ref: tfdiags.SourceRangeFromHCL(rc.DeclRange),
ProviderAttribute: rc.ProviderConfigRef != nil,
})
reqs[fqn] = nil
}
for _, rc := range c.Module.DataResources {
fqn := rc.Provider
if _, exists := reqs[fqn]; exists {
// If this is called for a child module, and the provider was added from another implicit reference and not
// from a top level required_provider, we need to collect the reference of this resource as well as implicit provider.
qualifs.AddImplicitProvider(fqn, getproviders.ResourceRef{
CfgRes: rc.Addr().InModule(c.Path),
Ref: tfdiags.SourceRangeFromHCL(rc.DeclRange),
ProviderAttribute: rc.ProviderConfigRef != nil,
})
// Explicit dependency already present
continue
}
qualifs.AddImplicitProvider(fqn, getproviders.ResourceRef{
CfgRes: rc.Addr().InModule(c.Path),
Ref: tfdiags.SourceRangeFromHCL(rc.DeclRange),
ProviderAttribute: rc.ProviderConfigRef != nil,
})
reqs[fqn] = nil
}
for _, i := range c.Module.Import {
implied, err := addrs.ParseProviderPart(i.StaticTo.Resource.ImpliedProvider())
if err == nil {
provider := c.Module.ImpliedProviderForUnqualifiedType(implied)
if _, exists := reqs[provider]; exists {
// Explicit dependency already present
continue
}
reqs[provider] = nil
}
// We don't return a diagnostic here, because the invalid address will
// have been caught elsewhere.
}
// Import blocks that are generating config may also have a custom provider
// meta argument. Like the provider meta argument used in resource blocks,
@ -454,6 +470,18 @@ func (c *Config) addProviderRequirements(reqs getproviders.Requirements, recurse
// this will be because the user has written explicit provider arguments
// that don't agree and we'll get them to fix it.
for _, i := range c.Module.Import {
// Add the import's declared or implicit provider
fqn := i.Provider
if _, exists := reqs[fqn]; !exists {
reqs[fqn] = nil
qualifs.AddImplicitProvider(i.Provider, getproviders.ResourceRef{
CfgRes: i.StaticTo,
Ref: tfdiags.SourceRangeFromHCL(i.DeclRange),
})
}
// TODO: This should probably be moved to provider_validation.go so that
// import providers can be properly validated across modules (root -> children)
if len(i.StaticTo.Module) > 0 {
// All provider information for imports into modules should come
// from the module block, so we don't need to load anything for
@ -479,7 +507,7 @@ func (c *Config) addProviderRequirements(reqs getproviders.Requirements, recurse
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid import provider argument",
Detail: "The provider argument can only be specified in import blocks that will generate configuration.\n\nUse the provider argument in the target resource block to configure the provider for a resource with explicit provider configuration.",
Detail: "The provider argument in the target resource block must be specified and match the import block.",
Subject: i.ProviderDeclRange.Ptr(),
})
continue
@ -499,27 +527,13 @@ func (c *Config) addProviderRequirements(reqs getproviders.Requirements, recurse
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid import provider argument",
Detail: "The provider argument can only be specified in import blocks that will generate configuration.\n\nUse the provider argument in the target resource block to configure the provider for a resource with explicit provider configuration.",
Detail: "The provider argument in the target resource block must match the import block.",
Subject: i.ProviderDeclRange.Ptr(),
})
continue
}
}
// All the provider information should come from the target resource
// which has already been processed, so skip the rest of this
// processing.
continue
}
// Otherwise we are generating config for the resource being imported,
// so all the provider information must come from this import block.
fqn := i.Provider
if _, exists := reqs[fqn]; exists {
// Explicit dependency already present
continue
}
reqs[fqn] = nil
}
// "provider" block can also contain version constraints
@ -541,7 +555,7 @@ func (c *Config) addProviderRequirements(reqs getproviders.Requirements, recurse
// Then we'll also look for requirements in testing modules.
for _, run := range file.Runs {
if run.ConfigUnderTest != nil {
moreDiags := run.ConfigUnderTest.addProviderRequirements(reqs, true, false)
moreDiags := run.ConfigUnderTest.addProviderRequirements(reqs, qualifs, true, false)
diags = append(diags, moreDiags...)
}
}
@ -551,7 +565,7 @@ func (c *Config) addProviderRequirements(reqs getproviders.Requirements, recurse
if recurse {
for _, childConfig := range c.Children {
moreDiags := childConfig.addProviderRequirements(reqs, true, false)
moreDiags := childConfig.addProviderRequirements(reqs, qualifs, true, false)
diags = append(diags, moreDiags...)
}
}
@ -791,7 +805,7 @@ func (c *Config) resolveProviderTypesForTests(providers map[string]addrs.Provide
// versions for each provider.
func (c *Config) ProviderTypes() []addrs.Provider {
// Ignore diagnostics here because they relate to version constraints
reqs, _ := c.ProviderRequirements()
reqs, _, _ := c.ProviderRequirements()
ret := make([]addrs.Provider, 0, len(reqs))
for k := range reqs {

View File

@ -18,6 +18,7 @@ import (
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hclparse"
"github.com/opentofu/opentofu/internal/tfdiags"
"github.com/zclconf/go-cty/cty"
version "github.com/hashicorp/go-version"
@ -149,26 +150,83 @@ func TestConfigProviderRequirements(t *testing.T) {
nullProvider := addrs.NewDefaultProvider("null")
randomProvider := addrs.NewDefaultProvider("random")
impliedProvider := addrs.NewDefaultProvider("implied")
importimpliedProvider := addrs.NewDefaultProvider("importimplied")
importexplicitProvider := addrs.NewDefaultProvider("importexplicit")
terraformProvider := addrs.NewBuiltInProvider("terraform")
configuredProvider := addrs.NewDefaultProvider("configured")
grandchildProvider := addrs.NewDefaultProvider("grandchild")
got, diags := cfg.ProviderRequirements()
got, qualifs, diags := cfg.ProviderRequirements()
assertNoDiagnostics(t, diags)
want := getproviders.Requirements{
// the nullProvider constraints from the two modules are merged
nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0, 2.0.1"),
randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"),
tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"),
configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"),
impliedProvider: nil,
happycloudProvider: nil,
terraformProvider: nil,
grandchildProvider: nil,
nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0, 2.0.1"),
randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"),
tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"),
configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"),
impliedProvider: nil,
importimpliedProvider: nil,
importexplicitProvider: nil,
happycloudProvider: nil,
terraformProvider: nil,
grandchildProvider: nil,
}
wantQualifs := &getproviders.ProvidersQualification{
Implicit: map[addrs.Provider][]getproviders.ResourceRef{
grandchildProvider: {
{
CfgRes: addrs.ConfigResource{Module: []string{"kinder", "nested"}, Resource: addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "grandchild_foo", Name: "bar"}},
Ref: tfdiags.SourceRange{Filename: "testdata/provider-reqs/child/grandchild/provider-reqs-grandchild.tf", Start: tfdiags.SourcePos{Line: 3, Column: 1, Byte: 136}, End: tfdiags.SourcePos{Line: 3, Column: 32, Byte: 167}},
},
},
impliedProvider: {
{
CfgRes: addrs.ConfigResource{Resource: addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "implied_foo", Name: "bar"}},
Ref: tfdiags.SourceRange{Filename: "testdata/provider-reqs/provider-reqs-root.tf", Start: tfdiags.SourcePos{Line: 16, Column: 1, Byte: 317}, End: tfdiags.SourcePos{Line: 16, Column: 29, Byte: 345}},
},
},
importexplicitProvider: {
{
CfgRes: addrs.ConfigResource{Resource: addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "importimplied", Name: "targetB"}},
Ref: tfdiags.SourceRange{Filename: "testdata/provider-reqs/provider-reqs-root.tf", Start: tfdiags.SourcePos{Line: 42, Column: 1, Byte: 939}, End: tfdiags.SourcePos{Line: 42, Column: 7, Byte: 945}},
},
},
importimpliedProvider: {
{
CfgRes: addrs.ConfigResource{Resource: addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "importimplied", Name: "targetA"}},
Ref: tfdiags.SourceRange{Filename: "testdata/provider-reqs/provider-reqs-root.tf", Start: tfdiags.SourcePos{Line: 37, Column: 1, Byte: 886}, End: tfdiags.SourcePos{Line: 37, Column: 7, Byte: 892}},
},
},
terraformProvider: {
{
CfgRes: addrs.ConfigResource{Resource: addrs.Resource{Mode: addrs.DataResourceMode, Type: "terraform_remote_state", Name: "bar"}},
Ref: tfdiags.SourceRange{Filename: "testdata/provider-reqs/provider-reqs-root.tf", Start: tfdiags.SourcePos{Line: 27, Column: 1, Byte: 628}, End: tfdiags.SourcePos{Line: 27, Column: 36, Byte: 663}},
},
},
},
Explicit: map[addrs.Provider]struct{}{
happycloudProvider: {},
nullProvider: {},
randomProvider: {},
tlsProvider: {},
},
}
// These 2 assertions strictly ensure that "provider" blocks are not registered into the qualifications.
// Technically speaking, provider blocks are indeed implicit references, but the current warning message
// on implicitly referenced providers could be misleading for "provider" blocks.
if _, okExpl := qualifs.Explicit[configuredProvider]; okExpl {
t.Errorf("provider blocks shouldn't be added into the explicit qualifications")
}
if _, okImpl := qualifs.Implicit[configuredProvider]; okImpl {
t.Errorf("provider blocks shouldn't be added into the implicit qualifications")
}
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("wrong result\n%s", diff)
t.Errorf("wrong reqs result\n%s", diff)
}
if diff := cmp.Diff(wantQualifs, qualifs); diff != "" {
t.Errorf("wrong qualifs result\n%s", diff)
}
}
@ -191,7 +249,7 @@ func TestConfigProviderRequirementsInclTests(t *testing.T) {
terraformProvider := addrs.NewBuiltInProvider("terraform")
configuredProvider := addrs.NewDefaultProvider("configured")
got, diags := cfg.ProviderRequirements()
got, qualifs, diags := cfg.ProviderRequirements()
assertNoDiagnostics(t, diags)
want := getproviders.Requirements{
// the nullProvider constraints from the two modules are merged
@ -203,9 +261,35 @@ func TestConfigProviderRequirementsInclTests(t *testing.T) {
terraformProvider: nil,
}
wantQualifs := &getproviders.ProvidersQualification{
Implicit: map[addrs.Provider][]getproviders.ResourceRef{
impliedProvider: {
{
CfgRes: addrs.ConfigResource{Resource: addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "implied_foo", Name: "bar"}},
Ref: tfdiags.SourceRange{Filename: "testdata/provider-reqs-with-tests/provider-reqs-root.tf", Start: tfdiags.SourcePos{Line: 12, Column: 1, Byte: 247}, End: tfdiags.SourcePos{Line: 12, Column: 29, Byte: 275}},
},
},
terraformProvider: {
{
CfgRes: addrs.ConfigResource{Resource: addrs.Resource{Mode: addrs.DataResourceMode, Type: "terraform_remote_state", Name: "bar"}},
Ref: tfdiags.SourceRange{Filename: "testdata/provider-reqs-with-tests/provider-reqs-root.tf", Start: tfdiags.SourcePos{Line: 19, Column: 1, Byte: 516}, End: tfdiags.SourcePos{Line: 19, Column: 36, Byte: 551}},
},
},
},
Explicit: map[addrs.Provider]struct{}{
nullProvider: {},
randomProvider: {},
tlsProvider: {},
},
}
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("wrong result\n%s", diff)
}
if diff := cmp.Diff(wantQualifs, qualifs); diff != "" {
t.Errorf("wrong qualifs result\n%s", diff)
}
}
func TestConfigProviderRequirementsDuplicate(t *testing.T) {
@ -230,6 +314,8 @@ func TestConfigProviderRequirementsShallow(t *testing.T) {
nullProvider := addrs.NewDefaultProvider("null")
randomProvider := addrs.NewDefaultProvider("random")
impliedProvider := addrs.NewDefaultProvider("implied")
importimpliedProvider := addrs.NewDefaultProvider("importimplied")
importexplicitProvider := addrs.NewDefaultProvider("importexplicit")
terraformProvider := addrs.NewBuiltInProvider("terraform")
configuredProvider := addrs.NewDefaultProvider("configured")
@ -237,12 +323,14 @@ func TestConfigProviderRequirementsShallow(t *testing.T) {
assertNoDiagnostics(t, diags)
want := getproviders.Requirements{
// the nullProvider constraint is only from the root module
nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0"),
randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"),
tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"),
configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"),
impliedProvider: nil,
terraformProvider: nil,
nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0"),
randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"),
tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"),
configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"),
impliedProvider: nil,
importimpliedProvider: nil,
importexplicitProvider: nil,
terraformProvider: nil,
}
if diff := cmp.Diff(want, got); diff != "" {
@ -301,6 +389,8 @@ func TestConfigProviderRequirementsByModule(t *testing.T) {
nullProvider := addrs.NewDefaultProvider("null")
randomProvider := addrs.NewDefaultProvider("random")
impliedProvider := addrs.NewDefaultProvider("implied")
importimpliedProvider := addrs.NewDefaultProvider("importimplied")
importexplicitProvider := addrs.NewDefaultProvider("importexplicit")
terraformProvider := addrs.NewBuiltInProvider("terraform")
configuredProvider := addrs.NewDefaultProvider("configured")
grandchildProvider := addrs.NewDefaultProvider("grandchild")
@ -313,12 +403,14 @@ func TestConfigProviderRequirementsByModule(t *testing.T) {
SourceDir: "testdata/provider-reqs",
Requirements: getproviders.Requirements{
// Only the root module's version is present here
nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0"),
randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"),
tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"),
configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"),
impliedProvider: nil,
terraformProvider: nil,
nullProvider: getproviders.MustParseVersionConstraints("~> 2.0.0"),
randomProvider: getproviders.MustParseVersionConstraints("~> 1.2.0"),
tlsProvider: getproviders.MustParseVersionConstraints("~> 3.0"),
configuredProvider: getproviders.MustParseVersionConstraints("~> 1.4"),
impliedProvider: nil,
importimpliedProvider: nil,
importexplicitProvider: nil,
terraformProvider: nil,
},
Children: map[string]*ModuleRequirements{
"kinder": {
@ -433,6 +525,8 @@ func TestVerifyDependencySelections(t *testing.T) {
nullProvider := addrs.NewDefaultProvider("null")
randomProvider := addrs.NewDefaultProvider("random")
impliedProvider := addrs.NewDefaultProvider("implied")
importimpliedProvider := addrs.NewDefaultProvider("importimplied")
importexplicitProvider := addrs.NewDefaultProvider("importexplicit")
configuredProvider := addrs.NewDefaultProvider("configured")
grandchildProvider := addrs.NewDefaultProvider("grandchild")
@ -448,6 +542,8 @@ func TestVerifyDependencySelections(t *testing.T) {
`provider registry.opentofu.org/hashicorp/configured: required by this configuration but no version is selected`,
`provider registry.opentofu.org/hashicorp/grandchild: required by this configuration but no version is selected`,
`provider registry.opentofu.org/hashicorp/implied: required by this configuration but no version is selected`,
`provider registry.opentofu.org/hashicorp/importexplicit: required by this configuration but no version is selected`,
`provider registry.opentofu.org/hashicorp/importimplied: required by this configuration but no version is selected`,
`provider registry.opentofu.org/hashicorp/null: required by this configuration but no version is selected`,
`provider registry.opentofu.org/hashicorp/random: required by this configuration but no version is selected`,
`provider registry.opentofu.org/hashicorp/tls: required by this configuration but no version is selected`,
@ -459,6 +555,8 @@ func TestVerifyDependencySelections(t *testing.T) {
locks.SetProvider(configuredProvider, getproviders.MustParseVersion("1.4.0"), nil, nil)
locks.SetProvider(grandchildProvider, getproviders.MustParseVersion("0.1.0"), nil, nil)
locks.SetProvider(impliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil)
locks.SetProvider(importimpliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil)
locks.SetProvider(importexplicitProvider, getproviders.MustParseVersion("0.2.0"), nil, nil)
locks.SetProvider(nullProvider, getproviders.MustParseVersion("2.0.1"), nil, nil)
locks.SetProvider(randomProvider, getproviders.MustParseVersion("1.2.2"), nil, nil)
locks.SetProvider(tlsProvider, getproviders.MustParseVersion("3.0.1"), nil, nil)
@ -471,6 +569,8 @@ func TestVerifyDependencySelections(t *testing.T) {
locks.SetProvider(configuredProvider, getproviders.MustParseVersion("1.4.0"), nil, nil)
locks.SetProvider(grandchildProvider, getproviders.MustParseVersion("0.1.0"), nil, nil)
locks.SetProvider(impliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil)
locks.SetProvider(importimpliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil)
locks.SetProvider(importexplicitProvider, getproviders.MustParseVersion("0.2.0"), nil, nil)
locks.SetProvider(nullProvider, getproviders.MustParseVersion("3.0.0"), nil, nil)
locks.SetProvider(randomProvider, getproviders.MustParseVersion("1.2.2"), nil, nil)
locks.SetProvider(tlsProvider, getproviders.MustParseVersion("3.0.1"), nil, nil)
@ -490,6 +590,8 @@ func TestVerifyDependencySelections(t *testing.T) {
locks.SetProvider(configuredProvider, getproviders.MustParseVersion("1.4.0"), nil, nil)
locks.SetProvider(grandchildProvider, getproviders.MustParseVersion("0.1.0"), nil, nil)
locks.SetProvider(impliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil)
locks.SetProvider(importimpliedProvider, getproviders.MustParseVersion("0.2.0"), nil, nil)
locks.SetProvider(importexplicitProvider, getproviders.MustParseVersion("0.2.0"), nil, nil)
locks.SetProvider(randomProvider, getproviders.MustParseVersion("1.2.2"), nil, nil)
locks.SetProvider(tlsProvider, getproviders.MustParseVersion("3.0.1"), nil, nil)
locks.SetProvider(happycloudProvider, getproviders.MustParseVersion("0.0.1"), nil, nil)
@ -507,6 +609,8 @@ func TestVerifyDependencySelections(t *testing.T) {
`provider registry.opentofu.org/hashicorp/configured: required by this configuration but no version is selected`,
`provider registry.opentofu.org/hashicorp/grandchild: required by this configuration but no version is selected`,
`provider registry.opentofu.org/hashicorp/implied: required by this configuration but no version is selected`,
`provider registry.opentofu.org/hashicorp/importexplicit: required by this configuration but no version is selected`,
`provider registry.opentofu.org/hashicorp/importimplied: required by this configuration but no version is selected`,
`provider registry.opentofu.org/hashicorp/null: required by this configuration but no version is selected`,
`provider registry.opentofu.org/hashicorp/random: required by this configuration but no version is selected`,
`provider registry.opentofu.org/hashicorp/tls: required by this configuration but no version is selected`,
@ -560,7 +664,8 @@ func TestConfigAddProviderRequirements(t *testing.T) {
reqs := getproviders.Requirements{
addrs.NewDefaultProvider("null"): nil,
}
diags = cfg.addProviderRequirements(reqs, true, false)
qualifs := new(getproviders.ProvidersQualification)
diags = cfg.addProviderRequirements(reqs, qualifs, true, false)
assertNoDiagnostics(t, diags)
}
@ -585,12 +690,11 @@ Use the providers argument within the module block to configure providers for al
func TestConfigImportProviderClashesWithResources(t *testing.T) {
cfg, diags := testModuleConfigFromFile("testdata/invalid-import-files/import-and-resource-clash.tf")
assertNoDiagnostics(t, diags)
qualifs := new(getproviders.ProvidersQualification)
diags = cfg.addProviderRequirements(getproviders.Requirements{}, true, false)
diags = cfg.addProviderRequirements(getproviders.Requirements{}, qualifs, true, false)
assertExactDiagnostics(t, diags, []string{
`testdata/invalid-import-files/import-and-resource-clash.tf:9,3-19: Invalid import provider argument; The provider argument can only be specified in import blocks that will generate configuration.
Use the provider argument in the target resource block to configure the provider for a resource with explicit provider configuration.`,
`testdata/invalid-import-files/import-and-resource-clash.tf:9,3-19: Invalid import provider argument; The provider argument in the target resource block must match the import block.`,
})
}
@ -598,11 +702,10 @@ func TestConfigImportProviderWithNoResourceProvider(t *testing.T) {
cfg, diags := testModuleConfigFromFile("testdata/invalid-import-files/import-and-no-resource.tf")
assertNoDiagnostics(t, diags)
diags = cfg.addProviderRequirements(getproviders.Requirements{}, true, false)
qualifs := new(getproviders.ProvidersQualification)
diags = cfg.addProviderRequirements(getproviders.Requirements{}, qualifs, true, false)
assertExactDiagnostics(t, diags, []string{
`testdata/invalid-import-files/import-and-no-resource.tf:5,3-19: Invalid import provider argument; The provider argument can only be specified in import blocks that will generate configuration.
Use the provider argument in the target resource block to configure the provider for a resource with explicit provider configuration.`,
`testdata/invalid-import-files/import-and-no-resource.tf:5,3-19: Invalid import provider argument; The provider argument in the target resource block must be specified and match the import block.`,
})
}

View File

@ -21,7 +21,7 @@ import (
// In the case of any errors, t.Fatal (or similar) will be called to halt
// execution of the test, so the calling test does not need to handle errors
// itself.
func NewLoaderForTests(t *testing.T) (*Loader, func()) {
func NewLoaderForTests(t testing.TB) (*Loader, func()) {
t.Helper()
modulesDir, err := os.MkdirTemp("", "tf-configs")

View File

@ -50,12 +50,15 @@ func (b *Block) Filter(filterAttribute FilterT[*Attribute], filterBlock FilterT[
ret.Attributes = make(map[string]*Attribute, len(b.Attributes))
}
for name, attrS := range b.Attributes {
if filterAttribute == nil || !filterAttribute(name, attrS) {
ret.Attributes[name] = attrS
// Copy the attribute. Otherwise, if filterNestedType filters out some of its nested attributes,
// the underlying schema would be altered too, rendering the providers.SchemaCache invalid.
attr := *attrS
if filterAttribute == nil || !filterAttribute(name, &attr) {
ret.Attributes[name] = &attr
}
if attrS.NestedType != nil {
ret.Attributes[name].NestedType = filterNestedType(attrS.NestedType, filterAttribute)
if attr.NestedType != nil {
ret.Attributes[name].NestedType = filterNestedType(attr.NestedType, filterAttribute)
}
}
@ -88,10 +91,13 @@ func filterNestedType(obj *Object, filterAttribute FilterT[*Attribute]) *Object
}
for name, attrS := range obj.Attributes {
if filterAttribute == nil || !filterAttribute(name, attrS) {
ret.Attributes[name] = attrS
if attrS.NestedType != nil {
ret.Attributes[name].NestedType = filterNestedType(attrS.NestedType, filterAttribute)
// Copy the attribute. Otherwise, if filterNestedType filters out some of its nested attributes,
// the underlying schema would be altered too, rendering the providers.SchemaCache invalid.
attr := *attrS
if filterAttribute == nil || !filterAttribute(name, &attr) {
ret.Attributes[name] = &attr
if attr.NestedType != nil {
ret.Attributes[name].NestedType = filterNestedType(attr.NestedType, filterAttribute)
}
}
}

View File

@ -8,10 +8,9 @@ package configschema
import (
"testing"
"github.com/zclconf/go-cty/cty"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/zclconf/go-cty/cty"
)
func TestFilter(t *testing.T) {
@ -270,14 +269,108 @@ func TestFilter(t *testing.T) {
},
},
},
"filter_computed_from_optional_block": {
schema: &Block{
Attributes: map[string]*Attribute{
"id": {
Type: cty.String,
Computed: true,
},
"nested_val": {
Type: cty.String,
Optional: true,
NestedType: &Object{
Attributes: map[string]*Attribute{
"child_computed": {
Type: cty.String,
Computed: true,
},
},
},
},
},
},
filterAttribute: FilterReadOnlyAttribute,
filterBlock: FilterDeprecatedBlock,
want: &Block{
Attributes: map[string]*Attribute{
"nested_val": {
Type: cty.String,
Optional: true,
NestedType: &Object{
Attributes: map[string]*Attribute{},
},
},
},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
schemaBeforeFilter := cloneBlock(tc.schema)
got := tc.schema.Filter(tc.filterAttribute, tc.filterBlock)
if !cmp.Equal(got, tc.want, cmp.Comparer(cty.Type.Equals), cmpopts.EquateEmpty()) {
t.Fatal(cmp.Diff(got, tc.want, cmp.Comparer(cty.Type.Equals), cmpopts.EquateEmpty()))
}
if !cmp.Equal(schemaBeforeFilter, tc.schema, cmp.Comparer(cty.Type.Equals), cmpopts.EquateEmpty()) {
t.Fatal("before and after schema differ. the filtering function alters the actual schema", cmp.Diff(schemaBeforeFilter, tc.schema, cmp.Comparer(cty.Type.Equals), cmpopts.EquateEmpty()))
}
})
}
}
func cloneBlock(in *Block) *Block {
if in == nil {
return nil
}
out := Block{
Attributes: make(map[string]*Attribute, len(in.Attributes)),
BlockTypes: make(map[string]*NestedBlock, len(in.BlockTypes)),
Description: in.Description,
DescriptionKind: in.DescriptionKind,
Deprecated: in.Deprecated,
}
for k, v := range in.Attributes {
out.Attributes[k] = cloneAttribute(v)
}
for k, v := range in.BlockTypes {
out.BlockTypes[k] = cloneNestedBlock(v)
}
return &out
}
func cloneNestedBlock(in *NestedBlock) *NestedBlock {
bl := cloneBlock(&in.Block)
out := &NestedBlock{
Block: *bl,
Nesting: in.Nesting,
MinItems: in.MinItems,
MaxItems: in.MaxItems,
}
return out
}
func cloneAttribute(in *Attribute) *Attribute {
out := &Attribute{
Type: in.Type,
NestedType: nil, // handled later
Description: in.Description,
DescriptionKind: in.DescriptionKind,
Required: in.Required,
Optional: in.Optional,
Computed: in.Computed,
Sensitive: in.Sensitive,
Deprecated: in.Deprecated,
}
if in.NestedType != nil {
out.NestedType = &Object{
Attributes: make(map[string]*Attribute, len(in.NestedType.Attributes)),
Nesting: in.NestedType.Nesting,
}
for k, v := range in.NestedType.Attributes {
out.NestedType.Attributes[k] = cloneAttribute(v)
}
}
return out
}

View File

@ -210,6 +210,31 @@ func TestModule_required_provider_overrides(t *testing.T) {
}
}
// When multiple required_providers blocks are defined and one of them has a syntax error,
// ensure that the diagnostics from each and every validation are still returned correctly.
// If a required_providers block contains syntax errors, we return an empty one just to allow
// the later validations to add their results.
func TestModule_required_providers_multiple_one_with_syntax_error(t *testing.T) {
_, diags := testModuleFromDir("testdata/invalid-modules/multiple-required-providers-with-syntax-error")
if !diags.HasErrors() {
t.Fatal("module should have error diags, but does not")
}
want := []string{
`Missing attribute value; Expected an attribute value`,
`Unexpected "resource" block; Blocks are not allowed here`,
`Duplicate required providers configuration`,
}
if wantLen, gotLen := len(want), len(diags.Errs()); wantLen != gotLen {
t.Fatalf("expected %d errors but got %d", wantLen, gotLen)
}
for i, e := range diags.Errs() {
if got := e.Error(); !strings.Contains(got, want[i]) {
t.Errorf("expected error to contain %q\nerror was: \n\t%q\n", want[i], got)
}
}
}
// Resources without explicit provider configuration are assigned a provider
// implied based on the resource type. For example, this resource:
//

View File

@ -12,6 +12,7 @@ import (
"github.com/hashicorp/hcl/v2/ext/typeexpr"
"github.com/hashicorp/hcl/v2/gohcl"
"github.com/hashicorp/hcl/v2/hclsyntax"
"github.com/opentofu/opentofu/internal/tfdiags"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/convert"
@ -157,7 +158,7 @@ func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagno
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid default value for variable",
Detail: fmt.Sprintf("This default value is not compatible with the variable's type constraint: %s.", err),
Detail: fmt.Sprintf("This default value is not compatible with the variable's type constraint: %s.", tfdiags.FormatError(err)),
Subject: attr.Expr.Range().Ptr(),
})
val = cty.DynamicVal

View File

@ -92,47 +92,61 @@ func TestParserLoadConfigFileFailure(t *testing.T) {
// This test uses a subset of the same fixture files as
// TestParserLoadConfigFileFailure, but additionally verifies that each
// file produces the expected diagnostic summary.
// file produces the expected diagnostic summary and detail.
func TestParserLoadConfigFileFailureMessages(t *testing.T) {
tests := []struct {
Filename string
WantSeverity hcl.DiagnosticSeverity
WantDiag string
WantDetail string
}{
{
"invalid-files/data-resource-lifecycle.tf",
hcl.DiagError,
"Invalid data resource lifecycle argument",
`The lifecycle argument "ignore_changes" is defined only for managed resources ("resource" blocks), and is not valid for data resources.`,
},
{
"invalid-files/variable-type-unknown.tf",
hcl.DiagError,
"Invalid type specification",
`The keyword "notatype" is not a valid type specification.`,
},
{
"invalid-files/unexpected-attr.tf",
hcl.DiagError,
"Unsupported argument",
`An argument named "foo" is not expected here.`,
},
{
"invalid-files/unexpected-block.tf",
hcl.DiagError,
"Unsupported block type",
`Blocks of type "varyable" are not expected here. Did you mean "variable"?`,
},
{
"invalid-files/resource-count-and-for_each.tf",
hcl.DiagError,
`Invalid combination of "count" and "for_each"`,
`The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`,
},
{
"invalid-files/data-count-and-for_each.tf",
hcl.DiagError,
`Invalid combination of "count" and "for_each"`,
`The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`,
},
{
"invalid-files/resource-lifecycle-badbool.tf",
hcl.DiagError,
"Unsuitable value type",
`Unsuitable value: a bool is required`,
},
{
"invalid-files/variable-complex-bad-default-inner-obj.tf",
hcl.DiagError,
"Invalid default value for variable",
`This default value is not compatible with the variable's type constraint: ["mykey"].field: a bool is required.`,
},
}
@ -161,6 +175,9 @@ func TestParserLoadConfigFileFailureMessages(t *testing.T) {
if diags[0].Summary != test.WantDiag {
t.Errorf("Wrong diagnostic summary\ngot: %s\nwant: %s", diags[0].Summary, test.WantDiag)
}
if diags[0].Detail != test.WantDetail {
t.Errorf("Wrong diagnostic detail\ngot: %s\nwant: %s", diags[0].Detail, test.WantDetail)
}
})
}
}

View File

@ -180,13 +180,13 @@ func assertExactDiagnostics(t *testing.T, diags hcl.Diagnostics, want []string)
bad := false
for got := range gotDiags {
if _, exists := wantDiags[got]; !exists {
t.Errorf("unexpected diagnostic: %s", got)
t.Errorf("unexpected diagnostic: \n%s", got)
bad = true
}
}
for want := range wantDiags {
if _, exists := gotDiags[want]; !exists {
t.Errorf("missing expected diagnostic: %s", want)
t.Errorf("missing expected diagnostic: \n%s", want)
bad = true
}
}

View File

@ -33,16 +33,18 @@ type RequiredProviders struct {
}
func decodeRequiredProvidersBlock(block *hcl.Block) (*RequiredProviders, hcl.Diagnostics) {
attrs, diags := block.Body.JustAttributes()
if diags.HasErrors() {
return nil, diags
}
ret := &RequiredProviders{
RequiredProviders: make(map[string]*RequiredProvider),
DeclRange: block.DefRange,
}
attrs, diags := block.Body.JustAttributes()
if diags.HasErrors() {
// Return an empty RequiredProviders so that the later validations can still run
// and report all of their diagnostics correctly.
return ret, diags
}
for name, attr := range attrs {
rp := &RequiredProvider{
Name: name,

View File

@ -0,0 +1,15 @@
// https://github.com/opentofu/opentofu/issues/2394
// This validates the returned error message when the type of an inner field in the
// default value does not match the variable's type definition
variable "bad_type_for_inner_field" {
type = map(object({
field = bool
}))
default = {
"mykey" = {
field = "not a bool"
dont = "mind me"
}
}
}

View File

@ -0,0 +1,10 @@
terraform {
required_providers {
tfcoremock = {
source = "tfcoremock"
version = "0.3.0"
}
}
}
resource "tfcoremock_simple_resource" "foo" {}

View File

@ -0,0 +1,10 @@
terraform {
required_providers {
tfcoremock = {
source = "tfcoremock"
version = "0.3.0"
{
}
}
resource "tfcoremock_simple_resource" "bar" {}

View File

@ -32,3 +32,15 @@ data "terraform_remote_state" "bar" {
provider "configured" {
version = "~> 1.4"
}
# Import using implied provider
import {
to = importimplied.targetA
id = "ii"
}
import {
to = importimplied.targetB
id = "ie"
provider = importexplicit
}

View File

@ -7,11 +7,12 @@ package encryption
import (
"github.com/opentofu/opentofu/internal/encryption/keyprovider/aws_kms"
"github.com/opentofu/opentofu/internal/encryption/keyprovider/external"
externalKeyProvider "github.com/opentofu/opentofu/internal/encryption/keyprovider/external"
"github.com/opentofu/opentofu/internal/encryption/keyprovider/gcp_kms"
"github.com/opentofu/opentofu/internal/encryption/keyprovider/openbao"
"github.com/opentofu/opentofu/internal/encryption/keyprovider/pbkdf2"
"github.com/opentofu/opentofu/internal/encryption/method/aesgcm"
externalMethod "github.com/opentofu/opentofu/internal/encryption/method/external"
"github.com/opentofu/opentofu/internal/encryption/method/unencrypted"
"github.com/opentofu/opentofu/internal/encryption/registry/lockingencryptionregistry"
)
@ -31,12 +32,15 @@ func init() {
if err := DefaultRegistry.RegisterKeyProvider(openbao.New()); err != nil {
panic(err)
}
if err := DefaultRegistry.RegisterKeyProvider(external.New()); err != nil {
if err := DefaultRegistry.RegisterKeyProvider(externalKeyProvider.New()); err != nil {
panic(err)
}
if err := DefaultRegistry.RegisterMethod(aesgcm.New()); err != nil {
panic(err)
}
if err := DefaultRegistry.RegisterMethod(externalMethod.New()); err != nil {
panic(err)
}
if err := DefaultRegistry.RegisterMethod(unencrypted.New()); err != nil {
panic(err)
}

View File

@ -92,6 +92,10 @@ func (c Config) asAWSBase() (*awsbase.Config, error) {
if err != nil {
return nil, err
}
var roles []awsbase.AssumeRole
if assumeRole != nil {
roles = append(roles, *assumeRole)
}
// Get assume role with web identity
assumeRoleWithWebIdentity, err := c.AssumeRoleWithWebIdentity.asAWSBase()
@ -168,7 +172,7 @@ func (c Config) asAWSBase() (*awsbase.Config, error) {
SharedCredentialsFiles: stringArrayAttrEnvFallback(c.SharedCredentialsFiles, "AWS_SHARED_CREDENTIALS_FILE"),
SharedConfigFiles: stringArrayAttrEnvFallback(c.SharedConfigFiles, "AWS_SHARED_CONFIG_FILE"),
AssumeRole: assumeRole,
AssumeRole: roles,
AssumeRoleWithWebIdentity: assumeRoleWithWebIdentity,
AllowedAccountIds: c.AllowedAccountIds,
ForbiddenAccountIds: c.ForbiddenAccountIds,

View File

@ -133,20 +133,22 @@ func TestConfig_asAWSBase(t *testing.T) {
EC2MetadataServiceEndpointMode: "my-emde-mode",
SharedCredentialsFiles: []string{"my-scredf"},
SharedConfigFiles: []string{"my-sconff"},
AssumeRole: &awsbase.AssumeRole{
RoleARN: "ar_arn",
Duration: time.Hour * 4,
ExternalID: "ar_extid",
Policy: "ar_policy",
PolicyARNs: []string{
"arn:aws:iam::123456789012:policy/AR",
},
SessionName: "ar_session_name",
Tags: map[string]string{
"foo": "bar",
},
TransitiveTagKeys: []string{
"ar_tags",
AssumeRole: []awsbase.AssumeRole{
{
RoleARN: "ar_arn",
Duration: time.Hour * 4,
ExternalID: "ar_extid",
Policy: "ar_policy",
PolicyARNs: []string{
"arn:aws:iam::123456789012:policy/AR",
},
SessionName: "ar_session_name",
Tags: map[string]string{
"foo": "bar",
},
TransitiveTagKeys: []string{
"ar_tags",
},
},
},
AssumeRoleWithWebIdentity: &awsbase.AssumeRoleWithWebIdentity{

View File

@ -9,6 +9,8 @@ import (
"github.com/opentofu/opentofu/internal/encryption/keyprovider"
)
// TODO #2386 / 1.11: consider if the external method changes and unify protocol with the external key provider.
// HeaderMagic is the magic string that needs to be present in the header to identify
// the external program as an external keyprovider for OpenTofu.
const HeaderMagic = "OpenTofu-External-Key-Provider"

View File

@ -1,53 +1,54 @@
#!/usr/bin/python
# Copyright (c) The OpenTofu Authors
# SPDX-License-Identifier: MPL-2.0
# Copyright (c) 2023 HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
import base64
import json
import sys
if __name__ == "__main__":
# Make sure that this program isn't running interactively:
if sys.stdout.isatty():
sys.stderr.write("This is an OpenTofu key provider and is not meant to be run interactively. "
"Please configure this program in your OpenTofu encryption block to use it.\n")
sys.exit(1)
# Write the header:
sys.stdout.write((json.dumps({"magic": "OpenTofu-External-Key-Provider", "version": 1}) + "\n"))
# Read the input:
inputData = sys.stdin.read()
data = json.loads(inputData)
# Construct the key:
key = b''
for i in range(1, 17):
key += chr(i).encode('ascii')
# Output the keys:
if data is None:
# No input metadata was passed, we shouldn't output a decryption key. If needed, we can produce
# an output metadata here, which will be stored alongside the encrypted data.
outputMeta = {"external_data":{}}
sys.stdout.write(json.dumps({
"keys": {
"encryption_key": base64.b64encode(key).decode('ascii')
},
"meta": outputMeta
}))
else:
# We had some input metadata, output a decryption key. In a real-life scenario we would
# use the metadata for something like pbkdf2.
inputMeta = data["external_data"]
# Do something with the input metadata if needed and produce the output metadata:
outputMeta = {"external_data":{}}
sys.stdout.write(json.dumps({
"keys": {
"encryption_key": base64.b64encode(key).decode('ascii'),
"decryption_key": base64.b64encode(key).decode('ascii')
},
"meta": outputMeta
}))
#!/usr/bin/python
# Copyright (c) The OpenTofu Authors
# SPDX-License-Identifier: MPL-2.0
# Copyright (c) 2023 HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
import base64
import json
import sys
if __name__ == "__main__":
# Make sure that this program isn't running interactively:
if sys.stdout.isatty():
sys.stderr.write("This is an OpenTofu key provider and is not meant to be run interactively. "
"Please configure this program in your OpenTofu encryption block to use it.\n")
sys.exit(1)
# Write the header:
sys.stdout.write((json.dumps({"magic": "OpenTofu-External-Key-Provider", "version": 1}) + "\n"))
sys.stdout.flush()
# Read the input:
inputData = sys.stdin.read()
data = json.loads(inputData)
# Construct the key:
key = b''
for i in range(1, 17):
key += chr(i).encode('ascii')
# Output the keys:
if data is None:
# No input metadata was passed, we shouldn't output a decryption key. If needed, we can produce
# an output metadata here, which will be stored alongside the encrypted data.
outputMeta = {"external_data":{}}
sys.stdout.write(json.dumps({
"keys": {
"encryption_key": base64.b64encode(key).decode('ascii')
},
"meta": outputMeta
}))
else:
# We had some input metadata, output a decryption key. In a real-life scenario we would
# use the metadata for something like pbkdf2.
inputMeta = data["external_data"]
# Do something with the input metadata if needed and produce the output metadata:
outputMeta = {"external_data":{}}
sys.stdout.write(json.dumps({
"keys": {
"encryption_key": base64.b64encode(key).decode('ascii'),
"decryption_key": base64.b64encode(key).decode('ascii')
},
"meta": outputMeta
}))

View File

@ -25,6 +25,8 @@ var embedFS embed.FS
// This binary will always return []byte{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16} as a hard-coded key.
// You may pass --hello-world to change it to []byte("Hello world! 123")
func Go(t *testing.T) []string {
t.Helper()
// goMod is embedded like this because the go:embed tag doesn't like having module files in embedded paths.
var goMod = []byte(`module testprovider
@ -62,6 +64,8 @@ go 1.22`)
// run the Python script, including the Python interpreter.
// This script will always return []byte{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16} as a hard-coded key.
func Python(t *testing.T) []string {
t.Helper()
tempDir := t.TempDir()
dir := path.Join(tempDir, "testprovider-py")
if err := os.MkdirAll(dir, 0700); err != nil { //nolint:mnd // This check is stupid
@ -78,6 +82,8 @@ func Python(t *testing.T) []string {
// POSIXShell returns a path to a POSIX shell script acting as a key provider.
// This script will always return []byte{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16} as a hard-coded key.
func POSIXShell(t *testing.T) []string {
t.Helper()
tempDir := t.TempDir()
dir := path.Join(tempDir, "testprovider-sh")
if err := os.MkdirAll(dir, 0700); err != nil { //nolint:mnd // This check is stupid

View File

@ -13,7 +13,7 @@ import (
"net/url"
"path"
openbao "github.com/openbao/openbao/api"
openbao "github.com/openbao/openbao/api/v2"
)
type client interface {

View File

@ -14,7 +14,7 @@ import (
"os"
"testing"
openbao "github.com/openbao/openbao/api"
openbao "github.com/openbao/openbao/api/v2"
"github.com/opentofu/opentofu/internal/encryption/keyprovider/compliancetest"
)

View File

@ -8,7 +8,7 @@ package openbao
import (
"fmt"
openbao "github.com/openbao/openbao/api"
openbao "github.com/openbao/openbao/api/v2"
"github.com/opentofu/opentofu/internal/encryption/keyprovider"
)

View File

@ -3,7 +3,7 @@ package openbao
import (
"context"
openbao "github.com/openbao/openbao/api"
openbao "github.com/openbao/openbao/api/v2"
)
type mockClientFunc func(ctx context.Context, path string, data map[string]interface{}) (*openbao.Secret, error)

View File

@ -202,6 +202,9 @@ type EncryptDecryptTestCase[TConfig method.Config, TMethod method.Method] struct
ValidEncryptOnlyConfig TConfig
// ValidFullConfig is a configuration that contains both an encryption and decryption key.
ValidFullConfig TConfig
// DecryptCannotBeVerified allows decryption of unencrypted data to succeed. This is needed for methods that
// cannot verify whether data was decrypted successfully (e.g. XOR).
DecryptCannotBeVerified bool
}
func (m EncryptDecryptTestCase[TConfig, TMethod]) execute(t *testing.T) {
@ -248,16 +251,18 @@ func (m EncryptDecryptTestCase[TConfig, TMethod]) execute(t *testing.T) {
}
typedDecryptError = nil
_, err = decryptMethod.Decrypt(plainData)
if err == nil {
compliancetest.Fail(t, "Decrypt() must return an error when decrypting unencrypted data, no error returned.")
} else {
compliancetest.Log(t, "Decrypt() correctly returned an error when decrypting unencrypted data.")
}
if !errors.As(err, &typedDecryptError) {
compliancetest.Fail(t, "Decrypt() returned a %T instead of a %T when decrypting unencrypted data. Please use the correct typed errors.", err, typedDecryptError)
} else {
compliancetest.Log(t, "Decrypt() returned the correct error type of %T when decrypting unencrypted data.", typedDecryptError)
if !m.DecryptCannotBeVerified {
_, err = decryptMethod.Decrypt(plainData)
if err == nil {
compliancetest.Fail(t, "Decrypt() must return an error when decrypting unencrypted data, no error returned.")
} else {
compliancetest.Log(t, "Decrypt() correctly returned an error when decrypting unencrypted data.")
}
if !errors.As(err, &typedDecryptError) {
compliancetest.Fail(t, "Decrypt() returned a %T instead of a %T when decrypting unencrypted data. Please use the correct typed errors.", err, typedDecryptError)
} else {
compliancetest.Log(t, "Decrypt() returned the correct error type of %T when decrypting unencrypted data.", typedDecryptError)
}
}
decryptedData, err := decryptMethod.Decrypt(encryptedData)

View File

@ -10,15 +10,20 @@ import "fmt"
// ErrCryptoFailure indicates a generic cryptographic failure. This error should be embedded into
// ErrEncryptionFailed, ErrDecryptionFailed, or ErrInvalidConfiguration.
type ErrCryptoFailure struct {
Message string
Cause error
Message string
Cause error
SupplementalData string
}
func (e ErrCryptoFailure) Error() string {
result := e.Message
if e.Cause != nil {
return fmt.Sprintf("%s: %v", e.Message, e.Cause)
result += " (" + e.Cause.Error() + ")"
}
return e.Message
if e.SupplementalData != "" {
result += "\n-----\n" + e.SupplementalData
}
return result
}
func (e ErrCryptoFailure) Unwrap() error {

View File

@ -0,0 +1,25 @@
# External encryption method
> [!WARNING]
> This file is not end-user documentation; it is intended for developers. Please follow the user documentation on the OpenTofu website unless you want to work on the encryption code.
This directory contains the `external` encryption method. You can configure it like this:
```hcl
terraform {
encryption {
method "external" "foo" {
keys = key_provider.some.provider
encrypt_command = ["/path/to/binary", "arg1", "arg2"]
decrypt_command = ["/path/to/binary", "arg1", "arg2"]
}
}
}
```
The external method must implement the following protocol:
1. On start, the method binary must emit the header line matching [the header schema](protocol/header.schema.json) on the standard output.
2. OpenTofu supplies the input metadata matching [the input schema](protocol/input.schema.json) on the standard input.
3. The method binary must emit the output matching [the output schema](protocol/output.schema.json) on the standard output, as shown in the sketch below.
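
A minimal Go sketch of a method binary following these three steps could look like the code below. It mirrors the bundled test method in spirit: the XOR transform is a placeholder rather than a real cipher, and the type and variable names are illustrative only.

```go
// Illustrative sketch only: a trivial external encryption method speaking the
// protocol above. The XOR transform is a placeholder, not a real cipher.
package main

import (
	"encoding/json"
	"io"
	"log"
	"os"
)

type header struct {
	Magic   string `json:"magic"`
	Version int    `json:"version"`
}

type input struct {
	Key     []byte `json:"key,omitempty"` // base64-encoded on the wire
	Payload []byte `json:"payload"`       // base64-encoded on the wire
}

type output struct {
	Payload []byte `json:"payload"`
}

func main() {
	// 1. Emit the header line on stdout. Logs go to stderr by default.
	hdr, err := json.Marshal(header{Magic: "OpenTofu-External-Encryption-Method", Version: 1})
	if err != nil {
		log.Fatalf("marshaling header: %v", err)
	}
	if _, err := os.Stdout.Write(append(hdr, '\n')); err != nil {
		log.Fatalf("writing header: %v", err)
	}

	// 2. Read the input message from stdin.
	raw, err := io.ReadAll(os.Stdin)
	if err != nil {
		log.Fatalf("reading stdin: %v", err)
	}
	var in input
	if err := json.Unmarshal(raw, &in); err != nil {
		log.Fatalf("parsing input: %v", err)
	}
	if len(in.Key) == 0 {
		// This sketch requires a key; exit non-zero if none was provided.
		log.Fatal("no key provided")
	}

	// 3. Transform the payload and emit the output message on stdout.
	out := output{Payload: make([]byte, len(in.Payload))}
	for i, b := range in.Payload {
		out.Payload[i] = b ^ in.Key[i%len(in.Key)]
	}
	if err := json.NewEncoder(os.Stdout).Encode(out); err != nil {
		log.Fatalf("writing output: %v", err)
	}
}
```

Because XOR is symmetric, the same binary could be configured as both `encrypt_command` and `decrypt_command` in the example above.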

View File

@ -0,0 +1,188 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package external
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"os/exec"
"strings"
"time"
"github.com/opentofu/opentofu/internal/encryption/keyprovider"
"github.com/opentofu/opentofu/internal/encryption/method"
)
type command struct {
keys *keyprovider.Output
encryptCommand []string
decryptCommand []string
}
func (c command) Encrypt(data []byte) ([]byte, error) {
var key []byte
if c.keys != nil {
key = c.keys.EncryptionKey
}
input := InputV1{
Key: key,
Payload: data,
}
result, err := c.run(c.encryptCommand, input)
if err != nil {
return nil, &method.ErrEncryptionFailed{
Cause: err,
}
}
return result, nil
}
func (c command) Decrypt(data []byte) ([]byte, error) {
var key []byte
if c.keys != nil {
key = c.keys.DecryptionKey
if len(c.keys.EncryptionKey) > 0 && len(key) == 0 {
return nil, &method.ErrDecryptionKeyUnavailable{}
}
}
if len(data) == 0 {
return nil, &method.ErrDecryptionFailed{Cause: &method.ErrCryptoFailure{
Message: "Cannot decrypt empty data.",
}}
}
input := InputV1{
Key: key,
Payload: data,
}
result, err := c.run(c.decryptCommand, input)
if err != nil {
return nil, &method.ErrDecryptionFailed{
Cause: err,
}
}
return result, nil
}
func (c command) run(command []string, input any) ([]byte, error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
inputData, err := json.Marshal(input)
if err != nil {
return nil, &method.ErrCryptoFailure{
Message: "failed to marshal input",
Cause: err,
}
}
stderr := &bytes.Buffer{}
cmd := exec.CommandContext(ctx, command[0], command[1:]...) //nolint:gosec //Launching external commands here is the entire point.
handler := &ioHandler{
false,
bytes.NewBuffer(inputData),
[]byte{},
cancel,
nil,
}
cmd.Stdin = handler
cmd.Stdout = handler
cmd.Stderr = stderr
if err := cmd.Run(); err != nil {
if handler.err != nil {
return nil, &method.ErrCryptoFailure{
Message: "external encryption method failure",
Cause: handler.err,
SupplementalData: fmt.Sprintf("Stderr:\n-------\n%s\n", stderr.String()),
}
}
var exitErr *exec.ExitError
if errors.As(err, &exitErr) {
if exitErr.ExitCode() != 0 {
return nil, &method.ErrCryptoFailure{
Message: "external encryption method exited with non-zero exit code",
Cause: err,
SupplementalData: fmt.Sprintf("Stderr:\n-------\n%s\n", stderr.String()),
}
}
}
return nil, &method.ErrCryptoFailure{
Message: "external encryption method exited with an error",
Cause: err,
SupplementalData: fmt.Sprintf("Stderr:\n-------\n%s\n", stderr.String()),
}
}
var result *OutputV1
decoder := json.NewDecoder(bytes.NewReader(handler.output))
decoder.DisallowUnknownFields()
if err := decoder.Decode(&result); err != nil {
return nil, &method.ErrCryptoFailure{
Message: "external encryption method returned an invalid JSON",
Cause: err,
SupplementalData: fmt.Sprintf("Stderr:\n-------\n%s\n", stderr.String()),
}
}
return result.Payload, nil
}
type ioHandler struct {
headerFinished bool
input *bytes.Buffer
output []byte
cancel func()
err error
}
func (i *ioHandler) Write(p []byte) (int, error) {
i.output = append(i.output, p...)
n := len(p)
if i.headerFinished {
// Header is finished, just collect the output.
return n, nil
}
// Check if the full header is present.
parts := strings.SplitN(string(i.output), "\n", 2) //nolint:mnd //This rule is dumb.
if len(parts) == 1 {
return n, nil
}
var header Header
// Note: this is intentionally not using strict decoding. Later protocol versions may introduce additional header
// fields.
if jsonErr := json.Unmarshal([]byte(parts[0]), &header); jsonErr != nil {
err := fmt.Errorf("failed to unmarshal header from external method (%w)", jsonErr)
i.err = err
i.cancel()
return n, err
}
if header.Magic != Magic {
err := fmt.Errorf("invalid magic received from external method: %s", header.Magic)
i.err = err
i.cancel()
return n, err
}
if header.Version != 1 {
err := fmt.Errorf("invalid version number received from external method: %d", header.Version)
i.err = err
i.cancel()
return n, err
}
i.headerFinished = true
i.output = []byte(parts[1])
return n, nil
}
func (i *ioHandler) Read(p []byte) (int, error) {
return i.input.Read(p)
}

View File

@ -0,0 +1,92 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package external
import (
"fmt"
"slices"
"strings"
"testing"
"github.com/opentofu/opentofu/internal/encryption/keyprovider"
"github.com/opentofu/opentofu/internal/encryption/method/compliancetest"
"github.com/opentofu/opentofu/internal/encryption/method/external/testmethod"
)
func TestComplianceBinary(t *testing.T) {
runTest(t, testmethod.Go(t))
}
func TestCompliancePython(t *testing.T) {
runTest(t, testmethod.Python(t))
}
func runTest(t *testing.T, cmd []string) {
encryptCommand := append(cmd, "--encrypt") //nolint:gocritic //It's intentionally a different slice.
decryptCommand := append(cmd, "--decrypt") //nolint:gocritic //It's intentionally a different slice.
compliancetest.ComplianceTest(t, compliancetest.TestConfiguration[*descriptor, *Config, *command]{
Descriptor: New().(*descriptor), //nolint:errcheck //This is safe.
HCLParseTestCases: map[string]compliancetest.HCLParseTestCase[*descriptor, *Config, *command]{
"empty": {
HCL: `method "external" "foo" {}`,
ValidHCL: false,
ValidBuild: false,
Validate: nil,
},
"empty-command": {
HCL: `method "external" "foo" {
encrypt_command = []
decrypt_command = []
}`,
ValidHCL: true,
},
"command": {
HCL: fmt.Sprintf(`method "external" "foo" {
encrypt_command = ["%s"]
decrypt_command = ["%s"]
}`, strings.Join(encryptCommand, `","`), strings.Join(decryptCommand, `","`)),
ValidHCL: true,
ValidBuild: true,
Validate: func(config *Config, method *command) error {
if !slices.Equal(config.EncryptCommand, encryptCommand) {
return fmt.Errorf("incorrect encrypt command after HCL parsing")
}
if !slices.Equal(config.DecryptCommand, decryptCommand) {
return fmt.Errorf("incorrect decrypt command after HCL parsing")
}
return nil
},
},
},
ConfigStructTestCases: map[string]compliancetest.ConfigStructTestCase[*Config, *command]{
"empty": {
Config: &Config{},
ValidBuild: false,
Validate: nil,
},
},
EncryptDecryptTestCase: compliancetest.EncryptDecryptTestCase[*Config, *command]{
ValidEncryptOnlyConfig: &Config{
Keys: &keyprovider.Output{
EncryptionKey: []byte{20},
DecryptionKey: nil,
},
EncryptCommand: encryptCommand,
DecryptCommand: decryptCommand,
},
ValidFullConfig: &Config{
Keys: &keyprovider.Output{
EncryptionKey: []byte{20},
DecryptionKey: []byte{20},
},
EncryptCommand: encryptCommand,
DecryptCommand: decryptCommand,
},
DecryptCannotBeVerified: true,
},
})
}

View File

@ -0,0 +1,55 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package external
import (
"github.com/opentofu/opentofu/internal/encryption/keyprovider"
"github.com/opentofu/opentofu/internal/encryption/method"
)
// Config is the configuration for the external encryption method.
type Config struct {
Keys *keyprovider.Output `hcl:"keys,optional" json:"keys,omitempty" yaml:"keys"`
EncryptCommand []string `hcl:"encrypt_command" json:"encrypt_command" yaml:"encrypt_command"`
DecryptCommand []string `hcl:"decrypt_command" json:"decrypt_command" yaml:"decrypt_command"`
}
// Build checks the validity of the configuration and returns a ready-to-use external method implementation.
func (c *Config) Build() (method.Method, error) {
if len(c.EncryptCommand) < 1 {
return nil, &method.ErrInvalidConfiguration{
Cause: &method.ErrCryptoFailure{
Message: "the encrypt_command option is required",
},
}
}
if len(c.EncryptCommand[0]) == 0 {
return nil, &method.ErrInvalidConfiguration{
Cause: &method.ErrCryptoFailure{
Message: "the first entry of encrypt_command must not be empty",
},
}
}
if len(c.DecryptCommand) < 1 {
return nil, &method.ErrInvalidConfiguration{
Cause: &method.ErrCryptoFailure{
Message: "the decrypt_command option is required",
},
}
}
if len(c.DecryptCommand[0]) == 0 {
return nil, &method.ErrInvalidConfiguration{
Cause: &method.ErrCryptoFailure{
Message: "the first entry of decrypt_command must not be empty",
},
}
}
return &command{
keys: c.Keys,
encryptCommand: c.EncryptCommand,
decryptCommand: c.DecryptCommand,
}, nil
}

View File

@ -0,0 +1,38 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package external
import (
"github.com/opentofu/opentofu/internal/encryption/method"
)
// Descriptor integrates the method.Descriptor and provides a TypedConfig for easier configuration.
type Descriptor interface {
method.Descriptor
// TypedConfig returns a config typed for this method.
TypedConfig() *Config
}
// New creates a new descriptor for the external encryption method.
func New() Descriptor {
return &descriptor{}
}
type descriptor struct {
}
func (f *descriptor) TypedConfig() *Config {
return &Config{}
}
func (f *descriptor) ID() method.ID {
return "external"
}
func (f *descriptor) ConfigStruct() method.Config {
return f.TypedConfig()
}

View File

@ -0,0 +1,36 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package external
// TODO #2386 / 1.11: consider if the external method changes and unify protocol with the external key provider.
// Magic is the magic string the external method needs to output in the Header.
const Magic = "OpenTofu-External-Encryption-Method"
// Header is the initial message the external method writes to stdout as a single-line JSON.
type Header struct {
// Magic must always be "OpenTofu-External-Encryption-Method"
Magic string `json:"magic"`
// Version must always be 1.
Version int `json:"version"`
}
// InputV1 is an encryption/decryption request from OpenTofu to the external method. OpenTofu writes this message
// to the standard input of the external method as a JSON message.
type InputV1 struct {
// Key is the encryption or decryption key for this operation. On the wire, this is base64-encoded. If no key is
// present, this will be nil; a method that requires a key should then exit with a non-zero exit code.
Key []byte `json:"key,omitempty"`
// Payload is the payload to encrypt/decrypt.
Payload []byte `json:"payload"`
}
// OutputV1 is the returned encrypted/decrypted payload from the external method. The external method writes this
// to the standard output as JSON.
type OutputV1 struct {
// Payload is the payload that has been encrypted/decrypted by the external method.
Payload []byte `json:"payload"`
}

View File

@ -0,0 +1,22 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://raw.githubusercontent.com/opentofu/opentofu/main/internal/encryption/keyprovider/externalcommand/protocol/header.schema.json",
"title": "OpenTofu External Encryption Method Header",
"description": "Header line output when an external method binary is launched. This must be written on a single line followed by a newline character. Note that the header may contain additional fields in later protocol versions.",
"type": "object",
"properties": {
"magic": {
"title": "Magic string",
"description": "Magic string identifying the external method as such.",
"type": "string",
"enum": ["OpenTofu-External-Encryption-Method"]
},
"version": {
"title": "Protocol version number",
"type": "integer",
"enum": [1]
}
},
"required": ["magic","version"],
"additionalProperties": true
}

View File

@ -0,0 +1,25 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://raw.githubusercontent.com/opentofu/opentofu/main/internal/encryption/keyprovider/externalcommand/protocol/input.schema.json",
"title": "OpenTofu External Encryption Method Input",
"description": "Input schema for the OpenTofu external encryption method protocol. The external encryption method must read the input from stdin and write the output to stdout. It may write to stderr to provide more error details.",
"type": "object",
"properties": {
"key": {
"title": "Key",
"description": "If present, this will contain the encryption or decryption key material. If no key is present (e.g. because no key provider is configured) this field will be missing.",
"type": "string",
"contentEncoding": "base64",
"contentMediaType": "application/octet-stream"
},
"payload": {
"title": "Payload",
"description": "The payload that should be encrypted/decrypted.",
"type": "string",
"contentEncoding": "base64",
"contentMediaType": "application/octet-stream"
}
},
"required": ["payload"],
"additionalProperties": false
}

View File

@ -0,0 +1,18 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://raw.githubusercontent.com/opentofu/opentofu/main/internal/encryption/keyprovider/externalcommand/protocol/output.schema.json",
"title": "OpenTofu External Encryption Method Output",
"description": "Output schema for the OpenTofu external encryption method protocol. The external provider must read the input from stdin and write the output to stdout. It may write to stderr to provide more error details.",
"type": "object",
"properties": {
"payload": {
"title": "Payload",
"description": "The encrypted/decrypted data.",
"type": "string",
"contentEncoding": "base64",
"contentMediaType": "application/octet-stream"
}
},
"required": ["payload"],
"additionalProperties": false
}

View File

@ -0,0 +1,71 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package main
import (
"encoding/json"
"io"
"log"
"os"
)
type Header struct {
Magic string `json:"magic"`
Version int `json:"version"`
}
type Input struct {
Key []byte `json:"key,omitempty"`
Payload []byte `json:"payload"`
}
type Output struct {
// Payload is the payload that has been encrypted/decrypted by the external method.
Payload []byte `json:"payload"`
}
// main implements a simple XOR encryption. This is meant as an example and is not suitable for any production use.
func main() {
// Write logs to stderr
log.Default().SetOutput(os.Stderr)
// Write header
header := Header{
"OpenTofu-External-Encryption-Method",
1,
}
marshalledHeader, err := json.Marshal(header)
if err != nil {
log.Fatalf("%v", err)
}
_, _ = os.Stdout.Write(append(marshalledHeader, []byte("\n")...))
// Read input
input, err := io.ReadAll(os.Stdin)
if err != nil {
log.Fatalf("Failed to read stdin: %v", err)
}
var inputData Input
if err = json.Unmarshal(input, &inputData); err != nil {
log.Fatalf("Failed to parse stdin: %v", err)
}
// Create output as an XOR of the key and input
outputPayload := make([]byte, len(inputData.Payload))
for i, b := range inputData.Payload {
outputPayload[i] = inputData.Key[i%len(inputData.Key)] ^ b
}
// Write output
output := Output{
Payload: outputPayload,
}
outputData, err := json.Marshal(output)
if err != nil {
log.Fatalf("Failed to stringify output: %v", err)
}
_, _ = os.Stdout.Write(outputData)
}

View File

@ -0,0 +1,38 @@
#!/usr/bin/python
# Copyright (c) The OpenTofu Authors
# SPDX-License-Identifier: MPL-2.0
# Copyright (c) 2023 HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
import base64
import json
import sys
if __name__ == "__main__":
# Make sure that this program isn't running interactively:
if sys.stdout.isatty():
sys.stderr.write("This is an OpenTofu encryption method and is not meant to be run interactively. "
"Please configure this program in your OpenTofu encryption block to use it.\n")
sys.exit(1)
# Write the header:
sys.stdout.write((json.dumps({"magic": "OpenTofu-External-Encryption-Method", "version": 1}) + "\n"))
sys.stdout.flush()
# Read the input:
inputData = sys.stdin.read()
data = json.loads(inputData)
key = base64.b64decode(data["key"])
payload = base64.b64decode(data["payload"])
# Encrypt the data:
outputPayload = bytearray()
for i in range(0, len(payload)):
b = payload[i]
outputPayload.append(key[i%len(key)] ^ b)
# Write the output:
sys.stdout.write(json.dumps({
"payload": base64.b64encode(outputPayload).decode('ascii'),
}))

View File

@ -0,0 +1,105 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package testmethod
import (
"context"
"embed"
"fmt"
"os"
"os/exec"
"path"
"runtime"
"strings"
"testing"
"time"
)
//go:embed data/*
var embedFS embed.FS
// Go builds a key provider as a Go binary and returns its path.
func Go(t *testing.T) []string {
t.Helper()
// goMod is embedded like this because the go:embed tag doesn't like having module files in embedded paths.
var goMod = []byte(`module testmethod
go 1.22`)
tempDir := t.TempDir()
dir := path.Join(tempDir, "testmethod-go")
if err := os.MkdirAll(dir, 0700); err != nil { //nolint:mnd // This check is stupid
t.Errorf("Failed to create temporary directory (%v)", err)
}
if err := os.WriteFile(path.Join(dir, "go.mod"), goMod, 0600); err != nil { //nolint:mnd // This check is stupid
t.Errorf("%v", err)
}
if err := ejectFile("testmethod.go", path.Join(dir, "testmethod.go")); err != nil {
t.Errorf("%v", err)
}
targetBinary := path.Join(dir, "testmethod")
if runtime.GOOS == "windows" {
targetBinary += ".exe"
}
t.Logf("\033[32mCompiling test method binary...\033[0m")
cmd := exec.Command("go", "build", "-o", targetBinary)
cmd.Dir = dir
// TODO move this to a proper test logger once available.
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
t.Skipf("Failed to build test method binary (%v)", err)
}
return []string{targetBinary}
}
// Python returns the path to a Python script acting as an encryption method. The function returns all arguments
// required to run the Python script, including the Python interpreter.
func Python(t *testing.T) []string {
t.Helper()
tempDir := t.TempDir()
dir := path.Join(tempDir, "testmethod-py")
if err := os.MkdirAll(dir, 0700); err != nil { //nolint:mnd // This check is stupid
t.Errorf("Failed to create temporary directory (%v)", err)
}
target := path.Join(dir, "testmethod.py")
if err := ejectFile("testmethod.py", target); err != nil {
t.Errorf("%v", err)
}
python := findExecutable(t, []string{"python", "python3"}, []string{"--version"})
return []string{python, target}
}
func ejectFile(file string, target string) error {
contents, err := embedFS.ReadFile(path.Join("data", file))
if err != nil {
return fmt.Errorf("failed to read %s file from embedded dataset (%w)", file, err)
}
if err := os.WriteFile(target, contents, 0600); err != nil { //nolint:mnd // This check is stupid
return fmt.Errorf("failed to create %s file at %s (%w)", file, target, err)
}
return nil
}
func findExecutable(t *testing.T, options []string, testArguments []string) string {
for _, opt := range options {
var lastError error
func() {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
cmd := exec.CommandContext(ctx, opt, testArguments...)
lastError = cmd.Run()
}()
if lastError == nil {
return opt
}
}
t.Skipf("No viable alternative found between %s", strings.Join(options, ", "))
return ""
}

View File

@ -12,12 +12,14 @@ import (
"strings"
"github.com/hashicorp/hcl/v2"
hclsyntax "github.com/hashicorp/hcl/v2/hclsyntax"
"github.com/hashicorp/hcl/v2/hclwrite"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/json"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/configs/configschema"
"github.com/opentofu/opentofu/internal/lang/marks"
"github.com/opentofu/opentofu/internal/tfdiags"
)
@ -89,6 +91,10 @@ func writeConfigAttributes(addr addrs.AbsResourceInstance, buf *strings.Builder,
}
if attrS.Required {
buf.WriteString(strings.Repeat(" ", indent))
// Handle cases where the name should be contained in quotes
if !hclsyntax.ValidIdentifier(name) {
name = string(hclwrite.TokensForValue(cty.StringVal(name)).Bytes())
}
buf.WriteString(fmt.Sprintf("%s = ", name))
tok := hclwrite.TokensForValue(attrS.EmptyValue())
if _, err := tok.WriteTo(buf); err != nil {
@ -103,6 +109,10 @@ func writeConfigAttributes(addr addrs.AbsResourceInstance, buf *strings.Builder,
writeAttrTypeConstraint(buf, attrS)
} else if attrS.Optional {
buf.WriteString(strings.Repeat(" ", indent))
// Handle cases where the name should be contained in quotes
if !hclsyntax.ValidIdentifier(name) {
name = string(hclwrite.TokensForValue(cty.StringVal(name)).Bytes())
}
buf.WriteString(fmt.Sprintf("%s = ", name))
tok := hclwrite.TokensForValue(attrS.EmptyValue())
if _, err := tok.WriteTo(buf); err != nil {
@ -152,7 +162,7 @@ func writeConfigAttributesFromExisting(addr addrs.AbsResourceInstance, buf *stri
} else {
val = attrS.EmptyValue()
}
if attrS.Sensitive || val.IsMarked() {
if attrS.Sensitive || val.HasMark(marks.Sensitive) {
buf.WriteString("null # sensitive")
} else {
if val.Type() == cty.String {
@ -319,7 +329,7 @@ func writeConfigNestedTypeAttributeFromExisting(addr addrs.AbsResourceInstance,
switch schema.NestedType.Nesting {
case configschema.NestingSingle:
if schema.Sensitive || stateVal.IsMarked() {
if schema.Sensitive || stateVal.HasMark(marks.Sensitive) {
buf.WriteString(strings.Repeat(" ", indent))
buf.WriteString(fmt.Sprintf("%s = {} # sensitive\n", name))
return diags
@ -349,7 +359,7 @@ func writeConfigNestedTypeAttributeFromExisting(addr addrs.AbsResourceInstance,
case configschema.NestingList, configschema.NestingSet:
if schema.Sensitive || stateVal.IsMarked() {
if schema.Sensitive || stateVal.HasMark(marks.Sensitive) {
buf.WriteString(strings.Repeat(" ", indent))
buf.WriteString(fmt.Sprintf("%s = [] # sensitive\n", name))
return diags
@ -369,7 +379,7 @@ func writeConfigNestedTypeAttributeFromExisting(addr addrs.AbsResourceInstance,
buf.WriteString(strings.Repeat(" ", indent+2))
// The entire element is marked.
if listVals[i].IsMarked() {
if listVals[i].HasMark(marks.Sensitive) {
buf.WriteString("{}, # sensitive\n")
continue
}
@ -384,7 +394,7 @@ func writeConfigNestedTypeAttributeFromExisting(addr addrs.AbsResourceInstance,
return diags
case configschema.NestingMap:
if schema.Sensitive || stateVal.IsMarked() {
if schema.Sensitive || stateVal.HasMark(marks.Sensitive) {
buf.WriteString(strings.Repeat(" ", indent))
buf.WriteString(fmt.Sprintf("%s = {} # sensitive\n", name))
return diags
@ -412,7 +422,7 @@ func writeConfigNestedTypeAttributeFromExisting(addr addrs.AbsResourceInstance,
buf.WriteString(fmt.Sprintf("%s = {", key))
// This entire value is marked
if vals[key].IsMarked() {
if vals[key].HasMark(marks.Sensitive) {
buf.WriteString("} # sensitive\n")
continue
}
@ -444,7 +454,7 @@ func writeConfigNestedBlockFromExisting(addr addrs.AbsResourceInstance, buf *str
buf.WriteString(fmt.Sprintf("%s {", name))
// If the entire value is marked, don't print any nested attributes
if stateVal.IsMarked() {
if stateVal.HasMark(marks.Sensitive) {
buf.WriteString("} # sensitive\n")
return diags
}
@ -454,7 +464,7 @@ func writeConfigNestedBlockFromExisting(addr addrs.AbsResourceInstance, buf *str
buf.WriteString("}\n")
return diags
case configschema.NestingList, configschema.NestingSet:
if stateVal.IsMarked() {
if stateVal.HasMark(marks.Sensitive) {
buf.WriteString(strings.Repeat(" ", indent))
buf.WriteString(fmt.Sprintf("%s {} # sensitive\n", name))
return diags
@ -470,7 +480,7 @@ func writeConfigNestedBlockFromExisting(addr addrs.AbsResourceInstance, buf *str
return diags
case configschema.NestingMap:
// If the entire value is marked, don't print any nested attributes
if stateVal.IsMarked() {
if stateVal.HasMark(marks.Sensitive) {
buf.WriteString(fmt.Sprintf("%s {} # sensitive\n", name))
return diags
}
@ -485,7 +495,7 @@ func writeConfigNestedBlockFromExisting(addr addrs.AbsResourceInstance, buf *str
buf.WriteString(strings.Repeat(" ", indent))
buf.WriteString(fmt.Sprintf("%s %q {", name, key))
// This entire map element is marked
if vals[key].IsMarked() {
if vals[key].HasMark(marks.Sensitive) {
buf.WriteString("} # sensitive\n")
return diags
}

View File

@ -82,6 +82,104 @@ resource "tfcoremock_simple_resource" "empty" {
list_block { # OPTIONAL block
nested_value = null # OPTIONAL string
}
}`,
},
"simple_resource_with_propertyname_containing_a_dot": {
schema: &configschema.Block{
BlockTypes: map[string]*configschema.NestedBlock{
"list_block": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"nested_value.json": {
Type: cty.String,
Optional: true,
},
},
},
Nesting: configschema.NestingSingle,
},
},
Attributes: map[string]*configschema.Attribute{
"id": {
Type: cty.String,
Computed: true,
},
"value": {
Type: cty.String,
Optional: true,
},
},
},
addr: addrs.AbsResourceInstance{
Module: nil,
Resource: addrs.ResourceInstance{
Resource: addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "tfcoremock_simple_resource",
Name: "empty",
},
Key: nil,
},
},
provider: addrs.LocalProviderConfig{
LocalName: "tfcoremock",
},
value: cty.NilVal,
expected: `
resource "tfcoremock_simple_resource" "empty" {
value = null # OPTIONAL string
list_block { # OPTIONAL block
"nested_value.json" = null # OPTIONAL string
}
}`,
},
"simple_resource_with_propertyname_containing_double_quotes": {
schema: &configschema.Block{
BlockTypes: map[string]*configschema.NestedBlock{
"list_block": {
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"nested_value\"example": {
Type: cty.String,
Optional: true,
},
},
},
Nesting: configschema.NestingSingle,
},
},
Attributes: map[string]*configschema.Attribute{
"id": {
Type: cty.String,
Computed: true,
},
"value": {
Type: cty.String,
Optional: true,
},
},
},
addr: addrs.AbsResourceInstance{
Module: nil,
Resource: addrs.ResourceInstance{
Resource: addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "tfcoremock_simple_resource",
Name: "empty",
},
Key: nil,
},
},
provider: addrs.LocalProviderConfig{
LocalName: "tfcoremock",
},
value: cty.NilVal,
expected: `
resource "tfcoremock_simple_resource" "empty" {
value = null # OPTIONAL string
list_block { # OPTIONAL block
"nested_value\"example" = null # OPTIONAL string
}
}`,
},
"simple_resource_with_state": {

View File

@ -10,6 +10,7 @@ import (
"fmt"
"log"
"os"
"strings"
cleanhttp "github.com/hashicorp/go-cleanhttp"
getter "github.com/hashicorp/go-getter"
@ -34,7 +35,7 @@ import (
// tradeoffs we're making here.
var goGetterDetectors = []getter.Detector{
new(getter.GitHubDetector),
&withoutQueryParams{d: new(getter.GitHubDetector)},
new(getter.GitDetector),
// Because historically BitBucket supported both Git and Mercurial
@ -167,3 +168,25 @@ func (g reusingGetter) getWithGoGetter(ctx context.Context, instPath, packageAdd
// have got the full module package structure written into instPath.
return nil
}
// withoutQueryParams implements getter.Detector and can be used to wrap another detector.
// It strips any query params that might exist in the src before calling getter.Detector#Detect
// and, once the response is returned, re-attaches the query params to the resulting src.
type withoutQueryParams struct {
d getter.Detector
}
func (w *withoutQueryParams) Detect(src string, pwd string) (string, bool, error) {
var qp string
if idx := strings.Index(src, "?"); idx > -1 {
qp = src[idx+1:]
src = src[:idx]
}
src, ok, err := w.d.Detect(src, pwd)
// Attach the query params only when the wrapped detector returns a value
if len(src) > 0 && len(qp) > 0 {
src += "?" + qp
}
return src, ok, err
}

View File

@ -12,7 +12,6 @@ import (
"encoding/hex"
"errors"
"fmt"
"log"
"os"
"strings"
@ -490,42 +489,57 @@ func (s signatureAuthentication) AcceptableHashes() []Hash {
// Note: currently the registry only returns one key, but this may change in
// the future.
func (s signatureAuthentication) findSigningKey() (*SigningKey, string, error) {
var expiredKey *SigningKey
var expiredKeyID string
for _, key := range s.Keys {
keyCopy := key
keyring, err := openpgp.ReadArmoredKeyRing(strings.NewReader(key.ASCIIArmor))
if err != nil {
return nil, "", fmt.Errorf("error decoding signing key: %w", err)
}
entity, err := openpgp.CheckDetachedSignature(keyring, bytes.NewReader(s.Document), bytes.NewReader(s.Signature), nil)
if !s.shouldEnforceGPGExpiration() && (errors.Is(err, openpgpErrors.ErrKeyExpired) || errors.Is(err, openpgpErrors.ErrSignatureExpired)) {
// Internally openpgp will *only* return the Expired errors if all other checks have succeeded
// This is currently the best way to work around expired provider keys
fmt.Printf("[WARN] Provider %s/%s (%v) gpg key expired, this will fail in future versions of OpenTofu\n", s.Meta.Provider.Namespace, s.Meta.Provider.Type, s.Meta.Provider.Hostname)
err = nil
}
// If the signature issuer does not match the key, keep trying the
// rest of the provided keys.
if errors.Is(err, openpgpErrors.ErrUnknownIssuer) {
continue
}
// Any other signature error is terminal.
if err != nil {
return nil, "", fmt.Errorf("error checking signature: %w", err)
// If in enforcing mode (or if the error isn't related to expiry) return immediately.
if !errors.Is(err, openpgpErrors.ErrKeyExpired) && !errors.Is(err, openpgpErrors.ErrSignatureExpired) {
return nil, "", fmt.Errorf("error checking signature: %w", err)
}
// Otherwise it's an expired key, so save it for later in case we don't find a non-expired key.
if expiredKey == nil {
expiredKey = &keyCopy
if entity != nil && entity.PrimaryKey != nil {
expiredKeyID = entity.PrimaryKey.KeyIdString()
} else {
expiredKeyID = "n/a" //nolint:goconst // This is a placeholder value
}
}
continue
}
// Success! This key verified without an error.
keyID := "n/a"
if entity.PrimaryKey != nil {
keyID = entity.PrimaryKey.KeyIdString()
}
log.Printf("[DEBUG] Provider signed by %s", entityString(entity))
return &key, keyID, nil
}
// If none of the provided keys issued the signature, this package is
// unsigned. This is currently a terminal authentication error.
// Warn only once when ALL keys are expired.
if expiredKey != nil && !s.shouldEnforceGPGExpiration() {
//nolint:forbidigo // This is a warning message and is fine to be handled this way
fmt.Printf("[WARN] Provider %s/%s (%v) gpg key expired, this will fail in future versions of OpenTofu\n",
s.Meta.Provider.Namespace, s.Meta.Provider.Type, s.Meta.Provider.Hostname)
return expiredKey, expiredKeyID, nil
}
// If we got here, no candidate was acceptable.
return nil, "", ErrUnknownIssuer
}

View File

@ -13,8 +13,8 @@ import (
"github.com/apparentlymart/go-versions/versions"
"github.com/apparentlymart/go-versions/versions/constraints"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/tfdiags"
)
// Version represents a particular single version of a provider.
@ -50,6 +50,48 @@ type Warnings = []string
// altogether, which means that it is not required at all.
type Requirements map[addrs.Provider]VersionConstraints
// ProvidersQualification stores whether each provider is referenced implicitly or explicitly.
// This is necessary to be able to warn the user when resources reference a provider that
// is not explicitly defined in a required_providers block. When such an implicitly referenced
// provider is downloaded without a specific provider requirement, it is looked up in the
// default namespace (hashicorp), and the download fails when it does not exist in that namespace.
// Therefore, we want to let the user know which resources are creating this situation.
type ProvidersQualification struct {
Implicit map[addrs.Provider][]ResourceRef
Explicit map[addrs.Provider]struct{}
}
type ResourceRef struct {
CfgRes addrs.ConfigResource
Ref tfdiags.SourceRange
ProviderAttribute bool
}
// AddImplicitProvider saves an addrs.Provider together with the place in the configuration it is referenced from.
func (pq *ProvidersQualification) AddImplicitProvider(provider addrs.Provider, ref ResourceRef) {
if pq.Implicit == nil {
pq.Implicit = map[addrs.Provider][]ResourceRef{}
}
// Skip recording an implicit reference for the provider if it is already explicitly configured.
// This is done because, when collecting these qualifications, there can be at least two resources
// (A from the root module and B from an imported module) where the root module has no explicit
// definition but B's module does. Only when none of the modules has an explicit definition do we
// want to gather all the resources that implicitly reference the provider.
if _, ok := pq.Explicit[provider]; ok {
return
}
refs := pq.Implicit[provider]
refs = append(refs, ref)
pq.Implicit[provider] = refs
}
// AddExplicitProvider saves an addrs.Provider that is explicitly configured in a required_providers block.
func (pq *ProvidersQualification) AddExplicitProvider(provider addrs.Provider) {
if pq.Explicit == nil {
pq.Explicit = map[addrs.Provider]struct{}{}
}
pq.Explicit[provider] = struct{}{}
}
// Merge takes the requirements in the receiver and the requirements in the
// other given value and produces a new set of requirements that combines
// all of the requirements of both.
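For illustration, here is a self-contained sketch of how the implicit/explicit split recorded by ProvidersQualification above can drive the warning its doc comment describes. The provider addresses and resource names are made up, and the local provider/resourceRef types are stand-ins for addrs.Provider and ResourceRef; the two Add helpers are restated with those stand-ins so the snippet runs on its own.

package main

import "fmt"

// provider and resourceRef stand in for addrs.Provider and ResourceRef.
type provider string

type resourceRef struct{ Resource string }

type providersQualification struct {
	Implicit map[provider][]resourceRef
	Explicit map[provider]struct{}
}

func (pq *providersQualification) addImplicit(p provider, ref resourceRef) {
	if pq.Implicit == nil {
		pq.Implicit = map[provider][]resourceRef{}
	}
	if _, ok := pq.Explicit[p]; ok {
		return // an explicit required_providers entry suppresses the warning
	}
	pq.Implicit[p] = append(pq.Implicit[p], ref)
}

func (pq *providersQualification) addExplicit(p provider) {
	if pq.Explicit == nil {
		pq.Explicit = map[provider]struct{}{}
	}
	pq.Explicit[p] = struct{}{}
}

func main() {
	var pq providersQualification
	pq.addExplicit("registry.opentofu.org/hashicorp/aws")
	pq.addImplicit("registry.opentofu.org/hashicorp/aws", resourceRef{Resource: "aws_instance.web"})
	pq.addImplicit("registry.opentofu.org/hashicorp/unknowncorp", resourceRef{Resource: "unknowncorp_thing.a"})

	// Only providers with implicit references and no explicit entry get a warning.
	for p, refs := range pq.Implicit {
		for _, ref := range refs {
			fmt.Printf("[WARN] %s references provider %s without a required_providers entry\n", ref.Resource, p)
		}
	}
}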

View File

@ -288,29 +288,29 @@ var LookupFunc = function.New(&function.Spec{
}
// keep track of marks from the collection and key
var markses []cty.ValueMarks
var marks []cty.ValueMarks
// unmark collection, retain marks to reapply later
mapVar, mapMarks := args[0].Unmark()
markses = append(markses, mapMarks)
marks = append(marks, mapMarks)
// include marks on the key in the result
keyVal, keyMarks := args[1].Unmark()
if len(keyMarks) > 0 {
markses = append(markses, keyMarks)
marks = append(marks, keyMarks)
}
lookupKey := keyVal.AsString()
if !mapVar.IsKnown() {
return cty.UnknownVal(retType).WithMarks(markses...), nil
return cty.UnknownVal(retType).WithMarks(marks...), nil
}
if mapVar.Type().IsObjectType() {
if mapVar.Type().HasAttribute(lookupKey) {
return mapVar.GetAttr(lookupKey).WithMarks(markses...), nil
return mapVar.GetAttr(lookupKey).WithMarks(marks...), nil
}
} else if mapVar.HasIndex(cty.StringVal(lookupKey)) == cty.True {
return mapVar.Index(cty.StringVal(lookupKey)).WithMarks(markses...), nil
return mapVar.Index(cty.StringVal(lookupKey)).WithMarks(marks...), nil
}
if defaultValueSet {
@ -318,7 +318,7 @@ var LookupFunc = function.New(&function.Spec{
if err != nil {
return cty.NilVal, err
}
return defaultVal.WithMarks(markses...), nil
return defaultVal.WithMarks(marks...), nil
}
return cty.UnknownVal(cty.DynamicPseudoType), fmt.Errorf(
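The rename from markses to marks above is cosmetic; the function still collects marks from both the collection and the key and reapplies them to whatever it returns. A self-contained go-cty illustration of that Unmark/WithMarks round-trip (the mark names are arbitrary):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	coll := cty.MapVal(map[string]cty.Value{"a": cty.StringVal("ok")}).Mark("sensitive")
	key := cty.StringVal("a").Mark("from-key")

	// Unmark both inputs, keep their marks, and reapply them to the result,
	// mirroring what LookupFunc does above.
	rawColl, collMarks := coll.Unmark()
	rawKey, keyMarks := key.Unmark()

	result := rawColl.Index(rawKey).WithMarks(collMarks, keyMarks)
	fmt.Println(result.Marks()) // map[from-key:{} sensitive:{}]
}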

View File

@ -30,6 +30,11 @@ func MakeStaticTimestampFunc(static time.Time) function.Function {
Params: []function.Parameter{},
Type: function.StaticReturnType(cty.String),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
// During the validation phase the planTimestamp is zero. Returning an unknown value forces
// the HCL evaluator to defer evaluation of any other expressions that depend on this value.
if static.IsZero() {
return cty.UnknownVal(cty.String), nil
}
return cty.StringVal(static.Format(time.RFC3339)), nil
},
})
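The early return above works because unknown values propagate: any expression built on an unknown plan timestamp stays unknown during validation instead of failing. A small self-contained sketch of that propagation, using go-cty's stdlib FormatDate rather than OpenTofu's own function table:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// Stands in for plantimestamp() during validation, when the timestamp is unknown.
	ts := cty.UnknownVal(cty.String)

	// A dependent expression such as formatdate("YYYY", plantimestamp())
	// also evaluates to an unknown value rather than an error.
	got, err := stdlib.FormatDate(cty.StringVal("YYYY"), ts)
	fmt.Println(got.IsKnown(), err) // false <nil>
}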

View File

@ -185,3 +185,49 @@ func TestTimeCmp(t *testing.T) {
})
}
}
func TestMakeStaticTimestampFunc(t *testing.T) {
tests := []struct {
Name string
// Setup is a function so that the generated time value is bound to the expected result value.
Setup func() (time.Time, cty.Value)
}{
{
Name: "zero",
Setup: func() (time.Time, cty.Value) {
in := time.Time{}
out := cty.UnknownVal(cty.String)
return in, out
},
},
{
Name: "now",
Setup: func() (time.Time, cty.Value) {
in := time.Now()
out := cty.StringVal(in.Format(time.RFC3339))
return in, out
},
},
{
Name: "one year later",
Setup: func() (time.Time, cty.Value) {
in := time.Now().Add(8766 * time.Hour) // 1 year later
out := cty.StringVal(in.Format(time.RFC3339))
return in, out
},
},
}
for _, test := range tests {
t.Run(fmt.Sprintf("MakeStaticTimestampFunc(%s)", test.Name), func(t *testing.T) {
in, want := test.Setup()
got, err := MakeStaticTimestampFunc(in).Call(nil)
if err != nil {
t.Fatalf("MakeStaticTimestampFunc is not meant to return error but got one: %v", err)
}
if !got.RawEquals(want) {
t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, want)
}
})
}
}

View File

@ -181,12 +181,13 @@ var Base64GzipFunc = function.New(&function.Spec{
},
})
// Base64GunzipFunc constructs a function that Bae64 decodes a string and decompresses the result with gunzip.
// Base64GunzipFunc constructs a function that Base64 decodes a string and decompresses the result with gunzip.
var Base64GunzipFunc = function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "str",
Type: cty.String,
Name: "str",
Type: cty.String,
AllowMarked: true,
},
},
Type: function.StaticReturnType(cty.String),
@ -208,7 +209,7 @@ var Base64GunzipFunc = function.New(&function.Spec{
return cty.UnknownVal(cty.String), fmt.Errorf("failed to read gunzip raw data: %w", err)
}
return cty.StringVal(string(gunzip)), nil
return cty.StringVal(string(gunzip)).WithMarks(strMarks), nil
},
})
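Setting AllowMarked means the implementation now receives marked values itself, so it can redact sensitive input in error messages and must reapply the marks to its result, which is what the WithMarks(strMarks) call above does. A self-contained sketch of the same pattern with a made-up shout function; the "sensitive" string mark stands in for OpenTofu's marks.Sensitive:

package main

import (
	"fmt"
	"strings"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

func main() {
	shout := function.New(&function.Spec{
		Params: []function.Parameter{{Name: "str", Type: cty.String, AllowMarked: true}},
		Type:   function.StaticReturnType(cty.String),
		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
			str, strMarks := args[0].Unmark()
			if str.AsString() == "" {
				display := fmt.Sprintf("%q", str.AsString())
				if len(strMarks) > 0 {
					display = "(sensitive value)" // never echo marked input in errors
				}
				return cty.UnknownVal(cty.String), fmt.Errorf("empty input %s", display)
			}
			// Reapply the marks ourselves; with AllowMarked the function
			// machinery leaves mark handling to the implementation.
			return cty.StringVal(strings.ToUpper(str.AsString())).WithMarks(strMarks), nil
		},
	})

	_, err := shout.Call([]cty.Value{cty.StringVal("").Mark("sensitive")})
	fmt.Println(err) // empty input (sensitive value)

	v, _ := shout.Call([]cty.Value{cty.StringVal("ok").Mark("sensitive")})
	fmt.Println(v.Marks()) // map[sensitive:{}]
}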

View File

@ -166,12 +166,27 @@ func TestBase64Gunzip(t *testing.T) {
tests := []struct {
String cty.Value
Want cty.Value
Err bool
Err string
}{
{
cty.StringVal("H4sIAAAAAAAA/ypJLS4BAAAA//8BAAD//wx+f9gEAAAA"),
cty.StringVal("test"),
false,
"",
},
{
cty.StringVal("H4sIAAAAAAAA/ypJLS4BAAAA//8BAAD//wx+f9gEAAAA").Mark(marks.Sensitive),
cty.StringVal("test").Mark(marks.Sensitive),
"",
},
{
cty.StringVal("hello"),
cty.NilVal,
`failed to decode base64 data "hello"`,
},
{
cty.StringVal("hello").Mark(marks.Sensitive),
cty.NilVal,
`failed to decode base64 data (sensitive value)`,
},
}
@ -179,10 +194,13 @@ func TestBase64Gunzip(t *testing.T) {
t.Run(fmt.Sprintf("base64gunzip(%#v)", test.String), func(t *testing.T) {
got, err := Base64Gunzip(test.String)
if test.Err {
if test.Err != "" {
if err == nil {
t.Fatal("succeeded; want error")
}
if err.Error() != test.Err {
t.Fatalf("got unexpected error: %v", err.Error())
}
return
} else if err != nil {
t.Fatalf("unexpected error: %s", err)

View File

@ -12,8 +12,8 @@ import (
"github.com/zclconf/go-cty/cty"
)
func redactIfSensitive(value interface{}, markses ...cty.ValueMarks) string {
if marks.Has(cty.DynamicVal.WithMarks(markses...), marks.Sensitive) {
func redactIfSensitive(value interface{}, valueMarks ...cty.ValueMarks) string {
if marks.Has(cty.DynamicVal.WithMarks(valueMarks...), marks.Sensitive) {
return "(sensitive value)"
}
switch v := value.(type) {

View File

@ -30,8 +30,7 @@ var SensitiveFunc = function.New(&function.Spec{
return args[0].Type(), nil
},
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
val, _ := args[0].Unmark()
return val.Mark(marks.Sensitive), nil
return args[0].Mark(marks.Sensitive), nil
},
})
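The behavioural effect of dropping the Unmark() call is that pre-existing, non-sensitive marks now survive alongside the sensitive one. A self-contained go-cty comparison of the old and new behaviour, with plain string marks standing in for OpenTofu's internal mark values:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Plain string marks stand in for marks.Sensitive and any other mark.
	const bloop, sensitive = "bloop", "sensitive"

	v := cty.NumberIntVal(1).Mark(bloop)

	// Old behaviour: Unmark() dropped "bloop" before marking as sensitive.
	old, _ := v.Unmark()
	old = old.Mark(sensitive)

	// New behaviour: marking the value directly keeps both marks.
	now := v.Mark(sensitive)

	fmt.Println(old.HasMark(bloop), now.HasMark(bloop)) // false true
}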

View File

@ -48,10 +48,8 @@ func TestSensitive(t *testing.T) {
``,
},
{
// A value with some non-standard mark gets "fixed" to be marked
// with the standard "sensitive" mark. (This situation occurring
// would imply an inconsistency/bug elsewhere, so we're just
// being robust about it here.)
// Any non-sensitive marks must be propagated alongside
// the sensitive one.
cty.NumberIntVal(1).Mark("bloop"),
``,
},
@ -83,15 +81,11 @@ func TestSensitive(t *testing.T) {
t.Errorf("result is not marked sensitive")
}
inputMarks := test.Input.Marks()
delete(inputMarks, marks.Sensitive)
gotRaw, gotMarks := got.Unmark()
if len(gotMarks) != 1 {
// We're only expecting to have the "sensitive" mark we checked
// above. Any others are an error, even if they happen to
// appear alongside "sensitive". (We might change this rule
// if someday we decide to use marks for some additional
// unrelated thing in OpenTofu, but currently we assume that
// _all_ marks imply sensitive, and so returning any other
// marks would be confusing.)
if len(gotMarks) != len(inputMarks)+1 {
t.Errorf("extraneous marks %#v", gotMarks)
}

View File

@ -349,6 +349,10 @@ func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateReques
return p.ImportResourceStateResponse
}
func (p *MockProvider) MoveResourceState(_ providers.MoveResourceStateRequest) providers.MoveResourceStateResponse {
panic("not implemented")
}
func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse {
p.Lock()
defer p.Unlock()

View File

@ -302,7 +302,7 @@ func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequ
protoReq := &proto.UpgradeResourceState_Request{
TypeName: r.TypeName,
Version: int64(r.Version),
Version: r.Version,
RawState: &proto.RawState{
Json: r.RawStateJSON,
Flatmap: r.RawStateFlatmap,
@ -638,6 +638,53 @@ func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateReques
return resp
}
func (p *GRPCProvider) MoveResourceState(r providers.MoveResourceStateRequest) providers.MoveResourceStateResponse {
var resp providers.MoveResourceStateResponse
logger.Trace("GRPCProvider: MoveResourceState")
schema := p.GetProviderSchema()
if schema.Diagnostics.HasErrors() {
resp.Diagnostics = schema.Diagnostics
return resp
}
resourceSchema, ok := schema.ResourceTypes[r.TargetTypeName]
if !ok {
resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TargetTypeName))
return resp
}
protoReq := &proto.MoveResourceState_Request{
SourceProviderAddress: r.SourceProviderAddress,
SourceTypeName: r.SourceTypeName,
//nolint:gosec // this will be refactored eventually
SourceSchemaVersion: int64(r.SourceSchemaVersion),
SourceState: &proto.RawState{
Json: r.SourceStateJSON,
Flatmap: r.SourceStateFlatmap,
},
SourcePrivate: r.SourcePrivate,
TargetTypeName: r.TargetTypeName,
}
protoResp, err := p.client.MoveResourceState(p.ctx, protoReq)
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
return resp
}
resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
state, err := decodeDynamicValue(protoResp.TargetState, resourceSchema.Block.ImpliedType())
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
resp.TargetState = state
resp.TargetPrivate = protoResp.TargetPrivate
return resp
}
func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) {
logger.Trace("GRPCProvider: ReadDataSource")

View File

@ -388,6 +388,42 @@ func TestGRPCProvider_UpgradeResourceStateJSON(t *testing.T) {
}
}
func TestGRPCProvider_MoveResourceState(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{
client: client,
}
client.EXPECT().MoveResourceState(
gomock.Any(),
gomock.Any(),
).Return(&proto.MoveResourceState_Response{
TargetState: &proto.DynamicValue{
Msgpack: []byte("\x81\xa4attr\xa3bar"),
},
TargetPrivate: []byte(`{"meta": "data"}`),
}, nil)
resp := p.MoveResourceState(providers.MoveResourceStateRequest{
SourceTypeName: "resource_old",
SourceSchemaVersion: 0,
TargetTypeName: "resource",
})
checkDiags(t, resp.Diagnostics)
expectedState := cty.ObjectVal(map[string]cty.Value{
"attr": cty.StringVal("bar"),
})
expectedPrivate := []byte(`{"meta": "data"}`)
if !cmp.Equal(expectedState, resp.TargetState, typeComparer, valueComparer, equateEmpty) {
t.Fatal(cmp.Diff(expectedState, resp.TargetState, typeComparer, valueComparer, equateEmpty))
}
if !bytes.Equal(expectedPrivate, resp.TargetPrivate) {
t.Fatalf("expected %q, got %q", expectedPrivate, resp.TargetPrivate)
}
}
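The opaque Msgpack bytes in the mocked response above are just the msgpack encoding of {"attr": "bar"}; decoding them against the resource schema's implied type is what decodeDynamicValue does for TargetState. A self-contained check using go-cty's msgpack package:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctymsgpack "github.com/zclconf/go-cty/cty/msgpack"
)

func main() {
	ty := cty.Object(map[string]cty.Type{"attr": cty.String})
	v, err := ctymsgpack.Unmarshal([]byte("\x81\xa4attr\xa3bar"), ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.GetAttr("attr").AsString()) // bar
}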
func TestGRPCProvider_Configure(t *testing.T) {
client := mockProviderClient(t)
p := &GRPCProvider{

View File

@ -627,6 +627,53 @@ func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateReques
return resp
}
func (p *GRPCProvider) MoveResourceState(r providers.MoveResourceStateRequest) providers.MoveResourceStateResponse {
logger.Trace("GRPCProvider.v6: MoveResourceState")
var resp providers.MoveResourceStateResponse
schema := p.GetProviderSchema()
if schema.Diagnostics.HasErrors() {
resp.Diagnostics = schema.Diagnostics
return resp
}
resourceSchema, ok := schema.ResourceTypes[r.TargetTypeName]
if !ok {
resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown resource type %q", r.TargetTypeName))
return resp
}
protoReq := &proto6.MoveResourceState_Request{
SourceProviderAddress: r.SourceProviderAddress,
SourceTypeName: r.SourceTypeName,
//nolint:gosec // this will be refactored eventually
SourceSchemaVersion: int64(r.SourceSchemaVersion),
SourceState: &proto6.RawState{
Json: r.SourceStateJSON,
Flatmap: r.SourceStateFlatmap,
},
SourcePrivate: r.SourcePrivate,
TargetTypeName: r.TargetTypeName,
}
protoResp, err := p.client.MoveResourceState(p.ctx, protoReq)
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(grpcErr(err))
return resp
}
resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
state, err := decodeDynamicValue(protoResp.TargetState, resourceSchema.Block.ImpliedType())
if err != nil {
resp.Diagnostics = resp.Diagnostics.Append(err)
return resp
}
resp.TargetState = state
resp.TargetPrivate = protoResp.TargetPrivate
return resp
}
func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) {
logger.Trace("GRPCProvider.v6: ReadDataSource")

Some files were not shown because too many files have changed in this diff.