Merge branch 'main' into jd/add-forgot-output

Signed-off-by: Jarrett Duskey <jarrett@duskey.io>
This commit is contained in:
Jarrett Duskey 2025-01-10 23:50:34 -05:00 committed by GitHub
commit 9995e9c799
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
85 changed files with 10296 additions and 2968 deletions

View File

@ -13,12 +13,12 @@ NEW FEATURES:
ENHANCEMENTS:
* OpenTofu will now recommend using `-exclude` instead of `-target`, when possible, in the error messages about unknown values in `count` and `for_each` arguments, thereby providing a more definitive workaround. ([#2154](https://github.com/opentofu/opentofu/pull/2154))
* State encryption now supports using external programs as key providers. Additionally, the PBKDF2 key provider now supports chaining via the `chain` parameter. ([#2023](https://github.com/opentofu/opentofu/pull/2023))
* Added count of forgotten resources to plan and apply outputs. ([#1956](https://github.com/opentofu/opentofu/issues/1956))
BUG FIXES:
* `tofu init` command no longer attempts to read encryption keys when the `-backend=false` flag is set. ([#2293](https://github.com/opentofu/opentofu/pull/2293))
* Changes in `create_before_destroy` for resources which require replacement are now properly handled when refresh is disabled. ([#2248](https://github.com/opentofu/opentofu/pull/2248))
* Fixed an issue where an invalid provider name in the `provider_meta` block would crash OpenTofu rather than report an error ([#2347](https://github.com/opentofu/opentofu/pull/2347))
## Previous Releases

View File

@ -1,6 +1,8 @@
Kuba Martin <kubam@spacelift.io> @cube2222
James Humphries <jamesh@spacelift.io> @Yantrio
Andy Hayes <andrew.hayes@harness.io> @Andrew-Hayes
Arel Rabinowitz <arel.rabinowitz@env0.com> @RLRabinowitz
Arthur Bonic <arthurb@opentofu.org> @abstractionfactory
Christian Mesh <christianm@opentofu.org> @cam72cam
János Bonic <janosb@opentofu.org> @janosdebugs
Ronny Orot <ronny.orot@env0.com> @Evi1Pumpkin
Ilia Gogotchuri <gogotchuri@opentofu.org> @Gogotchuri
James Humphries <jamesh@spacelift.io> @Yantrio
Martin Atkins <apparentlymart@opentofu.org> @apparentlymart
Oleksandr Levchenkov <ollevche@opentofu.org> @ollevche

View File

@ -0,0 +1,637 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// Terraform Plugin RPC protocol version 5.6
//
// This file defines version 5.6 of the RPC protocol. To implement a plugin
// against this protocol, copy this definition into your own codebase and
// use protoc to generate stubs for your target language.
//
// This file will not be updated. Any minor versions of protocol 5 to follow
// should copy this file and modify the copy while maintaining backwards
// compatibility. Breaking changes, if any are required, will come
// in a subsequent major version with its own separate proto definition.
//
// Note that only the proto files included in a release tag of Terraform are
// official protocol releases. Proto files taken from other commits may include
// incomplete changes or features that did not make it into a final release.
// In all reasonable cases, plugin developers should take the proto file from
// the tag of the most recent release of Terraform, and not from the main
// branch or any other development branch.
//
syntax = "proto3";
option go_package = "github.com/opentofu/opentofu/internal/tfplugin5";
package tfplugin5;
// DynamicValue is an opaque encoding of terraform data, with the field name
// indicating the encoding scheme used.
message DynamicValue {
bytes msgpack = 1;
bytes json = 2;
}
// Diagnostic describes a warning or error produced by the provider, to be
// surfaced to the user by the caller.
message Diagnostic {
// Severity indicates how serious the diagnostic is.
enum Severity {
INVALID = 0;
ERROR = 1;
WARNING = 2;
}
// severity is the seriousness of this diagnostic.
Severity severity = 1;
// summary is a description of the problem.
string summary = 2;
// detail optionally elaborates on summary.
string detail = 3;
// attribute optionally identifies the attribute within the relevant
// object that this diagnostic relates to.
AttributePath attribute = 4;
}
message FunctionError {
string text = 1;
// The optional function_argument records the index position of the
// argument which caused the error.
optional int64 function_argument = 2;
}
message AttributePath {
message Step {
oneof selector {
// Set "attribute_name" to represent looking up an attribute
// in the current object value.
string attribute_name = 1;
// Set "element_key_*" to represent looking up an element in
// an indexable collection type.
string element_key_string = 2;
int64 element_key_int = 3;
}
}
repeated Step steps = 1;
}
// Stop groups the request/response types for the Stop RPC, used for
// graceful shutdown of the plugin (see the "Graceful Shutdown" section of
// the Provider and Provisioner services).
message Stop {
message Request {
}
message Response {
// Error describes a failure to stop, if any; empty on success.
// NOTE(review): the PascalCase field name is non-idiomatic protobuf, but
// it is part of the published protocol and must not be renamed (renaming
// would change the JSON encoding and generated accessors).
string Error = 1;
}
}
// RawState holds the stored state for a resource to be upgraded by the
// provider. It can be in one of two formats, the current json encoded format
// in bytes, or the legacy flatmap format as a map of strings.
message RawState {
// json is the stored state in the current json encoded format.
bytes json = 1;
// flatmap is the stored state in the legacy flatmap format, as a map of
// strings. Only one of json or flatmap is expected to be populated.
map<string, string> flatmap = 2;
}
// StringKind indicates how a description string should be interpreted by
// the consumer (e.g. for rendering documentation).
enum StringKind {
// PLAIN indicates plain text with no special formatting.
PLAIN = 0;
// MARKDOWN indicates the string may contain Markdown formatting.
MARKDOWN = 1;
}
// Schema is the configuration schema for a Resource, Provider, or Provisioner.
message Schema {
message Block {
int64 version = 1;
repeated Attribute attributes = 2;
repeated NestedBlock block_types = 3;
string description = 4;
StringKind description_kind = 5;
bool deprecated = 6;
}
message Attribute {
string name = 1;
bytes type = 2;
string description = 3;
bool required = 4;
bool optional = 5;
bool computed = 6;
bool sensitive = 7;
StringKind description_kind = 8;
bool deprecated = 9;
}
message NestedBlock {
enum NestingMode {
INVALID = 0;
SINGLE = 1;
LIST = 2;
SET = 3;
MAP = 4;
GROUP = 5;
}
string type_name = 1;
Block block = 2;
NestingMode nesting = 3;
int64 min_items = 4;
int64 max_items = 5;
}
// The version of the schema.
// Schemas are versioned, so that providers can upgrade a saved resource
// state when the schema is changed.
int64 version = 1;
// Block is the top level configuration block for this schema.
Block block = 2;
}
// ServerCapabilities allows providers to communicate extra information
// regarding supported protocol features. This is used to indicate
// availability of certain forward-compatible changes which may be optional
// in a major protocol version, but cannot be tested for directly.
message ServerCapabilities {
// The plan_destroy capability signals that a provider expects a call
// to PlanResourceChange when a resource is going to be destroyed.
bool plan_destroy = 1;
// The get_provider_schema_optional capability indicates that this
// provider does not require calling GetProviderSchema to operate
// normally, and the caller can use a cached copy of the provider's
// schema.
bool get_provider_schema_optional = 2;
// The move_resource_state capability signals that a provider supports the
// MoveResourceState RPC.
bool move_resource_state = 3;
}
// ClientCapabilities allows Terraform to publish information regarding
// supported protocol features. This is used to indicate availability of
// certain forward-compatible changes which may be optional in a major
// protocol version, but cannot be tested for directly.
message ClientCapabilities {
// The deferral_allowed capability signals that the client is able to
// handle deferred responses from the provider.
bool deferral_allowed = 1;
}
message Function {
// parameters is the ordered list of positional function parameters.
repeated Parameter parameters = 1;
// variadic_parameter is an optional final parameter which accepts
// zero or more argument values, in which Terraform will send an
// ordered list of the parameter type.
Parameter variadic_parameter = 2;
// return is the function result.
Return return = 3;
// summary is the human-readable shortened documentation for the function.
string summary = 4;
// description is human-readable documentation for the function.
string description = 5;
// description_kind is the formatting of the description.
StringKind description_kind = 6;
// deprecation_message is human-readable documentation if the
// function is deprecated.
string deprecation_message = 7;
message Parameter {
// name is the human-readable display name for the parameter.
string name = 1;
// type is the type constraint for the parameter.
bytes type = 2;
// allow_null_value when enabled denotes that a null argument value can
// be passed to the provider. When disabled, Terraform returns an error
// if the argument value is null.
bool allow_null_value = 3;
// allow_unknown_values when enabled denotes that only wholly known
// argument values will be passed to the provider. When disabled,
// Terraform skips the function call entirely and assumes an unknown
// value result from the function.
bool allow_unknown_values = 4;
// description is human-readable documentation for the parameter.
string description = 5;
// description_kind is the formatting of the description.
StringKind description_kind = 6;
}
message Return {
// type is the type constraint for the function result.
bytes type = 1;
}
}
// Deferred is a message that indicates that change is deferred for a reason.
message Deferred {
// Reason is the reason for deferring the change.
enum Reason {
// UNKNOWN is the default value, and should not be used.
UNKNOWN = 0;
// RESOURCE_CONFIG_UNKNOWN is used when the config is partially unknown and the real
// values need to be known before the change can be planned.
RESOURCE_CONFIG_UNKNOWN = 1;
// PROVIDER_CONFIG_UNKNOWN is used when parts of the provider configuration
// are unknown, e.g. the provider configuration is only known after the apply is done.
PROVIDER_CONFIG_UNKNOWN = 2;
// ABSENT_PREREQ is used when a hard dependency has not been satisfied.
ABSENT_PREREQ = 3;
}
// reason is the reason for deferring the change.
Reason reason = 1;
}
service Provider {
//////// Information about what a provider supports/expects
// GetMetadata returns upfront information about server capabilities and
// supported resource types without requiring the server to instantiate all
// schema information, which may be memory intensive. This RPC is optional,
// where clients may receive an unimplemented RPC error. Clients should
// ignore the error and call the GetSchema RPC as a fallback.
rpc GetMetadata(GetMetadata.Request) returns (GetMetadata.Response);
// GetSchema returns schema information for the provider, data resources,
// and managed resources.
rpc GetSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response);
rpc PrepareProviderConfig(PrepareProviderConfig.Request) returns (PrepareProviderConfig.Response);
rpc ValidateResourceTypeConfig(ValidateResourceTypeConfig.Request) returns (ValidateResourceTypeConfig.Response);
rpc ValidateDataSourceConfig(ValidateDataSourceConfig.Request) returns (ValidateDataSourceConfig.Response);
rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response);
//////// One-time initialization, called before other functions below
rpc Configure(Configure.Request) returns (Configure.Response);
//////// Managed Resource Lifecycle
rpc ReadResource(ReadResource.Request) returns (ReadResource.Response);
rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response);
rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response);
rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response);
rpc MoveResourceState(MoveResourceState.Request) returns (MoveResourceState.Response);
rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response);
// Functions
// GetFunctions returns the definitions of all functions.
rpc GetFunctions(GetFunctions.Request) returns (GetFunctions.Response);
// CallFunction runs the provider-defined function logic and returns
// the result with any diagnostics.
rpc CallFunction(CallFunction.Request) returns (CallFunction.Response);
//////// Graceful Shutdown
rpc Stop(Stop.Request) returns (Stop.Response);
}
message GetMetadata {
message Request {
}
message Response {
ServerCapabilities server_capabilities = 1;
repeated Diagnostic diagnostics = 2;
repeated DataSourceMetadata data_sources = 3;
repeated ResourceMetadata resources = 4;
// functions returns metadata for any functions.
repeated FunctionMetadata functions = 5;
}
message FunctionMetadata {
// name is the function name.
string name = 1;
}
message DataSourceMetadata {
string type_name = 1;
}
message ResourceMetadata {
string type_name = 1;
}
}
message GetProviderSchema {
message Request {
}
message Response {
Schema provider = 1;
map<string, Schema> resource_schemas = 2;
map<string, Schema> data_source_schemas = 3;
repeated Diagnostic diagnostics = 4;
Schema provider_meta = 5;
ServerCapabilities server_capabilities = 6;
// functions is a mapping of function names to definitions.
map<string, Function> functions = 7;
}
}
message PrepareProviderConfig {
message Request {
DynamicValue config = 1;
}
message Response {
DynamicValue prepared_config = 1;
repeated Diagnostic diagnostics = 2;
}
}
message UpgradeResourceState {
// Request is the message that is sent to the provider during the
// UpgradeResourceState RPC.
//
// This message intentionally does not include configuration data as any
// configuration-based or configuration-conditional changes should occur
// during the PlanResourceChange RPC. Additionally, the configuration is
// not guaranteed to exist (in the case of resource destruction), be wholly
// known, nor match the given prior state, which could lead to unexpected
// provider behaviors for practitioners.
message Request {
// type_name is the managed resource type whose state is being upgraded.
string type_name = 1;
// version is the schema_version number recorded in the state file
int64 version = 2;
// raw_state is the raw state as stored for the resource. Core does
// not have access to the schema of prior_version, so it's the
// provider's responsibility to interpret this value using the
// appropriate older schema. The raw_state will be the json encoded
// state, or a legacy flat-mapped format.
RawState raw_state = 3;
}
message Response {
// new_state is a msgpack-encoded data structure that, when interpreted with
// the _current_ schema for this resource type, is functionally equivalent to
// that which was given in prior_state_raw.
DynamicValue upgraded_state = 1;
// diagnostics describes any errors encountered during migration that could not
// be safely resolved, and warnings about any possibly-risky assumptions made
// in the upgrade process.
repeated Diagnostic diagnostics = 2;
}
}
message ValidateResourceTypeConfig {
message Request {
string type_name = 1;
DynamicValue config = 2;
}
message Response {
repeated Diagnostic diagnostics = 1;
}
}
message ValidateDataSourceConfig {
message Request {
string type_name = 1;
DynamicValue config = 2;
}
message Response {
repeated Diagnostic diagnostics = 1;
}
}
message Configure {
message Request {
string terraform_version = 1;
DynamicValue config = 2;
ClientCapabilities client_capabilities = 3;
}
message Response {
repeated Diagnostic diagnostics = 1;
}
}
message ReadResource {
// Request is the message that is sent to the provider during the
// ReadResource RPC.
//
// This message intentionally does not include configuration data as any
// configuration-based or configuration-conditional changes should occur
// during the PlanResourceChange RPC. Additionally, the configuration is
// not guaranteed to be wholly known nor match the given prior state, which
// could lead to unexpected provider behaviors for practitioners.
message Request {
string type_name = 1;
DynamicValue current_state = 2;
bytes private = 3;
DynamicValue provider_meta = 4;
ClientCapabilities client_capabilities = 5;
}
message Response {
DynamicValue new_state = 1;
repeated Diagnostic diagnostics = 2;
bytes private = 3;
// deferred is set if the provider is deferring the change. If set the caller
// needs to handle the deferral.
Deferred deferred = 4;
}
}
message PlanResourceChange {
message Request {
string type_name = 1;
DynamicValue prior_state = 2;
DynamicValue proposed_new_state = 3;
DynamicValue config = 4;
bytes prior_private = 5;
DynamicValue provider_meta = 6;
ClientCapabilities client_capabilities = 7;
}
message Response {
DynamicValue planned_state = 1;
repeated AttributePath requires_replace = 2;
bytes planned_private = 3;
repeated Diagnostic diagnostics = 4;
// This may be set only by the helper/schema "SDK" in the main Terraform
// repository, to request that Terraform Core >=0.12 permit additional
// inconsistencies that can result from the legacy SDK type system
// and its imprecise mapping to the >=0.12 type system.
// The change in behavior implied by this flag makes sense only for the
// specific details of the legacy SDK type system, and are not a general
// mechanism to avoid proper type handling in providers.
//
// ==== DO NOT USE THIS ====
// ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
// ==== DO NOT USE THIS ====
bool legacy_type_system = 5;
// deferred is set if the provider is deferring the change. If set the caller
// needs to handle the deferral.
Deferred deferred = 6;
}
}
message ApplyResourceChange {
message Request {
string type_name = 1;
DynamicValue prior_state = 2;
DynamicValue planned_state = 3;
DynamicValue config = 4;
bytes planned_private = 5;
DynamicValue provider_meta = 6;
}
message Response {
DynamicValue new_state = 1;
bytes private = 2;
repeated Diagnostic diagnostics = 3;
// This may be set only by the helper/schema "SDK" in the main Terraform
// repository, to request that Terraform Core >=0.12 permit additional
// inconsistencies that can result from the legacy SDK type system
// and its imprecise mapping to the >=0.12 type system.
// The change in behavior implied by this flag makes sense only for the
// specific details of the legacy SDK type system, and are not a general
// mechanism to avoid proper type handling in providers.
//
// ==== DO NOT USE THIS ====
// ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
// ==== DO NOT USE THIS ====
bool legacy_type_system = 4;
}
}
message ImportResourceState {
message Request {
string type_name = 1;
string id = 2;
ClientCapabilities client_capabilities = 3;
}
message ImportedResource {
string type_name = 1;
DynamicValue state = 2;
bytes private = 3;
}
message Response {
repeated ImportedResource imported_resources = 1;
repeated Diagnostic diagnostics = 2;
// deferred is set if the provider is deferring the change. If set the caller
// needs to handle the deferral.
Deferred deferred = 3;
}
}
message MoveResourceState {
message Request {
// The address of the provider the resource is being moved from.
string source_provider_address = 1;
// The resource type that the resource is being moved from.
string source_type_name = 2;
// The schema version of the resource type that the resource is being
// moved from.
int64 source_schema_version = 3;
// The raw state of the resource being moved. Only the json field is
// populated, as there should be no legacy providers using the flatmap
// format that support newly introduced RPCs.
RawState source_state = 4;
// The resource type that the resource is being moved to.
string target_type_name = 5;
// The private state of the resource being moved.
bytes source_private = 6;
}
message Response {
// The state of the resource after it has been moved.
DynamicValue target_state = 1;
// Any diagnostics that occurred during the move.
repeated Diagnostic diagnostics = 2;
// The private state of the resource after it has been moved.
bytes target_private = 3;
}
}
message ReadDataSource {
message Request {
string type_name = 1;
DynamicValue config = 2;
DynamicValue provider_meta = 3;
ClientCapabilities client_capabilities = 4;
}
message Response {
DynamicValue state = 1;
repeated Diagnostic diagnostics = 2;
// deferred is set if the provider is deferring the change. If set the caller
// needs to handle the deferral.
Deferred deferred = 3;
}
}
service Provisioner {
rpc GetSchema(GetProvisionerSchema.Request) returns (GetProvisionerSchema.Response);
rpc ValidateProvisionerConfig(ValidateProvisionerConfig.Request) returns (ValidateProvisionerConfig.Response);
rpc ProvisionResource(ProvisionResource.Request) returns (stream ProvisionResource.Response);
rpc Stop(Stop.Request) returns (Stop.Response);
}
message GetProvisionerSchema {
message Request {
}
message Response {
Schema provisioner = 1;
repeated Diagnostic diagnostics = 2;
}
}
message ValidateProvisionerConfig {
message Request {
DynamicValue config = 1;
}
message Response {
repeated Diagnostic diagnostics = 1;
}
}
message ProvisionResource {
message Request {
DynamicValue config = 1;
DynamicValue connection = 2;
}
message Response {
string output = 1;
repeated Diagnostic diagnostics = 2;
}
}
message GetFunctions {
message Request {}
message Response {
// functions is a mapping of function names to definitions.
map<string, Function> functions = 1;
// diagnostics is any warnings or errors.
repeated Diagnostic diagnostics = 2;
}
}
message CallFunction {
message Request {
// name is the name of the function being called.
string name = 1;
// arguments is the data of each function argument value.
repeated DynamicValue arguments = 2;
}
message Response {
// result is result value after running the function logic.
DynamicValue result = 1;
// error is any error from the function logic.
FunctionError error = 2;
}
}

View File

@ -0,0 +1,700 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// Terraform Plugin RPC protocol version 5.7
//
// This file defines version 5.7 of the RPC protocol. To implement a plugin
// against this protocol, copy this definition into your own codebase and
// use protoc to generate stubs for your target language.
//
// This file will not be updated. Any minor versions of protocol 5 to follow
// should copy this file and modify the copy while maintaining backwards
// compatibility. Breaking changes, if any are required, will come
// in a subsequent major version with its own separate proto definition.
//
// Note that only the proto files included in a release tag of Terraform are
// official protocol releases. Proto files taken from other commits may include
// incomplete changes or features that did not make it into a final release.
// In all reasonable cases, plugin developers should take the proto file from
// the tag of the most recent release of Terraform, and not from the main
// branch or any other development branch.
//
syntax = "proto3";
option go_package = "github.com/opentofu/opentofu/internal/tfplugin5";
import "google/protobuf/timestamp.proto";
package tfplugin5;
// DynamicValue is an opaque encoding of terraform data, with the field name
// indicating the encoding scheme used.
message DynamicValue {
bytes msgpack = 1;
bytes json = 2;
}
message Diagnostic {
enum Severity {
INVALID = 0;
ERROR = 1;
WARNING = 2;
}
Severity severity = 1;
string summary = 2;
string detail = 3;
AttributePath attribute = 4;
}
message FunctionError {
string text = 1;
// The optional function_argument records the index position of the
// argument which caused the error.
optional int64 function_argument = 2;
}
message AttributePath {
message Step {
oneof selector {
// Set "attribute_name" to represent looking up an attribute
// in the current object value.
string attribute_name = 1;
// Set "element_key_*" to represent looking up an element in
// an indexable collection type.
string element_key_string = 2;
int64 element_key_int = 3;
}
}
repeated Step steps = 1;
}
message Stop {
message Request {
}
message Response {
string Error = 1;
}
}
// RawState holds the stored state for a resource to be upgraded by the
// provider. It can be in one of two formats, the current json encoded format
// in bytes, or the legacy flatmap format as a map of strings.
message RawState {
bytes json = 1;
map<string, string> flatmap = 2;
}
enum StringKind {
PLAIN = 0;
MARKDOWN = 1;
}
// Schema is the configuration schema for a Resource, Provider, or Provisioner.
message Schema {
message Block {
int64 version = 1;
repeated Attribute attributes = 2;
repeated NestedBlock block_types = 3;
string description = 4;
StringKind description_kind = 5;
bool deprecated = 6;
}
message Attribute {
string name = 1;
bytes type = 2;
string description = 3;
bool required = 4;
bool optional = 5;
bool computed = 6;
bool sensitive = 7;
StringKind description_kind = 8;
bool deprecated = 9;
}
message NestedBlock {
enum NestingMode {
INVALID = 0;
SINGLE = 1;
LIST = 2;
SET = 3;
MAP = 4;
GROUP = 5;
}
string type_name = 1;
Block block = 2;
NestingMode nesting = 3;
int64 min_items = 4;
int64 max_items = 5;
}
// The version of the schema.
// Schemas are versioned, so that providers can upgrade a saved resource
// state when the schema is changed.
int64 version = 1;
// Block is the top level configuration block for this schema.
Block block = 2;
}
// ServerCapabilities allows providers to communicate extra information
// regarding supported protocol features. This is used to indicate
// availability of certain forward-compatible changes which may be optional
// in a major protocol version, but cannot be tested for directly.
message ServerCapabilities {
// The plan_destroy capability signals that a provider expects a call
// to PlanResourceChange when a resource is going to be destroyed.
bool plan_destroy = 1;
// The get_provider_schema_optional capability indicates that this
// provider does not require calling GetProviderSchema to operate
// normally, and the caller can use a cached copy of the provider's
// schema.
bool get_provider_schema_optional = 2;
// The move_resource_state capability signals that a provider supports the
// MoveResourceState RPC.
bool move_resource_state = 3;
}
// ClientCapabilities allows Terraform to publish information regarding
// supported protocol features. This is used to indicate availability of
// certain forward-compatible changes which may be optional in a major
// protocol version, but cannot be tested for directly.
message ClientCapabilities {
// The deferral_allowed capability signals that the client is able to
// handle deferred responses from the provider.
bool deferral_allowed = 1;
}
message Function {
// parameters is the ordered list of positional function parameters.
repeated Parameter parameters = 1;
// variadic_parameter is an optional final parameter which accepts
// zero or more argument values, in which Terraform will send an
// ordered list of the parameter type.
Parameter variadic_parameter = 2;
// return is the function result.
Return return = 3;
// summary is the human-readable shortened documentation for the function.
string summary = 4;
// description is human-readable documentation for the function.
string description = 5;
// description_kind is the formatting of the description.
StringKind description_kind = 6;
// deprecation_message is human-readable documentation if the
// function is deprecated.
string deprecation_message = 7;
message Parameter {
// name is the human-readable display name for the parameter.
string name = 1;
// type is the type constraint for the parameter.
bytes type = 2;
// allow_null_value when enabled denotes that a null argument value can
// be passed to the provider. When disabled, Terraform returns an error
// if the argument value is null.
bool allow_null_value = 3;
// allow_unknown_values when enabled denotes that only wholly known
// argument values will be passed to the provider. When disabled,
// Terraform skips the function call entirely and assumes an unknown
// value result from the function.
bool allow_unknown_values = 4;
// description is human-readable documentation for the parameter.
string description = 5;
// description_kind is the formatting of the description.
StringKind description_kind = 6;
}
message Return {
// type is the type constraint for the function result.
bytes type = 1;
}
}
// Deferred is a message that indicates that change is deferred for a reason.
message Deferred {
// Reason is the reason for deferring the change.
enum Reason {
// UNKNOWN is the default value, and should not be used.
UNKNOWN = 0;
// RESOURCE_CONFIG_UNKNOWN is used when the config is partially unknown and the real
// values need to be known before the change can be planned.
RESOURCE_CONFIG_UNKNOWN = 1;
// PROVIDER_CONFIG_UNKNOWN is used when parts of the provider configuration
// are unknown, e.g. the provider configuration is only known after the apply is done.
PROVIDER_CONFIG_UNKNOWN = 2;
// ABSENT_PREREQ is used when a hard dependency has not been satisfied.
ABSENT_PREREQ = 3;
}
// reason is the reason for deferring the change.
Reason reason = 1;
}
service Provider {
//////// Information about what a provider supports/expects
// GetMetadata returns upfront information about server capabilities and
// supported resource types without requiring the server to instantiate all
// schema information, which may be memory intensive. This RPC is optional,
// where clients may receive an unimplemented RPC error. Clients should
// ignore the error and call the GetSchema RPC as a fallback.
rpc GetMetadata(GetMetadata.Request) returns (GetMetadata.Response);
// GetSchema returns schema information for the provider, data resources,
// and managed resources.
rpc GetSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response);
rpc PrepareProviderConfig(PrepareProviderConfig.Request) returns (PrepareProviderConfig.Response);
rpc ValidateResourceTypeConfig(ValidateResourceTypeConfig.Request) returns (ValidateResourceTypeConfig.Response);
rpc ValidateDataSourceConfig(ValidateDataSourceConfig.Request) returns (ValidateDataSourceConfig.Response);
rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response);
//////// One-time initialization, called before other functions below
rpc Configure(Configure.Request) returns (Configure.Response);
//////// Managed Resource Lifecycle
rpc ReadResource(ReadResource.Request) returns (ReadResource.Response);
rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response);
rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response);
rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response);
rpc MoveResourceState(MoveResourceState.Request) returns (MoveResourceState.Response);
rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response);
//////// Ephemeral Resource Lifecycle
rpc ValidateEphemeralResourceConfig(ValidateEphemeralResourceConfig.Request) returns (ValidateEphemeralResourceConfig.Response);
rpc OpenEphemeralResource(OpenEphemeralResource.Request) returns (OpenEphemeralResource.Response);
rpc RenewEphemeralResource(RenewEphemeralResource.Request) returns (RenewEphemeralResource.Response);
rpc CloseEphemeralResource(CloseEphemeralResource.Request) returns (CloseEphemeralResource.Response);
// Functions
// GetFunctions returns the definitions of all functions.
rpc GetFunctions(GetFunctions.Request) returns (GetFunctions.Response);
// CallFunction runs the provider-defined function logic and returns
// the result with any diagnostics.
rpc CallFunction(CallFunction.Request) returns (CallFunction.Response);
//////// Graceful Shutdown
rpc Stop(Stop.Request) returns (Stop.Response);
}
message GetMetadata {
message Request {
}
message Response {
ServerCapabilities server_capabilities = 1;
repeated Diagnostic diagnostics = 2;
repeated DataSourceMetadata data_sources = 3;
repeated ResourceMetadata resources = 4;
// functions returns metadata for any functions.
repeated FunctionMetadata functions = 5;
// ephemeral_resources returns metadata for any ephemeral resource types
// (see the "Ephemeral Resource Lifecycle" RPCs on the Provider service).
repeated EphemeralResourceMetadata ephemeral_resources = 6;
}
message FunctionMetadata {
// name is the function name.
string name = 1;
}
message DataSourceMetadata {
string type_name = 1;
}
message ResourceMetadata {
string type_name = 1;
}
message EphemeralResourceMetadata {
string type_name = 1;
}
}
message GetProviderSchema {
message Request {
}
message Response {
// provider is the schema of the provider's own configuration block.
Schema provider = 1;
// resource_schemas maps managed resource type names to their schemas.
map<string, Schema> resource_schemas = 2;
// data_source_schemas maps data source type names to their schemas.
map<string, Schema> data_source_schemas = 3;
repeated Diagnostic diagnostics = 4;
// provider_meta is the schema of the provider_meta block, if any.
Schema provider_meta = 5;
ServerCapabilities server_capabilities = 6;
// functions is a mapping of function names to definitions.
map<string, Function> functions = 7;
// ephemeral_resource_schemas maps ephemeral resource type names to
// their schemas.
map<string, Schema> ephemeral_resource_schemas = 8;
}
}
// PrepareProviderConfig is the message pair for the PrepareProviderConfig
// RPC, in which the provider validates the given configuration and may
// return a modified copy of it for the caller to use.
message PrepareProviderConfig {
  message Request {
    DynamicValue config = 1;
  }
  message Response {
    // prepared_config is the (possibly modified) configuration to use.
    DynamicValue prepared_config = 1;
    repeated Diagnostic diagnostics = 2;
  }
}
// UpgradeResourceState is the message pair for the UpgradeResourceState
// RPC, which migrates state stored under an older schema version to the
// provider's current schema.
message UpgradeResourceState {
  // Request is the message that is sent to the provider during the
  // UpgradeResourceState RPC.
  //
  // This message intentionally does not include configuration data as any
  // configuration-based or configuration-conditional changes should occur
  // during the PlanResourceChange RPC. Additionally, the configuration is
  // not guaranteed to exist (in the case of resource destruction), be wholly
  // known, nor match the given prior state, which could lead to unexpected
  // provider behaviors for practitioners.
  message Request {
    string type_name = 1;
    // version is the schema_version number recorded in the state file
    int64 version = 2;
    // raw_state is the raw states as stored for the resource. Core does
    // not have access to the schema of prior_version, so it's the
    // provider's responsibility to interpret this value using the
    // appropriate older schema. The raw_state will be the json encoded
    // state, or a legacy flat-mapped format.
    RawState raw_state = 3;
  }
  message Response {
    // new_state is a msgpack-encoded data structure that, when interpreted with
    // the _current_ schema for this resource type, is functionally equivalent to
    // that which was given in prior_state_raw.
    DynamicValue upgraded_state = 1;
    // diagnostics describes any errors encountered during migration that could not
    // be safely resolved, and warnings about any possibly-risky assumptions made
    // in the upgrade process.
    repeated Diagnostic diagnostics = 2;
  }
}
// ValidateResourceTypeConfig is the message pair for the
// ValidateResourceTypeConfig RPC, which checks a managed resource
// configuration for errors without applying it.
message ValidateResourceTypeConfig {
  message Request {
    string type_name = 1;
    DynamicValue config = 2;
  }
  message Response {
    repeated Diagnostic diagnostics = 1;
  }
}
// ValidateDataSourceConfig is the message pair for the
// ValidateDataSourceConfig RPC, which checks a data source configuration
// for errors without reading it.
message ValidateDataSourceConfig {
  message Request {
    string type_name = 1;
    DynamicValue config = 2;
  }
  message Response {
    repeated Diagnostic diagnostics = 1;
  }
}
// Configure is the message pair for the Configure RPC, the one-time
// provider initialization performed before resource operations.
message Configure {
  message Request {
    // terraform_version is the version string of the calling core binary.
    string terraform_version = 1;
    DynamicValue config = 2;
    ClientCapabilities client_capabilities = 3;
  }
  message Response {
    repeated Diagnostic diagnostics = 1;
  }
}
// ReadResource is the message pair for the ReadResource RPC, which
// refreshes a resource's state from the remote system.
message ReadResource {
  // Request is the message that is sent to the provider during the
  // ReadResource RPC.
  //
  // This message intentionally does not include configuration data as any
  // configuration-based or configuration-conditional changes should occur
  // during the PlanResourceChange RPC. Additionally, the configuration is
  // not guaranteed to be wholly known nor match the given prior state, which
  // could lead to unexpected provider behaviors for practitioners.
  message Request {
    string type_name = 1;
    DynamicValue current_state = 2;
    // private is opaque provider-internal data stored alongside the state.
    bytes private = 3;
    DynamicValue provider_meta = 4;
    ClientCapabilities client_capabilities = 5;
  }
  message Response {
    DynamicValue new_state = 1;
    repeated Diagnostic diagnostics = 2;
    bytes private = 3;
    // deferred is set if the provider is deferring the change. If set the caller
    // needs to handle the deferral.
    Deferred deferred = 4;
  }
}
// PlanResourceChange is the message pair for the PlanResourceChange RPC,
// in which the provider produces the planned new state for a proposed
// configuration change.
message PlanResourceChange {
  message Request {
    string type_name = 1;
    DynamicValue prior_state = 2;
    DynamicValue proposed_new_state = 3;
    DynamicValue config = 4;
    // prior_private is opaque provider-internal data from the prior state.
    bytes prior_private = 5;
    DynamicValue provider_meta = 6;
    ClientCapabilities client_capabilities = 7;
  }
  message Response {
    DynamicValue planned_state = 1;
    // requires_replace lists attribute paths whose changes force the
    // resource to be replaced rather than updated in place.
    repeated AttributePath requires_replace = 2;
    bytes planned_private = 3;
    repeated Diagnostic diagnostics = 4;
    // This may be set only by the helper/schema "SDK" in the main Terraform
    // repository, to request that Terraform Core >=0.12 permit additional
    // inconsistencies that can result from the legacy SDK type system
    // and its imprecise mapping to the >=0.12 type system.
    // The change in behavior implied by this flag makes sense only for the
    // specific details of the legacy SDK type system, and are not a general
    // mechanism to avoid proper type handling in providers.
    //
    // ==== DO NOT USE THIS ====
    // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
    // ==== DO NOT USE THIS ====
    bool legacy_type_system = 5;
    // deferred is set if the provider is deferring the change. If set the caller
    // needs to handle the deferral.
    Deferred deferred = 6;
  }
}
// ApplyResourceChange is the message pair for the ApplyResourceChange RPC,
// which applies a previously planned change and returns the resulting
// new state.
message ApplyResourceChange {
  message Request {
    string type_name = 1;
    DynamicValue prior_state = 2;
    DynamicValue planned_state = 3;
    DynamicValue config = 4;
    bytes planned_private = 5;
    DynamicValue provider_meta = 6;
  }
  message Response {
    DynamicValue new_state = 1;
    bytes private = 2;
    repeated Diagnostic diagnostics = 3;
    // This may be set only by the helper/schema "SDK" in the main Terraform
    // repository, to request that Terraform Core >=0.12 permit additional
    // inconsistencies that can result from the legacy SDK type system
    // and its imprecise mapping to the >=0.12 type system.
    // The change in behavior implied by this flag makes sense only for the
    // specific details of the legacy SDK type system, and are not a general
    // mechanism to avoid proper type handling in providers.
    //
    // ==== DO NOT USE THIS ====
    // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
    // ==== DO NOT USE THIS ====
    bool legacy_type_system = 4;
  }
}
// ImportResourceState is the message pair for the ImportResourceState RPC,
// which brings an existing remote object under management given a
// provider-specific import id.
message ImportResourceState {
  message Request {
    string type_name = 1;
    // id is the provider-specific identifier of the object to import.
    string id = 2;
    ClientCapabilities client_capabilities = 3;
  }
  // ImportedResource is one object produced by the import operation.
  message ImportedResource {
    string type_name = 1;
    DynamicValue state = 2;
    bytes private = 3;
  }
  message Response {
    repeated ImportedResource imported_resources = 1;
    repeated Diagnostic diagnostics = 2;
    // deferred is set if the provider is deferring the change. If set the caller
    // needs to handle the deferral.
    Deferred deferred = 3;
  }
}
// MoveResourceState is the message pair for the MoveResourceState RPC,
// which converts state from a source resource type (possibly belonging to
// another provider) into state for a target resource type.
message MoveResourceState {
  message Request {
    // The address of the provider the resource is being moved from.
    string source_provider_address = 1;
    // The resource type that the resource is being moved from.
    string source_type_name = 2;
    // The schema version of the resource type that the resource is being
    // moved from.
    int64 source_schema_version = 3;
    // The raw state of the resource being moved. Only the json field is
    // populated, as there should be no legacy providers using the flatmap
    // format that support newly introduced RPCs.
    RawState source_state = 4;
    // The resource type that the resource is being moved to.
    string target_type_name = 5;
    // The private state of the resource being moved.
    bytes source_private = 6;
  }
  message Response {
    // The state of the resource after it has been moved.
    DynamicValue target_state = 1;
    // Any diagnostics that occurred during the move.
    repeated Diagnostic diagnostics = 2;
    // The private state of the resource after it has been moved.
    bytes target_private = 3;
  }
}
// ReadDataSource is the message pair for the ReadDataSource RPC, which
// reads a data source and returns the resulting state.
message ReadDataSource {
  message Request {
    string type_name = 1;
    DynamicValue config = 2;
    DynamicValue provider_meta = 3;
    ClientCapabilities client_capabilities = 4;
  }
  message Response {
    DynamicValue state = 1;
    repeated Diagnostic diagnostics = 2;
    // deferred is set if the provider is deferring the change. If set the caller
    // needs to handle the deferral.
    Deferred deferred = 3;
  }
}
// Provisioner is the gRPC service implemented by provisioner plugins.
service Provisioner {
  rpc GetSchema(GetProvisionerSchema.Request) returns (GetProvisionerSchema.Response);
  rpc ValidateProvisionerConfig(ValidateProvisionerConfig.Request) returns (ValidateProvisionerConfig.Response);
  // ProvisionResource streams output back to the caller as it runs.
  rpc ProvisionResource(ProvisionResource.Request) returns (stream ProvisionResource.Response);
  rpc Stop(Stop.Request) returns (Stop.Response);
}
// GetProvisionerSchema is the message pair for the provisioner GetSchema
// RPC.
message GetProvisionerSchema {
  message Request {
  }
  message Response {
    // provisioner is the configuration schema for this provisioner.
    Schema provisioner = 1;
    repeated Diagnostic diagnostics = 2;
  }
}
// ValidateProvisionerConfig is the message pair for the
// ValidateProvisionerConfig RPC, which checks a provisioner configuration
// for errors.
message ValidateProvisionerConfig {
  message Request {
    DynamicValue config = 1;
  }
  message Response {
    repeated Diagnostic diagnostics = 1;
  }
}
// ProvisionResource is the message pair for the ProvisionResource RPC.
// The response is streamed; each message carries a chunk of output.
message ProvisionResource {
  message Request {
    DynamicValue config = 1;
    // connection holds the connection configuration used to reach the
    // target resource.
    DynamicValue connection = 2;
  }
  message Response {
    // output is a chunk of human-readable provisioner output.
    string output = 1;
    repeated Diagnostic diagnostics = 2;
  }
}
// GetFunctions is the message pair for the GetFunctions RPC, which
// returns the definitions of all provider-defined functions.
message GetFunctions {
  message Request {}
  message Response {
    // functions is a mapping of function names to definitions.
    map<string, Function> functions = 1;
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 2;
  }
}
// CallFunction is the message pair for the CallFunction RPC, which runs
// the provider-defined function logic.
message CallFunction {
  message Request {
    // name is the name of the function being called.
    string name = 1;
    // arguments is the data of each function argument value.
    repeated DynamicValue arguments = 2;
  }
  message Response {
    // result is result value after running the function logic.
    DynamicValue result = 1;
    // error is any error from the function logic.
    FunctionError error = 2;
  }
}
// ValidateEphemeralResourceConfig is the message pair for the
// ValidateEphemeralResourceConfig RPC, which checks an ephemeral resource
// configuration for errors.
message ValidateEphemeralResourceConfig {
  message Request {
    string type_name = 1;
    DynamicValue config = 2;
  }
  message Response {
    repeated Diagnostic diagnostics = 1;
  }
}
// OpenEphemeralResource is the message pair for the OpenEphemeralResource
// RPC, which opens an ephemeral resource and returns its result value.
message OpenEphemeralResource {
  message Request {
    string type_name = 1;
    DynamicValue config = 2;
    ClientCapabilities client_capabilities = 3;
  }
  message Response {
    repeated Diagnostic diagnostics = 1;
    // renew_at, if set, indicates when the caller should call
    // RenewEphemeralResource for this resource.
    optional google.protobuf.Timestamp renew_at = 2;
    DynamicValue result = 3;
    // private is opaque provider-internal data, passed back on later
    // renew/close calls.
    optional bytes private = 4;
    // deferred is set if the provider is deferring the change. If set the caller
    // needs to handle the deferral.
    Deferred deferred = 5;
  }
}
// RenewEphemeralResource is the message pair for the
// RenewEphemeralResource RPC, which renews a previously opened ephemeral
// resource.
message RenewEphemeralResource {
  message Request {
    string type_name = 1;
    // private is the opaque provider-internal data from the prior
    // open/renew response.
    optional bytes private = 2;
  }
  message Response {
    repeated Diagnostic diagnostics = 1;
    // renew_at, if set, indicates when RenewEphemeralResource should be
    // called again.
    optional google.protobuf.Timestamp renew_at = 2;
    optional bytes private = 3;
  }
}
// CloseEphemeralResource is the message pair for the
// CloseEphemeralResource RPC, which releases a previously opened
// ephemeral resource.
message CloseEphemeralResource {
  message Request {
    string type_name = 1;
    // private is the opaque provider-internal data from the prior
    // open/renew response.
    optional bytes private = 2;
  }
  message Response {
    repeated Diagnostic diagnostics = 1;
  }
}

View File

@ -0,0 +1,618 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// Terraform Plugin RPC protocol version 6.6
//
// This file defines version 6.6 of the RPC protocol. To implement a plugin
// against this protocol, copy this definition into your own codebase and
// use protoc to generate stubs for your target language.
//
// This file will not be updated. Any minor versions of protocol 6 to follow
// should copy this file and modify the copy while maintaining backwards
// compatibility. Breaking changes, if any are required, will come
// in a subsequent major version with its own separate proto definition.
//
// Note that only the proto files included in a release tag of Terraform are
// official protocol releases. Proto files taken from other commits may include
// incomplete changes or features that did not make it into a final release.
// In all reasonable cases, plugin developers should take the proto file from
// the tag of the most recent release of Terraform, and not from the main
// branch or any other development branch.
//
syntax = "proto3";
option go_package = "github.com/opentofu/opentofu/internal/tfplugin6";
package tfplugin6;
// DynamicValue is an opaque encoding of terraform data, with the field name
// indicating the encoding scheme used.
message DynamicValue {
  // msgpack is the value in MessagePack encoding.
  bytes msgpack = 1;
  // json is the value in JSON encoding.
  bytes json = 2;
}
// Diagnostic describes a warning or an error returned by the provider.
message Diagnostic {
  enum Severity {
    INVALID = 0;
    ERROR = 1;
    WARNING = 2;
  }
  Severity severity = 1;
  // summary is a short description of the problem.
  string summary = 2;
  // detail is an optional longer explanation.
  string detail = 3;
  // attribute optionally points at the configuration attribute that the
  // diagnostic relates to.
  AttributePath attribute = 4;
}
// FunctionError reports a failure from provider-defined function logic.
message FunctionError {
  // text is the human-readable error message.
  string text = 1;
  // The optional function_argument records the index position of the
  // argument which caused the error.
  optional int64 function_argument = 2;
}
// AttributePath is a path through a (potentially nested) value, used to
// point diagnostics at a specific attribute or element.
message AttributePath {
  // Step is one traversal step; exactly one selector is set.
  message Step {
    oneof selector {
      // Set "attribute_name" to represent looking up an attribute
      // in the current object value.
      string attribute_name = 1;
      // Set "element_key_*" to represent looking up an element in
      // an indexable collection type.
      string element_key_string = 2;
      int64 element_key_int = 3;
    }
  }
  repeated Step steps = 1;
}
// StopProvider is the message pair for the StopProvider RPC, requesting a
// graceful cancellation of in-flight work.
message StopProvider {
  message Request {
  }
  message Response {
    // Error is an error message, or the empty string on success.
    string Error = 1;
  }
}
// RawState holds the stored state for a resource to be upgraded by the
// provider. It can be in one of two formats, the current json encoded format
// in bytes, or the legacy flatmap format as a map of strings.
message RawState {
  // json is the state in the current JSON encoding.
  bytes json = 1;
  // flatmap is the state in the legacy flat-mapped string format.
  map<string, string> flatmap = 2;
}
// StringKind indicates how a description string should be rendered:
// as plain text or as Markdown.
enum StringKind {
  PLAIN = 0;
  MARKDOWN = 1;
}
// Schema is the configuration schema for a Resource or Provider.
message Schema {
  // Block is a configuration block, holding attributes and nested blocks.
  message Block {
    int64 version = 1;
    repeated Attribute attributes = 2;
    repeated NestedBlock block_types = 3;
    string description = 4;
    StringKind description_kind = 5;
    bool deprecated = 6;
  }
  // Attribute is a single named value within a block.
  message Attribute {
    string name = 1;
    // type is an opaque encoding of the attribute's type constraint.
    bytes type = 2;
    // nested_type, if set, describes an object type instead of type.
    Object nested_type = 10;
    string description = 3;
    bool required = 4;
    bool optional = 5;
    bool computed = 6;
    bool sensitive = 7;
    StringKind description_kind = 8;
    bool deprecated = 9;
  }
  // NestedBlock describes a block type nested inside another block.
  message NestedBlock {
    enum NestingMode {
      INVALID = 0;
      SINGLE = 1;
      LIST = 2;
      SET = 3;
      MAP = 4;
      GROUP = 5;
    }
    string type_name = 1;
    Block block = 2;
    NestingMode nesting = 3;
    int64 min_items = 4;
    int64 max_items = 5;
  }
  // Object describes an object type used by Attribute.nested_type.
  message Object {
    enum NestingMode {
      INVALID = 0;
      SINGLE = 1;
      LIST = 2;
      SET = 3;
      MAP = 4;
    }
    repeated Attribute attributes = 1;
    NestingMode nesting = 3;
    // MinItems and MaxItems were never used in the protocol, and have no
    // effect on validation.
    int64 min_items = 4 [deprecated = true];
    int64 max_items = 5 [deprecated = true];
  }
  // The version of the schema.
  // Schemas are versioned, so that providers can upgrade a saved resource
  // state when the schema is changed.
  int64 version = 1;
  // Block is the top level configuration block for this schema.
  Block block = 2;
}
// Function describes the signature and documentation of a
// provider-defined function.
message Function {
  // parameters is the ordered list of positional function parameters.
  repeated Parameter parameters = 1;
  // variadic_parameter is an optional final parameter which accepts
  // zero or more argument values, in which Terraform will send an
  // ordered list of the parameter type.
  Parameter variadic_parameter = 2;
  // return is the function result.
  Return return = 3;
  // summary is the human-readable shortened documentation for the function.
  string summary = 4;
  // description is human-readable documentation for the function.
  string description = 5;
  // description_kind is the formatting of the description.
  StringKind description_kind = 6;
  // deprecation_message is human-readable documentation if the
  // function is deprecated.
  string deprecation_message = 7;
  message Parameter {
    // name is the human-readable display name for the parameter.
    string name = 1;
    // type is the type constraint for the parameter.
    bytes type = 2;
    // allow_null_value when enabled denotes that a null argument value can
    // be passed to the provider. When disabled, Terraform returns an error
    // if the argument value is null.
    bool allow_null_value = 3;
    // allow_unknown_values when enabled denotes that only wholly known
    // argument values will be passed to the provider. When disabled,
    // Terraform skips the function call entirely and assumes an unknown
    // value result from the function.
    bool allow_unknown_values = 4;
    // description is human-readable documentation for the parameter.
    string description = 5;
    // description_kind is the formatting of the description.
    StringKind description_kind = 6;
  }
  message Return {
    // type is the type constraint for the function result.
    bytes type = 1;
  }
}
// ServerCapabilities allows providers to communicate extra information
// regarding supported protocol features. This is used to indicate
// availability of certain forward-compatible changes which may be optional
// in a major protocol version, but cannot be tested for directly.
message ServerCapabilities {
  // The plan_destroy capability signals that a provider expects a call
  // to PlanResourceChange when a resource is going to be destroyed.
  bool plan_destroy = 1;
  // The get_provider_schema_optional capability indicates that this
  // provider does not require calling GetProviderSchema to operate
  // normally, and the caller can use a cached copy of the provider's
  // schema.
  bool get_provider_schema_optional = 2;
  // The move_resource_state capability signals that a provider supports the
  // MoveResourceState RPC.
  bool move_resource_state = 3;
}
// ClientCapabilities allows Terraform to publish information regarding
// supported protocol features. This is used to indicate availability of
// certain forward-compatible changes which may be optional in a major
// protocol version, but cannot be tested for directly.
message ClientCapabilities {
  // The deferral_allowed capability signals that the client is able to
  // handle deferred responses from the provider.
  // NOTE(review): presumably providers should only return a Deferred
  // response when this is set — confirm against core's handling.
  bool deferral_allowed = 1;
}
// Deferred is a message that indicates that a change is deferred for a
// reason.
message Deferred {
  // Reason is the reason for deferring the change.
  enum Reason {
    // UNKNOWN is the default value, and should not be used.
    UNKNOWN = 0;
    // RESOURCE_CONFIG_UNKNOWN is used when the config is partially unknown and the real
    // values need to be known before the change can be planned.
    RESOURCE_CONFIG_UNKNOWN = 1;
    // PROVIDER_CONFIG_UNKNOWN is used when parts of the provider configuration
    // are unknown, e.g. the provider configuration is only known after the apply is done.
    PROVIDER_CONFIG_UNKNOWN = 2;
    // ABSENT_PREREQ is used when a hard dependency has not been satisfied.
    ABSENT_PREREQ = 3;
  }
  // reason is the reason for deferring the change.
  Reason reason = 1;
}
// Provider is the gRPC service implemented by provider plugins speaking
// version 6 of the plugin protocol (package tfplugin6).
service Provider {
  //////// Information about what a provider supports/expects
  // GetMetadata returns upfront information about server capabilities and
  // supported resource types without requiring the server to instantiate all
  // schema information, which may be memory intensive. This RPC is optional,
  // where clients may receive an unimplemented RPC error. Clients should
  // ignore the error and call the GetProviderSchema RPC as a fallback.
  rpc GetMetadata(GetMetadata.Request) returns (GetMetadata.Response);
  // GetSchema returns schema information for the provider, data resources,
  // and managed resources.
  rpc GetProviderSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response);
  rpc ValidateProviderConfig(ValidateProviderConfig.Request) returns (ValidateProviderConfig.Response);
  rpc ValidateResourceConfig(ValidateResourceConfig.Request) returns (ValidateResourceConfig.Response);
  rpc ValidateDataResourceConfig(ValidateDataResourceConfig.Request) returns (ValidateDataResourceConfig.Response);
  rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response);
  //////// One-time initialization, called before other functions below
  rpc ConfigureProvider(ConfigureProvider.Request) returns (ConfigureProvider.Response);
  //////// Managed Resource Lifecycle
  rpc ReadResource(ReadResource.Request) returns (ReadResource.Response);
  rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response);
  rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response);
  rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response);
  rpc MoveResourceState(MoveResourceState.Request) returns (MoveResourceState.Response);
  rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response);
  // Functions
  // GetFunctions returns the definitions of all functions.
  rpc GetFunctions(GetFunctions.Request) returns (GetFunctions.Response);
  // CallFunction runs the provider-defined function logic and returns
  // the result with any diagnostics.
  rpc CallFunction(CallFunction.Request) returns (CallFunction.Response);
  //////// Graceful Shutdown
  rpc StopProvider(StopProvider.Request) returns (StopProvider.Response);
}
// GetMetadata is the message pair for the optional GetMetadata RPC, which
// reports server capabilities and the names of supported types without
// requiring the provider to build full schema information.
message GetMetadata {
  message Request {
  }
  message Response {
    ServerCapabilities server_capabilities = 1;
    repeated Diagnostic diagnostics = 2;
    repeated DataSourceMetadata data_sources = 3;
    repeated ResourceMetadata resources = 4;
    // functions returns metadata for any functions.
    repeated FunctionMetadata functions = 5;
  }
  message FunctionMetadata {
    // name is the function name.
    string name = 1;
  }
  message DataSourceMetadata {
    // type_name is the data source type name.
    string type_name = 1;
  }
  message ResourceMetadata {
    // type_name is the managed resource type name.
    string type_name = 1;
  }
}
// GetProviderSchema is the message pair for the GetProviderSchema RPC,
// which returns the full configuration schemas for the provider and all
// of the types it supports.
message GetProviderSchema {
  message Request {
  }
  message Response {
    // provider is the schema for the provider configuration block.
    Schema provider = 1;
    // resource_schemas maps managed resource type names to their schemas.
    map<string, Schema> resource_schemas = 2;
    // data_source_schemas maps data source type names to their schemas.
    map<string, Schema> data_source_schemas = 3;
    repeated Diagnostic diagnostics = 4;
    // provider_meta is the schema for the provider_meta block, if any.
    Schema provider_meta = 5;
    ServerCapabilities server_capabilities = 6;
    // functions is a mapping of function names to definitions.
    map<string, Function> functions = 7;
  }
}
// ValidateProviderConfig is the message pair for the
// ValidateProviderConfig RPC, which checks the provider configuration for
// errors.
message ValidateProviderConfig {
  message Request {
    DynamicValue config = 1;
  }
  message Response {
    // NOTE(review): diagnostics is deliberately numbered 2; field number 1
    // appears to be retired and must not be reused — confirm before adding
    // fields here.
    repeated Diagnostic diagnostics = 2;
  }
}
// UpgradeResourceState is the message pair for the UpgradeResourceState
// RPC, which migrates state stored under an older schema version to the
// provider's current schema.
message UpgradeResourceState {
  // Request is the message that is sent to the provider during the
  // UpgradeResourceState RPC.
  //
  // This message intentionally does not include configuration data as any
  // configuration-based or configuration-conditional changes should occur
  // during the PlanResourceChange RPC. Additionally, the configuration is
  // not guaranteed to exist (in the case of resource destruction), be wholly
  // known, nor match the given prior state, which could lead to unexpected
  // provider behaviors for practitioners.
  message Request {
    string type_name = 1;
    // version is the schema_version number recorded in the state file
    int64 version = 2;
    // raw_state is the raw states as stored for the resource. Core does
    // not have access to the schema of prior_version, so it's the
    // provider's responsibility to interpret this value using the
    // appropriate older schema. The raw_state will be the json encoded
    // state, or a legacy flat-mapped format.
    RawState raw_state = 3;
  }
  message Response {
    // new_state is a msgpack-encoded data structure that, when interpreted with
    // the _current_ schema for this resource type, is functionally equivalent to
    // that which was given in prior_state_raw.
    DynamicValue upgraded_state = 1;
    // diagnostics describes any errors encountered during migration that could not
    // be safely resolved, and warnings about any possibly-risky assumptions made
    // in the upgrade process.
    repeated Diagnostic diagnostics = 2;
  }
}
// ValidateResourceConfig is the message pair for the
// ValidateResourceConfig RPC, which checks a managed resource
// configuration for errors without applying it.
message ValidateResourceConfig {
  message Request {
    string type_name = 1;
    DynamicValue config = 2;
  }
  message Response {
    repeated Diagnostic diagnostics = 1;
  }
}
// ValidateDataResourceConfig is the message pair for the
// ValidateDataResourceConfig RPC, which checks a data source
// configuration for errors without reading it.
message ValidateDataResourceConfig {
  message Request {
    string type_name = 1;
    DynamicValue config = 2;
  }
  message Response {
    repeated Diagnostic diagnostics = 1;
  }
}
// ConfigureProvider is the message pair for the ConfigureProvider RPC,
// the one-time provider initialization performed before resource
// operations.
message ConfigureProvider {
  message Request {
    // terraform_version is the version string of the calling core binary.
    string terraform_version = 1;
    DynamicValue config = 2;
    ClientCapabilities client_capabilities = 3;
  }
  message Response {
    repeated Diagnostic diagnostics = 1;
  }
}
// ReadResource is the message pair for the ReadResource RPC, which
// refreshes a resource's state from the remote system.
message ReadResource {
  // Request is the message that is sent to the provider during the
  // ReadResource RPC.
  //
  // This message intentionally does not include configuration data as any
  // configuration-based or configuration-conditional changes should occur
  // during the PlanResourceChange RPC. Additionally, the configuration is
  // not guaranteed to be wholly known nor match the given prior state, which
  // could lead to unexpected provider behaviors for practitioners.
  message Request {
    string type_name = 1;
    DynamicValue current_state = 2;
    // private is opaque provider-internal data stored alongside the state.
    bytes private = 3;
    DynamicValue provider_meta = 4;
    ClientCapabilities client_capabilities = 5;
  }
  message Response {
    DynamicValue new_state = 1;
    repeated Diagnostic diagnostics = 2;
    bytes private = 3;
    // deferred is set if the provider is deferring the change. If set the caller
    // needs to handle the deferral.
    Deferred deferred = 4;
  }
}
// PlanResourceChange is the message pair for the PlanResourceChange RPC,
// in which the provider produces the planned new state for a proposed
// configuration change.
message PlanResourceChange {
  message Request {
    string type_name = 1;
    DynamicValue prior_state = 2;
    DynamicValue proposed_new_state = 3;
    DynamicValue config = 4;
    // prior_private is opaque provider-internal data from the prior state.
    bytes prior_private = 5;
    DynamicValue provider_meta = 6;
    ClientCapabilities client_capabilities = 7;
  }
  message Response {
    DynamicValue planned_state = 1;
    // requires_replace lists attribute paths whose changes force the
    // resource to be replaced rather than updated in place.
    repeated AttributePath requires_replace = 2;
    bytes planned_private = 3;
    repeated Diagnostic diagnostics = 4;
    // This may be set only by the helper/schema "SDK" in the main Terraform
    // repository, to request that Terraform Core >=0.12 permit additional
    // inconsistencies that can result from the legacy SDK type system
    // and its imprecise mapping to the >=0.12 type system.
    // The change in behavior implied by this flag makes sense only for the
    // specific details of the legacy SDK type system, and are not a general
    // mechanism to avoid proper type handling in providers.
    //
    // ==== DO NOT USE THIS ====
    // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
    // ==== DO NOT USE THIS ====
    bool legacy_type_system = 5;
    // deferred is set if the provider is deferring the change. If set the caller
    // needs to handle the deferral.
    Deferred deferred = 6;
  }
}
// ApplyResourceChange is the message pair for the ApplyResourceChange RPC,
// which applies a previously planned change and returns the resulting
// new state.
message ApplyResourceChange {
  message Request {
    string type_name = 1;
    DynamicValue prior_state = 2;
    DynamicValue planned_state = 3;
    DynamicValue config = 4;
    bytes planned_private = 5;
    DynamicValue provider_meta = 6;
  }
  message Response {
    DynamicValue new_state = 1;
    bytes private = 2;
    repeated Diagnostic diagnostics = 3;
    // This may be set only by the helper/schema "SDK" in the main Terraform
    // repository, to request that Terraform Core >=0.12 permit additional
    // inconsistencies that can result from the legacy SDK type system
    // and its imprecise mapping to the >=0.12 type system.
    // The change in behavior implied by this flag makes sense only for the
    // specific details of the legacy SDK type system, and are not a general
    // mechanism to avoid proper type handling in providers.
    //
    // ==== DO NOT USE THIS ====
    // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
    // ==== DO NOT USE THIS ====
    bool legacy_type_system = 4;
  }
}
// ImportResourceState is the message pair for the ImportResourceState RPC,
// which brings an existing remote object under management given a
// provider-specific import id.
message ImportResourceState {
  message Request {
    string type_name = 1;
    // id is the provider-specific identifier of the object to import.
    string id = 2;
    ClientCapabilities client_capabilities = 3;
  }
  // ImportedResource is one object produced by the import operation.
  message ImportedResource {
    string type_name = 1;
    DynamicValue state = 2;
    bytes private = 3;
  }
  message Response {
    repeated ImportedResource imported_resources = 1;
    repeated Diagnostic diagnostics = 2;
    // deferred is set if the provider is deferring the change. If set the caller
    // needs to handle the deferral.
    Deferred deferred = 3;
  }
}
// MoveResourceState is the message pair for the MoveResourceState RPC,
// which converts state from a source resource type (possibly belonging to
// another provider) into state for a target resource type.
message MoveResourceState {
  message Request {
    // The address of the provider the resource is being moved from.
    string source_provider_address = 1;
    // The resource type that the resource is being moved from.
    string source_type_name = 2;
    // The schema version of the resource type that the resource is being
    // moved from.
    int64 source_schema_version = 3;
    // The raw state of the resource being moved. Only the json field is
    // populated, as there should be no legacy providers using the flatmap
    // format that support newly introduced RPCs.
    RawState source_state = 4;
    // The resource type that the resource is being moved to.
    string target_type_name = 5;
    // The private state of the resource being moved.
    bytes source_private = 6;
  }
  message Response {
    // The state of the resource after it has been moved.
    DynamicValue target_state = 1;
    // Any diagnostics that occurred during the move.
    repeated Diagnostic diagnostics = 2;
    // The private state of the resource after it has been moved.
    bytes target_private = 3;
  }
}
// ReadDataSource is the message pair for the ReadDataSource RPC, which
// reads a data source and returns the resulting state.
message ReadDataSource {
  message Request {
    string type_name = 1;
    DynamicValue config = 2;
    DynamicValue provider_meta = 3;
    ClientCapabilities client_capabilities = 4;
  }
  message Response {
    DynamicValue state = 1;
    repeated Diagnostic diagnostics = 2;
    // deferred is set if the provider is deferring the change. If set the caller
    // needs to handle the deferral.
    Deferred deferred = 3;
  }
}
// GetFunctions is the message pair for the GetFunctions RPC, which
// returns the definitions of all provider-defined functions.
message GetFunctions {
  message Request {}
  message Response {
    // functions is a mapping of function names to definitions.
    map<string, Function> functions = 1;
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 2;
  }
}
// CallFunction is the message pair for the CallFunction RPC, which runs
// the provider-defined function logic.
message CallFunction {
  message Request {
    // name is the name of the function being called.
    string name = 1;
    // arguments is the data of each function argument value.
    repeated DynamicValue arguments = 2;
  }
  message Response {
    // result is result value after running the function logic.
    DynamicValue result = 1;
    // error is any error from the function logic.
    FunctionError error = 2;
  }
}

View File

@ -0,0 +1,682 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// Terraform Plugin RPC protocol version 6.7
//
// This file defines version 6.7 of the RPC protocol. To implement a plugin
// against this protocol, copy this definition into your own codebase and
// use protoc to generate stubs for your target language.
//
// This file will not be updated. Any minor versions of protocol 6 to follow
// should copy this file and modify the copy while maintaining backwards
// compatibility. Breaking changes, if any are required, will come
// in a subsequent major version with its own separate proto definition.
//
// Note that only the proto files included in a release tag of Terraform are
// official protocol releases. Proto files taken from other commits may include
// incomplete changes or features that did not make it into a final release.
// In all reasonable cases, plugin developers should take the proto file from
// the tag of the most recent release of Terraform, and not from the main
// branch or any other development branch.
//
syntax = "proto3";
option go_package = "github.com/opentofu/opentofu/internal/tfplugin6";
import "google/protobuf/timestamp.proto";
package tfplugin6;
// DynamicValue is an opaque encoding of terraform data, with the field name
// indicating the encoding scheme used.
message DynamicValue {
  // msgpack is the value in MessagePack encoding.
  bytes msgpack = 1;
  // json is the value in JSON encoding.
  bytes json = 2;
}
// Diagnostic describes a warning or an error returned by the provider.
message Diagnostic {
  enum Severity {
    INVALID = 0;
    ERROR = 1;
    WARNING = 2;
  }
  Severity severity = 1;
  // summary is a short description of the problem.
  string summary = 2;
  // detail is an optional longer explanation.
  string detail = 3;
  // attribute optionally points at the configuration attribute that the
  // diagnostic relates to.
  AttributePath attribute = 4;
}
// FunctionError reports a failure from provider-defined function logic.
message FunctionError {
  // text is the human-readable error message.
  string text = 1;
  // The optional function_argument records the index position of the
  // argument which caused the error.
  optional int64 function_argument = 2;
}
// AttributePath is a path through a (potentially nested) value, used to
// point diagnostics at a specific attribute or element.
message AttributePath {
  // Step is one traversal step; exactly one selector is set.
  message Step {
    oneof selector {
      // Set "attribute_name" to represent looking up an attribute
      // in the current object value.
      string attribute_name = 1;
      // Set "element_key_*" to represent looking up an element in
      // an indexable collection type.
      string element_key_string = 2;
      int64 element_key_int = 3;
    }
  }
  repeated Step steps = 1;
}
// StopProvider is the message pair for the StopProvider RPC, requesting a
// graceful cancellation of in-flight work.
message StopProvider {
  message Request {
  }
  message Response {
    // Error is an error message, or the empty string on success.
    string Error = 1;
  }
}
// RawState holds the stored state for a resource to be upgraded by the
// provider. It can be in one of two formats, the current json encoded format
// in bytes, or the legacy flatmap format as a map of strings.
message RawState {
  // json is the state in the current JSON encoding.
  bytes json = 1;
  // flatmap is the state in the legacy flat-mapped string format.
  map<string, string> flatmap = 2;
}
// StringKind indicates how a description string should be rendered:
// as plain text or as Markdown.
enum StringKind {
  PLAIN = 0;
  MARKDOWN = 1;
}
// Schema is the configuration schema for a Resource or Provider.
message Schema {
  // Block is a configuration block, holding attributes and nested blocks.
  message Block {
    int64 version = 1;
    repeated Attribute attributes = 2;
    repeated NestedBlock block_types = 3;
    string description = 4;
    StringKind description_kind = 5;
    bool deprecated = 6;
  }
  // Attribute is a single named value within a block.
  message Attribute {
    string name = 1;
    // type is an opaque encoding of the attribute's type constraint.
    bytes type = 2;
    // nested_type, if set, describes an object type instead of type.
    Object nested_type = 10;
    string description = 3;
    bool required = 4;
    bool optional = 5;
    bool computed = 6;
    bool sensitive = 7;
    StringKind description_kind = 8;
    bool deprecated = 9;
  }
  // NestedBlock describes a block type nested inside another block.
  message NestedBlock {
    enum NestingMode {
      INVALID = 0;
      SINGLE = 1;
      LIST = 2;
      SET = 3;
      MAP = 4;
      GROUP = 5;
    }
    string type_name = 1;
    Block block = 2;
    NestingMode nesting = 3;
    int64 min_items = 4;
    int64 max_items = 5;
  }
  // Object describes an object type used by Attribute.nested_type.
  message Object {
    enum NestingMode {
      INVALID = 0;
      SINGLE = 1;
      LIST = 2;
      SET = 3;
      MAP = 4;
    }
    repeated Attribute attributes = 1;
    NestingMode nesting = 3;
    // MinItems and MaxItems were never used in the protocol, and have no
    // effect on validation.
    int64 min_items = 4 [deprecated = true];
    int64 max_items = 5 [deprecated = true];
  }
  // The version of the schema.
  // Schemas are versioned, so that providers can upgrade a saved resource
  // state when the schema is changed.
  int64 version = 1;
  // Block is the top level configuration block for this schema.
  Block block = 2;
}
// Function describes the signature and documentation of a
// provider-defined function.
message Function {
  // parameters is the ordered list of positional function parameters.
  repeated Parameter parameters = 1;
  // variadic_parameter is an optional final parameter which accepts
  // zero or more argument values, in which Terraform will send an
  // ordered list of the parameter type.
  Parameter variadic_parameter = 2;
  // return is the function result.
  Return return = 3;
  // summary is the human-readable shortened documentation for the function.
  string summary = 4;
  // description is human-readable documentation for the function.
  string description = 5;
  // description_kind is the formatting of the description.
  StringKind description_kind = 6;
  // deprecation_message is human-readable documentation if the
  // function is deprecated.
  string deprecation_message = 7;
  message Parameter {
    // name is the human-readable display name for the parameter.
    string name = 1;
    // type is the type constraint for the parameter.
    bytes type = 2;
    // allow_null_value when enabled denotes that a null argument value can
    // be passed to the provider. When disabled, Terraform returns an error
    // if the argument value is null.
    bool allow_null_value = 3;
    // allow_unknown_values when enabled denotes that only wholly known
    // argument values will be passed to the provider. When disabled,
    // Terraform skips the function call entirely and assumes an unknown
    // value result from the function.
    bool allow_unknown_values = 4;
    // description is human-readable documentation for the parameter.
    string description = 5;
    // description_kind is the formatting of the description.
    StringKind description_kind = 6;
  }
  message Return {
    // type is the type constraint for the function result.
    bytes type = 1;
  }
}
// ServerCapabilities allows providers to communicate extra information
// regarding supported protocol features. This is used to indicate
// availability of certain forward-compatible changes which may be optional
// in a major protocol version, but cannot be tested for directly.
message ServerCapabilities {
  // The plan_destroy capability signals that a provider expects a call
  // to PlanResourceChange when a resource is going to be destroyed.
  bool plan_destroy = 1;
  // The get_provider_schema_optional capability indicates that this
  // provider does not require calling GetProviderSchema to operate
  // normally, and the caller can use a cached copy of the provider's
  // schema.
  bool get_provider_schema_optional = 2;
  // The move_resource_state capability signals that a provider supports the
  // MoveResourceState RPC.
  bool move_resource_state = 3;
}
// ClientCapabilities allows Terraform to publish information regarding
// supported protocol features. This is used to indicate availability of
// certain forward-compatible changes which may be optional in a major
// protocol version, but cannot be tested for directly.
message ClientCapabilities {
  // The deferral_allowed capability signals that the client is able to
  // handle deferred responses from the provider.
  bool deferral_allowed = 1;
}
// Deferred is a message that indicates that change is deferred for a reason.
message Deferred {
  // Reason is the reason for deferring the change.
  enum Reason {
    // UNKNOWN is the default value, and should not be used.
    UNKNOWN = 0;
    // RESOURCE_CONFIG_UNKNOWN is used when the config is partially unknown and the real
    // values need to be known before the change can be planned.
    RESOURCE_CONFIG_UNKNOWN = 1;
    // PROVIDER_CONFIG_UNKNOWN is used when parts of the provider configuration
    // are unknown, e.g. the provider configuration is only known after the apply is done.
    PROVIDER_CONFIG_UNKNOWN = 2;
    // ABSENT_PREREQ is used when a hard dependency has not been satisfied.
    ABSENT_PREREQ = 3;
  }
  // reason is the reason for deferring the change.
  Reason reason = 1;
}
// Provider is the gRPC service implemented by every provider plugin.
// RPCs are grouped below by lifecycle stage; each RPC's request/response
// types are defined as nested messages named after the RPC.
service Provider {
  //////// Information about what a provider supports/expects
  // GetMetadata returns upfront information about server capabilities and
  // supported resource types without requiring the server to instantiate all
  // schema information, which may be memory intensive. This RPC is optional,
  // where clients may receive an unimplemented RPC error. Clients should
  // ignore the error and call the GetProviderSchema RPC as a fallback.
  rpc GetMetadata(GetMetadata.Request) returns (GetMetadata.Response);
  // GetSchema returns schema information for the provider, data resources,
  // and managed resources.
  rpc GetProviderSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response);
  // Validate* RPCs check a configuration against the relevant schema
  // before any plan or apply takes place.
  rpc ValidateProviderConfig(ValidateProviderConfig.Request) returns (ValidateProviderConfig.Response);
  rpc ValidateResourceConfig(ValidateResourceConfig.Request) returns (ValidateResourceConfig.Response);
  rpc ValidateDataResourceConfig(ValidateDataResourceConfig.Request) returns (ValidateDataResourceConfig.Response);
  // UpgradeResourceState migrates stored state from an older schema
  // version to the current one.
  rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response);
  //////// One-time initialization, called before other functions below
  rpc ConfigureProvider(ConfigureProvider.Request) returns (ConfigureProvider.Response);
  //////// Managed Resource Lifecycle
  rpc ReadResource(ReadResource.Request) returns (ReadResource.Response);
  rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response);
  rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response);
  rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response);
  rpc MoveResourceState(MoveResourceState.Request) returns (MoveResourceState.Response);
  rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response);
  //////// Ephemeral Resource Lifecycle
  rpc ValidateEphemeralResourceConfig(ValidateEphemeralResourceConfig.Request) returns (ValidateEphemeralResourceConfig.Response);
  rpc OpenEphemeralResource(OpenEphemeralResource.Request) returns (OpenEphemeralResource.Response);
  rpc RenewEphemeralResource(RenewEphemeralResource.Request) returns (RenewEphemeralResource.Response);
  rpc CloseEphemeralResource(CloseEphemeralResource.Request) returns (CloseEphemeralResource.Response);
  // Functions
  // GetFunctions returns the definitions of all functions.
  rpc GetFunctions(GetFunctions.Request) returns (GetFunctions.Response);
  // CallFunction runs the provider-defined function logic and returns
  // the result with any diagnostics.
  rpc CallFunction(CallFunction.Request) returns (CallFunction.Response);
  //////// Graceful Shutdown
  rpc StopProvider(StopProvider.Request) returns (StopProvider.Response);
}
// GetMetadata holds the request/response types for the GetMetadata RPC,
// which reports supported types and capabilities without building full
// schemas.
message GetMetadata {
  message Request {
  }
  message Response {
    // server_capabilities reports optional protocol features supported
    // by this provider.
    ServerCapabilities server_capabilities = 1;
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 2;
    // data_sources lists the data source types this provider supports.
    repeated DataSourceMetadata data_sources = 3;
    // resources lists the managed resource types this provider supports.
    repeated ResourceMetadata resources = 4;
    // functions returns metadata for any functions.
    repeated FunctionMetadata functions = 5;
    // ephemeral_resources lists the ephemeral resource types this
    // provider supports.
    repeated EphemeralResourceMetadata ephemeral_resources = 6;
  }
  // FunctionMetadata identifies a provider-defined function by name only.
  message FunctionMetadata {
    // name is the function name.
    string name = 1;
  }
  // DataSourceMetadata identifies a data source type by name only.
  message DataSourceMetadata {
    string type_name = 1;
  }
  // ResourceMetadata identifies a managed resource type by name only.
  message ResourceMetadata {
    string type_name = 1;
  }
  // EphemeralResourceMetadata identifies an ephemeral resource type by
  // name only.
  message EphemeralResourceMetadata {
    string type_name = 1;
  }
}
// GetProviderSchema holds the request/response types for the
// GetProviderSchema RPC, which returns every schema the provider defines.
message GetProviderSchema {
  message Request {
  }
  message Response {
    // provider is the schema of the provider's own configuration block.
    Schema provider = 1;
    // resource_schemas maps managed resource type names to their schemas.
    map<string, Schema> resource_schemas = 2;
    // data_source_schemas maps data source type names to their schemas.
    map<string, Schema> data_source_schemas = 3;
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 4;
    // provider_meta is the schema of the provider_meta block, if any.
    Schema provider_meta = 5;
    // server_capabilities reports optional protocol features supported
    // by this provider.
    ServerCapabilities server_capabilities = 6;
    // functions is a mapping of function names to definitions.
    map<string, Function> functions = 7;
    // ephemeral_resource_schemas maps ephemeral resource type names to
    // their schemas.
    map<string, Schema> ephemeral_resource_schemas = 8;
  }
}
// ValidateProviderConfig holds the request/response types for the
// ValidateProviderConfig RPC, which checks a provider configuration
// against the provider schema.
message ValidateProviderConfig {
  message Request {
    // config is the provider configuration to validate.
    DynamicValue config = 1;
  }
  message Response {
    // diagnostics is any warnings or errors.
    // NOTE(review): field number 1 is skipped here, presumably left over
    // from an earlier protocol revision — it must not be reused.
    repeated Diagnostic diagnostics = 2;
  }
}
// UpgradeResourceState holds the request/response types for the
// UpgradeResourceState RPC, which migrates stored state to the current
// schema version.
message UpgradeResourceState {
  // Request is the message that is sent to the provider during the
  // UpgradeResourceState RPC.
  //
  // This message intentionally does not include configuration data as any
  // configuration-based or configuration-conditional changes should occur
  // during the PlanResourceChange RPC. Additionally, the configuration is
  // not guaranteed to exist (in the case of resource destruction), be wholly
  // known, nor match the given prior state, which could lead to unexpected
  // provider behaviors for practitioners.
  message Request {
    // type_name is the managed resource type whose state is upgraded.
    string type_name = 1;
    // version is the schema_version number recorded in the state file
    int64 version = 2;
    // raw_state is the raw states as stored for the resource. Core does
    // not have access to the schema of prior_version, so it's the
    // provider's responsibility to interpret this value using the
    // appropriate older schema. The raw_state will be the json encoded
    // state, or a legacy flat-mapped format.
    RawState raw_state = 3;
  }
  message Response {
    // new_state is a msgpack-encoded data structure that, when interpreted with
    // the _current_ schema for this resource type, is functionally equivalent to
    // that which was given in prior_state_raw.
    DynamicValue upgraded_state = 1;
    // diagnostics describes any errors encountered during migration that could not
    // be safely resolved, and warnings about any possibly-risky assumptions made
    // in the upgrade process.
    repeated Diagnostic diagnostics = 2;
  }
}
// ValidateResourceConfig holds the request/response types for the
// ValidateResourceConfig RPC, which checks a managed resource
// configuration against its schema.
message ValidateResourceConfig {
  message Request {
    // type_name is the managed resource type being validated.
    string type_name = 1;
    // config is the resource configuration to validate.
    DynamicValue config = 2;
  }
  message Response {
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 1;
  }
}
// ValidateDataResourceConfig holds the request/response types for the
// ValidateDataResourceConfig RPC, which checks a data source
// configuration against its schema.
message ValidateDataResourceConfig {
  message Request {
    // type_name is the data source type being validated.
    string type_name = 1;
    // config is the data source configuration to validate.
    DynamicValue config = 2;
  }
  message Response {
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 1;
  }
}
// ConfigureProvider holds the request/response types for the
// ConfigureProvider RPC, the one-time provider initialization that
// precedes the lifecycle RPCs.
message ConfigureProvider {
  message Request {
    // terraform_version is the version string of the calling client.
    string terraform_version = 1;
    // config is the provider configuration to apply.
    DynamicValue config = 2;
    // client_capabilities reports optional protocol features supported
    // by the caller.
    ClientCapabilities client_capabilities = 3;
  }
  message Response {
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 1;
  }
}
// ReadResource holds the request/response types for the ReadResource RPC,
// which refreshes a managed resource's state from the remote system.
message ReadResource {
  // Request is the message that is sent to the provider during the
  // ReadResource RPC.
  //
  // This message intentionally does not include configuration data as any
  // configuration-based or configuration-conditional changes should occur
  // during the PlanResourceChange RPC. Additionally, the configuration is
  // not guaranteed to be wholly known nor match the given prior state, which
  // could lead to unexpected provider behaviors for practitioners.
  message Request {
    // type_name is the managed resource type being read.
    string type_name = 1;
    // current_state is the most recently stored state for the resource.
    DynamicValue current_state = 2;
    // private is provider-internal state carried alongside the resource.
    bytes private = 3;
    // provider_meta is the provider_meta block value, if any.
    DynamicValue provider_meta = 4;
    // client_capabilities reports optional protocol features supported
    // by the caller.
    ClientCapabilities client_capabilities = 5;
  }
  message Response {
    // new_state is the refreshed state of the resource.
    DynamicValue new_state = 1;
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 2;
    // private is the provider-internal state to store going forward.
    bytes private = 3;
    // deferred is set if the provider is deferring the change. If set the caller
    // needs to handle the deferral.
    Deferred deferred = 4;
  }
}
// PlanResourceChange holds the request/response types for the
// PlanResourceChange RPC, where the provider finalizes the planned new
// state for a resource change.
message PlanResourceChange {
  message Request {
    // type_name is the managed resource type being planned.
    string type_name = 1;
    // prior_state is the resource's current stored state.
    DynamicValue prior_state = 2;
    // proposed_new_state is the caller's merge of prior state and config.
    DynamicValue proposed_new_state = 3;
    // config is the resource configuration as written.
    DynamicValue config = 4;
    // prior_private is the provider-internal state stored previously.
    bytes prior_private = 5;
    // provider_meta is the provider_meta block value, if any.
    DynamicValue provider_meta = 6;
    // client_capabilities reports optional protocol features supported
    // by the caller.
    ClientCapabilities client_capabilities = 7;
  }
  message Response {
    // planned_state is the state the provider expects after apply.
    DynamicValue planned_state = 1;
    // requires_replace lists attribute paths whose changes force the
    // resource to be replaced rather than updated in place.
    repeated AttributePath requires_replace = 2;
    // planned_private is the provider-internal state to carry to apply.
    bytes planned_private = 3;
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 4;
    // This may be set only by the helper/schema "SDK" in the main Terraform
    // repository, to request that Terraform Core >=0.12 permit additional
    // inconsistencies that can result from the legacy SDK type system
    // and its imprecise mapping to the >=0.12 type system.
    // The change in behavior implied by this flag makes sense only for the
    // specific details of the legacy SDK type system, and are not a general
    // mechanism to avoid proper type handling in providers.
    //
    // ==== DO NOT USE THIS ====
    // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
    // ==== DO NOT USE THIS ====
    bool legacy_type_system = 5;
    // deferred is set if the provider is deferring the change. If set the caller
    // needs to handle the deferral.
    Deferred deferred = 6;
  }
}
// ApplyResourceChange holds the request/response types for the
// ApplyResourceChange RPC, which carries out a previously planned change.
message ApplyResourceChange {
  message Request {
    // type_name is the managed resource type being changed.
    string type_name = 1;
    // prior_state is the resource's state before the change.
    DynamicValue prior_state = 2;
    // planned_state is the state produced by PlanResourceChange.
    DynamicValue planned_state = 3;
    // config is the resource configuration as written.
    DynamicValue config = 4;
    // planned_private is the provider-internal state from the plan.
    bytes planned_private = 5;
    // provider_meta is the provider_meta block value, if any.
    DynamicValue provider_meta = 6;
  }
  message Response {
    // new_state is the actual state of the resource after the change.
    DynamicValue new_state = 1;
    // private is the provider-internal state to store going forward.
    bytes private = 2;
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 3;
    // This may be set only by the helper/schema "SDK" in the main Terraform
    // repository, to request that Terraform Core >=0.12 permit additional
    // inconsistencies that can result from the legacy SDK type system
    // and its imprecise mapping to the >=0.12 type system.
    // The change in behavior implied by this flag makes sense only for the
    // specific details of the legacy SDK type system, and are not a general
    // mechanism to avoid proper type handling in providers.
    //
    // ==== DO NOT USE THIS ====
    // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
    // ==== DO NOT USE THIS ====
    bool legacy_type_system = 4;
  }
}
// ImportResourceState holds the request/response types for the
// ImportResourceState RPC, which brings an existing remote object under
// management by its ID.
message ImportResourceState {
  message Request {
    // type_name is the managed resource type being imported.
    string type_name = 1;
    // id is the provider-specific identifier of the object to import.
    string id = 2;
    // client_capabilities reports optional protocol features supported
    // by the caller.
    ClientCapabilities client_capabilities = 3;
  }
  // ImportedResource is one resource produced by the import; a single
  // import may yield several (hence the repeated field below).
  message ImportedResource {
    // type_name is the type of this imported resource.
    string type_name = 1;
    // state is the imported resource's state.
    DynamicValue state = 2;
    // private is provider-internal state to store with the resource.
    bytes private = 3;
  }
  message Response {
    // imported_resources are the resources produced by the import.
    repeated ImportedResource imported_resources = 1;
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 2;
    // deferred is set if the provider is deferring the change. If set the caller
    // needs to handle the deferral.
    Deferred deferred = 3;
  }
}
// MoveResourceState holds the request/response types for the
// MoveResourceState RPC, which converts state from one resource type
// (possibly from another provider) to this provider's target type.
message MoveResourceState {
  message Request {
    // The address of the provider the resource is being moved from.
    string source_provider_address = 1;
    // The resource type that the resource is being moved from.
    string source_type_name = 2;
    // The schema version of the resource type that the resource is being
    // moved from.
    int64 source_schema_version = 3;
    // The raw state of the resource being moved. Only the json field is
    // populated, as there should be no legacy providers using the flatmap
    // format that support newly introduced RPCs.
    RawState source_state = 4;
    // The resource type that the resource is being moved to.
    string target_type_name = 5;
    // The private state of the resource being moved.
    bytes source_private = 6;
  }
  message Response {
    // The state of the resource after it has been moved.
    DynamicValue target_state = 1;
    // Any diagnostics that occurred during the move.
    repeated Diagnostic diagnostics = 2;
    // The private state of the resource after it has been moved.
    bytes target_private = 3;
  }
}
// ReadDataSource holds the request/response types for the ReadDataSource
// RPC, which evaluates a data source and returns its resulting state.
message ReadDataSource {
  message Request {
    // type_name is the data source type being read.
    string type_name = 1;
    // config is the data source configuration.
    DynamicValue config = 2;
    // provider_meta is the provider_meta block value, if any.
    DynamicValue provider_meta = 3;
    // client_capabilities reports optional protocol features supported
    // by the caller.
    ClientCapabilities client_capabilities = 4;
  }
  message Response {
    // state is the resulting data source state.
    DynamicValue state = 1;
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 2;
    // deferred is set if the provider is deferring the change. If set the caller
    // needs to handle the deferral.
    Deferred deferred = 3;
  }
}
// GetFunctions holds the request/response types for the GetFunctions RPC,
// which returns the full definitions of all provider functions.
message GetFunctions {
  message Request {}
  message Response {
    // functions is a mapping of function names to definitions.
    map<string, Function> functions = 1;
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 2;
  }
}
// CallFunction holds the request/response types for the CallFunction RPC,
// which executes a provider-defined function.
message CallFunction {
  message Request {
    // name is the name of the function being called.
    string name = 1;
    // arguments is the data of each function argument value.
    repeated DynamicValue arguments = 2;
  }
  message Response {
    // result is result value after running the function logic.
    DynamicValue result = 1;
    // error is any error from the function logic.
    FunctionError error = 2;
  }
}
// ValidateEphemeralResourceConfig holds the request/response types for
// the ValidateEphemeralResourceConfig RPC, which checks an ephemeral
// resource configuration against its schema.
message ValidateEphemeralResourceConfig {
  message Request {
    // type_name is the ephemeral resource type being validated.
    string type_name = 1;
    // config is the ephemeral resource configuration to validate.
    DynamicValue config = 2;
  }
  message Response {
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 1;
  }
}
// OpenEphemeralResource holds the request/response types for the
// OpenEphemeralResource RPC, which starts the lifecycle of an ephemeral
// resource instance.
message OpenEphemeralResource {
  message Request {
    // type_name is the ephemeral resource type being opened.
    string type_name = 1;
    // config is the ephemeral resource configuration.
    DynamicValue config = 2;
    // client_capabilities reports optional protocol features supported
    // by the caller.
    ClientCapabilities client_capabilities = 3;
  }
  message Response {
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 1;
    // renew_at, if set, is the time by which the caller should invoke
    // RenewEphemeralResource for this instance.
    optional google.protobuf.Timestamp renew_at = 2;
    // result is the opened ephemeral resource's value.
    DynamicValue result = 3;
    // private is provider-internal state passed back on Renew/Close.
    optional bytes private = 4;
    // deferred is set if the provider is deferring the change. If set the caller
    // needs to handle the deferral.
    Deferred deferred = 5;
  }
}
// RenewEphemeralResource holds the request/response types for the
// RenewEphemeralResource RPC, which extends the lifetime of an ephemeral
// resource instance previously opened.
message RenewEphemeralResource {
  message Request {
    // type_name is the ephemeral resource type being renewed.
    string type_name = 1;
    // private is the provider-internal state from Open (or a prior Renew).
    optional bytes private = 2;
  }
  message Response {
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 1;
    // renew_at, if set, is the time by which the caller should renew again.
    optional google.protobuf.Timestamp renew_at = 2;
    // private is the updated provider-internal state for the next
    // Renew/Close call.
    optional bytes private = 3;
  }
}
// CloseEphemeralResource holds the request/response types for the
// CloseEphemeralResource RPC, which ends the lifecycle of an ephemeral
// resource instance.
message CloseEphemeralResource {
  message Request {
    // type_name is the ephemeral resource type being closed.
    string type_name = 1;
    // private is the provider-internal state from Open/Renew.
    optional bytes private = 2;
  }
  message Response {
    // diagnostics is any warnings or errors.
    repeated Diagnostic diagnostics = 1;
  }
}

8
go.mod
View File

@ -31,7 +31,6 @@ require (
github.com/dylanmei/winrmtest v0.0.0-20210303004826-fbc9ae56efb6
github.com/go-test/deep v1.0.3
github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1
github.com/golang/mock v1.6.0
github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.6.0
github.com/googleapis/gax-go/v2 v2.12.0
@ -47,12 +46,12 @@ require (
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/go-plugin v1.4.3
github.com/hashicorp/go-retryablehttp v0.7.7
github.com/hashicorp/go-tfe v1.36.0
github.com/hashicorp/go-tfe v1.53.0
github.com/hashicorp/go-uuid v1.0.3
github.com/hashicorp/go-version v1.6.0
github.com/hashicorp/hcl v1.0.0
github.com/hashicorp/hcl/v2 v2.20.1
github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d
github.com/hashicorp/jsonapi v1.3.1
github.com/hashicorp/terraform-svchost v0.1.1
github.com/jmespath/go-jmespath v0.4.0
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
@ -90,6 +89,7 @@ require (
go.opentelemetry.io/otel v1.21.0
go.opentelemetry.io/otel/sdk v1.21.0
go.opentelemetry.io/otel/trace v1.21.0
go.uber.org/mock v0.4.0
golang.org/x/crypto v0.31.0
golang.org/x/exp v0.0.0-20230905200255-921286631fa9
golang.org/x/mod v0.17.0
@ -193,7 +193,7 @@ require (
github.com/hashicorp/go-safetemp v1.0.0 // indirect
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/hashicorp/go-slug v0.12.2 // indirect
github.com/hashicorp/go-slug v0.15.0 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/golang-lru v0.5.1 // indirect
github.com/hashicorp/serf v0.9.6 // indirect

19
go.sum
View File

@ -526,7 +526,6 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -691,14 +690,14 @@ github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-slug v0.12.2 h1:Gb6nxnV5GI1UVa3aLJGUj66J8AOZFnjIoYalNCp2Cbo=
github.com/hashicorp/go-slug v0.12.2/go.mod h1:JZVtycnZZbiJ4oxpJ/zfhyfBD8XxT4f0uOSyjNLCqFY=
github.com/hashicorp/go-slug v0.15.0 h1:AhMnE6JIyW0KoDJlmRDwv4xd52a5ZK3VdioQ7SMmZhI=
github.com/hashicorp/go-slug v0.15.0/go.mod h1:THWVTAXwJEinbsp4/bBRcmbaO5EYNLTqxbG4tZ3gCYQ=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-tfe v1.36.0 h1:Wq73gjjDo/f9gkKQ5MVSb+4NNJ6T7c5MVTivA0s/bZ0=
github.com/hashicorp/go-tfe v1.36.0/go.mod h1:awOuTZ4K9F1EJsKBIoxonJlb7Axn3PIb8YeBLtm/G/0=
github.com/hashicorp/go-tfe v1.53.0 h1:FlmR+45MB9rTosra2ZGPp0XtdxiRTmdTAJAgX94WK9k=
github.com/hashicorp/go-tfe v1.53.0/go.mod h1:XnTtBj3tVQ4uFkcFsv8Grn+O1CVcIcceL1uc2AgUcaU=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
@ -714,8 +713,8 @@ github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d h1:9ARUJJ1VVynB176G1HCwleORqCaXm/Vx0uUi0dL26I0=
github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d/go.mod h1:Yog5+CPEM3c99L1CL2CFCYoSzgWm5vTU58idbRUaLik=
github.com/hashicorp/jsonapi v1.3.1 h1:GtPvnmcWgYwCuDGvYT5VZBHcUyFdq9lSyCzDjn1DdPo=
github.com/hashicorp/jsonapi v1.3.1/go.mod h1:kWfdn49yCjQvbpnvY1dxxAuAFzISwrrMDQOcu6NsFoM=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.3.0 h1:8+567mCcFDnS5ADl7lrpxPMWiFCElyUEeW0gtj34fMA=
@ -1014,8 +1013,8 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F
github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.194/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.588 h1:DYtBXB7sVc3EOW5horg8j55cLZynhsLYhHrvQ/jXKKM=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.588/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y=
@ -1106,6 +1105,8 @@ go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v8
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=

View File

@ -1,5 +1,10 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: server_test.go
//
// Generated by this command:
//
// mockgen -package http -source server_test.go -destination mock_server_test.go
//
// Package http is a generated GoMock package.
package http
@ -8,7 +13,7 @@ import (
http "net/http"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
gomock "go.uber.org/mock/gomock"
)
// MockHttpServerCallback is a mock of HttpServerCallback interface.
@ -41,7 +46,7 @@ func (m *MockHttpServerCallback) StateDELETE(req *http.Request) {
}
// StateDELETE indicates an expected call of StateDELETE.
func (mr *MockHttpServerCallbackMockRecorder) StateDELETE(req interface{}) *gomock.Call {
func (mr *MockHttpServerCallbackMockRecorder) StateDELETE(req any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDELETE", reflect.TypeOf((*MockHttpServerCallback)(nil).StateDELETE), req)
}
@ -53,7 +58,7 @@ func (m *MockHttpServerCallback) StateGET(req *http.Request) {
}
// StateGET indicates an expected call of StateGET.
func (mr *MockHttpServerCallbackMockRecorder) StateGET(req interface{}) *gomock.Call {
func (mr *MockHttpServerCallbackMockRecorder) StateGET(req any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGET", reflect.TypeOf((*MockHttpServerCallback)(nil).StateGET), req)
}
@ -65,7 +70,7 @@ func (m *MockHttpServerCallback) StateLOCK(req *http.Request) {
}
// StateLOCK indicates an expected call of StateLOCK.
func (mr *MockHttpServerCallbackMockRecorder) StateLOCK(req interface{}) *gomock.Call {
func (mr *MockHttpServerCallbackMockRecorder) StateLOCK(req any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateLOCK", reflect.TypeOf((*MockHttpServerCallback)(nil).StateLOCK), req)
}
@ -77,7 +82,7 @@ func (m *MockHttpServerCallback) StatePOST(req *http.Request) {
}
// StatePOST indicates an expected call of StatePOST.
func (mr *MockHttpServerCallbackMockRecorder) StatePOST(req interface{}) *gomock.Call {
func (mr *MockHttpServerCallbackMockRecorder) StatePOST(req any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatePOST", reflect.TypeOf((*MockHttpServerCallback)(nil).StatePOST), req)
}
@ -89,7 +94,7 @@ func (m *MockHttpServerCallback) StateUNLOCK(req *http.Request) {
}
// StateUNLOCK indicates an expected call of StateUNLOCK.
func (mr *MockHttpServerCallbackMockRecorder) StateUNLOCK(req interface{}) *gomock.Call {
func (mr *MockHttpServerCallbackMockRecorder) StateUNLOCK(req any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateUNLOCK", reflect.TypeOf((*MockHttpServerCallback)(nil).StateUNLOCK), req)
}

View File

@ -5,7 +5,7 @@
package http
//go:generate go run github.com/golang/mock/mockgen -package $GOPACKAGE -source $GOFILE -destination mock_$GOFILE
//go:generate go run go.uber.org/mock/mockgen -package $GOPACKAGE -source $GOFILE -destination mock_$GOFILE
import (
"context"
@ -27,13 +27,13 @@ import (
"syscall"
"testing"
"github.com/golang/mock/gomock"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/backend"
"github.com/opentofu/opentofu/internal/configs"
"github.com/opentofu/opentofu/internal/encryption"
"github.com/opentofu/opentofu/internal/states"
"github.com/zclconf/go-cty/cty"
"go.uber.org/mock/gomock"
)
const sampleState = `

View File

@ -15,12 +15,12 @@ import (
"testing"
"time"
gomock "github.com/golang/mock/gomock"
"github.com/google/go-cmp/cmp"
tfe "github.com/hashicorp/go-tfe"
mocks "github.com/hashicorp/go-tfe/mocks"
version "github.com/hashicorp/go-version"
"github.com/mitchellh/cli"
gomock "go.uber.org/mock/gomock"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/backend"

View File

@ -11,10 +11,10 @@ import (
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/hashicorp/go-tfe"
tfemocks "github.com/hashicorp/go-tfe/mocks"
"github.com/mitchellh/cli"
"go.uber.org/mock/gomock"
)
func MockAllRunEvents(t *testing.T, client *tfe.Client) (fullRunID string, emptyRunID string) {

View File

@ -11,9 +11,9 @@ import (
"strings"
"testing"
"github.com/golang/mock/gomock"
"github.com/hashicorp/go-tfe"
tfemocks "github.com/hashicorp/go-tfe/mocks"
"go.uber.org/mock/gomock"
)
func MockAllTaskStages(t *testing.T, client *tfe.Client) (RunID string) {

View File

@ -3,6 +3,9 @@
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
//
//nolint:revive,staticcheck //Disabling because we want to just stub out some methods
package cloud
import (
@ -179,6 +182,8 @@ type MockConfigurationVersions struct {
uploadURLs map[string]*tfe.ConfigurationVersion
}
var _ tfe.ConfigurationVersions = &MockConfigurationVersions{}
func newMockConfigurationVersions(client *MockClient) *MockConfigurationVersions {
return &MockConfigurationVersions{
client: client,
@ -268,6 +273,18 @@ func (m *MockConfigurationVersions) Download(ctx context.Context, cvID string) (
panic("not implemented")
}
func (m *MockConfigurationVersions) SoftDeleteBackingData(ctx context.Context, svID string) error {
panic("not implemented")
}
func (m *MockConfigurationVersions) RestoreBackingData(ctx context.Context, svID string) error {
panic("not implemented")
}
func (m *MockConfigurationVersions) PermanentlyDeleteBackingData(ctx context.Context, svID string) error {
panic("not implemented")
}
type MockCostEstimates struct {
client *MockClient
Estimations map[string]*tfe.CostEstimate
@ -355,6 +372,8 @@ type MockOrganizations struct {
organizations map[string]*tfe.Organization
}
var _ tfe.Organizations = &MockOrganizations{}
func newMockOrganizations(client *MockClient) *MockOrganizations {
return &MockOrganizations{
client: client,
@ -479,6 +498,30 @@ func (m *MockOrganizations) ReadRunQueue(ctx context.Context, name string, optio
return rq, nil
}
func (m *MockOrganizations) ReadDataRetentionPolicy(ctx context.Context, organization string) (*tfe.DataRetentionPolicy, error) {
panic("not implemented")
}
func (m *MockOrganizations) ReadDataRetentionPolicyChoice(ctx context.Context, organization string) (*tfe.DataRetentionPolicyChoice, error) {
panic("not implemented")
}
func (m *MockOrganizations) SetDataRetentionPolicy(ctx context.Context, organization string, options tfe.DataRetentionPolicySetOptions) (*tfe.DataRetentionPolicy, error) {
panic("not implemented")
}
func (m *MockOrganizations) SetDataRetentionPolicyDeleteOlder(ctx context.Context, organization string, options tfe.DataRetentionPolicyDeleteOlderSetOptions) (*tfe.DataRetentionPolicyDeleteOlder, error) {
panic("not implemented")
}
func (m *MockOrganizations) SetDataRetentionPolicyDontDelete(ctx context.Context, organization string, options tfe.DataRetentionPolicyDontDeleteSetOptions) (*tfe.DataRetentionPolicyDontDelete, error) {
panic("not implemented")
}
func (m *MockOrganizations) DeleteDataRetentionPolicy(ctx context.Context, organization string) error {
panic("not implemented")
}
type MockRedactedPlans struct {
client *MockClient
redactedPlans map[string][]byte
@ -1343,6 +1386,8 @@ type MockStateVersions struct {
outputStates map[string][]byte
}
var _ tfe.StateVersions = &MockStateVersions{}
func newMockStateVersions(client *MockClient) *MockStateVersions {
return &MockStateVersions{
client: client,
@ -1452,6 +1497,18 @@ func (m *MockStateVersions) ListOutputs(ctx context.Context, svID string, option
panic("not implemented")
}
func (m *MockStateVersions) SoftDeleteBackingData(ctx context.Context, svID string) error {
panic("not implemented")
}
func (m *MockStateVersions) RestoreBackingData(ctx context.Context, svID string) error {
panic("not implemented")
}
func (m *MockStateVersions) PermanentlyDeleteBackingData(ctx context.Context, svID string) error {
panic("not implemented")
}
type MockStateVersionOutputs struct {
client *MockClient
outputs map[string]*tfe.StateVersionOutput
@ -1560,6 +1617,8 @@ type MockWorkspaces struct {
workspaceNames map[string]*tfe.Workspace
}
var _ tfe.Workspaces = &MockWorkspaces{}
func newMockWorkspaces(client *MockClient) *MockWorkspaces {
return &MockWorkspaces{
client: client,
@ -1935,6 +1994,30 @@ func (m *MockWorkspaces) RemoveTags(ctx context.Context, workspaceID string, opt
panic("not implemented")
}
// ReadDataRetentionPolicy is an unimplemented stub present only to satisfy
// the tfe.Workspaces interface; it panics if called.
func (m *MockWorkspaces) ReadDataRetentionPolicy(ctx context.Context, workspaceID string) (*tfe.DataRetentionPolicy, error) {
	panic("not implemented")
}

// ReadDataRetentionPolicyChoice is an unimplemented stub present only to
// satisfy the tfe.Workspaces interface; it panics if called.
func (m *MockWorkspaces) ReadDataRetentionPolicyChoice(ctx context.Context, workspaceID string) (*tfe.DataRetentionPolicyChoice, error) {
	panic("not implemented")
}

// SetDataRetentionPolicy is an unimplemented stub present only to satisfy
// the tfe.Workspaces interface; it panics if called.
func (m *MockWorkspaces) SetDataRetentionPolicy(ctx context.Context, workspaceID string, options tfe.DataRetentionPolicySetOptions) (*tfe.DataRetentionPolicy, error) {
	panic("not implemented")
}

// SetDataRetentionPolicyDeleteOlder is an unimplemented stub present only to
// satisfy the tfe.Workspaces interface; it panics if called.
func (m *MockWorkspaces) SetDataRetentionPolicyDeleteOlder(ctx context.Context, workspaceID string, options tfe.DataRetentionPolicyDeleteOlderSetOptions) (*tfe.DataRetentionPolicyDeleteOlder, error) {
	panic("not implemented")
}

// SetDataRetentionPolicyDontDelete is an unimplemented stub present only to
// satisfy the tfe.Workspaces interface; it panics if called.
func (m *MockWorkspaces) SetDataRetentionPolicyDontDelete(ctx context.Context, workspaceID string, options tfe.DataRetentionPolicyDontDeleteSetOptions) (*tfe.DataRetentionPolicyDontDelete, error) {
	panic("not implemented")
}

// DeleteDataRetentionPolicy is an unimplemented stub present only to satisfy
// the tfe.Workspaces interface; it panics if called.
func (m *MockWorkspaces) DeleteDataRetentionPolicy(ctx context.Context, workspaceID string) error {
	panic("not implemented")
}
const alphanumeric = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
func GenerateID(s string) string {

View File

@ -13,9 +13,9 @@ import (
"io"
"testing"
"github.com/golang/mock/gomock"
"github.com/opentofu/opentofu/internal/cloudplugin/cloudproto1"
"github.com/opentofu/opentofu/internal/cloudplugin/mock_cloudproto1"
"go.uber.org/mock/gomock"
)
var mockError = "this is a mock error"

View File

@ -3,6 +3,6 @@
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
//go:generate go run github.com/golang/mock/mockgen -destination mock.go github.com/opentofu/opentofu/internal/cloudplugin/cloudproto1 CommandServiceClient,CommandService_ExecuteClient
//go:generate go run go.uber.org/mock/mockgen -destination mock.go github.com/opentofu/opentofu/internal/cloudplugin/cloudproto1 CommandServiceClient,CommandService_ExecuteClient
package mock_cloudproto1

View File

@ -1,5 +1,10 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/opentofu/opentofu/internal/cloudplugin/cloudproto1 (interfaces: CommandServiceClient,CommandService_ExecuteClient)
//
// Generated by this command:
//
// mockgen -destination mock.go github.com/opentofu/opentofu/internal/cloudplugin/cloudproto1 CommandServiceClient,CommandService_ExecuteClient
//
// Package mock_cloudproto1 is a generated GoMock package.
package mock_cloudproto1
@ -8,8 +13,8 @@ import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
cloudproto1 "github.com/opentofu/opentofu/internal/cloudplugin/cloudproto1"
gomock "go.uber.org/mock/gomock"
grpc "google.golang.org/grpc"
metadata "google.golang.org/grpc/metadata"
)
@ -40,7 +45,7 @@ func (m *MockCommandServiceClient) EXPECT() *MockCommandServiceClientMockRecorde
// Execute mocks base method.
func (m *MockCommandServiceClient) Execute(arg0 context.Context, arg1 *cloudproto1.CommandRequest, arg2 ...grpc.CallOption) (cloudproto1.CommandService_ExecuteClient, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -51,9 +56,9 @@ func (m *MockCommandServiceClient) Execute(arg0 context.Context, arg1 *cloudprot
}
// Execute indicates an expected call of Execute.
func (mr *MockCommandServiceClientMockRecorder) Execute(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockCommandServiceClientMockRecorder) Execute(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Execute", reflect.TypeOf((*MockCommandServiceClient)(nil).Execute), varargs...)
}
@ -139,7 +144,7 @@ func (mr *MockCommandService_ExecuteClientMockRecorder) Recv() *gomock.Call {
}
// RecvMsg mocks base method.
func (m *MockCommandService_ExecuteClient) RecvMsg(arg0 interface{}) error {
func (m *MockCommandService_ExecuteClient) RecvMsg(arg0 any) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RecvMsg", arg0)
ret0, _ := ret[0].(error)
@ -147,13 +152,13 @@ func (m *MockCommandService_ExecuteClient) RecvMsg(arg0 interface{}) error {
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockCommandService_ExecuteClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call {
func (mr *MockCommandService_ExecuteClientMockRecorder) RecvMsg(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockCommandService_ExecuteClient)(nil).RecvMsg), arg0)
}
// SendMsg mocks base method.
func (m *MockCommandService_ExecuteClient) SendMsg(arg0 interface{}) error {
func (m *MockCommandService_ExecuteClient) SendMsg(arg0 any) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SendMsg", arg0)
ret0, _ := ret[0].(error)
@ -161,7 +166,7 @@ func (m *MockCommandService_ExecuteClient) SendMsg(arg0 interface{}) error {
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockCommandService_ExecuteClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call {
func (mr *MockCommandService_ExecuteClientMockRecorder) SendMsg(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockCommandService_ExecuteClient)(nil).SendMsg), arg0)
}

View File

@ -392,6 +392,10 @@ Options:
-show-sensitive If specified, sensitive values will be displayed.
-json Produce output in a machine-readable JSON format,
suitable for use in text editor integrations and
other automated systems. Always disables color.
If you don't provide a saved plan file then this command will also accept
all of the plan-customization options accepted by the tofu plan command.
For more information on those options, run:

View File

@ -310,6 +310,10 @@ Other Options:
information.
-show-sensitive If specified, sensitive values will be displayed.
-json Produce output in a machine-readable JSON format,
suitable for use in text editor integrations and
other automated systems. Always disables color.
`
return strings.TrimSpace(helpText)
}

View File

@ -236,6 +236,10 @@ Options:
a file. If "terraform.tfvars" or any ".auto.tfvars"
files are present, they will be automatically loaded.
-json Produce output in a machine-readable JSON format,
suitable for use in text editor integrations and
other automated systems. Always disables color.
-state, state-out, and -backup are legacy options supported for the local
backend only. For more information, see the local backend's documentation.
`

View File

@ -30,8 +30,11 @@ func decodeProviderMetaBlock(block *hcl.Block) (*ProviderMeta, hcl.Diagnostics)
diags = append(diags, d...)
}
// verify that the local name is already localized or produce an error.
diags = append(diags, checkProviderNameNormalized(block.Labels[0], block.DefRange)...)
// If the name is invalid, we return an error early, lest the invalid value
// is used by the caller and causes a panic further down the line.
if diags = append(diags, checkProviderNameNormalized(block.Labels[0], block.DefRange)...); diags.HasErrors() {
return nil, diags
}
return &ProviderMeta{
Provider: block.Labels[0],

View File

@ -0,0 +1,5 @@
terraform {
provider_meta "chunky_bacon" {
hello = "world"
}
}

View File

@ -7,6 +7,7 @@ package encryption
import (
"github.com/opentofu/opentofu/internal/encryption/keyprovider/aws_kms"
"github.com/opentofu/opentofu/internal/encryption/keyprovider/external"
"github.com/opentofu/opentofu/internal/encryption/keyprovider/gcp_kms"
"github.com/opentofu/opentofu/internal/encryption/keyprovider/openbao"
"github.com/opentofu/opentofu/internal/encryption/keyprovider/pbkdf2"
@ -30,6 +31,9 @@ func init() {
if err := DefaultRegistry.RegisterKeyProvider(openbao.New()); err != nil {
panic(err)
}
if err := DefaultRegistry.RegisterKeyProvider(external.New()); err != nil {
panic(err)
}
if err := DefaultRegistry.RegisterMethod(aesgcm.New()); err != nil {
panic(err)
}

View File

@ -489,7 +489,7 @@ func complianceTestMetadataTestCase[TConfig keyprovider.Config, TKeyProvider key
if len(output.DecryptionKey) != 0 {
compliancetest.Fail(
t,
"The Provide() function a decryption key despite not receiving input meta. This is incorrect, please don't return a decryption key unless you receive the input metadata.",
"The Provide() function returned a decryption key despite not receiving input meta. This is incorrect, please don't return a decryption key unless you receive the input metadata.",
)
} else {
compliancetest.Log(

View File

@ -0,0 +1,23 @@
# External key provider
> [!WARNING]
> This file is not end-user documentation; it is intended for developers. Please follow the user documentation on the OpenTofu website unless you want to work on the encryption code.
This directory contains the `external` key provider. You can configure it like this:
```hcl
terraform {
encryption {
key_provider "external" "foo" {
command = ["/path/to/binary", "arg1", "arg2"]
}
}
}
```
The external key provider must implement the following protocol:
1. On start, the provider must emit the header line matching [the header schema](protocol/header.schema.json) on the standard output.
2. OpenTofu supplies `null` or the input metadata matching [the input schema](protocol/input.schema.json) on the standard input.
3. The provider must emit the key material matching [the output schema](protocol/output.schema.json) on the standard output.

View File

@ -0,0 +1,86 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package external_test
import (
"fmt"
"strings"
"testing"
"github.com/opentofu/opentofu/internal/configs"
"github.com/opentofu/opentofu/internal/encryption"
"github.com/opentofu/opentofu/internal/encryption/config"
"github.com/opentofu/opentofu/internal/encryption/keyprovider/external"
"github.com/opentofu/opentofu/internal/encryption/keyprovider/external/testprovider"
"github.com/opentofu/opentofu/internal/encryption/keyprovider/pbkdf2"
"github.com/opentofu/opentofu/internal/encryption/method/aesgcm"
"github.com/opentofu/opentofu/internal/encryption/method/unencrypted"
"github.com/opentofu/opentofu/internal/encryption/registry/lockingencryptionregistry"
)
// TestChaining is an end-to-end test verifying that the external key provider
// can be chained into pbkdf2 (via the `chain` parameter) and that a state
// payload encrypted through the resulting aes_gcm method round-trips.
func TestChaining(t *testing.T) {
	// Build the Go-based test provider binary; --hello-world makes it emit a
	// fixed 16-byte passphrase instead of the default key.
	testProviderBinaryPath := testprovider.Go(t)
	reg := lockingencryptionregistry.New()
	if err := reg.RegisterKeyProvider(external.New()); err != nil {
		panic(err)
	}
	if err := reg.RegisterKeyProvider(pbkdf2.New()); err != nil {
		panic(err)
	}
	if err := reg.RegisterMethod(aesgcm.New()); err != nil {
		panic(err)
	}
	if err := reg.RegisterMethod(unencrypted.New()); err != nil {
		panic(err)
	}
	testProviderBinaryPath = append(testProviderBinaryPath, "--hello-world")
	// Quote each command part for interpolation into the HCL list literal.
	// NOTE(review): plain double-quoting does not escape backslashes, so
	// Windows-style paths in the temp dir could break the HCL — confirm the
	// test providers always return forward-slash paths (they use path.Join).
	commandParts := make([]string, len(testProviderBinaryPath))
	for i, cmdPart := range testProviderBinaryPath {
		commandParts[i] = "\"" + cmdPart + "\""
	}
	configData := fmt.Sprintf(`key_provider "external" "test" {
	command = [%s]
}
key_provider "pbkdf2" "passphrase" {
	chain = key_provider.external.test
}
method "aes_gcm" "example" {
	keys = key_provider.pbkdf2.passphrase
}
state {
	method = method.aes_gcm.example
}
`, strings.Join(commandParts, ", "))
	cfg, diags := config.LoadConfigFromString("Test Config Source", configData)
	if diags.HasErrors() {
		t.Fatalf("%v", diags)
	}
	staticEval := configs.NewStaticEvaluator(nil, configs.RootModuleCallForTesting())
	enc, diags := encryption.New(reg, cfg, staticEval)
	if diags.HasErrors() {
		t.Fatalf("%v", diags)
	}
	stateEncryption := enc.State()
	// Round-trip a minimal state payload through encrypt/decrypt.
	fakeState := "{}"
	encryptedState, err := stateEncryption.EncryptState([]byte(fakeState))
	if err != nil {
		t.Fatalf("%v", err)
	}
	decryptedState, _, err := stateEncryption.DecryptState(encryptedState)
	if err != nil {
		t.Fatalf("%v", err)
	}
	if string(decryptedState) != fakeState {
		t.Fatalf("Mismatching decrypted state: %s", decryptedState)
	}
}

View File

@ -0,0 +1,101 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package external
import (
"fmt"
"testing"
"github.com/opentofu/opentofu/internal/encryption/keyprovider"
"github.com/opentofu/opentofu/internal/encryption/keyprovider/compliancetest"
"github.com/opentofu/opentofu/internal/encryption/keyprovider/external/testprovider"
)
// TestComplianceBinary runs the key provider compliance suite against the
// compiled Go test provider binary.
func TestComplianceBinary(t *testing.T) {
	runTest(t, testprovider.Go(t))
}

// TestCompliancePython runs the key provider compliance suite against the
// Python test provider script.
func TestCompliancePython(t *testing.T) {
	runTest(t, testprovider.Python(t))
}

// TestCompliancePOSIXShell runs the key provider compliance suite against the
// POSIX shell test provider script.
func TestCompliancePOSIXShell(t *testing.T) {
	runTest(t, testprovider.POSIXShell(t))
}
// runTest executes the shared key provider compliance suite against the given
// external provider command (a binary, or interpreter plus script path).
func runTest(t *testing.T, command []string) {
	// Minimal configuration that should always build successfully.
	validConfig := &Config{
		Command: command,
	}
	compliancetest.ComplianceTest(
		t,
		compliancetest.TestConfiguration[*descriptor, *Config, *MetadataV1, *keyProvider]{
			Descriptor: New().(*descriptor), //nolint:errcheck //No clue why errcheck fires here.
			HCLParseTestCases: map[string]compliancetest.HCLParseTestCase[*Config, *keyProvider]{
				// Omitting the command attribute entirely must fail HCL parsing.
				"empty": {
					HCL: `key_provider "external" "foo" {
}`,
					ValidHCL:   false,
					ValidBuild: false,
					Validate:   nil,
				},
				// A one-element command must parse and build correctly.
				"basic": {
					HCL: `key_provider "external" "foo" {
    command = ["test-provider"]
}`,
					ValidHCL:   true,
					ValidBuild: true,
					Validate: func(config *Config, keyProvider *keyProvider) error {
						if len(config.Command) != 1 {
							return fmt.Errorf("invalid command after parsing")
						}
						if config.Command[0] != "test-provider" {
							return fmt.Errorf("invalid command after parsing")
						}
						return nil
					},
				},
				// An explicitly empty command list is valid HCL, but Build must reject it.
				"empty-binary": {
					HCL: `key_provider "external" "foo" {
    command = []
}`,
					ValidHCL:   true,
					ValidBuild: false,
				},
			},
			ConfigStructTestCases: map[string]compliancetest.ConfigStructTestCase[*Config, *keyProvider]{},
			MetadataStructTestCases: map[string]compliancetest.MetadataStructTestCase[*Config, *MetadataV1]{
				// A nil metadata struct counts as "not present".
				"not-present-externaldata": {
					ValidConfig: validConfig,
					Meta:        nil,
					IsPresent:   false,
				},
				// Empty-but-initialized metadata is present and valid.
				"present-valid": {
					ValidConfig: validConfig,
					Meta: &MetadataV1{
						ExternalData: map[string]any{},
					},
					IsPresent: true,
					IsValid:   true,
				},
			},
			ProvideTestCase: compliancetest.ProvideTestCase[*Config, *MetadataV1]{
				ValidConfig: validConfig,
				// Every test provider hard-codes this 16-byte key (0x01..0x10).
				ExpectedOutput: &keyprovider.Output{
					EncryptionKey: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
					DecryptionKey: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
				},
				ValidateKeys: nil,
				// The provider must always emit output metadata.
				ValidateMetadata: func(meta *MetadataV1) error {
					if meta.ExternalData == nil {
						return fmt.Errorf("output metadata is not present")
					}
					return nil
				},
			},
		},
	)
}

View File

@ -0,0 +1,25 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package external
import (
"github.com/opentofu/opentofu/internal/encryption/keyprovider"
)
// Config holds the user-supplied settings of the external key provider.
type Config struct {
	// Command is the external program to run plus its arguments.
	Command []string `hcl:"command"`
}

// Build validates the configuration and constructs the external key provider
// together with its empty (encryption-side) metadata placeholder.
func (c *Config) Build() (keyprovider.KeyProvider, keyprovider.KeyMeta, error) {
	// An external provider without a program to run is meaningless; reject it.
	if len(c.Command) == 0 {
		configErr := &keyprovider.ErrInvalidConfiguration{
			Message: "the command option is required",
		}
		return nil, nil, configErr
	}
	provider := &keyProvider{command: c.Command}
	return provider, &MetadataV1{}, nil
}

View File

@ -0,0 +1,35 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package external
import (
"github.com/opentofu/opentofu/internal/encryption/keyprovider"
)
// New returns the descriptor for the "external" key provider.
func New() Descriptor {
	return &descriptor{}
}

// Descriptor extends keyprovider.Descriptor with a typed accessor for this
// provider's configuration struct.
type Descriptor interface {
	keyprovider.Descriptor

	// TypedConfig returns a concrete *Config instead of the generic interface.
	TypedConfig() *Config
}

// descriptor is the stateless implementation of Descriptor.
type descriptor struct {
}

// ID returns the identifier used in HCL key_provider blocks: "external".
func (f descriptor) ID() keyprovider.ID {
	return "external"
}

// TypedConfig returns a fresh, empty configuration struct.
func (f descriptor) TypedConfig() *Config {
	return &Config{}
}

// ConfigStruct implements keyprovider.Descriptor by delegating to TypedConfig.
func (f descriptor) ConfigStruct() keyprovider.Config {
	return f.TypedConfig()
}

View File

@ -0,0 +1,11 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package external
// MetadataV1 describes the metadata structure of the external provider.
type MetadataV1 struct {
	// ExternalData is an opaque, provider-defined map stored alongside the
	// encrypted data and passed back to the external program on decryption.
	ExternalData map[string]any `hcl:"external_data" json:"external_data"`
}

View File

@ -0,0 +1,34 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package external
import (
"github.com/opentofu/opentofu/internal/encryption/keyprovider"
)
// HeaderMagic is the magic string that needs to be present in the header to identify
// the external program as an external keyprovider for OpenTofu.
const HeaderMagic = "OpenTofu-External-Key-Provider"

// Header describes the initial header the external program must output as a single line,
// followed by a single newline.
type Header struct {
	// Magic must always be "OpenTofu-External-Key-Provider".
	Magic string `json:"magic"`
	// Version is the protocol version number. This currently must be 1.
	Version int `json:"version"`
}

// InputV1 describes the input datastructure passed in over stdin.
// This structure is valid for protocol version 1.
// A nil value indicates to the external program that no decryption is desired.
type InputV1 *MetadataV1

// OutputV1 describes the output datastructure written to stdout by the external program.
// This structure is valid for protocol version 1.
type OutputV1 struct {
	// Keys holds the encryption (and optionally decryption) key material.
	Keys keyprovider.Output `json:"keys"`
	// Meta is stored alongside the encrypted data and replayed on decryption.
	// NOTE(review): json "omitempty" has no effect on a non-pointer struct
	// field, so Meta is always serialized — confirm this is intended.
	Meta MetadataV1 `json:"meta,omitempty"`
}

View File

@ -0,0 +1,21 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://raw.githubusercontent.com/opentofu/opentofu/main/internal/encryption/keyprovider/external/protocol/header.schema.json",
"title": "OpenTofu External Key Provider Header",
"description": "Header line output when an external key provider is launched. This must be written on a single line followed by a newline character. Note that the header may contain additional fields in later protocol versions.",
"type": "object",
"properties": {
"magic": {
"$comment": "Magic string identifying the key provider as such.",
"type": "string",
"enum": ["OpenTofu-External-Key-Provider"]
},
"version": {
"$comment": "Protocol version number",
"type": "integer",
"enum": [1]
}
},
"required": ["magic","version"],
"additionalProperties": true
}

View File

@ -0,0 +1,24 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://raw.githubusercontent.com/opentofu/opentofu/main/internal/encryption/keyprovider/external/protocol/input.schema.json",
"title": "OpenTofu External Key Provider Input",
"description": "Input schema for the OpenTofu external key provider protocol. The external provider must read the input from stdin and write the output to stdout. It may write to stderr to provide more error details.",
"oneOf": [
{
"type": "null",
"$comment": "When no decryption is desired, OpenTofu will send null."
},
{
"type": "object",
"$comment": "When decryption is needed, OpenTofu sends the stored metadata on the standard input.",
"properties": {
"external_data": {
"$comment": "When decryption is desired, OpenTofu passes along the stored metadata. This metadata may contain additional details, such as the number of hashing rounds, that are not sensitive, but must be set properly when decrypting.",
"type": "object",
"additionalProperties": true
}
},
"additionalProperties": false
}
]
}

View File

@ -0,0 +1,39 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://raw.githubusercontent.com/opentofu/opentofu/main/internal/encryption/keyprovider/external/protocol/output.schema.json",
"title": "OpenTofu External Key Provider Output",
"description": "Output schema for the OpenTofu external key provider protocol. The external provider must read the input from stdin and write the output to stdout. It may write to stderr to provide more error details.",
"type": "object",
"properties": {
"keys": {
"type": "object",
"additionalProperties": false,
"properties": {
"encryption_key": {
"type": "string",
"contentEncoding": "base64",
"$comment": "Base64-encoded encryption key. The external program must always emit this."
},
"decryption_key": {
"type": "string",
"contentEncoding": "base64",
"$comment": "Base64-encoded decryption key. The external program should only emit this when all the metadata required to construct the key is present in the input."
}
},
"required": ["encryption_key"]
},
"meta": {
"type": "object",
"additionalProperties": false,
"properties": {
"external_data": {
"$comment": "When decryption is desired, OpenTofu passes along the stored metadata. This metadata may contain additional details, such as the number of hashing rounds, that are not sensitive, but must be set properly when decrypting.",
"type": "object",
"additionalProperties": true
}
}
}
},
"additionalProperties": false,
"required": ["keys","meta"]
}

View File

@ -0,0 +1,143 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package external
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"os/exec"
"strings"
"time"
"github.com/opentofu/opentofu/internal/encryption/keyprovider"
)
// keyProvider runs an external command implementing the external key provider
// protocol: a header line on stdout, then JSON key material (see protocol.go).
type keyProvider struct {
	// command is the executable plus its arguments; Config.Build guarantees
	// it is non-empty.
	command []string
}

// Provide launches the external command and returns the key material and
// output metadata it emits. rawMeta must be a *MetadataV1 (possibly empty);
// its JSON encoding is fed to the process's stdin so the program can derive a
// decryption key from previously stored metadata.
func (k keyProvider) Provide(rawMeta keyprovider.KeyMeta) (keyprovider.Output, keyprovider.KeyMeta, error) {
	if rawMeta == nil {
		return keyprovider.Output{}, nil, &keyprovider.ErrInvalidMetadata{Message: "bug: no metadata struct provided"}
	}
	inMeta, ok := rawMeta.(*MetadataV1)
	if !ok {
		return keyprovider.Output{}, nil, &keyprovider.ErrInvalidMetadata{
			Message: fmt.Sprintf("bug: incorrect metadata type of %T provided", rawMeta),
		}
	}
	input, err := json.Marshal(inMeta)
	if err != nil {
		return keyprovider.Output{}, nil, &keyprovider.ErrInvalidMetadata{
			Message: fmt.Sprintf("bug: cannot JSON-marshal metadata (%v)", err),
		}
	}
	// Cap the external program's runtime at one minute; the ioHandler can also
	// cancel early on a protocol violation.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	stderr := &bytes.Buffer{}
	cmd := exec.CommandContext(ctx, k.command[0], k.command[1:]...) //nolint:gosec //Launching external commands here is the entire point.
	// handler doubles as the process's stdin source (the marshalled metadata)
	// and stdout sink (header validation plus payload collection).
	handler := &ioHandler{
		false,
		bytes.NewBuffer(input),
		[]byte{},
		cancel,
		nil,
	}
	cmd.Stdin = handler
	cmd.Stdout = handler
	cmd.Stderr = stderr
	if err := cmd.Run(); err != nil {
		if handler.err != nil {
			// The I/O handler detected a protocol violation and cancelled the
			// context, which is what made Run fail.
			// NOTE(review): Cause wraps the exec error here; handler.err holds
			// the more specific protocol failure — consider surfacing it.
			return keyprovider.Output{}, nil, &keyprovider.ErrKeyProviderFailure{
				Message: "external key provider protocol failure",
				Cause:   err,
			}
		}
		var exitErr *exec.ExitError
		if errors.As(err, &exitErr) {
			if exitErr.ExitCode() != 0 {
				return keyprovider.Output{}, nil, &keyprovider.ErrKeyProviderFailure{
					Message: fmt.Sprintf("the external command exited with a non-zero exit code (%v)\n\nStderr:\n-------\n%s", err, stderr),
				}
			}
		}
		return keyprovider.Output{}, nil, &keyprovider.ErrKeyProviderFailure{
			Message: fmt.Sprintf("the external command exited with an error (%v)\n\nStderr:\n-------\n%s", err, stderr),
		}
	}
	// Strict decoding: unknown fields in the payload are protocol errors.
	var result *OutputV1
	decoder := json.NewDecoder(bytes.NewReader(handler.output))
	decoder.DisallowUnknownFields()
	if err := decoder.Decode(&result); err != nil {
		return keyprovider.Output{}, nil, &keyprovider.ErrKeyProviderFailure{
			Message: fmt.Sprintf("the external command returned an invalid JSON response (%v)\n\nStderr:\n-------\n%s", err, stderr),
		}
	}
	return result.Keys, &result.Meta, nil
}
// ioHandler is wired up as both stdin and stdout of the external process. It
// serves the marshalled input metadata to the process and accumulates its
// output, stripping and validating the protocol header line on the fly.
type ioHandler struct {
	// headerFinished is true once the header line has been parsed and validated.
	headerFinished bool
	// input holds the JSON-encoded metadata served to the process via Read.
	input *bytes.Buffer
	// output accumulates process stdout; after the header is consumed it
	// contains only the JSON payload.
	output []byte
	// cancel aborts the command's context on a protocol violation.
	cancel func()
	// err records the first protocol error, if any, for the caller to inspect.
	err error
}

// Write collects stdout from the external process. Until the first newline is
// seen it keeps buffering; it then parses and validates the header line, and
// afterwards simply appends all further bytes to output.
func (i *ioHandler) Write(p []byte) (int, error) {
	i.output = append(i.output, p...)
	n := len(p)
	if i.headerFinished {
		// Header is finished, just collect the output.
		return n, nil
	}
	// Check if the full header is present.
	parts := strings.SplitN(string(i.output), "\n", 2) //nolint:mnd //This rule is dumb.
	if len(parts) == 1 {
		// No newline yet — keep buffering until the header line is complete.
		return n, nil
	}
	var header Header
	// Note: this is intentionally not using strict decoding. Later protocol versions may introduce additional header
	// fields.
	if jsonErr := json.Unmarshal([]byte(parts[0]), &header); jsonErr != nil {
		err := fmt.Errorf("failed to unmarshal header from external binary (%w)", jsonErr)
		i.err = err
		// Abort the external process — the protocol was violated.
		i.cancel()
		return n, err
	}
	if header.Magic != HeaderMagic {
		err := fmt.Errorf("invalid magic received from external key provider: %s", header.Magic)
		i.err = err
		i.cancel()
		return n, err
	}
	if header.Version != 1 {
		err := fmt.Errorf("invalid version number received from external key provider: %d", header.Version)
		i.err = err
		i.cancel()
		return n, err
	}
	// Header validated; everything after the newline is payload.
	i.headerFinished = true
	i.output = []byte(parts[1])
	return n, nil
}

// Read serves the JSON-encoded input metadata to the external process's stdin.
func (i *ioHandler) Read(p []byte) (int, error) {
	return i.input.Read(p)
}

View File

@ -0,0 +1,80 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package main
import (
"encoding/json"
"io"
"log"
"os"
)
// Header is the protocol header line printed first on stdout.
type Header struct {
	// Magic identifies this program as an OpenTofu external key provider.
	Magic string `json:"magic"`
	// Version is the protocol version number (1 for this test provider).
	Version int `json:"version"`
}

// Output is the final JSON document written to stdout: the key material plus
// the metadata that OpenTofu stores alongside the encrypted data.
type Output struct {
	Keys struct {
		EncryptionKey []byte `json:"encryption_key,omitempty"`
		DecryptionKey []byte `json:"decryption_key,omitempty"`
	} `json:"keys"`
	Meta struct {
		ExternalData map[string]any `json:"external_data"`
	} `json:"meta,omitempty"`
}
// main implements the external key provider test protocol: emit the header
// line, read the (possibly null) input metadata from stdin, then emit a
// hard-coded key — withholding the decryption key when no metadata was given.
func main() {
	// Write logs to stderr so stdout stays reserved for protocol data.
	log.Default().SetOutput(os.Stderr)

	// Protocol step 1: announce ourselves with the header, newline-terminated.
	headerJSON, err := json.Marshal(Header{
		Magic:   "OpenTofu-External-Key-Provider",
		Version: 1,
	})
	if err != nil {
		log.Fatalf("%v", err)
	}
	_, _ = os.Stdout.Write(headerJSON)
	_, _ = os.Stdout.Write([]byte("\n"))

	// Protocol step 2: consume the input metadata (JSON or null) from stdin.
	rawInput, err := io.ReadAll(os.Stdin)
	if err != nil {
		log.Fatalf("Failed to read stdin: %v", err)
	}
	var parsedMeta any
	if err = json.Unmarshal(rawInput, &parsedMeta); err != nil {
		log.Fatalf("Failed to parse stdin: %v", err)
	}

	// Hard-coded key material: bytes 0x01..0x10, or a fixed passphrase when
	// --hello-world is passed.
	key := make([]byte, 16)
	for idx := range key {
		key[idx] = byte(idx + 1)
	}
	if len(os.Args) == 2 && os.Args[1] == "--hello-world" {
		key = []byte("Hello world! 123")
	}

	// Only emit a decryption key when input metadata was supplied.
	var decryptionKey []byte
	if parsedMeta != nil {
		decryptionKey = key
	}

	// Protocol step 3: write the key material and (empty) output metadata.
	var response Output
	response.Keys.EncryptionKey = key
	response.Keys.DecryptionKey = decryptionKey
	response.Meta.ExternalData = map[string]any{}
	payload, err := json.Marshal(response)
	if err != nil {
		log.Fatalf("Failed to stringify output: %v", err)
	}
	_, _ = os.Stdout.Write(payload)
}

View File

@ -0,0 +1,53 @@
#!/usr/bin/python
# Copyright (c) The OpenTofu Authors
# SPDX-License-Identifier: MPL-2.0
# Copyright (c) 2023 HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
import base64
import json
import sys
if __name__ == "__main__":
    # Refuse to run on a terminal: this program speaks a stdin/stdout protocol.
    if sys.stdout.isatty():
        sys.stderr.write("This is an OpenTofu key provider and is not meant to be run interactively. "
                         "Please configure this program in your OpenTofu encryption block to use it.\n")
        sys.exit(1)

    # Protocol step 1: emit the header as a single newline-terminated line.
    sys.stdout.write((json.dumps({"magic": "OpenTofu-External-Key-Provider", "version": 1}) + "\n"))

    # Protocol step 2: read the input metadata (JSON object or null) from stdin.
    data = json.loads(sys.stdin.read())

    # Hard-coded 16-byte key: 0x01..0x10.
    key = bytes(range(1, 17))
    encoded_key = base64.b64encode(key).decode('ascii')

    # Protocol step 3: emit the key material. Only include a decryption key
    # when input metadata was supplied; always emit (empty) output metadata.
    keys = {"encryption_key": encoded_key}
    if data is not None:
        # A real provider would use the stored metadata here (e.g. KDF params).
        inputMeta = data["external_data"]
        keys["decryption_key"] = encoded_key
    sys.stdout.write(json.dumps({
        "keys": keys,
        "meta": {"external_data": {}}
    }))

View File

@ -0,0 +1,41 @@
#!/bin/sh
# Copyright (c) The OpenTofu Authors
# SPDX-License-Identifier: MPL-2.0
# Copyright (c) 2023 HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
# Abort immediately if any command fails.
set -e

# Protocol step 1: output the header as a single line:
echo '{"magic":"OpenTofu-External-Key-Provider","version":1}'

# Protocol step 2: read the input metadata (all of stdin until EOF).
INPUT=$(printf '%s' "$(cat)")

# Protocol step 3: emit the hard-coded key material (base64 of bytes 0x01..0x10).
if [ "${INPUT}" = "null" ]; then
  # We don't have metadata and shouldn't output a decryption key.
  cat << EOF
{
  "keys":{
    "encryption_key":"AQIDBAUGBwgJCgsMDQ4PEA=="
  },
  "meta":{
    "external_data":{}
  }
}
EOF
else
  # We have metadata and should output a decryption key. In our simplified case it's the
  # same as the encryption key.
  cat << EOF
{
  "keys":{
    "encryption_key":"AQIDBAUGBwgJCgsMDQ4PEA==",
    "decryption_key":"AQIDBAUGBwgJCgsMDQ4PEA=="
  },
  "meta":{
    "external_data":{}
  }
}
EOF
fi

View File

@ -0,0 +1,120 @@
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package testprovider
import (
"context"
"embed"
"fmt"
"os"
"os/exec"
"path"
"runtime"
"strings"
"testing"
"time"
)
//go:embed data/*
var embedFS embed.FS
// Go builds a key provider as a Go binary and returns its path.
// This binary will always return []byte{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16} as a hard-coded key.
// You may pass --hello-world to change it to []byte("Hello world! 123")
func Go(t *testing.T) []string {
	// goMod is embedded like this because the go:embed tag doesn't like having module files in embedded paths.
	var goMod = []byte(`module testprovider

go 1.22`)
	tempDir := t.TempDir()
	dir := path.Join(tempDir, "testprovider-go")
	if err := os.MkdirAll(dir, 0700); err != nil { //nolint:mnd // This check is stupid
		t.Errorf("Failed to create temporary directory (%v)", err)
	}
	// Lay out a throwaway Go module containing the embedded provider source.
	if err := os.WriteFile(path.Join(dir, "go.mod"), goMod, 0600); err != nil { //nolint:mnd // This check is stupid
		t.Errorf("%v", err)
	}
	if err := ejectFile("testprovider.go", path.Join(dir, "testprovider.go")); err != nil {
		t.Errorf("%v", err)
	}
	// Name the output binary, adding .exe on Windows.
	targetBinary := path.Join(dir, "testprovider")
	if runtime.GOOS == "windows" {
		targetBinary += ".exe"
	}
	t.Logf("\033[32mCompiling test provider binary...\033[0m")
	cmd := exec.Command("go", "build", "-o", targetBinary)
	cmd.Dir = dir
	// TODO move this to a proper test logger once available.
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// Skip (rather than fail) the test when no Go toolchain can build the provider.
	if err := cmd.Run(); err != nil {
		t.Skipf("Failed to build test provider binary (%v)", err)
	}
	return []string{targetBinary}
}
// Python ejects the embedded Python key provider script into a temporary
// directory and returns the full argv (interpreter plus script path) needed
// to run it.
// This script will always return []byte{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16} as a hard-coded key.
func Python(t *testing.T) []string {
	dir := path.Join(t.TempDir(), "testprovider-py")
	if err := os.MkdirAll(dir, 0700); err != nil { //nolint:mnd // This check is stupid
		t.Errorf("Failed to create temporary directory (%v)", err)
	}
	script := path.Join(dir, "testprovider.py")
	if err := ejectFile("testprovider.py", script); err != nil {
		t.Errorf("%v", err)
	}
	// Probe for a working Python interpreter; skips the test if none is found.
	interpreter := findExecutable(t, []string{"python", "python3"}, []string{"--version"})
	return []string{interpreter, script}
}
// POSIXShell ejects the embedded shell key provider script into a temporary
// directory and returns the full argv (shell plus script path) needed to run it.
// This script will always return []byte{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16} as a hard-coded key.
func POSIXShell(t *testing.T) []string {
	dir := path.Join(t.TempDir(), "testprovider-sh")
	if err := os.MkdirAll(dir, 0700); err != nil { //nolint:mnd // This check is stupid
		t.Errorf("Failed to create temporary directory (%v)", err)
	}
	script := path.Join(dir, "testprovider.sh")
	if err := ejectFile("testprovider.sh", script); err != nil {
		t.Errorf("%v", err)
	}
	// Probe for a working POSIX shell; skips the test if none is found.
	shell := findExecutable(t, []string{"sh", "/bin/sh", "/usr/bin/sh"}, []string{"-c", "echo \"Hello world!\""})
	return []string{shell, script}
}
// ejectFile copies the named file out of the embedded data directory onto
// disk at target, with owner-only permissions.
func ejectFile(file string, target string) error {
	contents, readErr := embedFS.ReadFile(path.Join("data", file))
	if readErr != nil {
		return fmt.Errorf("failed to read %s file from embedded dataset (%w)", file, readErr)
	}
	if writeErr := os.WriteFile(target, contents, 0600); writeErr != nil { //nolint:mnd // This check is stupid
		return fmt.Errorf("failed to create %s file at %s (%w)", file, target, writeErr)
	}
	return nil
}
// findExecutable probes each candidate in options by running it with
// testArguments (bounded by a one-minute timeout) and returns the first one
// that runs successfully. If none works, the test is skipped.
func findExecutable(t *testing.T, options []string, testArguments []string) string {
	// probe runs a single candidate and reports whether it executed cleanly.
	probe := func(binary string) error {
		ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
		defer cancel()
		return exec.CommandContext(ctx, binary, testArguments...).Run()
	}
	for _, candidate := range options {
		if probe(candidate) == nil {
			return candidate
		}
	}
	t.Skipf("No viable alternative found between %s", strings.Join(options, ", "))
	return ""
}

View File

@ -11,8 +11,8 @@ import "github.com/zclconf/go-cty/cty"
// It contains two keys because some key providers may prefer include random data (e.g. salt)
// in the generated keys and this salt will be different for decryption and encryption.
type Output struct {
EncryptionKey []byte `hcl:"encryption_key" cty:"encryption_key" json:"encryption_key" yaml:"encryption_key"`
DecryptionKey []byte `hcl:"decryption_key" cty:"decryption_key" json:"decryption_key" yaml:"decryption_key"`
EncryptionKey []byte `hcl:"encryption_key" cty:"encryption_key" json:"encryption_key,omitempty" yaml:"encryption_key"`
DecryptionKey []byte `hcl:"decryption_key,optional" cty:"decryption_key" json:"decryption_key,omitempty" yaml:"decryption_key"`
}
// Cty turns the Output struct into a CTY value.

View File

@ -15,6 +15,9 @@ terraform {
key_provider "pbkdf2" "myprovider" {
passphrase = "enter a long and complex passphrase here"
# Alternatively, chain the passphrase from an upstream key provider:
chain = key_provider.other.provider
# Adapt the key length to your encryption method needs,
# check the method documentation for the right key length
key_length = 32

View File

@ -6,6 +6,7 @@
package pbkdf2
import (
"bytes"
"crypto/rand"
"fmt"
"testing"
@ -28,10 +29,18 @@ func TestCompliance(t *testing.T) {
compliancetest.TestConfiguration[*descriptor, *Config, *Metadata, *pbkdf2KeyProvider]{
Descriptor: New().(*descriptor),
HCLParseTestCases: map[string]compliancetest.HCLParseTestCase[*Config, *pbkdf2KeyProvider]{
"invalid": {
HCL: `key_provider "pbkdf2" "foo" {
chain = {
encryption_key = "Hello world! 123"
}
}`,
ValidHCL: false,
},
"empty": {
HCL: `key_provider "pbkdf2" "foo" {
}`,
ValidHCL: false,
ValidHCL: true,
ValidBuild: false,
Validate: nil,
},
@ -51,6 +60,37 @@ func TestCompliance(t *testing.T) {
return nil
},
},
"both-passphrase-and-chain": {
HCL: `key_provider "pbkdf2" "foo" {
passphrase = "Hello world! 123"
chain = {
encryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
}
}`,
ValidHCL: true,
ValidBuild: false,
},
"chain": {
HCL: `key_provider "pbkdf2" "foo" {
chain = {
encryption_key = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
}
}`,
ValidHCL: true,
ValidBuild: true,
Validate: func(config *Config, keyProvider *pbkdf2KeyProvider) error {
if config.Chain == nil {
return fmt.Errorf("no chain after parsing")
}
if len(config.Chain.EncryptionKey) != 16 {
return fmt.Errorf("incorrect encryption key length")
}
if !bytes.Equal(config.Chain.EncryptionKey, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) {
return fmt.Errorf("incorrect encryption key")
}
return nil
},
},
"extended": {
HCL: fmt.Sprintf(`key_provider "pbkdf2" "foo" {
passphrase = "Hello world! 123"

View File

@ -41,11 +41,15 @@ type Config struct {
// Set by the descriptor.
randomSource io.Reader
Passphrase string `hcl:"passphrase"`
KeyLength int `hcl:"key_length,optional"`
Iterations int `hcl:"iterations,optional"`
HashFunction HashFunctionName `hcl:"hash_function,optional"`
SaltLength int `hcl:"salt_length,optional"`
// Passphrase is a single passphrase to use for encryption. This is mutually exclusive with Chain.
Passphrase string `hcl:"passphrase,optional"`
// Chain are two separate passphrases supplied from a chained provider. This is mutually exclusive with
// Passphrase.
Chain *keyprovider.Output `hcl:"chain,optional"`
KeyLength int `hcl:"key_length,optional"`
Iterations int `hcl:"iterations,optional"`
HashFunction HashFunctionName `hcl:"hash_function,optional"`
SaltLength int `hcl:"salt_length,optional"`
}
// WithPassphrase adds the passphrase and returns the same config for chaining.
@ -54,6 +58,12 @@ func (c *Config) WithPassphrase(passphrase string) *Config {
return c
}
// WithChain adds a separate encryption/decryption key chained from an upstream keyprovider.
func (c *Config) WithChain(chain *keyprovider.Output) *Config {
c.Chain = chain
return c
}
// WithKeyLength sets the key length and returns the same config for chaining
func (c *Config) WithKeyLength(length int) *Config {
c.KeyLength = length
@ -85,13 +95,36 @@ func (c *Config) Build() (keyprovider.KeyProvider, keyprovider.KeyMeta, error) {
}
}
if c.Passphrase == "" {
if c.Passphrase == "" && c.Chain == nil {
return nil, nil, &keyprovider.ErrInvalidConfiguration{
Message: "no passphrase provided",
Message: "no passphrase provided and no chained provider defined",
}
}
if len(c.Passphrase) < MinimumPassphraseLength {
if c.Passphrase != "" && c.Chain != nil {
return nil, nil, &keyprovider.ErrInvalidConfiguration{
Message: "passphrase and chain are mutually exclusive",
}
}
if c.Chain != nil {
if c.Chain.EncryptionKey == nil {
return nil, nil, &keyprovider.ErrInvalidConfiguration{
Message: "no encryption key provided from upstream key provider",
}
}
if len(c.Chain.EncryptionKey) < MinimumPassphraseLength {
return nil, nil, &keyprovider.ErrInvalidConfiguration{
Message: fmt.Sprintf("upstream key provider supplied an encryption key that is too short (minimum %d characters)", MinimumPassphraseLength),
}
}
if c.Chain.DecryptionKey != nil {
if len(c.Chain.DecryptionKey) < MinimumPassphraseLength {
return nil, nil, &keyprovider.ErrInvalidConfiguration{
Message: fmt.Sprintf("upstream key provider supplied a decryption key that is too short (minimum %d characters)", MinimumPassphraseLength),
}
}
}
}
if c.Passphrase != "" && len(c.Passphrase) < MinimumPassphraseLength {
return nil, nil, &keyprovider.ErrInvalidConfiguration{
Message: fmt.Sprintf("passphrase is too short (minimum %d characters)", MinimumPassphraseLength),
}

View File

@ -49,6 +49,7 @@ func (f descriptor) TypedConfig() *Config {
return &Config{
randomSource: f.randomSource,
Passphrase: "",
Chain: nil,
KeyLength: DefaultKeyLength,
Iterations: DefaultIterations,
HashFunction: DefaultHashFunctionName,

View File

@ -58,8 +58,14 @@ func (p pbkdf2KeyProvider) Provide(rawMeta keyprovider.KeyMeta) (keyprovider.Out
if err := inMeta.validate(); err != nil {
return keyprovider.Output{}, nil, err
}
var decryptionPassphrase []byte
if p.Chain != nil {
decryptionPassphrase = p.Chain.DecryptionKey
} else {
decryptionPassphrase = []byte(p.Passphrase)
}
decryptionKey = goPBKDF2.Key(
[]byte(p.Passphrase),
decryptionPassphrase,
inMeta.Salt,
inMeta.Iterations,
inMeta.KeyLength,
@ -67,9 +73,15 @@ func (p pbkdf2KeyProvider) Provide(rawMeta keyprovider.KeyMeta) (keyprovider.Out
)
}
var encryptionPassphrase []byte
if p.Chain != nil {
encryptionPassphrase = p.Chain.EncryptionKey
} else {
encryptionPassphrase = []byte(p.Passphrase)
}
return keyprovider.Output{
EncryptionKey: goPBKDF2.Key(
[]byte(p.Passphrase),
encryptionPassphrase,
outMeta.Salt,
outMeta.Iterations,
outMeta.KeyLength,

View File

@ -396,6 +396,26 @@ func (p *provider) ReadDataSource(_ context.Context, req *tfplugin5.ReadDataSour
return resp, nil
}
// CloseEphemeralResource implements tfplugin5.ProviderServer.
// Ephemeral resources are not supported by this shim; calling this is a bug.
func (p *provider) CloseEphemeralResource(context.Context, *tfplugin5.CloseEphemeralResource_Request) (*tfplugin5.CloseEphemeralResource_Response, error) {
	panic("unimplemented: CloseEphemeralResource")
}
// OpenEphemeralResource implements tfplugin5.ProviderServer.
// Ephemeral resources are not supported by this shim; calling this is a bug.
func (p *provider) OpenEphemeralResource(context.Context, *tfplugin5.OpenEphemeralResource_Request) (*tfplugin5.OpenEphemeralResource_Response, error) {
	panic("unimplemented: OpenEphemeralResource")
}
// RenewEphemeralResource implements tfplugin5.ProviderServer.
// Ephemeral resources are not supported by this shim; calling this is a bug.
func (p *provider) RenewEphemeralResource(context.Context, *tfplugin5.RenewEphemeralResource_Request) (*tfplugin5.RenewEphemeralResource_Response, error) {
	panic("unimplemented: RenewEphemeralResource")
}
// ValidateEphemeralResourceConfig implements tfplugin5.ProviderServer.
// Ephemeral resources are not supported by this shim; calling this is a bug.
func (p *provider) ValidateEphemeralResourceConfig(context.Context, *tfplugin5.ValidateEphemeralResourceConfig_Request) (*tfplugin5.ValidateEphemeralResourceConfig_Response, error) {
	panic("unimplemented: ValidateEphemeralResourceConfig")
}
func (p *provider) Stop(context.Context, *tfplugin5.Stop_Request) (*tfplugin5.Stop_Response, error) {
resp := &tfplugin5.Stop_Response{}
err := p.provider.Stop()

View File

@ -396,6 +396,26 @@ func (p *provider6) ReadDataSource(_ context.Context, req *tfplugin6.ReadDataSou
return resp, nil
}
// CloseEphemeralResource implements tfplugin6.ProviderServer.
// Ephemeral resources are not supported by this shim; calling this is a bug.
func (p *provider6) CloseEphemeralResource(context.Context, *tfplugin6.CloseEphemeralResource_Request) (*tfplugin6.CloseEphemeralResource_Response, error) {
	panic("unimplemented: CloseEphemeralResource")
}
// OpenEphemeralResource implements tfplugin6.ProviderServer.
// Ephemeral resources are not supported by this shim; calling this is a bug.
func (p *provider6) OpenEphemeralResource(context.Context, *tfplugin6.OpenEphemeralResource_Request) (*tfplugin6.OpenEphemeralResource_Response, error) {
	panic("unimplemented: OpenEphemeralResource")
}
// RenewEphemeralResource implements tfplugin6.ProviderServer.
// Ephemeral resources are not supported by this shim; calling this is a bug.
func (p *provider6) RenewEphemeralResource(context.Context, *tfplugin6.RenewEphemeralResource_Request) (*tfplugin6.RenewEphemeralResource_Response, error) {
	panic("unimplemented: RenewEphemeralResource")
}
// ValidateEphemeralResourceConfig implements tfplugin6.ProviderServer.
// Ephemeral resources are not supported by this shim; calling this is a bug.
func (p *provider6) ValidateEphemeralResourceConfig(context.Context, *tfplugin6.ValidateEphemeralResourceConfig_Request) (*tfplugin6.ValidateEphemeralResourceConfig_Response, error) {
	panic("unimplemented: ValidateEphemeralResourceConfig")
}
func (p *provider6) StopProvider(context.Context, *tfplugin6.StopProvider_Request) (*tfplugin6.StopProvider_Response, error) {
resp := &tfplugin6.StopProvider_Response{}
err := p.provider.Stop()

View File

@ -36,15 +36,15 @@ func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Valu
var errs []error
var atRoot string
if len(path) == 0 {
atRoot = "Root object "
atRoot = "root object "
}
if planned.IsNull() && !actual.IsNull() {
errs = append(errs, path.NewErrorf(fmt.Sprintf("%swas absent, but now present", atRoot)))
errs = append(errs, path.NewErrorf("%swas absent, but now present", atRoot))
return errs
}
if actual.IsNull() && !planned.IsNull() {
errs = append(errs, path.NewErrorf(fmt.Sprintf("%swas present, but now absent", atRoot)))
errs = append(errs, path.NewErrorf("%swas present, but now absent", atRoot))
return errs
}
if planned.IsNull() {
@ -57,147 +57,194 @@ func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Valu
actualV := actual.GetAttr(name)
path := append(path, cty.GetAttrStep{Name: name})
// Unmark values here before checking value assertions,
// but save the marks so we can see if we should suppress
// exposing a value through errors
unmarkedActualV, marksA := actualV.UnmarkDeep()
unmarkedPlannedV, marksP := plannedV.UnmarkDeep()
_, isSensitiveActual := marksA[marks.Sensitive]
_, isSensitivePlanned := marksP[marks.Sensitive]
moreErrs := assertValueCompatible(unmarkedPlannedV, unmarkedActualV, path)
if attrS.Sensitive || isSensitiveActual || isSensitivePlanned {
if len(moreErrs) > 0 {
// Use a vague placeholder message instead, to avoid disclosing
// sensitive information.
errs = append(errs, path.NewErrorf("inconsistent values for sensitive attribute"))
}
} else {
errs = append(errs, moreErrs...)
}
errs = append(errs, assertAttributeCompatible(plannedV, actualV, attrS, path)...)
}
for name, blockS := range schema.BlockTypes {
plannedV, _ := planned.GetAttr(name).Unmark()
actualV, _ := actual.GetAttr(name).Unmark()
path := append(path, cty.GetAttrStep{Name: name})
switch blockS.Nesting {
case configschema.NestingSingle, configschema.NestingGroup:
// If an unknown block placeholder was present then the placeholder
// may have expanded out into zero blocks, which is okay.
if !plannedV.IsKnown() && actualV.IsNull() {
errs = append(errs, assertNestedBlockCompatible(plannedV, actualV, blockS, path)...)
}
return errs
}
// assertAttributeCompatible checks that the actual value of a single
// attribute is compatible with its planned value. If the attribute is
// sensitive — per the schema, or via a sensitive mark on either value — any
// mismatch is reported with a generic placeholder message so the values
// themselves are not disclosed through error text.
func assertAttributeCompatible(plannedV, actualV cty.Value, attrS *configschema.Attribute, path cty.Path) []error {
	// Strip marks before comparing, but remember whether either side carried
	// a sensitive mark so we can redact error details below.
	plainActual, actualMarks := actualV.UnmarkDeep()
	plainPlanned, plannedMarks := plannedV.UnmarkDeep()
	_, actualSensitive := actualMarks[marks.Sensitive]
	_, plannedSensitive := plannedMarks[marks.Sensitive]

	valueErrs := assertValueCompatible(plainPlanned, plainActual, path)
	if !(attrS.Sensitive || actualSensitive || plannedSensitive) {
		return valueErrs
	}

	var errs []error
	if len(valueErrs) > 0 {
		// Use a vague placeholder message instead, to avoid disclosing
		// sensitive information.
		errs = append(errs, path.NewErrorf("inconsistent values for sensitive attribute"))
	}
	return errs
}
// assertNestedBlockCompatible dispatches the plan/apply compatibility check
// for a nested block value to the handler matching its nesting mode.
func assertNestedBlockCompatible(plannedV, actualV cty.Value, blockS *configschema.NestedBlock, path cty.Path) []error {
	switch blockS.Nesting {
	case configschema.NestingSingle, configschema.NestingGroup:
		return assertNestedBlockCompatibleSingle(plannedV, actualV, blockS, path)
	case configschema.NestingList:
		return assertNestedBlockCompatibleList(plannedV, actualV, blockS, path)
	case configschema.NestingMap:
		// A NestingMap is represented as either a map or an object, depending
		// on whether any dynamically-typed attributes are present inside.
		// That is decided statically, so both values have the same kind; the
		// two handlers differ slightly in the details but share the same goal.
		if plannedV.Type().IsObjectType() {
			return assertNestedBlockCompatibleMapAsObject(plannedV, actualV, blockS, path)
		}
		return assertNestedBlockCompatibleMapAsMap(plannedV, actualV, blockS, path)
	case configschema.NestingSet:
		return assertNestedBlockCompatibleSet(plannedV, actualV, blockS, path)
	default:
		panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting))
	}
}
// assertNestedBlockCompatibleSingle checks compatibility for a
// NestingSingle/NestingGroup block value.
func assertNestedBlockCompatibleSingle(plannedV, actualV cty.Value, blockS *configschema.NestedBlock, path cty.Path) []error {
	// An unknown block placeholder in the plan may legitimately have
	// expanded out into zero blocks, so that combination is not an error.
	if !plannedV.IsKnown() && actualV.IsNull() {
		return nil
	}
	return assertObjectCompatible(&blockS.Block, plannedV, actualV, path)
}
// assertNestedBlockCompatibleList checks compatibility for a NestingList
// block value. The value may be either a list or a tuple, depending on
// whether there are dynamically-typed attributes inside, but both expose a
// similar-enough API to be handled the same way here.
func assertNestedBlockCompatibleList(plannedV, actualV cty.Value, blockS *configschema.NestedBlock, path cty.Path) []error {
	// Nothing to compare when either side is unknown or null.
	if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
		return nil
	}
	var errs []error
	wantLen, gotLen := plannedV.LengthInt(), actualV.LengthInt()
	if wantLen != gotLen {
		errs = append(errs, path.NewErrorf("block count changed from %d to %d", wantLen, gotLen))
		return errs
	}
	for it := plannedV.ElementIterator(); it.Next(); {
		key, plannedElem := it.Element()
		if !actualV.HasIndex(key).True() {
			continue
		}
		actualElem := actualV.Index(key)
		errs = append(errs, assertObjectCompatible(&blockS.Block, plannedElem, actualElem, append(path, cty.IndexStep{Key: key}))...)
	}
	return errs
}
// assertNestedBlockCompatibleMapAsMap checks compatibility for a NestingMap
// block whose value is represented as a cty map (i.e. the block has no
// dynamically-typed attributes): the element count must be unchanged, and
// each planned element must remain compatible with its actual counterpart.
func assertNestedBlockCompatibleMapAsMap(plannedV, actualV cty.Value, blockS *configschema.NestedBlock, path cty.Path) []error {
	// Nothing to compare when the plan is unknown or either side is null.
	if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
		return nil
	}
	var errs []error
	plannedL := plannedV.LengthInt()
	actualL := actualV.LengthInt()
	// NOTE(review): plannedV.IsKnown() is always true here given the guard
	// above, so the second operand looks vestigial — confirm before removing.
	if plannedL != actualL && plannedV.IsKnown() { // new blocks may appear if unknown blocks were present in the plan
		errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL))
		return errs
	}
	// Recurse into each element that exists on both sides; keys only present
	// in the plan are skipped rather than reported here.
	for it := plannedV.ElementIterator(); it.Next(); {
		idx, plannedEV := it.Element()
		if !actualV.HasIndex(idx).True() {
			continue
		}
		actualEV := actualV.Index(idx)
		moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx}))
		errs = append(errs, moreErrs...)
	}
	return errs
}
func assertNestedBlockCompatibleMapAsObject(plannedV, actualV cty.Value, blockS *configschema.NestedBlock, path cty.Path) []error {
var errs []error
plannedAtys := plannedV.Type().AttributeTypes()
actualAtys := actualV.Type().AttributeTypes()
for k := range plannedAtys {
if _, ok := actualAtys[k]; !ok {
errs = append(errs, path.NewErrorf("block key %q has vanished", k))
continue
}
plannedEV := plannedV.GetAttr(k)
actualEV := actualV.GetAttr(k)
moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.GetAttrStep{Name: k}))
errs = append(errs, moreErrs...)
}
if plannedV.IsKnown() { // new blocks may appear if unknown blocks were present in the plan
for k := range actualAtys {
if _, ok := plannedAtys[k]; !ok {
errs = append(errs, path.NewErrorf("new block key %q has appeared", k))
continue
}
moreErrs := assertObjectCompatible(&blockS.Block, plannedV, actualV, path)
errs = append(errs, moreErrs...)
case configschema.NestingList:
// A NestingList might either be a list or a tuple, depending on
// whether there are dynamically-typed attributes inside. However,
// both support a similar-enough API that we can treat them the
// same for our purposes here.
if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
continue
}
plannedL := plannedV.LengthInt()
actualL := actualV.LengthInt()
if plannedL != actualL {
errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL))
continue
}
for it := plannedV.ElementIterator(); it.Next(); {
idx, plannedEV := it.Element()
if !actualV.HasIndex(idx).True() {
continue
}
actualEV := actualV.Index(idx)
moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx}))
errs = append(errs, moreErrs...)
}
case configschema.NestingMap:
// A NestingMap might either be a map or an object, depending on
// whether there are dynamically-typed attributes inside, but
// that's decided statically and so both values will have the same
// kind.
if plannedV.Type().IsObjectType() {
plannedAtys := plannedV.Type().AttributeTypes()
actualAtys := actualV.Type().AttributeTypes()
for k := range plannedAtys {
if _, ok := actualAtys[k]; !ok {
errs = append(errs, path.NewErrorf("block key %q has vanished", k))
continue
}
plannedEV := plannedV.GetAttr(k)
actualEV := actualV.GetAttr(k)
moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.GetAttrStep{Name: k}))
errs = append(errs, moreErrs...)
}
if plannedV.IsKnown() { // new blocks may appear if unknown blocks were present in the plan
for k := range actualAtys {
if _, ok := plannedAtys[k]; !ok {
errs = append(errs, path.NewErrorf("new block key %q has appeared", k))
continue
}
}
}
} else {
if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
continue
}
plannedL := plannedV.LengthInt()
actualL := actualV.LengthInt()
if plannedL != actualL && plannedV.IsKnown() { // new blocks may appear if unknown blocks were present in the plan
errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL))
continue
}
for it := plannedV.ElementIterator(); it.Next(); {
idx, plannedEV := it.Element()
if !actualV.HasIndex(idx).True() {
continue
}
actualEV := actualV.Index(idx)
moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx}))
errs = append(errs, moreErrs...)
}
}
case configschema.NestingSet:
if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
continue
}
if !plannedV.IsKnown() {
// When unknown blocks are present the final number of blocks
// may be different, either because the unknown set values
// become equal and are collapsed, or the count is unknown due
// to a dynamic block. Unfortunately this means we can't do our
// usual checks in this case without generating false
// negatives.
continue
}
setErrs := assertSetValuesCompatible(plannedV, actualV, path, func(plannedEV, actualEV cty.Value) bool {
errs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: actualEV}))
return len(errs) == 0
})
errs = append(errs, setErrs...)
// There can be fewer elements in a set after its elements are all
// known (values that turn out to be equal will coalesce) but the
// number of elements must never get larger.
plannedL := plannedV.LengthInt()
actualL := actualV.LengthInt()
if plannedL < actualL {
errs = append(errs, path.NewErrorf("block set length changed from %d to %d", plannedL, actualL))
}
default:
panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting))
}
}
return errs
}
// assertNestedBlockCompatibleSet checks compatibility for a NestingSet block
// value: each actual element must match some planned element, and the set
// must not have grown (elements may coalesce as unknowns become known, but
// new ones must not appear).
func assertNestedBlockCompatibleSet(plannedV, actualV cty.Value, blockS *configschema.NestedBlock, path cty.Path) []error {
	// When unknown blocks are present the final number of blocks may be
	// different, either because the unknown set values become equal and are
	// collapsed, or the count is unknown due to a dynamic block.
	// Unfortunately this means we can't do our usual checks in that case
	// without generating false negatives, so we skip entirely. (A second
	// !plannedV.IsKnown() branch previously followed this guard but was
	// unreachable, so it has been removed.)
	if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
		return nil
	}
	var errs []error
	setErrs := assertSetValuesCompatible(plannedV, actualV, path, func(plannedEV, actualEV cty.Value) bool {
		moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: actualEV}))
		return len(moreErrs) == 0
	})
	errs = append(errs, setErrs...)
	// There can be fewer elements in a set after its elements are all
	// known (values that turn out to be equal will coalesce) but the
	// number of elements must never get larger.
	plannedL := plannedV.LengthInt()
	actualL := actualV.LengthInt()
	if plannedL < actualL {
		errs = append(errs, path.NewErrorf("block set length changed from %d to %d", plannedL, actualL))
	}
	return errs
}
func assertValueCompatible(planned, actual cty.Value, path cty.Path) []error {
// NOTE: We don't normally use the GoString rendering of cty.Value in
// user-facing error messages as a rule, but we make an exception
@ -239,75 +286,100 @@ func assertValueCompatible(planned, actual cty.Value, path cty.Path) []error {
return errs
}
ty := planned.Type()
switch {
case !actual.IsKnown():
if !actual.IsKnown() {
errs = append(errs, path.NewErrorf("was known, but now unknown"))
case ty.IsPrimitiveType():
if !actual.Equals(planned).True() {
errs = append(errs, path.NewErrorf("was %#v, but now %#v", planned, actual))
}
case ty.IsListType() || ty.IsMapType() || ty.IsTupleType():
for it := planned.ElementIterator(); it.Next(); {
k, plannedV := it.Element()
if !actual.HasIndex(k).True() {
errs = append(errs, path.NewErrorf("element %s has vanished", indexStrForErrors(k)))
continue
}
actualV := actual.Index(k)
moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: k}))
errs = append(errs, moreErrs...)
}
for it := actual.ElementIterator(); it.Next(); {
k, _ := it.Element()
if !planned.HasIndex(k).True() {
errs = append(errs, path.NewErrorf("new element %s has appeared", indexStrForErrors(k)))
}
}
case ty.IsObjectType():
atys := ty.AttributeTypes()
for name := range atys {
// Because we already tested that the two values have the same type,
// we can assume that the same attributes are present in both and
// focus just on testing their values.
plannedV := planned.GetAttr(name)
actualV := actual.GetAttr(name)
moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.GetAttrStep{Name: name}))
errs = append(errs, moreErrs...)
}
case ty.IsSetType():
// We can't really do anything useful for sets here because changing
// an unknown element to known changes the identity of the element, and
// so we can't correlate them properly. However, we will at least check
// to ensure that the number of elements is consistent, along with
// the general type-match checks we ran earlier in this function.
if planned.IsKnown() && !planned.IsNull() && !actual.IsNull() {
setErrs := assertSetValuesCompatible(planned, actual, path, func(plannedV, actualV cty.Value) bool {
errs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: actualV}))
return len(errs) == 0
})
errs = append(errs, setErrs...)
// There can be fewer elements in a set after its elements are all
// known (values that turn out to be equal will coalesce) but the
// number of elements must never get larger.
plannedL := planned.LengthInt()
actualL := actual.LengthInt()
if plannedL < actualL {
errs = append(errs, path.NewErrorf("length changed from %d to %d", plannedL, actualL))
}
}
return errs
}
// We no longer use "errs" after this point, because we should already have returned
// if we've added any errors to it. The following is just to minimize the risk of
// mistakes under future maintenance.
if len(errs) != 0 {
return errs
}
ty := planned.Type()
switch {
case ty.IsPrimitiveType():
return assertValueCompatiblePrimitive(planned, actual, path)
case ty.IsListType() || ty.IsMapType() || ty.IsTupleType():
return assertValueCompatibleCompositeWithKeys(planned, actual, path)
case ty.IsObjectType():
atys := ty.AttributeTypes()
return assertValueCompatibleObject(planned, actual, atys, path)
case ty.IsSetType():
return assertValueCompatibleSet(planned, actual, path)
default:
return nil // we don't have specialized checks for any other type kind
}
}
// assertValueCompatiblePrimitive compares two primitive-typed values for
// exact equality, reporting any mismatch as an error at the given path.
func assertValueCompatiblePrimitive(planned, actual cty.Value, path cty.Path) []error {
	if actual.Equals(planned).True() {
		return nil
	}
	return []error{path.NewErrorf("was %#v, but now %#v", planned, actual)}
}
// assertValueCompatibleCompositeWithKeys is the branch of
// assertValueCompatible for composite types whose elements are addressed by
// comparable keys/indices separate from their values — lists, maps, and
// tuples — and so can be compared on an element-by-element basis.
func assertValueCompatibleCompositeWithKeys(planned, actual cty.Value, path cty.Path) []error {
	var errs []error
	// Every planned element must still exist and remain compatible.
	for it := planned.ElementIterator(); it.Next(); {
		key, plannedElem := it.Element()
		if !actual.HasIndex(key).True() {
			errs = append(errs, path.NewErrorf("element %s has vanished", indexStrForErrors(key)))
			continue
		}
		actualElem := actual.Index(key)
		errs = append(errs, assertValueCompatible(plannedElem, actualElem, append(path, cty.IndexStep{Key: key}))...)
	}
	// No element may appear that wasn't already in the plan.
	for it := actual.ElementIterator(); it.Next(); {
		key, _ := it.Element()
		if !planned.HasIndex(key).True() {
			errs = append(errs, path.NewErrorf("new element %s has appeared", indexStrForErrors(key)))
		}
	}
	return errs
}
// assertValueCompatibleObject compares two object-typed values attribute by
// attribute. The caller has already verified both values share the same
// type, so both sides carry exactly the attributes listed in atys and we
// only need to test their values.
func assertValueCompatibleObject(planned, actual cty.Value, atys map[string]cty.Type, path cty.Path) []error {
	var errs []error
	for name := range atys {
		attrPath := append(path, cty.GetAttrStep{Name: name})
		errs = append(errs, assertValueCompatible(planned.GetAttr(name), actual.GetAttr(name), attrPath)...)
	}
	return errs
}
// assertValueCompatibleSet compares two set-typed values. Set elements can't
// be reliably correlated once an unknown element becomes known (that changes
// its identity), so beyond best-effort pairwise matching we only verify that
// the set did not grow: elements may coalesce as they become known, but the
// element count must never increase.
func assertValueCompatibleSet(planned, actual cty.Value, path cty.Path) []error {
	if !planned.IsKnown() || planned.IsNull() || actual.IsNull() {
		return nil
	}
	errs := assertSetValuesCompatible(planned, actual, path, func(plannedV, actualV cty.Value) bool {
		moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: actualV}))
		return len(moreErrs) == 0
	})
	if wantLen, gotLen := planned.LengthInt(), actual.LengthInt(); wantLen < gotLen {
		errs = append(errs, path.NewErrorf("length changed from %d to %d", wantLen, gotLen))
	}
	return errs
}

View File

@ -57,53 +57,65 @@ func normalizeObjectFromLegacySDK(val cty.Value, schema *configschema.Block) cty
switch blockS.Nesting {
case configschema.NestingSingle, configschema.NestingGroup:
if lv.IsKnown() {
if lv.IsNull() && blockS.Nesting == configschema.NestingGroup {
vals[name] = blockS.EmptyValue()
} else {
vals[name] = normalizeObjectFromLegacySDK(lv, &blockS.Block)
}
} else {
vals[name] = unknownBlockStub(&blockS.Block)
}
vals[name] = normalizeNestedBlockFromLegacySDKSingle(lv, blockS)
case configschema.NestingList:
switch {
case !lv.IsKnown():
vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)})
case lv.IsNull() || lv.LengthInt() == 0:
vals[name] = cty.ListValEmpty(blockS.Block.ImpliedType())
default:
subVals := make([]cty.Value, 0, lv.LengthInt())
for it := lv.ElementIterator(); it.Next(); {
_, subVal := it.Element()
subVals = append(subVals, normalizeObjectFromLegacySDK(subVal, &blockS.Block))
}
vals[name] = cty.ListVal(subVals)
}
vals[name] = normalizeNestedBlockFromLegacySDKList(lv, blockS)
case configschema.NestingSet:
switch {
case !lv.IsKnown():
vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)})
case lv.IsNull() || lv.LengthInt() == 0:
vals[name] = cty.SetValEmpty(blockS.Block.ImpliedType())
default:
subVals := make([]cty.Value, 0, lv.LengthInt())
for it := lv.ElementIterator(); it.Next(); {
_, subVal := it.Element()
subVals = append(subVals, normalizeObjectFromLegacySDK(subVal, &blockS.Block))
}
vals[name] = cty.SetVal(subVals)
}
default:
vals[name] = normalizeNestedBlockFromLegacySDKSet(lv, blockS)
case configschema.NestingMap:
// The legacy SDK doesn't support NestingMap, so we just assume
// maps are always okay. (If not, we would've detected and returned
// an error to the user before we got here.)
fallthrough
default:
vals[name] = lv
}
}
return cty.ObjectVal(vals)
}
// normalizeNestedBlockFromLegacySDKSingle normalizes a NestingSingle or
// NestingGroup block value produced by the legacy SDK.
func normalizeNestedBlockFromLegacySDKSingle(val cty.Value, blockS *configschema.NestedBlock) cty.Value {
	switch {
	case !val.IsKnown():
		return unknownBlockStub(&blockS.Block)
	case val.IsNull() && blockS.Nesting == configschema.NestingGroup:
		// A null NestingGroup block is normalized to its empty value rather
		// than left as null.
		return blockS.EmptyValue()
	default:
		return normalizeObjectFromLegacySDK(val, &blockS.Block)
	}
}
// normalizeNestedBlockFromLegacySDKList normalizes a NestingList block value
// produced by the legacy SDK, recursing into each element.
func normalizeNestedBlockFromLegacySDKList(val cty.Value, blockS *configschema.NestedBlock) cty.Value {
	if !val.IsKnown() {
		// A wholly-unknown list becomes a single-element list holding an
		// unknown block stub.
		return cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)})
	}
	if val.IsNull() || val.LengthInt() == 0 {
		return cty.ListValEmpty(blockS.Block.ImpliedType())
	}
	normalized := make([]cty.Value, 0, val.LengthInt())
	for it := val.ElementIterator(); it.Next(); {
		_, elem := it.Element()
		normalized = append(normalized, normalizeObjectFromLegacySDK(elem, &blockS.Block))
	}
	return cty.ListVal(normalized)
}
// normalizeNestedBlockFromLegacySDKSet normalizes a NestingSet block value
// produced by the legacy SDK, recursing into each element.
func normalizeNestedBlockFromLegacySDKSet(val cty.Value, blockS *configschema.NestedBlock) cty.Value {
	if !val.IsKnown() {
		// A wholly-unknown set becomes a single-element set holding an
		// unknown block stub.
		return cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)})
	}
	if val.IsNull() || val.LengthInt() == 0 {
		return cty.SetValEmpty(blockS.Block.ImpliedType())
	}
	normalized := make([]cty.Value, 0, val.LengthInt())
	for it := val.ElementIterator(); it.Next(); {
		_, elem := it.Element()
		normalized = append(normalized, normalizeObjectFromLegacySDK(elem, &blockS.Block))
	}
	return cty.SetVal(normalized)
}
// unknownBlockStub constructs an object value that approximates an unknown
// block by producing a known block object with all of its leaf attribute
// values set to unknown.

View File

@ -10,16 +10,15 @@ import (
"fmt"
"testing"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/golang/mock/gomock"
"github.com/google/go-cmp/cmp"
"github.com/zclconf/go-cty/cty"
"go.uber.org/mock/gomock"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/configs/hcl2shim"
mockproto "github.com/opentofu/opentofu/internal/plugin/mock_proto"
"github.com/opentofu/opentofu/internal/providers"
"github.com/opentofu/opentofu/internal/tfdiags"
"github.com/zclconf/go-cty/cty"
mockproto "github.com/opentofu/opentofu/internal/plugin/mock_proto"
proto "github.com/opentofu/opentofu/internal/tfplugin5"
)

View File

@ -9,13 +9,13 @@ import (
"io"
"testing"
"github.com/golang/mock/gomock"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/opentofu/opentofu/internal/configs/hcl2shim"
"github.com/opentofu/opentofu/internal/provisioners"
proto "github.com/opentofu/opentofu/internal/tfplugin5"
"github.com/zclconf/go-cty/cty"
"go.uber.org/mock/gomock"
mockproto "github.com/opentofu/opentofu/internal/plugin/mock_proto"
)

View File

@ -3,6 +3,6 @@
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
//go:generate go run github.com/golang/mock/mockgen -destination mock.go github.com/opentofu/opentofu/internal/tfplugin5 ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer
//go:generate go run go.uber.org/mock/mockgen -destination mock.go github.com/opentofu/opentofu/internal/tfplugin5 ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer
package mock_tfplugin5

View File

@ -1,5 +1,10 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/opentofu/opentofu/internal/tfplugin5 (interfaces: ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer)
//
// Generated by this command:
//
// mockgen -destination mock.go github.com/opentofu/opentofu/internal/tfplugin5 ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer
//
// Package mock_tfplugin5 is a generated GoMock package.
package mock_tfplugin5
@ -8,8 +13,8 @@ import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
tfplugin5 "github.com/opentofu/opentofu/internal/tfplugin5"
gomock "go.uber.org/mock/gomock"
grpc "google.golang.org/grpc"
metadata "google.golang.org/grpc/metadata"
)
@ -40,7 +45,7 @@ func (m *MockProviderClient) EXPECT() *MockProviderClientMockRecorder {
// ApplyResourceChange mocks base method.
func (m *MockProviderClient) ApplyResourceChange(arg0 context.Context, arg1 *tfplugin5.ApplyResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin5.ApplyResourceChange_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -51,16 +56,16 @@ func (m *MockProviderClient) ApplyResourceChange(arg0 context.Context, arg1 *tfp
}
// ApplyResourceChange indicates an expected call of ApplyResourceChange.
func (mr *MockProviderClientMockRecorder) ApplyResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) ApplyResourceChange(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyResourceChange", reflect.TypeOf((*MockProviderClient)(nil).ApplyResourceChange), varargs...)
}
// CallFunction mocks base method.
func (m *MockProviderClient) CallFunction(arg0 context.Context, arg1 *tfplugin5.CallFunction_Request, arg2 ...grpc.CallOption) (*tfplugin5.CallFunction_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -71,16 +76,36 @@ func (m *MockProviderClient) CallFunction(arg0 context.Context, arg1 *tfplugin5.
}
// CallFunction indicates an expected call of CallFunction.
func (mr *MockProviderClientMockRecorder) CallFunction(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) CallFunction(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CallFunction", reflect.TypeOf((*MockProviderClient)(nil).CallFunction), varargs...)
}
// CloseEphemeralResource mocks base method.
func (m *MockProviderClient) CloseEphemeralResource(arg0 context.Context, arg1 *tfplugin5.CloseEphemeralResource_Request, arg2 ...grpc.CallOption) (*tfplugin5.CloseEphemeralResource_Response, error) {
m.ctrl.T.Helper()
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "CloseEphemeralResource", varargs...)
ret0, _ := ret[0].(*tfplugin5.CloseEphemeralResource_Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CloseEphemeralResource indicates an expected call of CloseEphemeralResource.
func (mr *MockProviderClientMockRecorder) CloseEphemeralResource(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseEphemeralResource", reflect.TypeOf((*MockProviderClient)(nil).CloseEphemeralResource), varargs...)
}
// Configure mocks base method.
func (m *MockProviderClient) Configure(arg0 context.Context, arg1 *tfplugin5.Configure_Request, arg2 ...grpc.CallOption) (*tfplugin5.Configure_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -91,16 +116,16 @@ func (m *MockProviderClient) Configure(arg0 context.Context, arg1 *tfplugin5.Con
}
// Configure indicates an expected call of Configure.
func (mr *MockProviderClientMockRecorder) Configure(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) Configure(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Configure", reflect.TypeOf((*MockProviderClient)(nil).Configure), varargs...)
}
// GetFunctions mocks base method.
func (m *MockProviderClient) GetFunctions(arg0 context.Context, arg1 *tfplugin5.GetFunctions_Request, arg2 ...grpc.CallOption) (*tfplugin5.GetFunctions_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -111,16 +136,16 @@ func (m *MockProviderClient) GetFunctions(arg0 context.Context, arg1 *tfplugin5.
}
// GetFunctions indicates an expected call of GetFunctions.
func (mr *MockProviderClientMockRecorder) GetFunctions(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) GetFunctions(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFunctions", reflect.TypeOf((*MockProviderClient)(nil).GetFunctions), varargs...)
}
// GetMetadata mocks base method.
func (m *MockProviderClient) GetMetadata(arg0 context.Context, arg1 *tfplugin5.GetMetadata_Request, arg2 ...grpc.CallOption) (*tfplugin5.GetMetadata_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -131,16 +156,16 @@ func (m *MockProviderClient) GetMetadata(arg0 context.Context, arg1 *tfplugin5.G
}
// GetMetadata indicates an expected call of GetMetadata.
func (mr *MockProviderClientMockRecorder) GetMetadata(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) GetMetadata(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetadata", reflect.TypeOf((*MockProviderClient)(nil).GetMetadata), varargs...)
}
// GetSchema mocks base method.
func (m *MockProviderClient) GetSchema(arg0 context.Context, arg1 *tfplugin5.GetProviderSchema_Request, arg2 ...grpc.CallOption) (*tfplugin5.GetProviderSchema_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -151,16 +176,16 @@ func (m *MockProviderClient) GetSchema(arg0 context.Context, arg1 *tfplugin5.Get
}
// GetSchema indicates an expected call of GetSchema.
func (mr *MockProviderClientMockRecorder) GetSchema(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) GetSchema(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSchema", reflect.TypeOf((*MockProviderClient)(nil).GetSchema), varargs...)
}
// ImportResourceState mocks base method.
func (m *MockProviderClient) ImportResourceState(arg0 context.Context, arg1 *tfplugin5.ImportResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin5.ImportResourceState_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -171,16 +196,16 @@ func (m *MockProviderClient) ImportResourceState(arg0 context.Context, arg1 *tfp
}
// ImportResourceState indicates an expected call of ImportResourceState.
func (mr *MockProviderClientMockRecorder) ImportResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) ImportResourceState(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportResourceState", reflect.TypeOf((*MockProviderClient)(nil).ImportResourceState), varargs...)
}
// MoveResourceState mocks base method.
func (m *MockProviderClient) MoveResourceState(arg0 context.Context, arg1 *tfplugin5.MoveResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin5.MoveResourceState_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -191,16 +216,36 @@ func (m *MockProviderClient) MoveResourceState(arg0 context.Context, arg1 *tfplu
}
// MoveResourceState indicates an expected call of MoveResourceState.
func (mr *MockProviderClientMockRecorder) MoveResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) MoveResourceState(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MoveResourceState", reflect.TypeOf((*MockProviderClient)(nil).MoveResourceState), varargs...)
}
// OpenEphemeralResource mocks base method.
func (m *MockProviderClient) OpenEphemeralResource(arg0 context.Context, arg1 *tfplugin5.OpenEphemeralResource_Request, arg2 ...grpc.CallOption) (*tfplugin5.OpenEphemeralResource_Response, error) {
m.ctrl.T.Helper()
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "OpenEphemeralResource", varargs...)
ret0, _ := ret[0].(*tfplugin5.OpenEphemeralResource_Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// OpenEphemeralResource indicates an expected call of OpenEphemeralResource.
func (mr *MockProviderClientMockRecorder) OpenEphemeralResource(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenEphemeralResource", reflect.TypeOf((*MockProviderClient)(nil).OpenEphemeralResource), varargs...)
}
// PlanResourceChange mocks base method.
func (m *MockProviderClient) PlanResourceChange(arg0 context.Context, arg1 *tfplugin5.PlanResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin5.PlanResourceChange_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -211,16 +256,16 @@ func (m *MockProviderClient) PlanResourceChange(arg0 context.Context, arg1 *tfpl
}
// PlanResourceChange indicates an expected call of PlanResourceChange.
func (mr *MockProviderClientMockRecorder) PlanResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) PlanResourceChange(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PlanResourceChange", reflect.TypeOf((*MockProviderClient)(nil).PlanResourceChange), varargs...)
}
// PrepareProviderConfig mocks base method.
func (m *MockProviderClient) PrepareProviderConfig(arg0 context.Context, arg1 *tfplugin5.PrepareProviderConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.PrepareProviderConfig_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -231,16 +276,16 @@ func (m *MockProviderClient) PrepareProviderConfig(arg0 context.Context, arg1 *t
}
// PrepareProviderConfig indicates an expected call of PrepareProviderConfig.
func (mr *MockProviderClientMockRecorder) PrepareProviderConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) PrepareProviderConfig(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareProviderConfig", reflect.TypeOf((*MockProviderClient)(nil).PrepareProviderConfig), varargs...)
}
// ReadDataSource mocks base method.
func (m *MockProviderClient) ReadDataSource(arg0 context.Context, arg1 *tfplugin5.ReadDataSource_Request, arg2 ...grpc.CallOption) (*tfplugin5.ReadDataSource_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -251,16 +296,16 @@ func (m *MockProviderClient) ReadDataSource(arg0 context.Context, arg1 *tfplugin
}
// ReadDataSource indicates an expected call of ReadDataSource.
func (mr *MockProviderClientMockRecorder) ReadDataSource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) ReadDataSource(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadDataSource", reflect.TypeOf((*MockProviderClient)(nil).ReadDataSource), varargs...)
}
// ReadResource mocks base method.
func (m *MockProviderClient) ReadResource(arg0 context.Context, arg1 *tfplugin5.ReadResource_Request, arg2 ...grpc.CallOption) (*tfplugin5.ReadResource_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -271,16 +316,36 @@ func (m *MockProviderClient) ReadResource(arg0 context.Context, arg1 *tfplugin5.
}
// ReadResource indicates an expected call of ReadResource.
func (mr *MockProviderClientMockRecorder) ReadResource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) ReadResource(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadResource", reflect.TypeOf((*MockProviderClient)(nil).ReadResource), varargs...)
}
// RenewEphemeralResource mocks base method.
func (m *MockProviderClient) RenewEphemeralResource(arg0 context.Context, arg1 *tfplugin5.RenewEphemeralResource_Request, arg2 ...grpc.CallOption) (*tfplugin5.RenewEphemeralResource_Response, error) {
m.ctrl.T.Helper()
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "RenewEphemeralResource", varargs...)
ret0, _ := ret[0].(*tfplugin5.RenewEphemeralResource_Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RenewEphemeralResource indicates an expected call of RenewEphemeralResource.
func (mr *MockProviderClientMockRecorder) RenewEphemeralResource(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RenewEphemeralResource", reflect.TypeOf((*MockProviderClient)(nil).RenewEphemeralResource), varargs...)
}
// Stop mocks base method.
func (m *MockProviderClient) Stop(arg0 context.Context, arg1 *tfplugin5.Stop_Request, arg2 ...grpc.CallOption) (*tfplugin5.Stop_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -291,16 +356,16 @@ func (m *MockProviderClient) Stop(arg0 context.Context, arg1 *tfplugin5.Stop_Req
}
// Stop indicates an expected call of Stop.
func (mr *MockProviderClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) Stop(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockProviderClient)(nil).Stop), varargs...)
}
// UpgradeResourceState mocks base method.
func (m *MockProviderClient) UpgradeResourceState(arg0 context.Context, arg1 *tfplugin5.UpgradeResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin5.UpgradeResourceState_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -311,16 +376,16 @@ func (m *MockProviderClient) UpgradeResourceState(arg0 context.Context, arg1 *tf
}
// UpgradeResourceState indicates an expected call of UpgradeResourceState.
func (mr *MockProviderClientMockRecorder) UpgradeResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) UpgradeResourceState(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeResourceState", reflect.TypeOf((*MockProviderClient)(nil).UpgradeResourceState), varargs...)
}
// ValidateDataSourceConfig mocks base method.
func (m *MockProviderClient) ValidateDataSourceConfig(arg0 context.Context, arg1 *tfplugin5.ValidateDataSourceConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateDataSourceConfig_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -331,16 +396,36 @@ func (m *MockProviderClient) ValidateDataSourceConfig(arg0 context.Context, arg1
}
// ValidateDataSourceConfig indicates an expected call of ValidateDataSourceConfig.
func (mr *MockProviderClientMockRecorder) ValidateDataSourceConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) ValidateDataSourceConfig(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDataSourceConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateDataSourceConfig), varargs...)
}
// ValidateEphemeralResourceConfig mocks base method.
func (m *MockProviderClient) ValidateEphemeralResourceConfig(arg0 context.Context, arg1 *tfplugin5.ValidateEphemeralResourceConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateEphemeralResourceConfig_Response, error) {
m.ctrl.T.Helper()
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ValidateEphemeralResourceConfig", varargs...)
ret0, _ := ret[0].(*tfplugin5.ValidateEphemeralResourceConfig_Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ValidateEphemeralResourceConfig indicates an expected call of ValidateEphemeralResourceConfig.
func (mr *MockProviderClientMockRecorder) ValidateEphemeralResourceConfig(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateEphemeralResourceConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateEphemeralResourceConfig), varargs...)
}
// ValidateResourceTypeConfig mocks base method.
func (m *MockProviderClient) ValidateResourceTypeConfig(arg0 context.Context, arg1 *tfplugin5.ValidateResourceTypeConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateResourceTypeConfig_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -351,9 +436,9 @@ func (m *MockProviderClient) ValidateResourceTypeConfig(arg0 context.Context, ar
}
// ValidateResourceTypeConfig indicates an expected call of ValidateResourceTypeConfig.
func (mr *MockProviderClientMockRecorder) ValidateResourceTypeConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) ValidateResourceTypeConfig(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateResourceTypeConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateResourceTypeConfig), varargs...)
}
@ -383,7 +468,7 @@ func (m *MockProvisionerClient) EXPECT() *MockProvisionerClientMockRecorder {
// GetSchema mocks base method.
func (m *MockProvisionerClient) GetSchema(arg0 context.Context, arg1 *tfplugin5.GetProvisionerSchema_Request, arg2 ...grpc.CallOption) (*tfplugin5.GetProvisionerSchema_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -394,16 +479,16 @@ func (m *MockProvisionerClient) GetSchema(arg0 context.Context, arg1 *tfplugin5.
}
// GetSchema indicates an expected call of GetSchema.
func (mr *MockProvisionerClientMockRecorder) GetSchema(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProvisionerClientMockRecorder) GetSchema(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSchema", reflect.TypeOf((*MockProvisionerClient)(nil).GetSchema), varargs...)
}
// ProvisionResource mocks base method.
func (m *MockProvisionerClient) ProvisionResource(arg0 context.Context, arg1 *tfplugin5.ProvisionResource_Request, arg2 ...grpc.CallOption) (tfplugin5.Provisioner_ProvisionResourceClient, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -414,16 +499,16 @@ func (m *MockProvisionerClient) ProvisionResource(arg0 context.Context, arg1 *tf
}
// ProvisionResource indicates an expected call of ProvisionResource.
func (mr *MockProvisionerClientMockRecorder) ProvisionResource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProvisionerClientMockRecorder) ProvisionResource(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProvisionResource", reflect.TypeOf((*MockProvisionerClient)(nil).ProvisionResource), varargs...)
}
// Stop mocks base method.
func (m *MockProvisionerClient) Stop(arg0 context.Context, arg1 *tfplugin5.Stop_Request, arg2 ...grpc.CallOption) (*tfplugin5.Stop_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -434,16 +519,16 @@ func (m *MockProvisionerClient) Stop(arg0 context.Context, arg1 *tfplugin5.Stop_
}
// Stop indicates an expected call of Stop.
func (mr *MockProvisionerClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProvisionerClientMockRecorder) Stop(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockProvisionerClient)(nil).Stop), varargs...)
}
// ValidateProvisionerConfig mocks base method.
func (m *MockProvisionerClient) ValidateProvisionerConfig(arg0 context.Context, arg1 *tfplugin5.ValidateProvisionerConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateProvisionerConfig_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -454,9 +539,9 @@ func (m *MockProvisionerClient) ValidateProvisionerConfig(arg0 context.Context,
}
// ValidateProvisionerConfig indicates an expected call of ValidateProvisionerConfig.
func (mr *MockProvisionerClientMockRecorder) ValidateProvisionerConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProvisionerClientMockRecorder) ValidateProvisionerConfig(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateProvisionerConfig", reflect.TypeOf((*MockProvisionerClient)(nil).ValidateProvisionerConfig), varargs...)
}
@ -542,7 +627,7 @@ func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) Recv() *gomock.Ca
}
// RecvMsg mocks base method.
func (m *MockProvisioner_ProvisionResourceClient) RecvMsg(arg0 interface{}) error {
func (m *MockProvisioner_ProvisionResourceClient) RecvMsg(arg0 any) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RecvMsg", arg0)
ret0, _ := ret[0].(error)
@ -550,13 +635,13 @@ func (m *MockProvisioner_ProvisionResourceClient) RecvMsg(arg0 interface{}) erro
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call {
func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) RecvMsg(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).RecvMsg), arg0)
}
// SendMsg mocks base method.
func (m *MockProvisioner_ProvisionResourceClient) SendMsg(arg0 interface{}) error {
func (m *MockProvisioner_ProvisionResourceClient) SendMsg(arg0 any) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SendMsg", arg0)
ret0, _ := ret[0].(error)
@ -564,7 +649,7 @@ func (m *MockProvisioner_ProvisionResourceClient) SendMsg(arg0 interface{}) erro
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) SendMsg(arg0 interface{}) *gomock.Call {
func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) SendMsg(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceClient)(nil).SendMsg), arg0)
}
@ -621,7 +706,7 @@ func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) Context() *gomock
}
// RecvMsg mocks base method.
func (m *MockProvisioner_ProvisionResourceServer) RecvMsg(arg0 interface{}) error {
func (m *MockProvisioner_ProvisionResourceServer) RecvMsg(arg0 any) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RecvMsg", arg0)
ret0, _ := ret[0].(error)
@ -629,7 +714,7 @@ func (m *MockProvisioner_ProvisionResourceServer) RecvMsg(arg0 interface{}) erro
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) RecvMsg(arg0 interface{}) *gomock.Call {
func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) RecvMsg(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).RecvMsg), arg0)
}
@ -643,7 +728,7 @@ func (m *MockProvisioner_ProvisionResourceServer) Send(arg0 *tfplugin5.Provision
}
// Send indicates an expected call of Send.
func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) Send(arg0 interface{}) *gomock.Call {
func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) Send(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).Send), arg0)
}
@ -657,13 +742,13 @@ func (m *MockProvisioner_ProvisionResourceServer) SendHeader(arg0 metadata.MD) e
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call {
func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SendHeader(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m *MockProvisioner_ProvisionResourceServer) SendMsg(arg0 interface{}) error {
func (m *MockProvisioner_ProvisionResourceServer) SendMsg(arg0 any) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SendMsg", arg0)
ret0, _ := ret[0].(error)
@ -671,7 +756,7 @@ func (m *MockProvisioner_ProvisionResourceServer) SendMsg(arg0 interface{}) erro
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SendMsg(arg0 interface{}) *gomock.Call {
func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SendMsg(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SendMsg), arg0)
}
@ -685,7 +770,7 @@ func (m *MockProvisioner_ProvisionResourceServer) SetHeader(arg0 metadata.MD) er
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call {
func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SetHeader(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SetHeader), arg0)
}
@ -697,7 +782,7 @@ func (m *MockProvisioner_ProvisionResourceServer) SetTrailer(arg0 metadata.MD) {
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call {
func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) SetTrailer(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockProvisioner_ProvisionResourceServer)(nil).SetTrailer), arg0)
}

View File

@ -10,17 +10,16 @@ import (
"fmt"
"testing"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/golang/mock/gomock"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/zclconf/go-cty/cty"
"go.uber.org/mock/gomock"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/configs/hcl2shim"
mockproto "github.com/opentofu/opentofu/internal/plugin6/mock_proto"
"github.com/opentofu/opentofu/internal/providers"
"github.com/opentofu/opentofu/internal/tfdiags"
"github.com/zclconf/go-cty/cty"
mockproto "github.com/opentofu/opentofu/internal/plugin6/mock_proto"
proto "github.com/opentofu/opentofu/internal/tfplugin6"
)

View File

@ -3,6 +3,6 @@
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
//go:generate go run github.com/golang/mock/mockgen -destination mock.go github.com/opentofu/opentofu/internal/tfplugin6 ProviderClient
//go:generate go run go.uber.org/mock/mockgen -destination mock.go github.com/opentofu/opentofu/internal/tfplugin6 ProviderClient
package mock_tfplugin6

View File

@ -1,5 +1,10 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/opentofu/opentofu/internal/tfplugin6 (interfaces: ProviderClient)
//
// Generated by this command:
//
// mockgen -destination mock.go github.com/opentofu/opentofu/internal/tfplugin6 ProviderClient
//
// Package mock_tfplugin6 is a generated GoMock package.
package mock_tfplugin6
@ -8,8 +13,8 @@ import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
tfplugin6 "github.com/opentofu/opentofu/internal/tfplugin6"
gomock "go.uber.org/mock/gomock"
grpc "google.golang.org/grpc"
)
@ -39,7 +44,7 @@ func (m *MockProviderClient) EXPECT() *MockProviderClientMockRecorder {
// ApplyResourceChange mocks base method.
func (m *MockProviderClient) ApplyResourceChange(arg0 context.Context, arg1 *tfplugin6.ApplyResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin6.ApplyResourceChange_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -50,16 +55,16 @@ func (m *MockProviderClient) ApplyResourceChange(arg0 context.Context, arg1 *tfp
}
// ApplyResourceChange indicates an expected call of ApplyResourceChange.
func (mr *MockProviderClientMockRecorder) ApplyResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) ApplyResourceChange(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyResourceChange", reflect.TypeOf((*MockProviderClient)(nil).ApplyResourceChange), varargs...)
}
// CallFunction mocks base method.
func (m *MockProviderClient) CallFunction(arg0 context.Context, arg1 *tfplugin6.CallFunction_Request, arg2 ...grpc.CallOption) (*tfplugin6.CallFunction_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -70,16 +75,36 @@ func (m *MockProviderClient) CallFunction(arg0 context.Context, arg1 *tfplugin6.
}
// CallFunction indicates an expected call of CallFunction.
func (mr *MockProviderClientMockRecorder) CallFunction(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) CallFunction(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CallFunction", reflect.TypeOf((*MockProviderClient)(nil).CallFunction), varargs...)
}
// CloseEphemeralResource mocks base method.
func (m *MockProviderClient) CloseEphemeralResource(arg0 context.Context, arg1 *tfplugin6.CloseEphemeralResource_Request, arg2 ...grpc.CallOption) (*tfplugin6.CloseEphemeralResource_Response, error) {
m.ctrl.T.Helper()
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "CloseEphemeralResource", varargs...)
ret0, _ := ret[0].(*tfplugin6.CloseEphemeralResource_Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CloseEphemeralResource indicates an expected call of CloseEphemeralResource.
func (mr *MockProviderClientMockRecorder) CloseEphemeralResource(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseEphemeralResource", reflect.TypeOf((*MockProviderClient)(nil).CloseEphemeralResource), varargs...)
}
// ConfigureProvider mocks base method.
func (m *MockProviderClient) ConfigureProvider(arg0 context.Context, arg1 *tfplugin6.ConfigureProvider_Request, arg2 ...grpc.CallOption) (*tfplugin6.ConfigureProvider_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -90,16 +115,16 @@ func (m *MockProviderClient) ConfigureProvider(arg0 context.Context, arg1 *tfplu
}
// ConfigureProvider indicates an expected call of ConfigureProvider.
func (mr *MockProviderClientMockRecorder) ConfigureProvider(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) ConfigureProvider(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigureProvider", reflect.TypeOf((*MockProviderClient)(nil).ConfigureProvider), varargs...)
}
// GetFunctions mocks base method.
func (m *MockProviderClient) GetFunctions(arg0 context.Context, arg1 *tfplugin6.GetFunctions_Request, arg2 ...grpc.CallOption) (*tfplugin6.GetFunctions_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -110,16 +135,16 @@ func (m *MockProviderClient) GetFunctions(arg0 context.Context, arg1 *tfplugin6.
}
// GetFunctions indicates an expected call of GetFunctions.
func (mr *MockProviderClientMockRecorder) GetFunctions(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) GetFunctions(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFunctions", reflect.TypeOf((*MockProviderClient)(nil).GetFunctions), varargs...)
}
// GetMetadata mocks base method.
func (m *MockProviderClient) GetMetadata(arg0 context.Context, arg1 *tfplugin6.GetMetadata_Request, arg2 ...grpc.CallOption) (*tfplugin6.GetMetadata_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -130,16 +155,16 @@ func (m *MockProviderClient) GetMetadata(arg0 context.Context, arg1 *tfplugin6.G
}
// GetMetadata indicates an expected call of GetMetadata.
func (mr *MockProviderClientMockRecorder) GetMetadata(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) GetMetadata(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetadata", reflect.TypeOf((*MockProviderClient)(nil).GetMetadata), varargs...)
}
// GetProviderSchema mocks base method.
func (m *MockProviderClient) GetProviderSchema(arg0 context.Context, arg1 *tfplugin6.GetProviderSchema_Request, arg2 ...grpc.CallOption) (*tfplugin6.GetProviderSchema_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -150,16 +175,16 @@ func (m *MockProviderClient) GetProviderSchema(arg0 context.Context, arg1 *tfplu
}
// GetProviderSchema indicates an expected call of GetProviderSchema.
func (mr *MockProviderClientMockRecorder) GetProviderSchema(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) GetProviderSchema(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProviderSchema", reflect.TypeOf((*MockProviderClient)(nil).GetProviderSchema), varargs...)
}
// ImportResourceState mocks base method.
func (m *MockProviderClient) ImportResourceState(arg0 context.Context, arg1 *tfplugin6.ImportResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin6.ImportResourceState_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -170,16 +195,16 @@ func (m *MockProviderClient) ImportResourceState(arg0 context.Context, arg1 *tfp
}
// ImportResourceState indicates an expected call of ImportResourceState.
func (mr *MockProviderClientMockRecorder) ImportResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) ImportResourceState(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportResourceState", reflect.TypeOf((*MockProviderClient)(nil).ImportResourceState), varargs...)
}
// MoveResourceState mocks base method.
func (m *MockProviderClient) MoveResourceState(arg0 context.Context, arg1 *tfplugin6.MoveResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin6.MoveResourceState_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -190,16 +215,36 @@ func (m *MockProviderClient) MoveResourceState(arg0 context.Context, arg1 *tfplu
}
// MoveResourceState indicates an expected call of MoveResourceState.
func (mr *MockProviderClientMockRecorder) MoveResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) MoveResourceState(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MoveResourceState", reflect.TypeOf((*MockProviderClient)(nil).MoveResourceState), varargs...)
}
// OpenEphemeralResource mocks base method.
func (m *MockProviderClient) OpenEphemeralResource(arg0 context.Context, arg1 *tfplugin6.OpenEphemeralResource_Request, arg2 ...grpc.CallOption) (*tfplugin6.OpenEphemeralResource_Response, error) {
m.ctrl.T.Helper()
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "OpenEphemeralResource", varargs...)
ret0, _ := ret[0].(*tfplugin6.OpenEphemeralResource_Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// OpenEphemeralResource indicates an expected call of OpenEphemeralResource.
func (mr *MockProviderClientMockRecorder) OpenEphemeralResource(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenEphemeralResource", reflect.TypeOf((*MockProviderClient)(nil).OpenEphemeralResource), varargs...)
}
// PlanResourceChange mocks base method.
func (m *MockProviderClient) PlanResourceChange(arg0 context.Context, arg1 *tfplugin6.PlanResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin6.PlanResourceChange_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -210,16 +255,16 @@ func (m *MockProviderClient) PlanResourceChange(arg0 context.Context, arg1 *tfpl
}
// PlanResourceChange indicates an expected call of PlanResourceChange.
func (mr *MockProviderClientMockRecorder) PlanResourceChange(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) PlanResourceChange(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PlanResourceChange", reflect.TypeOf((*MockProviderClient)(nil).PlanResourceChange), varargs...)
}
// ReadDataSource mocks base method.
func (m *MockProviderClient) ReadDataSource(arg0 context.Context, arg1 *tfplugin6.ReadDataSource_Request, arg2 ...grpc.CallOption) (*tfplugin6.ReadDataSource_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -230,16 +275,16 @@ func (m *MockProviderClient) ReadDataSource(arg0 context.Context, arg1 *tfplugin
}
// ReadDataSource indicates an expected call of ReadDataSource.
func (mr *MockProviderClientMockRecorder) ReadDataSource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) ReadDataSource(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadDataSource", reflect.TypeOf((*MockProviderClient)(nil).ReadDataSource), varargs...)
}
// ReadResource mocks base method.
func (m *MockProviderClient) ReadResource(arg0 context.Context, arg1 *tfplugin6.ReadResource_Request, arg2 ...grpc.CallOption) (*tfplugin6.ReadResource_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -250,16 +295,36 @@ func (m *MockProviderClient) ReadResource(arg0 context.Context, arg1 *tfplugin6.
}
// ReadResource indicates an expected call of ReadResource.
func (mr *MockProviderClientMockRecorder) ReadResource(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) ReadResource(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadResource", reflect.TypeOf((*MockProviderClient)(nil).ReadResource), varargs...)
}
// RenewEphemeralResource mocks base method.
func (m *MockProviderClient) RenewEphemeralResource(arg0 context.Context, arg1 *tfplugin6.RenewEphemeralResource_Request, arg2 ...grpc.CallOption) (*tfplugin6.RenewEphemeralResource_Response, error) {
m.ctrl.T.Helper()
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "RenewEphemeralResource", varargs...)
ret0, _ := ret[0].(*tfplugin6.RenewEphemeralResource_Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RenewEphemeralResource indicates an expected call of RenewEphemeralResource.
func (mr *MockProviderClientMockRecorder) RenewEphemeralResource(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RenewEphemeralResource", reflect.TypeOf((*MockProviderClient)(nil).RenewEphemeralResource), varargs...)
}
// StopProvider mocks base method.
func (m *MockProviderClient) StopProvider(arg0 context.Context, arg1 *tfplugin6.StopProvider_Request, arg2 ...grpc.CallOption) (*tfplugin6.StopProvider_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -270,16 +335,16 @@ func (m *MockProviderClient) StopProvider(arg0 context.Context, arg1 *tfplugin6.
}
// StopProvider indicates an expected call of StopProvider.
func (mr *MockProviderClientMockRecorder) StopProvider(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) StopProvider(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopProvider", reflect.TypeOf((*MockProviderClient)(nil).StopProvider), varargs...)
}
// UpgradeResourceState mocks base method.
func (m *MockProviderClient) UpgradeResourceState(arg0 context.Context, arg1 *tfplugin6.UpgradeResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin6.UpgradeResourceState_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -290,16 +355,16 @@ func (m *MockProviderClient) UpgradeResourceState(arg0 context.Context, arg1 *tf
}
// UpgradeResourceState indicates an expected call of UpgradeResourceState.
func (mr *MockProviderClientMockRecorder) UpgradeResourceState(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) UpgradeResourceState(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeResourceState", reflect.TypeOf((*MockProviderClient)(nil).UpgradeResourceState), varargs...)
}
// ValidateDataResourceConfig mocks base method.
func (m *MockProviderClient) ValidateDataResourceConfig(arg0 context.Context, arg1 *tfplugin6.ValidateDataResourceConfig_Request, arg2 ...grpc.CallOption) (*tfplugin6.ValidateDataResourceConfig_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -310,16 +375,36 @@ func (m *MockProviderClient) ValidateDataResourceConfig(arg0 context.Context, ar
}
// ValidateDataResourceConfig indicates an expected call of ValidateDataResourceConfig.
func (mr *MockProviderClientMockRecorder) ValidateDataResourceConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) ValidateDataResourceConfig(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDataResourceConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateDataResourceConfig), varargs...)
}
// ValidateEphemeralResourceConfig mocks base method.
func (m *MockProviderClient) ValidateEphemeralResourceConfig(arg0 context.Context, arg1 *tfplugin6.ValidateEphemeralResourceConfig_Request, arg2 ...grpc.CallOption) (*tfplugin6.ValidateEphemeralResourceConfig_Response, error) {
m.ctrl.T.Helper()
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ValidateEphemeralResourceConfig", varargs...)
ret0, _ := ret[0].(*tfplugin6.ValidateEphemeralResourceConfig_Response)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ValidateEphemeralResourceConfig indicates an expected call of ValidateEphemeralResourceConfig.
func (mr *MockProviderClientMockRecorder) ValidateEphemeralResourceConfig(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateEphemeralResourceConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateEphemeralResourceConfig), varargs...)
}
// ValidateProviderConfig mocks base method.
func (m *MockProviderClient) ValidateProviderConfig(arg0 context.Context, arg1 *tfplugin6.ValidateProviderConfig_Request, arg2 ...grpc.CallOption) (*tfplugin6.ValidateProviderConfig_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -330,16 +415,16 @@ func (m *MockProviderClient) ValidateProviderConfig(arg0 context.Context, arg1 *
}
// ValidateProviderConfig indicates an expected call of ValidateProviderConfig.
func (mr *MockProviderClientMockRecorder) ValidateProviderConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) ValidateProviderConfig(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateProviderConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateProviderConfig), varargs...)
}
// ValidateResourceConfig mocks base method.
func (m *MockProviderClient) ValidateResourceConfig(arg0 context.Context, arg1 *tfplugin6.ValidateResourceConfig_Request, arg2 ...grpc.CallOption) (*tfplugin6.ValidateResourceConfig_Response, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
varargs := []any{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
@ -350,8 +435,8 @@ func (m *MockProviderClient) ValidateResourceConfig(arg0 context.Context, arg1 *
}
// ValidateResourceConfig indicates an expected call of ValidateResourceConfig.
func (mr *MockProviderClientMockRecorder) ValidateResourceConfig(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
func (mr *MockProviderClientMockRecorder) ValidateResourceConfig(arg0, arg1 any, arg2 ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
varargs := append([]any{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateResourceConfig", reflect.TypeOf((*MockProviderClient)(nil).ValidateResourceConfig), varargs...)
}

File diff suppressed because it is too large Load Diff

View File

@ -1 +1 @@
../../docs/plugin-protocol/tfplugin5.5.proto
../../docs/plugin-protocol/tfplugin5.7.proto

File diff suppressed because it is too large Load Diff

View File

@ -1 +1 @@
../../docs/plugin-protocol/tfplugin6.5.proto
../../docs/plugin-protocol/tfplugin6.7.proto

View File

@ -621,6 +621,61 @@ resource "test_object" "x" {
}
// This test is a copy and paste from TestContext2Apply_destroyWithDeposed
// with modifications to test the same scenario with a dynamic provider instance.
func TestContext2Apply_destroyWithDeposedWithDynamicProvider(t *testing.T) {
m := testModuleInline(t, map[string]string{
"main.tf": `
provider "test" {
alias = "for_eached"
for_each = {a: {}}
}
resource "test_object" "x" {
test_string = "ok"
lifecycle {
create_before_destroy = true
}
provider = test.for_eached["a"]
}`,
})
p := simpleMockProvider()
deposedKey := states.NewDeposedKey()
state := states.NewState()
root := state.EnsureModule(addrs.RootModuleInstance)
root.SetResourceInstanceDeposed(
mustResourceInstanceAddr("test_object.x").Resource,
deposedKey,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectTainted,
AttrsJSON: []byte(`{"test_string":"deposed"}`),
},
mustProviderConfig(`provider["registry.opentofu.org/hashicorp/test"].for_eached`),
addrs.StringKey("a"),
)
ctx := testContext2(t, &ContextOpts{
Providers: map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
},
})
plan, diags := ctx.Plan(context.Background(), m, state, &PlanOpts{
Mode: plans.DestroyMode,
})
if diags.HasErrors() {
t.Fatalf("plan: %s", diags.Err())
}
_, diags = ctx.Apply(context.Background(), plan, m)
if diags.HasErrors() {
t.Fatalf("apply: %s", diags.Err())
}
}
func TestContext2Apply_nullableVariables(t *testing.T) {
m := testModule(t, "apply-nullable-variables")
state := states.NewState()
@ -2379,6 +2434,67 @@ func TestContext2Apply_forgetOrphanAndDeposed(t *testing.T) {
}
}
// This test is a copy and paste from TestContext2Apply_forgetOrphanAndDeposed
// with modifications to test the same scenario with a dynamic provider instance.
func TestContext2Apply_forgetOrphanAndDeposedWithDynamicProvider(t *testing.T) {
desposedKey := states.DeposedKey("deposed")
addr := "aws_instance.baz"
m := testModuleInline(t, map[string]string{
"main.tf": `
provider aws {
alias = "for_eached"
for_each = {a: {}}
}
removed {
from = aws_instance.baz
}
`,
})
p := testProvider("aws")
state := states.NewState()
root := state.EnsureModule(addrs.RootModuleInstance)
root.SetResourceInstanceCurrent(
mustResourceInstanceAddr(addr).Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"id":"bar"}`),
},
mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"].for_eached`),
addrs.StringKey("a"),
)
root.SetResourceInstanceDeposed(
mustResourceInstanceAddr(addr).Resource,
desposedKey,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectTainted,
AttrsJSON: []byte(`{"id":"bar"}`),
Dependencies: []addrs.ConfigResource{},
},
mustProviderConfig(`provider["registry.opentofu.org/hashicorp/aws"].for_eached`),
addrs.StringKey("a"),
)
ctx := testContext2(t, &ContextOpts{
Providers: map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
},
})
p.PlanResourceChangeFn = testDiffFn
plan, diags := ctx.Plan(context.Background(), m, state, DefaultPlanOpts)
assertNoErrors(t, diags)
s, diags := ctx.Apply(context.Background(), plan, m)
if diags.HasErrors() {
t.Fatalf("diags: %s", diags.Err())
}
if !s.Empty() {
t.Fatalf("State should be empty")
}
}
func TestContext2Apply_providerExpandWithTargetOrExclude(t *testing.T) {
// This test is covering a potentially-tricky interaction between the
// logic that updates the provider instance references for resource

View File

@ -7042,6 +7042,88 @@ import {
}
}
func TestContext2Plan_providerForEachWithOrphanResourceInstanceNotUsingForEach(t *testing.T) {
// This test is to cover the bug reported in this issue:
// https://github.com/opentofu/opentofu/issues/2334
//
// The bug there was that OpenTofu was failing to evaluate the provider
// instance key expression for a graph node representing a "orphaned"
// resource instance, and was thus always treating them as belonging to
// the no-key instance of the given provider. Since a for_each provider
// has no instance without an instance key, that caused an error trying
// to use a non-existent provider instance.
instAddr := addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_thing",
Name: "a",
}.Instance(addrs.StringKey("orphaned")).Absolute(addrs.RootModuleInstance)
providerConfigAddr := addrs.AbsProviderConfig{
Module: addrs.RootModule,
Provider: addrs.NewBuiltInProvider("test"),
Alias: "multi",
}
m := testModuleInline(t, map[string]string{
"main.tf": `
terraform {
required_providers {
test = {
source = "terraform.io/builtin/test"
}
}
}
provider "test" {
alias = "multi"
for_each = toset(["a"])
}
resource "test_thing" "a" {
for_each = toset([])
provider = test.multi["a"]
}
`,
})
s := states.BuildState(func(ss *states.SyncState) {
ss.SetResourceInstanceCurrent(
instAddr,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{}`),
},
providerConfigAddr,
addrs.NoKey, // NOTE: The prior state object is associated with a no-key instance of provider.test
)
})
p := &MockProvider{}
p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
Provider: providers.Schema{
Block: &configschema.Block{},
},
ResourceTypes: map[string]providers.Schema{
"test_thing": {
Block: &configschema.Block{},
},
},
}
tofuCtx := testContext2(t, &ContextOpts{
Providers: map[addrs.Provider]providers.Factory{
providerConfigAddr.Provider: testProviderFuncFixed(p),
},
})
_, diags := tofuCtx.Plan(context.Background(), m, s, DefaultPlanOpts)
err := diags.Err()
if err == nil {
t.Fatalf("unexpected success; want an error about %s not being declared", providerConfigAddr.InstanceString(addrs.NoKey))
}
got := err.Error()
wantSubstring := `To proceed, return to your previous single-instance configuration for provider["terraform.io/builtin/test"].multi and ensure that test_thing.a["orphaned"] has been destroyed or forgotten before using for_each with this provider, or change the resource configuration to still declare an instance with the key ["orphaned"].`
if !strings.Contains(got, wantSubstring) {
t.Errorf("missing expected error message\ngot:\n%s\nwant substring: %s", got, wantSubstring)
}
}
func TestContext2Plan_plannedState(t *testing.T) {
addr := mustResourceInstanceAddr("test_object.a")
m := testModuleInline(t, map[string]string{

View File

@ -107,7 +107,7 @@ func (n *NodeAbstractResourceInstance) References() []*addrs.Reference {
return nil
}
func (n *NodeAbstractResourceInstance) resolveProvider(ctx EvalContext, hasExpansionData bool) tfdiags.Diagnostics {
func (n *NodeAbstractResourceInstance) resolveProvider(ctx EvalContext, hasExpansionData bool, deposedKey states.DeposedKey) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics
log.Printf("[TRACE] Resolving provider key for %s", n.Addr)
@ -204,19 +204,75 @@ func (n *NodeAbstractResourceInstance) resolveProvider(ctx EvalContext, hasExpan
return nil
}
if n.ResolvedProviderKey == nil {
// Probably an OpenTofu bug
return diags.Append(fmt.Errorf("provider %s not initialized for resource %s", n.ResolvedProvider.ProviderConfig.InstanceString(n.ResolvedProviderKey), n.Addr))
// If we get here then the provider instance address tracked in the state refers to
// an instance of the provider configuration that is no longer declared in the
// configuration. This could either mean that the provider was previously using
// for_each but one of the keys has been removed, or that the "for_each"-ness
// of the provider configuration has changed since this state snapshot was created.
// There are therefore two different error cases to handle, although we need
// slightly different messaging for deposed vs. orphaned instances.
if deposedKey == states.NotDeposed {
if n.ResolvedProviderKey != nil {
// We're associated with an for_each instance key that isn't declared anymore.
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Provider instance not present",
fmt.Sprintf(
"To work with %s its original provider instance at %s is required, but it has been removed. This occurs when an element is removed from the provider configuration's for_each collection while objects created by that the associated provider instance still exist in the state. Re-add the for_each element to destroy %s, after which you can remove the provider configuration again.\n\nThis is commonly caused by using the same for_each collection both for a resource (or its containing module) and its associated provider configuration. To successfully remove an instance of a resource it must be possible to remove the corresponding element from the resource's for_each collection while retaining the corresponding element in the provider's for_each collection.",
n.Addr, n.ResolvedProvider.ProviderConfig.InstanceString(n.ResolvedProviderKey), n.Addr,
),
))
} else {
// We're associated with the no-key instance of a provider configuration, which
// suggests that someone is in the process of adopting provider for_each for
// a provider configuration that didn't previously use it but has some
// orphaned resource instance objects in the state that need to have
// their destroy completed first.
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Provider instance not present",
fmt.Sprintf(
"To work with %s its original provider instance at %s is required, but it has been removed. This suggests that you've added for_each to this provider configuration while there are existing instances of %s that need to be destroyed by the original single-instance provider configuration.\n\nTo proceed, return to your previous single-instance configuration for %s and ensure that %s has been destroyed or forgotten before using for_each with this provider, or change the resource configuration to still declare an instance with the key %s.",
n.Addr, n.ResolvedProvider.ProviderConfig.InstanceString(n.ResolvedProviderKey), n.Addr.ContainingResource(),
n.ResolvedProvider.ProviderConfig, n.Addr, n.Addr.Resource.Key,
),
))
}
} else {
if n.ResolvedProviderKey != nil {
// We're associated with an for_each instance key that isn't declared anymore.
// This particular case is similar to the non-deposed variant above, but we
// mention the deposed key in the message and drop the irrelevant note about
// using the same for_each for the resource and the provider, since deposed
// objects are caused by a failed create_before_destroy (a kind of "replace")
// rather than by entirely removing an instance.
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Provider instance not present",
fmt.Sprintf(
"To work with %s's deposed object %s its original provider instance at %s is required, but it has been removed. This occurs when an element is removed from the provider configuration's for_each collection while objects created by that the associated provider instance still exist in the state. Re-add the for_each element to destroy this deposed object for %s, after which you can remove the provider configuration again.",
n.Addr, deposedKey, n.ResolvedProvider.ProviderConfig.InstanceString(n.ResolvedProviderKey), n.Addr,
),
))
} else {
// We're associated with the no-key instance of a provider configuration, which
// suggests that someone is in the process of adopting provider for_each for
// a provider configuration that didn't previously use it but has some
// deposed resource instance objects in the state that need to have
// their destroy completed first.
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Provider instance not present",
fmt.Sprintf(
"To work with %s's deposed object %s its original provider instance at %s is required, but it has been removed. This suggests that you've added for_each to this provider configuration while there are existing deposed objects of %s that need to be destroyed by the original single-instance provider configuration.\n\nTo proceed, return to your previous single-instance configuration for %s and ensure that all deposed instances of %s have been destroyed before using for_each with this provider.",
n.Addr, deposedKey, n.ResolvedProvider.ProviderConfig.InstanceString(n.ResolvedProviderKey),
n.Addr,
n.ResolvedProvider.ProviderConfig, n.Addr,
),
))
}
}
return diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Provider instance not present",
fmt.Sprintf(
"To work with %s its original provider instance at %s is required, but it has been removed. This occurs when an element is removed from the provider configuration's for_each collection while objects created by that the associated provider instance still exist in the state. Re-add the for_each element to destroy %s, after which you can remove the provider configuration again.\n\nThis is commonly caused by using the same for_each collection both for a resource (or its containing module) and its associated provider configuration. To successfully remove an instance of a resource it must be possible to remove the corresponding element from the resource's for_each collection while retaining the corresponding element in the provider's for_each collection.",
n.Addr, n.ResolvedProvider.ProviderConfig.InstanceString(n.ResolvedProviderKey), n.Addr,
),
))
return diags
}
// StateDependencies returns the dependencies which will be saved in the state

View File

@ -141,7 +141,7 @@ func (n *NodeApplyableResourceInstance) Execute(ctx EvalContext, op walkOperatio
return diags
}
diags = n.resolveProvider(ctx, true)
diags = n.resolveProvider(ctx, true, states.NotDeposed)
if diags.HasErrors() {
return diags
}

View File

@ -87,6 +87,11 @@ func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference
func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
log.Printf("[TRACE] NodePlanDeposedResourceInstanceObject: planning %s deposed object %s", n.Addr, n.DeposedKey)
diags = n.resolveProvider(ctx, false, n.DeposedKey)
if diags.HasErrors() {
return diags
}
// Read the state for the deposed resource instance
state, err := n.readResourceInstanceStateDeposed(ctx, n.Addr, n.DeposedKey)
diags = diags.Append(err)
@ -252,6 +257,11 @@ func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v b
func (n *NodeDestroyDeposedResourceInstanceObject) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
var change *plans.ResourceInstanceChange
diags = n.resolveProvider(ctx, false, n.DeposedKey)
if diags.HasErrors() {
return diags
}
// Read the state for the deposed resource instance
state, err := n.readResourceInstanceStateDeposed(ctx, n.Addr, n.DeposedKey)
if err != nil {
@ -399,6 +409,11 @@ func (n *NodeForgetDeposedResourceInstanceObject) References() []*addrs.Referenc
// GraphNodeExecutable impl.
func (n *NodeForgetDeposedResourceInstanceObject) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
diags = n.resolveProvider(ctx, false, n.DeposedKey)
if diags.HasErrors() {
return diags
}
// Read the state for the deposed resource instance
state, err := n.readResourceInstanceStateDeposed(ctx, n.Addr, n.DeposedKey)
if err != nil {

View File

@ -143,7 +143,7 @@ func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation)
// Eval info is different depending on what kind of resource this is
switch addr.Resource.Resource.Mode {
case addrs.ManagedResourceMode:
diags = n.resolveProvider(ctx, false)
diags = n.resolveProvider(ctx, false, states.NotDeposed)
if diags.HasErrors() {
return diags
}

View File

@ -53,6 +53,11 @@ func (n *NodeForgetResourceInstance) Execute(ctx EvalContext, op walkOperation)
log.Printf("[WARN] NodeForgetResourceInstance for %s with no state", addr)
}
diags = n.resolveProvider(ctx, false, states.NotDeposed)
if diags.HasErrors() {
return diags
}
var state *states.ResourceInstanceObject
state, readDiags := n.readResourceInstanceState(ctx, addr)

View File

@ -92,7 +92,7 @@ func (n *graphNodeImportState) Execute(ctx EvalContext, op walkOperation) (diags
ResolvedProvider: n.ResolvedProvider,
},
}
diags = diags.Append(asAbsNode.resolveProvider(ctx, true))
diags = diags.Append(asAbsNode.resolveProvider(ctx, true, states.NotDeposed))
if diags.HasErrors() {
return diags
}

View File

@ -47,7 +47,7 @@ func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceIn
func (n *NodePlanDestroyableResourceInstance) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
addr := n.ResourceInstanceAddr()
diags = diags.Append(n.resolveProvider(ctx, false))
diags = diags.Append(n.resolveProvider(ctx, false, states.NotDeposed))
if diags.HasErrors() {
return diags
}

View File

@ -86,7 +86,7 @@ var (
func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics {
addr := n.ResourceInstanceAddr()
diags := n.resolveProvider(ctx, true)
diags := n.resolveProvider(ctx, true, states.NotDeposed)
if diags.HasErrors() {
return diags
}

View File

@ -57,7 +57,7 @@ func (n *NodePlannableResourceInstanceOrphan) Execute(ctx EvalContext, op walkOp
// Eval info is different depending on what kind of resource this is
switch addr.Resource.Resource.Mode {
case addrs.ManagedResourceMode:
diags := n.resolveProvider(ctx, true)
diags := n.resolveProvider(ctx, true, states.NotDeposed)
if diags.HasErrors() {
return diags
}

View File

@ -9,9 +9,9 @@
package tools
import (
_ "github.com/golang/mock/mockgen"
_ "github.com/mitchellh/gox"
_ "github.com/nishanths/exhaustive"
_ "go.uber.org/mock/mockgen"
_ "golang.org/x/tools/cmd/stringer"
_ "google.golang.org/grpc/cmd/protoc-gen-go-grpc"
_ "honnef.co/go/tools/cmd/staticcheck"

View File

@ -10,7 +10,7 @@ import DocVersionBadge from "@theme/DocVersionBadge";
### OpenTofu Documentation <DocVersionBadge />
Welcome to OpenTofu 1.8! This version brings you a host of new features like early evaluation, provider mocking in tests and more. If you are coming from a previous OpenTofu or Terraform version you can [read the details about the new features here &raquo;](intro/whats-new.mdx)
Welcome to OpenTofu 1.10! This version brings a lot of new features and improvements to OpenTofu. If you are coming from a previous OpenTofu or Terraform version you can [read the details about the new features here &raquo;](intro/whats-new.mdx)
<DocCardList
items={[

View File

@ -1,29 +1,29 @@
---
sidebar_position: 5
sidebar_label: Upgrading from OpenTofu 1.6.x/1.7.x
sidebar_label: Upgrading from OpenTofu 1.7.x/1.8.x/1.9.x
description: |-
Learn how to upgrade OpenTofu from version 1.6.x/1.7.x to 1.8.0.
Learn how to upgrade OpenTofu from version 1.7.x/1.8.x/1.9.x to 1.10.0.
---
# Upgrading from OpenTofu 1.6.x/1.7.x
# Upgrading from OpenTofu 1.7.x/1.8.x/1.9.x
OpenTofu 1.8.x is mostly compatible with OpenTofu 1.6.x and 1.7.x (other than one minor breaking change in the S3 backend). This migration guide will take you through the process of
upgrading OpenTofu to version 1.8.0.
OpenTofu 1.10.x is mostly compatible with previous OpenTofu versions (other than one minor breaking change in the S3 backend). This migration guide will take you through the process of upgrading OpenTofu to version 1.10.0.
## Step 0: Prepare a disaster recovery plan
Although OpenTofu 1.8 is mostly compatible with version 1.6 and 1.7, you should take the necessary precautions to prevent
accidents. Make sure you have an up to date and *tested* disaster recovery plan.
Although OpenTofu 1.10 is mostly compatible with previous versions, you should take the necessary precautions to prevent accidents. Make sure you have an up to date and *tested* disaster recovery plan.
## Step 1: If using an S3 backend - Remove any use of `use_legacy_workflow` from S3 backend configurations
If you are using the S3 backend, with `use_legacy_workflow` set, you'd have to remove it. This field has been deprecated in version 1.7.0, and has been changed to default to `false`.
The legacy workflow of authentication is no longer supported. Please start using the new authentication method, which is more consistent with other AWS tools
If you are using the S3 backend, with `use_legacy_workflow` set, you'd have to remove it. This field has been deprecated in version 1.7.0, and has been changed to default to `false`. The legacy workflow of authentication is no longer supported. Please start using the new authentication method, which is more consistent with other AWS tools
## Step 2: Apply all changes with OpenTofu 1.6.x/1.7.x
## Step 2: If you are using `ghcr.io/opentofu/opentofu` as a base image
Before proceeding, make sure that you apply all changes with `tofu apply`. Running `tofu plan` should result
in no planned changes. While you can switch to OpenTofu with pending changes, it is not recommended.
If you are using `ghcr.io/opentofu/opentofu` as a base image for your containers, you will need to move away from this setup. Please follow the instructions on the container installation page to build your own base image.
## Step 3: Apply all changes with OpenTofu 1.7.x/1.8.x/1.9.x
Before proceeding, make sure that you apply all changes with `tofu apply`. Running `tofu plan` should result in no planned changes. While you can switch to OpenTofu with pending changes, it is not recommended.
```
$ tofu plan
@ -36,18 +36,18 @@ OpenTofu has compared your real infrastructure against your
configuration and found no differences, so no changes are needed.
```
## Step 3: Install OpenTofu 1.8.x
## Step 4: Install OpenTofu 1.10.x
As a first step, please [follow the installation instructions for the OpenTofu CLI tool](intro/install/index.mdx). Please test
if you can successfully execute the `tofu` command and receive the correct version:
```
$ tofu --version
OpenTofu v1.8.0
OpenTofu v1.10.0
on linux_amd64
```
## Step 4: Back up your state file
## Step 5: Back up your state file
Before you begin using the `tofu` binary on your Terraform code, make sure to back up your state file. If you are using
a local state file, you can simply make a copy of your `terraform.tfstate` file in your project directory.
@ -55,7 +55,7 @@ a local state file, you can simply make a copy of your `terraform.tfstate` file
If you are using a remote backend such as an S3 bucket, make sure that you follow the backup procedures for the
backend and that you exercise the restore procedure at least once.
## Step 5: Initialize OpenTofu 1.8.x
## Step 6: Initialize OpenTofu 1.10.x
:::warning
@ -75,7 +75,7 @@ If you are using the S3 backend - You will need to run `tofu init -reconfigure`
:::
## Step 6: Inspect the plan
## Step 7: Inspect the plan
Once initialized, run `tofu plan` and ensure that there are no pending changes similar to step 1 above. If there are
unexpected changes in the plan, roll back to OpenTofu 1.7.x/1.8.x/1.9.x and troubleshoot your migration. (See the Troubleshooting
@ -92,17 +92,17 @@ OpenTofu has compared your real infrastructure against your
configuration and found no differences, so no changes are needed.
```
## Step 7: Test out a small change
## Step 8: Test out a small change
Before you begin using OpenTofu for larger changes, test out `tofu apply` with a smaller, non-critical
change.
## Rolling back and reporting issues
If you have issues migrating to OpenTofu you can follow these steps to roll back to OpenTofu 1.6.x/1.7.x:
If you have issues migrating to OpenTofu you can follow these steps to roll back to OpenTofu 1.7.x/1.8.x/1.9.x:
1. Create another backup of your state file.
2. Remove OpenTofu 1.8.x and verify that you are running OpenTofu 1.6.x/1.7.x.
2. Remove OpenTofu 1.10.x and verify that you are running OpenTofu 1.7.x/1.8.x/1.9.x.
3. Run `tofu init`.
4. Run `tofu plan` and verify that no unexpected changes are in the plan.
5. Test the rollback with a small, non-critical change.
@ -117,11 +117,11 @@ If you encounter any issues during the migration to OpenTofu, you can join the <
### Error: Failed to query available provider packages
This error happens when a provider you specified in your configuration is not available in the OpenTofu registry.
Please roll back to OpenTofu 1.6.x/1.7.x and make sure your code works with that version. If your code works, please
Please roll back to OpenTofu 1.7.x/1.8.x/1.9.x and make sure your code works with that version. If your code works, please
[submit an issue to include the provider in the registry](https://github.com/opentofu/registry/issues/).
### Error: Module not found
This error happens when a module you specified in your configuration is not available in the OpenTofu registry.
Please roll back to OpenTofu 1.6.x/1.7.x and make sure your code works with that version. If your code works, please
Please roll back to OpenTofu 1.7.x/1.8.x/1.9.x and make sure your code works with that version. If your code works, please
[submit an issue to include the module in the registry](https://github.com/opentofu/registry/issues/).

View File

@ -1,209 +1,53 @@
---
sidebar_position: 2
sidebar_label: What's new in version 1.7?
sidebar_label: What's new in version 1.10?
description: |-
Learn all about the new features in OpenTofu 1.7.
Learn all about the new features in OpenTofu 1.10.
---
# What's new in OpenTofu 1.7?
# What's new in OpenTofu 1.10?
This page will run you through the most important changes in OpenTofu 1.7:
This page will run you through the most important changes in OpenTofu 1.10:
- [State encryption](#state-encryption)
- [Provider-defined functions](#provider-defined-functions)
- [Removed block](#removed-block)
- [Loopable import blocks](#loopable-import-blocks)
- [Built-in function changes](#built-in-function-changes)
- [CLI changes](#cli-changes)
- [Testing feature changes](#testing-feature-changes)
- [New features](#new-features)
- [New built-in functions](#new-built-in-functions)
- [Improvements to existing features](#improvements-to-existing-features)
- [External programs as encryption key providers (experimental)](#external-programs-as-encryption-key-providers-experimental)
- [Smaller improvements](#smaller-improvements)
- [Deprecations](#deprecations)
- [Using `ghcr.io/opentofu/opentofu` as a base image](#using-ghcrioopentofuopentofu-as-a-base-image)
- [Bugfixes](#bugfixes)
## State encryption
## New features
State encryption is one of the flagship features of this release. We have prepared a [full documentation](../language/state/encryption.mdx) for this feature.
### New built-in functions
Before you test this feature, please **make a backup** of your state file. You can then add the following block to enable state encryption:
New built-in provider functions have been added ([#2306](https://github.com/opentofu/opentofu/pull/2306)):
```hcl
terraform {
encryption {
key_provider "pbkdf2" "my_passphrase" {
## Enter a passphrase here:
passphrase = ""
}
- `provider::terraform::decode_tfvars` - Decode a TFVars file content into an object.
- `provider::terraform::encode_tfvars` - Encode an object into a string with the same format as a TFVars file.
- `provider::terraform::encode_expr` - Encode an arbitrary expression into a string with valid OpenTofu syntax.
method "aes_gcm" "my_method" {
keys = key_provider.pbkdf2.my_passphrase
}
## Improvements to existing features
## Remove this after the migration:
method "unencrypted" "migration" {
}
### External programs as encryption key providers (experimental)
state {
method = method.aes_gcm.my_method
State encryption now supports using external programs as key providers. Additionally, the PBKDF2 key provider now supports chaining via the `chain` parameter. For details, [see the related documentation](../language/state/encryption.mdx#external-experimental)
## Remove the fallback block after migration:
fallback{
method = method.unencrypted.migration
}
## Enable this after migration:
#enforced = true
}
}
}
```
This functionality is experimental and may be changed in future OpenTofu versions.
You can migrate back using the following syntax:
### Smaller improvements
```hcl
terraform {
encryption {
key_provider "pbkdf2" "my_passphrase" {
## Enter a passphrase here:
passphrase = ""
}
* OpenTofu will now recommend using `-exclude` instead of `-target`, when possible, in the error messages about unknown values in `count` and `for_each` arguments, thereby providing a more definitive workaround. ([#2154](https://github.com/opentofu/opentofu/pull/2154))
method "aes_gcm" "my_method" {
keys = key_provider.pbkdf2.my_passphrase
}
## Deprecations
method "unencrypted" "migration" {
}
### Using `ghcr.io/opentofu/opentofu` as a base image
state {
method = method.unencrypted.migration
enforced = false
fallback{
method = method.aes_gcm.my_method
}
}
}
}
```
Using the `ghcr.io/opentofu/opentofu` image as a base image for custom images is deprecated and doesn't work as of OpenTofu 1.10. The reason behind this change is security: while we regularly update OpenTofu itself when security-relevant updates arise, providing a base image would require an update roughly once a week, which we are currently not equipped to do.
If you have access to an AWS, GCP account, or an OpenBao/MPL-licensed HashiCorp Vault installation, you can also [test these key providers](../language/state/encryption.mdx#key-providers).
However, we have added instructions on building your own base image in the [containerized installation instructions](./install/docker.mdx#building-your-own-image).
## Provider-defined functions
## Bugfixes
The new Terraform Plugin SDK added support for [provider-defined functions](../language/functions/index.mdx#provider-defined-functions) that you can use directly in OpenTofu. This is a significant improvement over using data sources as provider-defined functions don't increase the size of your state file and require less code to write.
If you want to test provider-defined functions, you can use the [corefunc](https://library.tf/providers/northwood-labs/corefunc/latest) provider by [Ryan Parman](https://github.com/skyzyx):
```hcl
terraform {
required_providers {
corefunc = {
source = "northwood-labs/corefunc"
version = "1.4.0"
}
}
}
provider "corefunc" {
}
output "test" {
value = provider::corefunc::str_snake("Hello world!")
# Prints: hello_world
}
```
What's more, we added an OpenTofu-only feature to let providers dynamically define custom functions based on your configuration. This enhancement allows you to fully integrate other programming languages as [shown in our live stream](https://www.youtube.com/watch?v=6OXBv0MYalY). You can try out this functionality with our experimental [Lua](https://github.com/opentofu/terraform-provider-lua) and [Go](https://github.com/opentofu/terraform-provider-go) providers.
## Removed block
The removed block lets you remove a resource from the state file but keep it on the infrastructure. We have prepared a [full documentation](../language/resources/syntax.mdx#removing-resources) for this feature. You can test it by creating a resource first:
```hcl
resource "local_file" "test" {
content = "Hello world!"
filename = "test.txt"
}
```
After applying, you can replace the resource with a removed block:
```hcl
removed {
from = local_file.test
}
```
After the next apply, you will see that the `local_file.test` resource no longer exists in your state file, but the `test.txt` file should still exist on your disk. You can now remove the removed block safely.
## Loopable import blocks
We made several improvements to the declarative import blocks, most prominently you can now use the `for_each` instruction on the block. We have prepared a [full documentation](../language/import/index.mdx#importing-multiple-resources) for this feature.
In previous OpenTofu versions, you could already use the `import` block to declaratively import resources, for example:
```hcl
resource "random_id" "test_id" {
byte_length = 8
}
import {
to = random_id.test_id
id = "Y2FpOGV1Mkk"
}
output "id" {
value = random_id.test_id.b64_url
}
```
In this new version you can now also declaratively import resources in a loop:
```hcl
variable "server_ids" {
type = list(string)
}
resource "random_id" "test_id" {
byte_length = 8
count = 2
}
import {
to = random_id.test_id[tonumber(each.key)]
id = each.value
for_each = {
for idx, item in var.server_ids: idx => item
}
}
output "id" {
value = random_id.test_id.*.b64_url
}
```
The example above will let you specify some random IDs from a variable, and let others be automatically generated.
## Built-in function changes
This release also contains several new functions and changes to existing functions:
- New function: [templatestring](../language/functions/templatestring.mdx)
- New function: [base64gunzip](../language/functions/base64gunzip.mdx)
- New function: [cidrcontains](../language/functions/cidrcontains.mdx)
- New function: [urldecode](../language/functions/urldecode.mdx)
- New function: [issensitive](../language/functions/issensitive.mdx)
- [nonsensitive](../language/functions/nonsensitive.mdx) no longer returns an error when the applied values are not sensitive.
- [templatefile](../language/functions/templatefile.mdx) now supports recursion up to a depth of 1024.
## CLI changes
There are also several changes to the CLI:
- `tofu init` now supports the `-json` flag for JSON output.
- `tofu plan` now has a `-concise` flag to shorten the plan output.
- `tofu console` now works on Solaris and AIX.
- The CLI now supports the XDG directory specification.
- Aliases for:
- `state list` &rarr; `state ls`
- `state mv` &rarr; `state move`
- `state rm` &rarr; `state remove`
## Testing feature changes
- Tofu now reads the `.tfvars` file from the tests folder.
- Fixed an issue where an invalid provider name in the `provider_meta` block would crash OpenTofu rather than report an error ([#2347](https://github.com/opentofu/opentofu/pull/2347))

View File

@ -5,6 +5,7 @@ description: >-
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Button from "@site/src/components/Button";
import CodeBlock from '@theme/CodeBlock';
import ConfigurationTF from '!!raw-loader!./examples/encryption/configuration.tf'
import ConfigurationSH from '!!raw-loader!./examples/encryption/configuration.sh'
@ -15,6 +16,13 @@ import PBKDF2 from '!!raw-loader!./examples/encryption/pbkdf2.tf'
import AWSKMS from '!!raw-loader!./examples/encryption/aws_kms.tf'
import GCPKMS from '!!raw-loader!./examples/encryption/gcp_kms.tf'
import OpenBao from '!!raw-loader!./examples/encryption/openbao.tf'
import External from '!!raw-loader!./examples/encryption/keyprovider-external.tofu'
import ExternalHeader from '!!raw-loader!./examples/encryption/keyprovider-external-header.json'
import ExternalInput from '!!raw-loader!./examples/encryption/keyprovider-external-input.json'
import ExternalOutput from '!!raw-loader!./examples/encryption/keyprovider-external-output.json'
import ExternalGo from '!!raw-loader!./examples/encryption/keyprovider-external-provider.go'
import ExternalPython from '!!raw-loader!./examples/encryption/keyprovider-external-provider.py'
import ExternalSH from '!!raw-loader!./examples/encryption/keyprovider-external-provider.sh'
import Sample from '!!raw-loader!./examples/encryption/sample.tf'
import Fallback from '!!raw-loader!./examples/encryption/fallback.tf'
import FallbackFromUnencrypted from '!!raw-loader!./examples/encryption/fallback_from_unencrypted.tf'
@ -161,7 +169,8 @@ The PBKDF2 key provider allows you to use a long passphrase as to generate a key
| Option | Description | Min. | Default |
|--------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|-----------|------------------------------------|
| passphrase *(required)* | Enter a long and complex passphrase. | 16 chars. | - |
| passphrase *(required)* | Enter a long and complex passphrase. Required if `chain` is not specified. | 16 chars. | - |
| chain *(required)* | Receive the passphrase from another key provider. Required if `passphrase` is not specified. | | - |
| key_length | Number of bytes to generate as a key. | 1 | 32 |
| iterations | Number of iterations. See [this document](https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2) for recommendations. | 200.000 | 600.000 |
| salt_length | Length of the salt for the key derivation. | 1 | 32 |
@ -225,8 +234,78 @@ The OpenBao key provider is compatible with the last MPL-licensed version of Has
:::
## Methods
### External (experimental)
The external command provider lets you run external commands in order to obtain encryption keys. These programs must be specifically written to work with OpenTofu. This key provider has the following fields:
| Option | Description | Min. | Default |
|-----------|---------------------------------------------------------------------------------------|------|---------|
| `command` | External command to run in an array format, each parameter being an item in an array. | 1 | |
For example, you can configure the external program as follows:
<CodeBlock language="hcl">{External}</CodeBlock>
:::note
You can use this provider in conjunction with the `chain` option in the [PBKDF2](#pbkdf2) key provider to input a passphrase from an external program.
:::
#### Writing an external key provider
An external provider can be anything as long as it is runnable as an application. The protocol consists of 3 steps:
1. The external program writes the header to the standard output.
2. OpenTofu sends the metadata to the external program over the standard input.
3. The external program writes the key information to the standard output.
<Tabs>
<TabItem value="step1" label="Step 1: Writing the header" default>
As a first step, the external program must output a header to the standard output so OpenTofu knows it is a valid external key provider. The header must always be a single line and contain the following:
<CodeBlock language={"json"}>{ExternalHeader}</CodeBlock>
<Button
href="https://github.com/opentofu/opentofu/tree/main/internal/encryption/keyprovider/externalcommand/protocol/header.schema.json"
className="inline-flex"
target="_blank"
>
Open JSON schema file
</Button>
</TabItem>
<TabItem value="step2" label="Step 2: Reading the input">
Once the header is written, OpenTofu writes the input data to the standard input of the external program. If OpenTofu only needs to encrypt data, this will be `null`. If OpenTofu needs to decrypt data, it will write the metadata previously stored with the encrypted form to the standard input:
<CodeBlock language={"json"}>{ExternalInput}</CodeBlock>
<Button
href="https://github.com/opentofu/opentofu/tree/main/internal/encryption/keyprovider/externalcommand/protocol/input.schema.json"
className="inline-flex"
target="_blank"
>
Open JSON schema file
</Button>
</TabItem>
<TabItem value="step3" label="Step 3: Writing the output">
With the input, the external program can now construct the output. If no input is present, the external program only needs to produce an encryption key. If an input is present, it needs to produce a decryption key as well. If needed, the output can also contain metadata that will be stored with the encrypted data and passed as an input on the next run.
<CodeBlock language={"json"}>{ExternalOutput}</CodeBlock>
<Button
href="https://github.com/opentofu/opentofu/tree/main/internal/encryption/keyprovider/externalcommand/protocol/output.schema.json"
className="inline-flex"
target="_blank"
>
Open JSON schema file
</Button>
</TabItem>
<TabItem value="example-go" label="Example: Go">
<CodeBlock language={"go"}>{ExternalGo}</CodeBlock>
</TabItem>
<TabItem value="example-python" label="Example: Python">
<CodeBlock language={"python"}>{ExternalPython}</CodeBlock>
</TabItem>
<TabItem value="example-sh" label="Example: POSIX Shell">
<CodeBlock language={"sh"}>{ExternalSH}</CodeBlock>
</TabItem>
</Tabs>
## Methods
### AES-GCM

View File

@ -0,0 +1 @@
{"magic":"OpenTofu-External-Key-Provider","version":1}

View File

@ -0,0 +1,6 @@
{
"external_data": {
"key1": "value1",
"key2": "value2"
}
}

View File

@ -0,0 +1,12 @@
{
"key": {
"encryption_key": "newly generated base64-encoded encryption key",
"decryption_key": "base64-encoded decryption key, if input meta was present, omitted otherwise"
},
"meta": {
"external_data": {
"key1": "value1",
"key2": "value2"
}
}
}

View File

@ -0,0 +1,78 @@
package main
import (
"encoding/json"
"io"
"log"
"os"
)
// Header is the initial greeting the key provider sends out. It must be
// written to stdout as a single line of JSON before anything else, so that
// OpenTofu can recognize the program as a valid external key provider.
type Header struct {
	// Magic must always be "OpenTofu-External-Key-Provider".
	Magic string `json:"magic"`
	// Version must be 1.
	Version int `json:"version"`
}
// Metadata describes both the input and the output metadata.
type Metadata struct {
	// ExternalData carries provider-specific data that is stored alongside
	// the encrypted data and passed back in on the next run (see Output.Meta).
	ExternalData map[string]any `json:"external_data"`
}
// Input describes the input data structure decoded from stdin. This is nil
// on input if no existing data needs to be decrypted, in which case only an
// encryption key needs to be produced.
type Input *Metadata
// Output describes the output data written to stdout.
type Output struct {
	// Key holds the key material. Note that []byte fields are emitted as
	// base64-encoded strings by encoding/json.
	Key struct {
		// EncryptionKey must always be provided.
		EncryptionKey []byte `json:"encryption_key,omitempty"`
		// DecryptionKey must be provided when the input metadata is present.
		DecryptionKey []byte `json:"decryption_key,omitempty"`
	} `json:"key"`
	// Meta contains the metadata to store alongside the encrypted data. You can
	// store data here you need to reconstruct the decryption key later.
	Meta Metadata `json:"meta"`
}
// main implements the external key provider handshake over stdio: it
// writes the protocol header, reads the (possibly null) input metadata
// from stdin, and writes the resulting keys plus output metadata to
// stdout. This is a skeleton; the TODOs mark where a real provider
// derives its keys.
func main() {
	// Write logs to stderr — stdout is reserved for the protocol JSON and
	// must not be polluted.
	log.Default().SetOutput(os.Stderr)

	// Write the header (a single JSON line followed by "\n"):
	header := Header{
		"OpenTofu-External-Key-Provider",
		1,
	}
	marshalledHeader, err := json.Marshal(header)
	if err != nil {
		log.Fatalf("%v", err)
	}
	_, _ = os.Stdout.Write(append(marshalledHeader, []byte("\n")...))

	// Read the input; OpenTofu closes our stdin after writing it, so
	// ReadAll terminates.
	input, err := io.ReadAll(os.Stdin)
	if err != nil {
		log.Fatalf("Failed to read stdin: %v", err)
	}
	// inMeta stays nil when stdin contained JSON null (nothing to decrypt).
	var inMeta Input
	if err := json.Unmarshal(input, &inMeta); err != nil {
		log.Fatalf("Failed to parse stdin: %v", err)
	}

	// TODO produce the encryption key
	if inMeta != nil {
		// Input metadata was present, so previously-encrypted data exists.
		// TODO produce decryption key
	}

	output := Output{
		// TODO: produce output
	}
	outputData, err := json.Marshal(output)
	if err != nil {
		log.Fatalf("Failed to encode output: %v", err)
	}
	_, _ = os.Stdout.Write(outputData)
}

View File

@ -0,0 +1,47 @@
#!/usr/bin/python
import base64
import json
import sys
if __name__ == "__main__":
    # Emit the protocol header as a single line so OpenTofu can recognize
    # this program as an external key provider.
    print(json.dumps({"magic": "OpenTofu-External-Key-Provider", "version": 1}))

    # Read and decode the input metadata from stdin; it is JSON null when
    # there is no existing data to decrypt.
    data = json.loads(sys.stdin.read())

    # Fixed, demo-only 16-byte key: 0x01..0x10.
    key = bytes(range(1, 17))
    encoded_key = base64.b64encode(key).decode('ascii')

    # Always provide an encryption key; add a decryption key only when
    # input metadata was supplied.
    out_keys = {"encryption_key": encoded_key}
    if data is not None:
        # In a real-life scenario we would use this metadata for something
        # like pbkdf2 to derive the decryption key.
        input_meta = data["external_data"]
        out_keys["decryption_key"] = encoded_key

    # The output metadata is stored alongside the encrypted data; this
    # example has nothing to remember.
    # NOTE(review): the Go example and JSON output sample use "key" rather
    # than "keys" for this field — verify against output.schema.json.
    sys.stdout.write(json.dumps({
        "keys": out_keys,
        "meta": {"external_data": {}}
    }))

View File

@ -0,0 +1,38 @@
#!/bin/sh
set -e

# Output the header as a single line so OpenTofu can recognize this
# program as an external key provider:
echo '{"magic":"OpenTofu-External-Key-Provider","version":1}'

# Read the input metadata from stdin. Command substitution strips any
# trailing newline, so no further normalization is needed. (The previous
# `echo -n $(cat)` depended on the `-n` flag, whose behavior is undefined
# for a POSIX /bin/sh echo, and on unquoted word-splitting.)
INPUT=$(cat)

if [ "${INPUT}" = "null" ]; then
  # We don't have metadata and shouldn't output a decryption key.
  cat << EOF
{
  "keys":{
    "encryption_key":"AQIDBAUGBwgJCgsMDQ4PEA=="
  },
  "meta":{
    "external_data":{}
  }
}
EOF
else
  # We have metadata and should output a decryption key. In our simplified
  # case it is the same as the encryption key.
  cat << EOF
{
  "keys":{
    "encryption_key":"AQIDBAUGBwgJCgsMDQ4PEA==",
    "decryption_key":"AQIDBAUGBwgJCgsMDQ4PEA=="
  },
  "meta":{
    "external_data":{}
  }
}
EOF
fi

View File

@ -0,0 +1,7 @@
terraform {
  encryption {
    # Obtain encryption/decryption keys by running an external program
    # that speaks the external key provider stdio protocol.
    key_provider "externalcommand" "foo" {
      # The first element is the executable to run; any further elements
      # are passed to it as arguments.
      command = ["./some_program", "some_parameter"]
    }
  }
}

View File

@ -4,6 +4,9 @@ terraform {
# Specify a long / complex passphrase (min. 16 characters)
passphrase = "correct-horse-battery-staple"
# Alternatively, receive the passphrase from another key provider:
chain = key_provider.other.provider
# Adjust the key length to the encryption method (default: 32)
key_length = 32