Fix destroy/orphan path with provider instances (#2150)

Signed-off-by: Christian Mesh <christianmesh1@gmail.com>
Christian Mesh 2024-11-07 12:41:13 -05:00 committed by GitHub
parent 30b5088da4
commit c57e634bc4
8 changed files with 365 additions and 9 deletions
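
In short: when a provider block uses for_each, each resource instance records the provider instance key it was created with. On the destroy and orphan code paths the instance expander has not been rebuilt from configuration, so evaluating the provider key expression is either impossible or can select the wrong provider instance. This change threads a hasExpansionData flag through resolveProvider and, whenever the current expansion no longer covers the instance, falls back to the provider key stored in state, erroring out if the configured provider no longer matches the one recorded there.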

View File

@@ -20,6 +20,7 @@ import (
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/checks"
"github.com/opentofu/opentofu/internal/configs"
"github.com/opentofu/opentofu/internal/configs/configschema"
"github.com/opentofu/opentofu/internal/encryption"
"github.com/opentofu/opentofu/internal/lang/marks"
@@ -4150,3 +4151,301 @@ func TestContext2Apply_excludedModuleRecursive(t *testing.T) {
<no state>
`)
}
func TestContext2Apply_providerResourceIteration(t *testing.T) {
localComplete := `
locals {
providers = { "primary": "eu-west-1", "secondary": "eu-west-2" }
resources = ["primary", "secondary"]
}
`
localPartial := `
locals {
providers = { "primary": "eu-west-1", "secondary": "eu-west-2" }
resources = ["primary"]
}
`
providerConfig := `
provider "test" {
alias = "al"
for_each = local.providers
region = each.value
}
`
resourceConfig := `
resource "test_instance" "a" {
for_each = toset(local.resources)
provider = test.al[each.key]
}
`
complete := testModuleInline(t, map[string]string{
"locals.tofu": localComplete,
"providers.tofu": providerConfig,
"resources.tofu": resourceConfig,
})
partial := testModuleInline(t, map[string]string{
"locals.tofu": localPartial,
"providers.tofu": providerConfig,
"resources.tofu": resourceConfig,
})
removed := testModuleInline(t, map[string]string{
"locals.tofu": localPartial,
"providers.tofu": providerConfig,
})
provider := testProvider("test")
provider.ReadDataSourceResponse = &providers.ReadDataSourceResponse{
State: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("data_source"),
}),
}
provider.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) providers.ConfigureProviderResponse {
var resp providers.ConfigureProviderResponse
region := req.Config.GetAttr("region")
if region.AsString() != "eu-west-1" && region.AsString() != "eu-west-2" {
resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("incorrect config val: %#v\n", region))
}
return resp
}
ps := map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("test"): testProviderFuncFixed(provider),
}
apply := func(t *testing.T, m *configs.Config, prevState *states.State) *states.State {
t.Helper()
ctx := testContext2(t, &ContextOpts{
Providers: ps,
})
plan, diags := ctx.Plan(m, prevState, DefaultPlanOpts)
if diags.HasErrors() {
t.Fatal(diags.Err())
}
newState, diags := ctx.Apply(plan, m)
if diags.HasErrors() {
t.Fatal(diags.Err())
}
return newState
}
destroy := func(t *testing.T, m *configs.Config, prevState *states.State) *states.State {
ctx := testContext2(t, &ContextOpts{
Providers: ps,
})
plan, diags := ctx.Plan(m, prevState, &PlanOpts{
Mode: plans.DestroyMode,
})
if diags.HasErrors() {
t.Fatal(diags.Err())
}
newState, diags := ctx.Apply(plan, m)
if diags.HasErrors() {
t.Fatal(diags.Err())
}
return newState
}
primaryResource := mustResourceInstanceAddr(`test_instance.a["primary"]`)
secondaryResource := mustResourceInstanceAddr(`test_instance.a["secondary"]`)
t.Run("apply_destroy", func(t *testing.T) {
state := apply(t, complete, states.NewState())
if state.ResourceInstance(primaryResource).ProviderKey != addrs.StringKey("primary") {
t.Fatal("Wrong provider key")
}
if state.ResourceInstance(secondaryResource).ProviderKey != addrs.StringKey("secondary") {
t.Fatal("Wrong provider key")
}
destroy(t, complete, state)
})
t.Run("apply_removed", func(t *testing.T) {
state := apply(t, complete, states.NewState())
state = apply(t, removed, state)
// Expect destroyed
if state.ResourceInstance(primaryResource) != nil {
t.Fatal(primaryResource.String())
}
if state.ResourceInstance(secondaryResource) != nil {
t.Fatal(secondaryResource.String())
}
})
t.Run("apply_orphan_destroy", func(t *testing.T) {
state := apply(t, complete, states.NewState())
state = apply(t, partial, state)
// Expect primary
if state.ResourceInstance(primaryResource) == nil {
t.Fatal(primaryResource.String())
}
// Missing secondary
if state.ResourceInstance(secondaryResource) != nil {
t.Fatal(secondaryResource.String())
}
destroy(t, partial, state)
})
}
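The three subtests above exercise, in order: a clean apply followed by a destroy with full expansion data; removing the resource block entirely, so both instances become orphans while the provider instances remain configured; and shrinking the resource's for_each so that only the secondary instance is orphaned before a final destroy, which is the path that requires the state fallback. The same three cases are repeated for the module variant below.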
func TestContext2Apply_providerModuleIteration(t *testing.T) {
localComplete := `
locals {
providers = { "primary": "eu-west-1", "secondary": "eu-west-2" }
mods = ["primary", "secondary"]
}
`
localPartial := `
locals {
providers = { "primary": "eu-west-1", "secondary": "eu-west-2" }
mods = ["primary"]
}
`
providerConfig := `
provider "test" {
alias = "al"
for_each = local.providers
region = each.value
}
`
moduleCall := `
module "mod" {
source = "./mod"
for_each = toset(local.mods)
providers = {
test = test.al[each.key]
}
}
`
resourceConfig := `
resource "test_instance" "a" {
}
`
complete := testModuleInline(t, map[string]string{
"locals.tofu": localComplete,
"providers.tofu": providerConfig,
"modules.tofu": moduleCall,
"mod/resources.tofu": resourceConfig,
})
partial := testModuleInline(t, map[string]string{
"locals.tofu": localPartial,
"providers.tofu": providerConfig,
"modules.tofu": moduleCall,
"mod/resources.tofu": resourceConfig,
})
removed := testModuleInline(t, map[string]string{
"locals.tofu": localPartial,
"providers.tofu": providerConfig,
})
provider := testProvider("test")
provider.ReadDataSourceResponse = &providers.ReadDataSourceResponse{
State: cty.ObjectVal(map[string]cty.Value{
"id": cty.StringVal("data_source"),
}),
}
provider.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) providers.ConfigureProviderResponse {
var resp providers.ConfigureProviderResponse
region := req.Config.GetAttr("region")
if region.AsString() != "eu-west-1" && region.AsString() != "eu-west-2" {
resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("incorrect config val: %#v\n", region))
}
return resp
}
ps := map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("test"): testProviderFuncFixed(provider),
}
apply := func(t *testing.T, m *configs.Config, prevState *states.State) *states.State {
t.Helper()
ctx := testContext2(t, &ContextOpts{
Providers: ps,
})
plan, diags := ctx.Plan(m, prevState, DefaultPlanOpts)
if diags.HasErrors() {
t.Fatal(diags.Err())
}
newState, diags := ctx.Apply(plan, m)
if diags.HasErrors() {
t.Fatal(diags.Err())
}
return newState
}
destroy := func(t *testing.T, m *configs.Config, prevState *states.State) *states.State {
ctx := testContext2(t, &ContextOpts{
Providers: ps,
})
plan, diags := ctx.Plan(m, prevState, &PlanOpts{
Mode: plans.DestroyMode,
})
if diags.HasErrors() {
t.Fatal(diags.Err())
}
newState, diags := ctx.Apply(plan, m)
if diags.HasErrors() {
t.Fatal(diags.Err())
}
return newState
}
primaryResource := mustResourceInstanceAddr(`module.mod["primary"].test_instance.a`)
secondaryResource := mustResourceInstanceAddr(`module.mod["secondary"].test_instance.a`)
t.Run("apply_destroy", func(t *testing.T) {
state := apply(t, complete, states.NewState())
if state.ResourceInstance(primaryResource).ProviderKey != addrs.StringKey("primary") {
t.Fatal("Wrong provider key")
}
if state.ResourceInstance(secondaryResource).ProviderKey != addrs.StringKey("secondary") {
t.Fatal("Wrong provider key")
}
destroy(t, complete, state)
})
t.Run("apply_removed", func(t *testing.T) {
state := apply(t, complete, states.NewState())
state = apply(t, removed, state)
// Expect destroyed
if state.ResourceInstance(primaryResource) != nil {
t.Fatal(primaryResource.String())
}
if state.ResourceInstance(secondaryResource) != nil {
t.Fatal(secondaryResource.String())
}
})
t.Run("apply_orphan_destroy", func(t *testing.T) {
state := apply(t, complete, states.NewState())
state = apply(t, partial, state)
// Expect primary
if state.ResourceInstance(primaryResource) == nil {
t.Fatal(primaryResource.String())
}
// Missing secondary
if state.ResourceInstance(secondaryResource) != nil {
t.Fatal(secondaryResource.String())
}
destroy(t, partial, state)
})
}
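Assuming these tests sit with the other context apply tests in the internal/tofu package, both can be run with:

go test ./internal/tofu -run 'TestContext2Apply_provider(Resource|Module)Iteration' -v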

View File

@@ -107,7 +107,7 @@ func (n *NodeAbstractResourceInstance) References() []*addrs.Reference {
return nil
}
-func (n *NodeAbstractResourceInstance) resolveProvider(ctx EvalContext) tfdiags.Diagnostics {
+func (n *NodeAbstractResourceInstance) resolveProvider(ctx EvalContext, hasExpansionData bool) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics
log.Printf("[TRACE] Resolving provider key for %s", n.Addr)
@@ -116,21 +116,57 @@ func (n *NodeAbstractResourceInstance) resolveProvider(ctx EvalContext) tfdiags.
return diags.Append(fmt.Errorf("attempting to resolve an unset provider at %s", n.Addr))
}
useStateFallback := false
//nolint:nestif // complexity
if n.ResolvedProvider.KeyExact != nil {
// Pass through from state
n.ResolvedProviderKey = n.ResolvedProvider.KeyExact
} else if n.ResolvedProvider.KeyExpression != nil {
// This path gets a bit convoluted in scenarios where the configuration has been
// significantly altered from the state, particularly where the fallback logic is concerned
if n.ResolvedProvider.KeyResource {
// Resolved from resource instance
-n.ResolvedProviderKey, diags = resolveProviderResourceInstance(ctx, n.Config.ProviderConfigRef.KeyExpression, n.Addr)
validExpansion := false
if hasExpansionData {
existingExpansion := ctx.InstanceExpander().ExpandResource(n.Addr.ContainingResource())
for _, expanded := range existingExpansion {
if n.Addr.Equal(expanded) {
validExpansion = true
break
}
}
}
if validExpansion {
n.ResolvedProviderKey, diags = resolveProviderResourceInstance(ctx, n.Config.ProviderConfigRef.KeyExpression, n.Addr)
} else {
useStateFallback = true
}
} else {
-// Resolved fro module instance
+// Resolved from module instance
moduleInstanceForKey := n.Addr.Module[:len(n.ResolvedProvider.KeyModule)]
if !moduleInstanceForKey.Module().Equal(n.ResolvedProvider.KeyModule) {
panic(fmt.Sprintf("Invalid module key expression location %s in resource %s", n.ResolvedProvider.KeyModule, n.Addr))
}
-n.ResolvedProviderKey, diags = resolveProviderModuleInstance(ctx, n.ResolvedProvider.KeyExpression, moduleInstanceForKey, n.Addr.String())
// Make sure that the configured expansion is valid for this instance
validExpansion := false
if hasExpansionData {
existingExpansion := ctx.InstanceExpander().ExpandModule(n.ResolvedProvider.KeyModule)
for _, expanded := range existingExpansion {
if moduleInstanceForKey.Equal(expanded) {
validExpansion = true
break
}
}
}
if validExpansion {
// We can use the standard resolver
n.ResolvedProviderKey, diags = resolveProviderModuleInstance(ctx, n.ResolvedProvider.KeyExpression, moduleInstanceForKey, n.Addr.String())
} else {
useStateFallback = true
}
}
}
@@ -138,6 +174,22 @@ func (n *NodeAbstractResourceInstance) resolveProvider(ctx EvalContext) tfdiags.
return diags
}
if useStateFallback {
// We are in an orphan or destroy code path where the existing configuration / transformations have not built up the required expansion.
// In practice, this only happens for orphaned resource instances. Destroy has already re-planned and overwritten state.
if n.ResolvedProvider.ProviderConfig.String() != n.storedProviderConfig.ProviderConfig.String() {
// Config has been altered too severely!
// In this scenario, we could consider modifying the provider transformer to add optional
// dependencies on providers from the state to keep that provider from being pruned.
return diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Unable to use fallback provider from state",
fmt.Sprintf("Provider from configuration %s does not match provider from state %s for resource %s", n.ResolvedProvider.ProviderConfig, n.storedProviderConfig.ProviderConfig, n.Addr),
))
}
n.ResolvedProviderKey = n.storedProviderConfig.KeyExact
}
log.Printf("[TRACE] Resolved provider key for %s as %s", n.Addr, n.ResolvedProviderKey)
// This duplicates a lot of getProvider() and should be refactored as the only place to resolve the provider eventually
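
For readers skimming the diff, here is a minimal, self-contained sketch of the decision this hunk implements. The expansion, stateRecord, and resolveKey names are hypothetical stand-ins for the real InstanceExpander, state storage, and resolveProvider; only the shape of the logic is taken from the change above.

package main

import "fmt"

// instanceKey stands in for addrs.InstanceKey, e.g. "primary" or "secondary".
type instanceKey string

// expansion is a hypothetical stand-in for the instance expander: the set of
// instance addresses the current configuration expands to. A nil map models a
// walk (destroy) that has no expansion data at all.
type expansion map[string]bool

// stateRecord is a hypothetical stand-in for the provider information that a
// resource instance carries in state.
type stateRecord struct {
	providerConfig string      // provider address recorded at create time
	providerKey    instanceKey // provider instance key recorded at create time
}

// resolveKey mirrors the control flow added to resolveProvider: evaluate the
// provider key expression only when expansion data exists and still covers
// this instance; otherwise fall back to the key recorded in state, refusing
// the fallback if the configured provider address has changed.
func resolveKey(addr, configProvider string, exp expansion, st stateRecord,
	evalKey func(addr string) (instanceKey, error)) (instanceKey, error) {
	if exp != nil && exp[addr] {
		// Valid expansion: each.key / each.value are available, so the key
		// expression can be evaluated as usual.
		return evalKey(addr)
	}
	// Orphan or destroy path: no usable expansion for this instance.
	if configProvider != st.providerConfig {
		return "", fmt.Errorf(
			"provider from configuration %s does not match provider from state %s for %s",
			configProvider, st.providerConfig, addr)
	}
	return st.providerKey, nil
}

func main() {
	st := stateRecord{providerConfig: `provider["test"].al`, providerKey: "secondary"}

	// The instance is no longer in the expansion, so the state key is used.
	key, err := resolveKey(`test_instance.a["secondary"]`, `provider["test"].al`, expansion{}, st, nil)
	fmt.Println(key, err) // secondary <nil>

	// The configured provider changed too severely, so the fallback is refused
	// rather than silently picking a wrong provider instance.
	_, err = resolveKey(`test_instance.a["secondary"]`, `provider["other"].x`, expansion{}, st, nil)
	fmt.Println(err)
}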

View File

@@ -141,7 +141,7 @@ func (n *NodeApplyableResourceInstance) Execute(ctx EvalContext, op walkOperatio
return diags
}
-diags = n.resolveProvider(ctx)
+diags = n.resolveProvider(ctx, true)
if diags.HasErrors() {
return diags
}

View File

@@ -143,7 +143,7 @@ func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation)
// Eval info is different depending on what kind of resource this is
switch addr.Resource.Resource.Mode {
case addrs.ManagedResourceMode:
-diags = n.resolveProvider(ctx)
+diags = n.resolveProvider(ctx, false)
if diags.HasErrors() {
return diags
}

View File

@@ -92,7 +92,7 @@ func (n *graphNodeImportState) Execute(ctx EvalContext, op walkOperation) (diags
ResolvedProvider: n.ResolvedProvider,
},
}
-diags = diags.Append(asAbsNode.resolveProvider(ctx))
+diags = diags.Append(asAbsNode.resolveProvider(ctx, true))
if diags.HasErrors() {
return diags
}

View File

@@ -60,6 +60,11 @@ func (n *NodePlanDestroyableResourceInstance) Execute(ctx EvalContext, op walkOp
func (n *NodePlanDestroyableResourceInstance) managedResourceExecute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
addr := n.ResourceInstanceAddr()
diags = diags.Append(n.resolveProvider(ctx, false))
if diags.HasErrors() {
return diags
}
// Declare a bunch of variables that are used for state during
// evaluation. These are written to by address in the EvalNodes we
// declare below.

View File

@@ -86,7 +86,7 @@ var (
func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics {
addr := n.ResourceInstanceAddr()
-diags := n.resolveProvider(ctx)
+diags := n.resolveProvider(ctx, true)
if diags.HasErrors() {
return diags
}

View File

@@ -57,7 +57,7 @@ func (n *NodePlannableResourceInstanceOrphan) Execute(ctx EvalContext, op walkOp
// Eval info is different depending on what kind of resource this is
switch addr.Resource.Resource.Mode {
case addrs.ManagedResourceMode:
-diags := n.resolveProvider(ctx)
+diags := n.resolveProvider(ctx, true)
if diags.HasErrors() {
return diags
}
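
Taken together, the call sites above pass hasExpansionData = true wherever the walk builds expansion data from configuration (apply, plan, plan of orphans, and import), and false on the destroy paths (NodeDestroyResourceInstance and NodePlanDestroyableResourceInstance), which never expand configuration and therefore always resolve the provider key from state. Note that even with hasExpansionData = true, an orphaned instance that is no longer covered by the current expansion takes the state fallback.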