Mirror of https://github.com/opentofu/opentofu.git
Merge pull request #32208 from hashicorp/jbardin/pre-desstroy-refresh
Make the pre-destroy refresh a full plan
commit b5168eb6f4
@@ -33,6 +33,16 @@ type PlanOpts struct {
 	// instance using its corresponding provider.
 	SkipRefresh bool
 
+	// PreDestroyRefresh indicates that this is being passed to a plan used to
+	// refresh the state immediately before a destroy plan.
+	// FIXME: This is a temporary fix to allow the pre-destroy refresh to
+	// succeed. The refreshing operation during destroy must be a special case,
+	// which can allow for missing instances in the state, and avoid blocking
+	// on failing condition tests. The destroy plan itself should be
+	// responsible for this special case of refreshing, and the separate
+	// pre-destroy plan removed entirely.
+	PreDestroyRefresh bool
+
 	// SetVariables are the raw values for root module variables as provided
 	// by the user who is requesting the run, prior to any normalization or
 	// substitution of defaults. See the documentation for the InputValue
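The FIXME above frames the intent of the new option: a pre-destroy refresh is a plan that exists only to update state, so failing custom conditions should warn rather than block. A minimal standalone sketch of that idea, using stand-in types rather than the real terraform packages (PlanMode, PlanOpts and conditionsAreBlocking here are illustrative names, not the project's API):

package main

import "fmt"

type PlanMode int

const (
	NormalMode PlanMode = iota
	RefreshOnlyMode
	DestroyMode
)

// PlanOpts is a cut-down stand-in for the real options struct.
type PlanOpts struct {
	Mode              PlanMode
	SkipRefresh       bool
	PreDestroyRefresh bool
}

// conditionsAreBlocking reports whether failing precondition/postcondition
// checks should abort the plan. They are advisory (warnings only) for
// refresh-only plans and for the pre-destroy refresh.
func conditionsAreBlocking(o PlanOpts) bool {
	return !(o.Mode == RefreshOnlyMode || o.PreDestroyRefresh)
}

func main() {
	refresh := PlanOpts{Mode: NormalMode, PreDestroyRefresh: true}
	fmt.Println(conditionsAreBlocking(refresh)) // false: failing conditions only warn
}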
@@ -337,8 +347,16 @@ func (c *Context) destroyPlan(config *configs.Config, prevRunState *states.State
 	if !opts.SkipRefresh && !prevRunState.Empty() {
 		log.Printf("[TRACE] Context.destroyPlan: calling Context.plan to get the effect of refreshing the prior state")
 		refreshOpts := *opts
-		refreshOpts.Mode = plans.RefreshOnlyMode
-		refreshPlan, refreshDiags := c.refreshOnlyPlan(config, prevRunState, &refreshOpts)
+		refreshOpts.Mode = plans.NormalMode
+		refreshOpts.PreDestroyRefresh = true
+
+		// FIXME: A normal plan is required here to refresh the state, because
+		// the state and configuration may not match during a destroy, and a
+		// normal refresh plan can fail with evaluation errors. In the future
+		// the destroy plan should take care of refreshing instances itself,
+		// where the special cases of evaluation and skipping condition checks
+		// can be done.
+		refreshPlan, refreshDiags := c.plan(config, prevRunState, &refreshOpts)
 		if refreshDiags.HasErrors() {
 			// NOTE: Normally we'd append diagnostics regardless of whether
 			// there are errors, just in case there are warnings we'd want to
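In isolation, the hunk above swaps the refresh-only plan for a normal plan call with the new flag set. A hedged standalone sketch of just that option wiring, with a simplified stand-in type (PlanOpts and the mode strings here are illustrative, not the real plans package values):

package main

import "fmt"

// PlanOpts is a cut-down stand-in for the real options struct.
type PlanOpts struct {
	Mode              string // stand-in for plans.NormalMode, plans.DestroyMode, etc.
	SkipRefresh       bool
	PreDestroyRefresh bool
}

// preDestroyRefreshOpts copies the caller's options, forces normal mode and
// marks the plan as a pre-destroy refresh, mirroring the new destroyPlan
// wiring (refreshOpts := *opts; Mode = NormalMode; PreDestroyRefresh = true).
func preDestroyRefreshOpts(opts *PlanOpts) PlanOpts {
	refreshOpts := *opts
	refreshOpts.Mode = "normal"
	refreshOpts.PreDestroyRefresh = true
	return refreshOpts
}

func main() {
	destroyOpts := &PlanOpts{Mode: "destroy"}
	fmt.Printf("%+v\n", preDestroyRefreshOpts(destroyOpts))
	// prints: {Mode:normal SkipRefresh:false PreDestroyRefresh:true}
}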
@@ -558,6 +576,7 @@ func (c *Context) planGraph(config *configs.Config, prevRunState *states.State,
 			Targets: opts.Targets,
 			ForceReplace: opts.ForceReplace,
 			skipRefresh: opts.SkipRefresh,
+			preDestroyRefresh: opts.PreDestroyRefresh,
 			Operation: walkPlan,
 		}).Build(addrs.RootModuleInstance)
 		return graph, walkPlan, diags
@@ -3827,3 +3827,103 @@ resource "test_object" "b" {
 	_, diags = ctx.Plan(m, state, opts)
 	assertNoErrors(t, diags)
 }
+
+func TestContext2Plan_destroyPartialState(t *testing.T) {
+	m := testModuleInline(t, map[string]string{
+		"main.tf": `
+resource "test_object" "a" {
+}
+
+output "out" {
+  value = module.mod.out
+}
+
+module "mod" {
+  source = "./mod"
+}
+`,
+
+		"./mod/main.tf": `
+resource "test_object" "a" {
+  count = 2
+
+  lifecycle {
+    precondition {
+      # test_object_b has already been destroyed, so referencing the first
+      # instance must not fail during a destroy plan.
+      condition = test_object.b[0].test_string == "invalid"
+      error_message = "should not block destroy"
+    }
+    precondition {
+      # this failing condition should not block a destroy plan
+      condition = !local.continue
+      error_message = "should not block destroy"
+    }
+  }
+}
+
+resource "test_object" "b" {
+  count = 2
+}
+
+locals {
+  continue = true
+}
+
+output "out" {
+  # the reference to test_object.b[0] may not be valid during a destroy plan,
+  # but should not fail.
+  value = local.continue ? test_object.a[1].test_string != "invalid" && test_object.b[0].test_string != "invalid" : false
+
+  precondition {
+    # test_object_b has already been destroyed, so referencing the first
+    # instance must not fail during a destroy plan.
+    condition = test_object.b[0].test_string == "invalid"
+    error_message = "should not block destroy"
+  }
+  precondition {
+    # this failing condition should not block a destroy plan
+    condition = test_object.a[0].test_string == "invalid"
+    error_message = "should not block destroy"
+  }
+}
+`})
+
+	p := simpleMockProvider()
+
+	// This state could be the result of a failed destroy, leaving only 2
+	// remaining instances. We want to be able to continue the destroy to
+	// remove everything without blocking on invalid references or failing
+	// conditions.
+	state := states.NewState()
+	mod := state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.NoKey))
+	mod.SetResourceInstanceCurrent(
+		mustResourceInstanceAddr("test_object.a[0]").Resource,
+		&states.ResourceInstanceObjectSrc{
+			Status: states.ObjectTainted,
+			AttrsJSON: []byte(`{"test_string":"current"}`),
+			Dependencies: []addrs.ConfigResource{},
+		},
+		mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`),
+	)
+	mod.SetResourceInstanceCurrent(
+		mustResourceInstanceAddr("test_object.a[1]").Resource,
+		&states.ResourceInstanceObjectSrc{
+			Status: states.ObjectTainted,
+			AttrsJSON: []byte(`{"test_string":"current"}`),
+			Dependencies: []addrs.ConfigResource{},
+		},
+		mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`),
+	)
+
+	ctx := testContext2(t, &ContextOpts{
+		Providers: map[addrs.Provider]providers.Factory{
+			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
+		},
+	})
+
+	_, diags := ctx.Plan(m, state, &PlanOpts{
+		Mode: plans.DestroyMode,
+	})
+	assertNoErrors(t, diags)
+}
@@ -49,6 +49,11 @@ type PlanGraphBuilder struct {
 	// skipRefresh indicates that we should skip refreshing managed resources
 	skipRefresh bool
 
+	// preDestroyRefresh indicates that we are executing the refresh which
+	// happens immediately before a destroy plan, which happens to use the
+	// normal planning mode so skipPlanChanges cannot be set.
+	preDestroyRefresh bool
+
 	// skipPlanChanges indicates that we should skip the step of comparing
 	// prior state with configuration and generating planned changes to
 	// resource instances. (This is for the "refresh only" planning mode,
@@ -111,7 +116,7 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
 		&LocalTransformer{Config: b.Config},
 		&OutputTransformer{
 			Config: b.Config,
-			RefreshOnly: b.skipPlanChanges,
+			RefreshOnly: b.skipPlanChanges || b.preDestroyRefresh,
 			PlanDestroy: b.Operation == walkPlanDestroy,
 
 			// NOTE: We currently treat anything built with the plan graph
@@ -214,6 +219,7 @@ func (b *PlanGraphBuilder) initPlan() {
 			NodeAbstractResource: a,
 			skipRefresh: b.skipRefresh,
 			skipPlanChanges: b.skipPlanChanges,
+			preDestroyRefresh: b.preDestroyRefresh,
 			forceReplace: b.ForceReplace,
 		}
 	}
@@ -37,6 +37,8 @@ type NodeAbstractResourceInstance struct {
 	storedProviderConfig addrs.AbsProviderConfig
 
 	Dependencies []addrs.ConfigResource
+
+	preDestroyRefresh bool
 }
 
 // NewNodeAbstractResourceInstance creates an abstract resource instance graph
@@ -682,6 +684,11 @@ func (n *NodeAbstractResourceInstance) plan(
 		return plan, state, keyData, diags.Append(err)
 	}
 
+	checkRuleSeverity := tfdiags.Error
+	if n.preDestroyRefresh {
+		checkRuleSeverity = tfdiags.Warning
+	}
+
 	if plannedChange != nil {
 		// If we already planned the action, we stick to that plan
 		createBeforeDestroy = plannedChange.Action == plans.CreateThenDelete
@@ -708,7 +715,7 @@ func (n *NodeAbstractResourceInstance) plan(
 		addrs.ResourcePrecondition,
 		n.Config.Preconditions,
 		ctx, n.Addr, keyData,
-		tfdiags.Error,
+		checkRuleSeverity,
 	)
 	diags = diags.Append(checkDiags)
 	if diags.HasErrors() {
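The pattern in the two hunks above (pick a severity once, then hand it to the check evaluation) is what lets a failing precondition surface as a warning during the pre-destroy refresh instead of aborting the plan. A small self-contained sketch of that severity demotion, using invented names (severity, checkRule and evalCheckRules here are not the real tfdiags or terraform APIs):

package main

import "fmt"

type severity string

const (
	sevError   severity = "error"
	sevWarning severity = "warning"
)

type checkRule struct {
	condition    bool
	errorMessage string
}

type diagnostic struct {
	sev severity
	msg string
}

// evalCheckRules reports every failing rule at the requested severity, the
// same way the real evaluation accepts a severity argument instead of
// hard-coding tfdiags.Error.
func evalCheckRules(rules []checkRule, sev severity) []diagnostic {
	var diags []diagnostic
	for _, r := range rules {
		if !r.condition {
			diags = append(diags, diagnostic{sev: sev, msg: r.errorMessage})
		}
	}
	return diags
}

func main() {
	rules := []checkRule{{condition: false, errorMessage: "should not block destroy"}}

	// During a pre-destroy refresh the severity is demoted to a warning,
	// so the failing rule is reported but does not block the destroy.
	preDestroyRefresh := true
	checkRuleSeverity := sevError
	if preDestroyRefresh {
		checkRuleSeverity = sevWarning
	}
	fmt.Println(evalCheckRules(rules, checkRuleSeverity))
}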
@@ -25,6 +25,8 @@ type nodeExpandPlannableResource struct {
 	// skipRefresh indicates that we should skip refreshing individual instances
 	skipRefresh bool
 
+	preDestroyRefresh bool
+
 	// skipPlanChanges indicates we should skip trying to plan change actions
 	// for any instances.
 	skipPlanChanges bool
@@ -328,6 +330,7 @@ func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext,
 		a.ProviderMetas = n.ProviderMetas
 		a.dependsOn = n.dependsOn
 		a.Dependencies = n.dependencies
+		a.preDestroyRefresh = n.preDestroyRefresh
 
 		return &NodePlannableResourceInstance{
 			NodeAbstractResourceInstance: a,
@@ -83,7 +83,7 @@ func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (di
 	}
 
 	checkRuleSeverity := tfdiags.Error
-	if n.skipPlanChanges {
+	if n.skipPlanChanges || n.preDestroyRefresh {
 		checkRuleSeverity = tfdiags.Warning
 	}
 
@@ -128,6 +128,11 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
 	var change *plans.ResourceInstanceChange
 	var instanceRefreshState *states.ResourceInstanceObject
 
+	checkRuleSeverity := tfdiags.Error
+	if n.skipPlanChanges || n.preDestroyRefresh {
+		checkRuleSeverity = tfdiags.Warning
+	}
+
 	_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
 	diags = diags.Append(err)
 	if diags.HasErrors() {
@@ -280,7 +285,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
 			addrs.ResourcePostcondition,
 			n.Config.Postconditions,
 			ctx, n.ResourceInstanceAddr(), repeatData,
-			tfdiags.Error,
+			checkRuleSeverity,
 		)
 		diags = diags.Append(checkDiags)
 	} else {
@@ -298,7 +303,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
 			addrs.ResourcePrecondition,
 			n.Config.Preconditions,
 			ctx, addr, repeatData,
-			tfdiags.Warning,
+			checkRuleSeverity,
 		)
 		diags = diags.Append(checkDiags)
 
@@ -321,7 +326,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
 			addrs.ResourcePostcondition,
 			n.Config.Postconditions,
 			ctx, addr, repeatData,
-			tfdiags.Warning,
+			checkRuleSeverity,
 		)
 		diags = diags.Append(checkDiags)
 	}