From 0b3b84acc1ade2e394fcb7e3b956bf2ea0789eb1 Mon Sep 17 00:00:00 2001
From: James Bardin
Date: Mon, 4 Jan 2021 17:17:35 -0500
Subject: [PATCH] refresh state during a destroy plan

Because the destroy plan only creates the necessary changes for apply
to remove all the resources, it does not read resources or data
sources, leaving stale data in the state. In most cases this is not a
problem, but when a provider configuration uses resource values, the
provider may not be able to run correctly during apply. In prior
versions of Terraform, the implicit refresh that happened during
`terraform destroy` would update the data sources and remove missing
resources from state as required.

The destroy plan graph has a minimal amount of information, so it is
not feasible to work the reading of resources into that operation
without completely replicating the normal plan graph, and updating the
plan graph and all of the destroy node implementations would also be a
considerable amount of refactoring. Instead, we can run a normal plan
to refresh the state before creating the destroy plan. This restores
behavior similar to core versions prior to 0.14, and the refresh can
still be skipped with the `-refresh=false` CLI flag.
---
 terraform/context.go                         | 78 +++++++++++++++----
 terraform/graph_builder_destroy_plan.go      |  8 +-
 terraform/node_resource_abstract_instance.go |  7 ++
 .../apply-destroy-data-resource/main.tf      |  4 +-
 .../plan-module-destroy-gh-1835/b/main.tf    |  2 +-
 5 files changed, 79 insertions(+), 20 deletions(-)

diff --git a/terraform/context.go b/terraform/context.go
index 7e24de3415..34dea4c873 100644
--- a/terraform/context.go
+++ b/terraform/context.go
@@ -547,44 +547,96 @@ The -target option is not for routine use, and is provided only for exceptional
 		varVals[k] = dv
 	}
 
-	p := &plans.Plan{
+	plan := &plans.Plan{
 		VariableValues:  varVals,
 		TargetAddrs:     c.targets,
 		ProviderSHA256s: c.providerSHA256s,
 	}
 
-	operation := walkPlan
-	graphType := GraphTypePlan
-	if c.destroy {
-		operation = walkPlanDestroy
-		graphType = GraphTypePlanDestroy
+	switch {
+	case c.destroy:
+		diags = diags.Append(c.destroyPlan(plan))
+	default:
+		diags = diags.Append(c.plan(plan))
 	}
 
-	graph, graphDiags := c.Graph(graphType, nil)
+	return plan, diags
+}
+
+func (c *Context) plan(plan *plans.Plan) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+
+	graph, graphDiags := c.Graph(GraphTypePlan, nil)
 	diags = diags.Append(graphDiags)
 	if graphDiags.HasErrors() {
-		return nil, diags
+		return diags
 	}
 
 	// Do the walk
-	walker, walkDiags := c.walk(graph, operation)
+	walker, walkDiags := c.walk(graph, walkPlan)
 	diags = diags.Append(walker.NonFatalDiagnostics)
 	diags = diags.Append(walkDiags)
 	if walkDiags.HasErrors() {
-		return nil, diags
+		return diags
 	}
 
-	p.Changes = c.changes
+	plan.Changes = c.changes
 
 	c.refreshState.SyncWrapper().RemovePlannedResourceInstanceObjects()
 
 	refreshedState := c.refreshState.DeepCopy()
-	p.State = refreshedState
+	plan.State = refreshedState
 
 	// replace the working state with the updated state, so that immediate calls
 	// to Apply work as expected.
 	c.state = refreshedState
 
-	return p, diags
+	return diags
+}
+
+func (c *Context) destroyPlan(destroyPlan *plans.Plan) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+	c.changes = plans.NewChanges()
+
+	// A destroy plan starts by running Refresh to read any pending data
+	// sources, and remove missing managed resources. This is required because
+	// a "destroy plan" is only creating delete changes, and is essentially a
+	// local operation.
+	if !c.skipRefresh {
+		refreshPlan := &plans.Plan{
+			VariableValues:  destroyPlan.VariableValues,
+			TargetAddrs:     c.targets,
+			ProviderSHA256s: c.providerSHA256s,
+		}
+
+		refreshDiags := c.plan(refreshPlan)
+
+		diags = diags.Append(refreshDiags)
+		if diags.HasErrors() {
+			return diags
+		}
+
+		// insert the refreshed state into the destroy plan result, and discard
+		// the changes recorded from the refresh.
+		destroyPlan.State = refreshPlan.State
+		c.changes = plans.NewChanges()
+	}
+
+	graph, graphDiags := c.Graph(GraphTypePlanDestroy, nil)
+	diags = diags.Append(graphDiags)
+	if graphDiags.HasErrors() {
+		return diags
+	}
+
+	// Do the walk
+	walker, walkDiags := c.walk(graph, walkPlan)
+	diags = diags.Append(walker.NonFatalDiagnostics)
+	diags = diags.Append(walkDiags)
+	if walkDiags.HasErrors() {
+		return diags
+	}
+
+	destroyPlan.Changes = c.changes
+	return diags
 }
 
 // Refresh goes through all the resources in the state and refreshes them
diff --git a/terraform/graph_builder_destroy_plan.go b/terraform/graph_builder_destroy_plan.go
index d821622820..a167295074 100644
--- a/terraform/graph_builder_destroy_plan.go
+++ b/terraform/graph_builder_destroy_plan.go
@@ -12,7 +12,10 @@ import (
 // planning a pure-destroy.
 //
 // Planning a pure destroy operation is simple because we can ignore most
-// ordering configuration and simply reverse the state.
+// ordering configuration and simply reverse the state. This graph mainly
+// exists for targeting, because we need to walk the destroy dependencies to
+// ensure we plan the required resources. Without the requirement for
+// targeting, the plan could theoretically be created directly from the state.
 type DestroyPlanGraphBuilder struct {
 	// Config is the configuration tree to build the plan from.
 	Config *configs.Config
@@ -72,6 +75,7 @@ func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
 			State: b.State,
 		},
 
+		// Create the delete changes for root module outputs.
 		&OutputTransformer{
 			Config:  b.Config,
 			Destroy: true,
@@ -93,8 +97,6 @@ func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
 			Schemas: b.Schemas,
 		},
 
-		// Target. Note we don't set "Destroy: true" here since we already
-		// created proper destroy ordering.
 		&TargetsTransformer{Targets: b.Targets},
 
 		// Close opened plugin connections
diff --git a/terraform/node_resource_abstract_instance.go b/terraform/node_resource_abstract_instance.go
index 417d94bf1a..3278759e55 100644
--- a/terraform/node_resource_abstract_instance.go
+++ b/terraform/node_resource_abstract_instance.go
@@ -1401,6 +1401,13 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt
 		return plannedChange, plannedNewState, diags
 	}
 
+	// While this isn't a "diff", continue to call this for data sources.
+	diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PreDiff(n.Addr, states.CurrentGen, priorVal, configVal)
+	}))
+	if diags.HasErrors() {
+		return nil, nil, diags
+	}
 	// We have a complete configuration with no dependencies to wait on, so we
 	// can read the data source into the state.
 	newVal, readDiags := n.readDataSource(ctx, configVal)
diff --git a/terraform/testdata/apply-destroy-data-resource/main.tf b/terraform/testdata/apply-destroy-data-resource/main.tf
index cb16d9f341..0d941a7077 100644
--- a/terraform/testdata/apply-destroy-data-resource/main.tf
+++ b/terraform/testdata/apply-destroy-data-resource/main.tf
@@ -1,5 +1,3 @@
 data "null_data_source" "testing" {
-  inputs = {
-    test = "yes"
-  }
+  foo = "yes"
 }
diff --git a/terraform/testdata/plan-module-destroy-gh-1835/b/main.tf b/terraform/testdata/plan-module-destroy-gh-1835/b/main.tf
index c3b0270b01..3b0cc66645 100644
--- a/terraform/testdata/plan-module-destroy-gh-1835/b/main.tf
+++ b/terraform/testdata/plan-module-destroy-gh-1835/b/main.tf
@@ -1,5 +1,5 @@
 variable "a_id" {}
 
 resource "aws_instance" "b" {
-  command = "echo ${var.a_id}"
+  foo = "echo ${var.a_id}"
 }
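
For illustration, the control flow that the new Context.destroyPlan follows can
be summarized in a small, self-contained Go sketch. The types and helpers below
(State, Plan, refreshPlan, destroyPlan) are hypothetical stand-ins rather than
Terraform's real API; they only model the sequence described in the commit
message: unless the refresh is skipped, run a normal plan to refresh the state,
then build delete-only changes from the refreshed state.

package main

import "fmt"

// Hypothetical, simplified stand-ins for Terraform's state and plan types,
// used only to model the two-phase flow introduced by this patch.
type State struct{ Resources []string }

type Plan struct {
	State   *State
	Changes []string
}

// refreshPlan stands in for the normal plan walk: it returns an up-to-date
// state in which missing resources have been dropped (and, in the real
// system, data sources re-read).
func refreshPlan(prior *State) *State {
	refreshed := &State{}
	for _, r := range prior.Resources {
		if r != "aws_instance.gone" { // pretend this resource no longer exists remotely
			refreshed.Resources = append(refreshed.Resources, r)
		}
	}
	return refreshed
}

// destroyPlan mirrors the shape of the patched Context.destroyPlan: unless the
// refresh is skipped, run a normal plan first to refresh the state, then
// record only delete changes against whatever state results.
func destroyPlan(prior *State, skipRefresh bool) *Plan {
	state := prior
	if !skipRefresh {
		state = refreshPlan(prior)
	}
	plan := &Plan{State: state}
	for _, r := range state.Resources {
		plan.Changes = append(plan.Changes, "delete "+r)
	}
	return plan
}

func main() {
	prior := &State{Resources: []string{"aws_instance.a", "aws_instance.gone", "data.null_data_source.testing"}}

	// Default behavior: refresh first, so the stale resource is not planned for deletion.
	fmt.Println(destroyPlan(prior, false).Changes)

	// With -refresh=false, the prior state is used as-is.
	fmt.Println(destroyPlan(prior, true).Changes)
}

The design choice in the patch matches this sketch: rather than teaching the
destroy graph to read resources and data sources itself, the existing normal
plan walk is reused to refresh the state, at the cost of one extra graph walk
whenever the refresh is not skipped.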