opentofu/terraform/node_resource_plan_instance.go
Martin Atkins f2adfb6e2a core: Treat deposed objects the same as orphaned current objects
In many ways a deposed object is equivalent to an orphaned current object
in that the only action we can take with it is to destroy it. However, we
do still need to take some preparation steps in both cases: first, we must
ensure we track the upgraded version of the existing object so that we'll
be able to successfully render our plan, and secondly we must refresh the
existing object to make sure it still exists in the remote system.
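These two preparation steps apply to any object whose only remaining action is destruction: upgrade the stored data so the plan can be rendered, then refresh so we notice if the object is already gone. Below is a minimal standalone sketch of that shared shape; storedObject, upgradeObject, refreshObject, and prepareDestroyOnlyObject are made-up illustrations of the idea, not the real Terraform internals:

package main

import "fmt"

// storedObject stands in for an object recorded in the state that can only
// be destroyed: either an orphaned current object or a deposed one.
type storedObject struct {
	schemaVersion  int
	existsRemotely bool
}

// upgradeObject stands in for the schema-upgrade step: the stored data must
// match the current provider schema before the plan can be rendered.
func upgradeObject(obj storedObject, currentVersion int) storedObject {
	obj.schemaVersion = currentVersion
	return obj
}

// refreshObject stands in for the refresh step: if the remote object is
// already gone, we drop it rather than planning a doomed destroy.
func refreshObject(obj storedObject) (storedObject, bool) {
	if !obj.existsRemotely {
		return storedObject{}, false
	}
	return obj, true
}

// prepareDestroyOnlyObject applies the same preparation to an orphaned
// current object and to a deposed object: upgrade first, then refresh.
func prepareDestroyOnlyObject(obj storedObject, currentVersion int) (storedObject, bool) {
	return refreshObject(upgradeObject(obj, currentVersion))
}

func main() {
	orphaned := storedObject{schemaVersion: 1, existsRemotely: true}
	deposed := storedObject{schemaVersion: 1, existsRemotely: false}

	if obj, ok := prepareDestroyOnlyObject(orphaned, 2); ok {
		fmt.Printf("plan destroy for object at schema version %d\n", obj.schemaVersion)
	}
	if _, ok := prepareDestroyOnlyObject(deposed, 2); !ok {
		fmt.Println("deposed object already absent; nothing to destroy")
	}
}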

We were previously doing these extra steps for orphan objects but not for
deposed ones, which meant that the behavior for deposed objects would be
subtly different and violate the invariants our callers expect in order
to display a plan. This also created the risk that a deposed object
already deleted in the remote system would become "stuck" because
Terraform would still plan to destroy it, which might cause the provider
to return an error when it tries to delete an already-absent object.

This also makes the deposed object planning take into account the
"skipPlanChanges" flag, which is important to get a correct result in
the "refresh only" planning mode.

It's a shame that we have almost identical code handling both the orphan
and deposed situations, but they differ in that the latter must call
different functions to interact with the deposed rather than the current
objects in the state. Perhaps a later change can improve on this with some
more refactoring, but this commit is already a little more disruptive than
I'd like and so I'm intentionally deferring that for another day.
2021-05-13 09:05:06 -07:00


package terraform
import (
"fmt"
"log"
"sort"
"github.com/hashicorp/terraform/plans"
"github.com/hashicorp/terraform/states"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/terraform/addrs"
)
// NodePlannableResourceInstance represents a _single_ resource
// instance that is plannable. This means this represents a single
// count index, for example.
type NodePlannableResourceInstance struct {
*NodeAbstractResourceInstance
ForceCreateBeforeDestroy bool
// skipRefresh indicates that we should skip refreshing individual instances
skipRefresh bool
// skipPlanChanges indicates we should skip trying to plan change actions
// for any instances.
skipPlanChanges bool
// forceReplace are resource instance addresses where the user wants to
// force generating a replace action. This set isn't pre-filtered, so
// it might contain addresses that have nothing to do with the resource
// that this node represents, which the node itself must therefore ignore.
forceReplace []addrs.AbsResourceInstance
}
var (
_ GraphNodeModuleInstance = (*NodePlannableResourceInstance)(nil)
_ GraphNodeReferenceable = (*NodePlannableResourceInstance)(nil)
_ GraphNodeReferencer = (*NodePlannableResourceInstance)(nil)
_ GraphNodeConfigResource = (*NodePlannableResourceInstance)(nil)
_ GraphNodeResourceInstance = (*NodePlannableResourceInstance)(nil)
_ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil)
_ GraphNodeAttachResourceState = (*NodePlannableResourceInstance)(nil)
_ GraphNodeExecutable = (*NodePlannableResourceInstance)(nil)
)
// GraphNodeEvalable
func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics {
addr := n.ResourceInstanceAddr()
// Eval info is different depending on what kind of resource this is
switch addr.Resource.Resource.Mode {
case addrs.ManagedResourceMode:
return n.managedResourceExecute(ctx)
case addrs.DataResourceMode:
return n.dataResourceExecute(ctx)
default:
panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
}
}
func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
config := n.Config
addr := n.ResourceInstanceAddr()
var change *plans.ResourceInstanceChange
_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
diags = diags.Append(err)
if diags.HasErrors() {
return diags
}
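// Read the object recorded for this data resource instance in the
// state, if any.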
state, readDiags := n.readResourceInstanceState(ctx, addr)
diags = diags.Append(readDiags)
if diags.HasErrors() {
return diags
}
// We'll save a snapshot of what we just read from the state into the
// prevRunState which will capture the result read in the previous
// run, possibly tweaked by any upgrade steps that
// readResourceInstanceState might've made.
// However, note that we don't have any explicit mechanism for upgrading
// data resource results as we do for managed resources, and so the
// prevRunState might not conform to the current schema if the
// previous run was with a different provider version.
diags = diags.Append(n.writeResourceInstanceState(ctx, state, prevRunState))
if diags.HasErrors() {
return diags
}
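// The configuration for a data resource must not refer to the resource's
// own attributes.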
diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
if diags.HasErrors() {
return diags
}
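// Plan the read for this data source, which may happen immediately or be
// deferred until the apply step.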
change, state, planDiags := n.planDataSource(ctx, state)
diags = diags.Append(planDiags)
if diags.HasErrors() {
return diags
}
// write the data source into both the refresh state and the
// working state
diags = diags.Append(n.writeResourceInstanceState(ctx, state, refreshState))
if diags.HasErrors() {
return diags
}
diags = diags.Append(n.writeResourceInstanceState(ctx, state, workingState))
if diags.HasErrors() {
return diags
}
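// Record the planned change (if any) so that it appears in the plan.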
diags = diags.Append(n.writeChange(ctx, change, ""))
return diags
}
func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
config := n.Config
addr := n.ResourceInstanceAddr()
var change *plans.ResourceInstanceChange
var instanceRefreshState *states.ResourceInstanceObject
_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
diags = diags.Append(err)
if diags.HasErrors() {
return diags
}
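// The configuration must not refer to the resource's own attributes.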
diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
if diags.HasErrors() {
return diags
}
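// Read this instance's prior object from the state, upgrading it to the
// current resource schema if necessary.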
instanceRefreshState, readDiags := n.readResourceInstanceState(ctx, addr)
diags = diags.Append(readDiags)
if diags.HasErrors() {
return diags
}
// We'll save a snapshot of what we just read from the state into the
// prevRunState before we do anything else, since this will capture the
// result of any schema upgrading that readResourceInstanceState just did,
// but not include any out-of-band changes we might detect in the
// refresh step below.
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, prevRunState))
if diags.HasErrors() {
return diags
}
// Also the refreshState, because that should still reflect schema upgrades
// even if it doesn't reflect upstream changes.
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
if diags.HasErrors() {
return diags
}
// In 0.13 we could be refreshing a resource with no config.
// We should be operating on a managed resource, but check here to be certain
if n.Config == nil || n.Config.Managed == nil {
log.Printf("[WARN] managedResourceExecute: no Managed config value found in instance state for %q", n.Addr)
} else {
if instanceRefreshState != nil {
instanceRefreshState.CreateBeforeDestroy = n.Config.Managed.CreateBeforeDestroy || n.ForceCreateBeforeDestroy
}
}
// Refresh, maybe
if !n.skipRefresh {
s, refreshDiags := n.refresh(ctx, states.NotDeposed, instanceRefreshState)
diags = diags.Append(refreshDiags)
if diags.HasErrors() {
return diags
}
instanceRefreshState = s
if instanceRefreshState != nil {
// When refreshing we start by merging the stored dependencies and
// the configured dependencies. The configured dependencies will be
// stored to state once the changes are applied. If the plan
// results in no changes, we will re-write these dependencies
// below.
instanceRefreshState.Dependencies = mergeDeps(n.Dependencies, instanceRefreshState.Dependencies)
}
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
if diags.HasErrors() {
return diags
}
}
// Plan the instance, unless we're in the refresh-only mode
if !n.skipPlanChanges {
change, instancePlanState, planDiags := n.plan(
ctx, change, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace,
)
diags = diags.Append(planDiags)
if diags.HasErrors() {
return diags
}
diags = diags.Append(n.checkPreventDestroy(change))
if diags.HasErrors() {
return diags
}
diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState))
if diags.HasErrors() {
return diags
}
// If this plan resulted in a NoOp, then apply won't have a chance to make
// any changes to the stored dependencies. Since this is a NoOp we know
// that the stored dependencies will have no effect during apply, and we can
// write them out now.
if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) {
// the refresh state will be the final state for this resource, so
// finalize the dependencies here if they need to be updated.
instanceRefreshState.Dependencies = n.Dependencies
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
if diags.HasErrors() {
return diags
}
}
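// Record the planned change so that it appears in the plan.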
diags = diags.Append(n.writeChange(ctx, change, ""))
} else {
// Even if we don't plan changes, we do still need to at least update
// the working state to reflect the refresh result. If not, then e.g.
// any output values referring to this will not react to the drift.
// (Even if we didn't actually refresh above, this will still save
// the result of any schema upgrading we did in readResourceInstanceState.)
diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, workingState))
if diags.HasErrors() {
return diags
}
}
return diags
}
// mergeDeps returns the union of 2 sets of dependencies
func mergeDeps(a, b []addrs.ConfigResource) []addrs.ConfigResource {
switch {
case len(a) == 0:
return b
case len(b) == 0:
return a
}
set := make(map[string]addrs.ConfigResource)
for _, dep := range a {
set[dep.String()] = dep
}
for _, dep := range b {
set[dep.String()] = dep
}
newDeps := make([]addrs.ConfigResource, 0, len(set))
for _, dep := range set {
newDeps = append(newDeps, dep)
}
return newDeps
}
func depsEqual(a, b []addrs.ConfigResource) bool {
if len(a) != len(b) {
return false
}
less := func(s []addrs.ConfigResource) func(i, j int) bool {
return func(i, j int) bool {
return s[i].String() < s[j].String()
}
}
sort.Slice(a, less(a))
sort.Slice(b, less(b))
for i := range a {
if !a[i].Equal(b[i]) {
return false
}
}
return true
}
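
The two helpers above are easiest to understand from an example. Below is a rough test-style illustration of how mergeDeps and depsEqual behave, assuming the addrs API used elsewhere in this file (addrs.RootModule, addrs.ConfigResource, addrs.Resource); the test name and setup are invented for illustration rather than copied from the existing test suite:

package terraform

import (
	"testing"

	"github.com/hashicorp/terraform/addrs"
)

func TestMergeDepsAndDepsEqualExample(t *testing.T) {
	res := func(name string) addrs.ConfigResource {
		return addrs.ConfigResource{
			Module: addrs.RootModule,
			Resource: addrs.Resource{
				Mode: addrs.ManagedResourceMode,
				Type: "test_thing",
				Name: name,
			},
		}
	}

	a := []addrs.ConfigResource{res("a"), res("b")}
	b := []addrs.ConfigResource{res("b"), res("c")}

	// The union of the two sets has three distinct entries.
	merged := mergeDeps(a, b)
	if len(merged) != 3 {
		t.Fatalf("expected 3 merged dependencies, got %d", len(merged))
	}

	// depsEqual ignores ordering, since it sorts both slices before comparing.
	if !depsEqual([]addrs.ConfigResource{res("b"), res("a")}, a) {
		t.Fatal("expected slices with the same elements to compare equal")
	}
}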