opentofu/internal/terraform/node_resource_plan_instance.go
Martin Atkins 5573868cd0 core: Check pre- and postconditions for resources and output values
If the configuration contains preconditions and/or postconditions for any
objects, we'll check them during evaluation of those objects and generate
errors if any do not pass.

The handling of post-conditions is particularly interesting here because
we intentionally evaluate them _after_ we've committed our record of the
resulting side-effects to the state/plan, with the intent that future
plans against the same object will keep failing until the problem is
addressed either by changing the object so it would pass the precondition
or changing the precondition to accept the current object. That then
avoids the need for us to proactively taint managed resources whose
postconditions fail, as we would for provisioner failures: instead, we can
leave the resolution approach up to the user to decide.

Co-authored-by: Alisdair McDiarmid <alisdair@users.noreply.github.com>
2022-01-31 14:02:53 -05:00
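As a condensed sketch of the ordering described above (taken from the managed-resource path later in this file, not new behavior): the planned change and resulting state are written first, and only then are the postconditions evaluated, so a failing condition leaves the recorded plan in place and will keep failing on future runs until the object or the condition is changed.

	diags = diags.Append(n.writeChange(ctx, change, ""))
	diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState))
	// ... only after the plan and state are recorded:
	checkDiags := evalCheckRules(
		checkResourcePostcondition,
		n.Config.Postconditions,
		ctx, addr.Resource, repeatData,
	)
	diags = diags.Append(checkDiags)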

package terraform

import (
	"fmt"
	"log"
	"sort"

	"github.com/hashicorp/terraform/internal/addrs"
	"github.com/hashicorp/terraform/internal/plans"
	"github.com/hashicorp/terraform/internal/states"
	"github.com/hashicorp/terraform/internal/tfdiags"
)
// NodePlannableResourceInstance represents a _single_ resource
// instance that is plannable. This means this represents a single
// count index, for example.
type NodePlannableResourceInstance struct {
	*NodeAbstractResourceInstance
	ForceCreateBeforeDestroy bool

	// skipRefresh indicates that we should skip refreshing individual instances
	skipRefresh bool

	// skipPlanChanges indicates we should skip trying to plan change actions
	// for any instances.
	skipPlanChanges bool

	// forceReplace are resource instance addresses where the user wants to
	// force generating a replace action. This set isn't pre-filtered, so
	// it might contain addresses that have nothing to do with the resource
	// that this node represents, which the node itself must therefore ignore.
	forceReplace []addrs.AbsResourceInstance
}
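// The following compile-time assertions confirm that
// NodePlannableResourceInstance implements the graph node interfaces the
// plan graph relies on; they have no effect at runtime.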
var (
	_ GraphNodeModuleInstance       = (*NodePlannableResourceInstance)(nil)
	_ GraphNodeReferenceable        = (*NodePlannableResourceInstance)(nil)
	_ GraphNodeReferencer           = (*NodePlannableResourceInstance)(nil)
	_ GraphNodeConfigResource       = (*NodePlannableResourceInstance)(nil)
	_ GraphNodeResourceInstance     = (*NodePlannableResourceInstance)(nil)
	_ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil)
	_ GraphNodeAttachResourceState  = (*NodePlannableResourceInstance)(nil)
	_ GraphNodeExecutable           = (*NodePlannableResourceInstance)(nil)
)
// GraphNodeEvalable
func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics {
	addr := n.ResourceInstanceAddr()

	// Eval info is different depending on what kind of resource this is
	switch addr.Resource.Resource.Mode {
	case addrs.ManagedResourceMode:
		return n.managedResourceExecute(ctx)
	case addrs.DataResourceMode:
		return n.dataResourceExecute(ctx)
	default:
		panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
	}
}
func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
	config := n.Config
	addr := n.ResourceInstanceAddr()

	var change *plans.ResourceInstanceChange

	_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
	diags = diags.Append(err)
	if diags.HasErrors() {
		return diags
	}

	state, readDiags := n.readResourceInstanceState(ctx, addr)
	diags = diags.Append(readDiags)
	if diags.HasErrors() {
		return diags
	}

	// We'll save a snapshot of what we just read from the state into the
	// prevRunState which will capture the result read in the previous
	// run, possibly tweaked by any upgrade steps that
	// readResourceInstanceState might've made.
	// However, note that we don't have any explicit mechanism for upgrading
	// data resource results as we do for managed resources, and so the
	// prevRunState might not conform to the current schema if the
	// previous run was with a different provider version.
	diags = diags.Append(n.writeResourceInstanceState(ctx, state, prevRunState))
	if diags.HasErrors() {
		return diags
	}

	diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
	if diags.HasErrors() {
		return diags
	}

	change, state, repeatData, planDiags := n.planDataSource(ctx, state)
	diags = diags.Append(planDiags)
	if diags.HasErrors() {
		return diags
	}

	// write the data source into both the refresh state and the
	// working state
	diags = diags.Append(n.writeResourceInstanceState(ctx, state, refreshState))
	if diags.HasErrors() {
		return diags
	}
	diags = diags.Append(n.writeResourceInstanceState(ctx, state, workingState))
	if diags.HasErrors() {
		return diags
	}

	diags = diags.Append(n.writeChange(ctx, change, ""))

	// Post-conditions might block further progress. We intentionally do this
	// _after_ writing the state/diff because we want to check against
	// the result of the operation, and to fail on future operations
	// until the user makes the condition succeed.
	checkDiags := evalCheckRules(
		checkResourcePostcondition,
		n.Config.Postconditions,
		ctx, addr.Resource, repeatData,
	)
	diags = diags.Append(checkDiags)

	return diags
}
func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
	config := n.Config
	addr := n.ResourceInstanceAddr()

	var change *plans.ResourceInstanceChange
	var instanceRefreshState *states.ResourceInstanceObject

	_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
	diags = diags.Append(err)
	if diags.HasErrors() {
		return diags
	}

	diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
	if diags.HasErrors() {
		return diags
	}

	instanceRefreshState, readDiags := n.readResourceInstanceState(ctx, addr)
	diags = diags.Append(readDiags)
	if diags.HasErrors() {
		return diags
	}
	// We'll save a snapshot of what we just read from the state into the
	// prevRunState before we do anything else, since this will capture the
	// result of any schema upgrading that readResourceInstanceState just did,
	// but not include any out-of-band changes we might detect in the
	// refresh step below.
	diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, prevRunState))
	if diags.HasErrors() {
		return diags
	}

	// Also update the refreshState, because that should still reflect schema
	// upgrades even if it doesn't reflect upstream changes.
	diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
	if diags.HasErrors() {
		return diags
	}
	// In 0.13 we could be refreshing a resource with no config.
	// We should be operating on a managed resource, but check here to be certain.
	if n.Config == nil || n.Config.Managed == nil {
		log.Printf("[WARN] managedResourceExecute: no Managed config value found in instance state for %q", n.Addr)
	} else {
		if instanceRefreshState != nil {
			instanceRefreshState.CreateBeforeDestroy = n.Config.Managed.CreateBeforeDestroy || n.ForceCreateBeforeDestroy
		}
	}
	// Refresh, maybe
	if !n.skipRefresh {
		s, refreshDiags := n.refresh(ctx, states.NotDeposed, instanceRefreshState)
		diags = diags.Append(refreshDiags)
		if diags.HasErrors() {
			return diags
		}

		instanceRefreshState = s

		if instanceRefreshState != nil {
			// When refreshing we start by merging the stored dependencies and
			// the configured dependencies. The configured dependencies will be
			// stored to state once the changes are applied. If the plan
			// results in no changes, we will re-write these dependencies
			// below.
			instanceRefreshState.Dependencies = mergeDeps(n.Dependencies, instanceRefreshState.Dependencies)
		}

		diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
		if diags.HasErrors() {
			return diags
		}
	}
	// Plan the instance, unless we're in the refresh-only mode
	if !n.skipPlanChanges {
		change, instancePlanState, repeatData, planDiags := n.plan(
			ctx, change, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace,
		)
		diags = diags.Append(planDiags)
		if diags.HasErrors() {
			return diags
		}

		diags = diags.Append(n.checkPreventDestroy(change))
		if diags.HasErrors() {
			return diags
		}

		// FIXME: it is currently important that we write resource changes to
		// the plan (n.writeChange) before we write the corresponding state
		// (n.writeResourceInstanceState).
		//
		// This is because the planned resource state will normally have the
		// status of states.ObjectPlanned, which causes later logic to refer to
		// the contents of the plan to retrieve the resource data. Because
		// there is no shared lock between these two data structures, reversing
		// the order of these writes will cause a brief window of inconsistency
		// which can lead to a failed safety check.
		//
		// Future work should adjust these APIs such that it is impossible to
		// update these two data structures incorrectly through any objects
		// reachable via the terraform.EvalContext API.
		diags = diags.Append(n.writeChange(ctx, change, ""))

		diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState))
		if diags.HasErrors() {
			return diags
		}

		// If this plan resulted in a NoOp, then apply won't have a chance to make
		// any changes to the stored dependencies. Since this is a NoOp we know
		// that the stored dependencies will have no effect during apply, and we
		// can write them out now.
		if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) {
			// the refresh state will be the final state for this resource, so
			// finalize the dependencies here if they need to be updated.
			instanceRefreshState.Dependencies = n.Dependencies
			diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
			if diags.HasErrors() {
				return diags
			}
		}
		// Post-conditions might block completion. We intentionally do this
		// _after_ writing the state/diff because we want to check against
		// the result of the operation, and to fail on future operations
		// until the user makes the condition succeed.
		// (Note that some of these checks will end up being skipped during
		// planning, because their conditions depend on values not yet known.)
		checkDiags := evalCheckRules(
			checkResourcePostcondition,
			n.Config.Postconditions,
			ctx, addr.Resource, repeatData,
		)
		diags = diags.Append(checkDiags)
	} else {
		// Even if we don't plan changes, we do still need to at least update
		// the working state to reflect the refresh result. If not, then e.g.
		// any output values referring to this will not react to the drift.
		// (Even if we didn't actually refresh above, this will still save
		// the result of any schema upgrading we did in readResourceInstanceState.)
		diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, workingState))
		if diags.HasErrors() {
			return diags
		}
	}

	return diags
}
// mergeDeps returns the union of 2 sets of dependencies
func mergeDeps(a, b []addrs.ConfigResource) []addrs.ConfigResource {
	switch {
	case len(a) == 0:
		return b
	case len(b) == 0:
		return a
	}

	set := make(map[string]addrs.ConfigResource)

	for _, dep := range a {
		set[dep.String()] = dep
	}

	for _, dep := range b {
		set[dep.String()] = dep
	}

	newDeps := make([]addrs.ConfigResource, 0, len(set))
	for _, dep := range set {
		newDeps = append(newDeps, dep)
	}

	return newDeps
}
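// depsEqual reports whether a and b contain exactly the same dependency
// addresses, ignoring order. Note that, as written, it sorts both input
// slices in place as a side effect.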
func depsEqual(a, b []addrs.ConfigResource) bool {
	if len(a) != len(b) {
		return false
	}

	less := func(s []addrs.ConfigResource) func(i, j int) bool {
		return func(i, j int) bool {
			return s[i].String() < s[j].String()
		}
	}

	sort.Slice(a, less(a))
	sort.Slice(b, less(b))

	for i := range a {
		if !a[i].Equal(b[i]) {
			return false
		}
	}

	return true
}
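// A minimal sketch of how these two helpers behave (illustrative only; the
// mustConfigResourceAddr helper is assumed from this package's tests, and the
// addresses are hypothetical):
//
//	a := []addrs.ConfigResource{mustConfigResourceAddr("test_object.a")}
//	b := []addrs.ConfigResource{mustConfigResourceAddr("test_object.a"), mustConfigResourceAddr("test_object.b")}
//	merged := mergeDeps(a, b) // contains both addresses exactly once, in no particular order
//	depsEqual(a, b)           // false: the lengths differ
//	depsEqual(merged, b)      // true: both contain the same two addresses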