opentofu/terraform/node_resource_apply.go
Mitchell Hashimoto 0e4a6e3e89
terraform: apply resource must depend on destroy deps
Fixes #10440

This updates the behavior of "apply" resources to depend on the
destroy versions of their dependencies.

We make an exception to this behavior when the "apply" resource is CBD.
This is odd and not 100% correct, but it mimics the behavior of the
legacy graphs and avoids us having to do major core work to support the
100% correct solution.

I'll explain this in examples...

Given the following configuration:

    resource "null_resource" "a" {
       count = "${var.count}"
    }

    resource "null_resource" "b" {
      triggers { key = "${join(",", null_resource.a.*.id)}" }
    }

Assume we've successfully created this configuration with count = 2.
When going from count = 2 to count = 1, `null_resource.b` should wait
for `null_resource.a.1` to destroy.

If it doesn't, then there is a race: depending on when we interpolate the
`triggers.key` attribute of `null_resource.b`, we may get 1 value or 2.
If `null_resource.a.1` has already been destroyed, we'll get 1. Otherwise,
we'll get 2. This was the root cause of #10440.

In the legacy graphs, `null_resource.b` would depend on the destruction
of any `null_resource.a` (orphans, tainted, anything!). This would
ensure proper ordering. We mimic that behavior here.
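
The mechanism used here is a small rewrite of the reference list (see
References() in the file below): when CBD is not set, every reference
also gains a ".destroy" counterpart. A minimal standalone sketch of the
non-CBD case:

    // refs is an illustrative slice standing in for the node's
    // reference list. Each dependency also gains a ".destroy"
    // variant, so the apply waits on that dependency's destroy node.
    refs := []string{"null_resource.a"}
    for _, v := range refs {
        refs = append(refs, v+".destroy")
    }
    // refs is now ["null_resource.a", "null_resource.a.destroy"]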

The difference is CBD. If `null_resource.b` has CBD enabled, then the
ordering **in the legacy graph** becomes:

  1. null_resource.b (create)
  2. null_resource.b (destroy)
  3. null_resource.a (destroy)

In this case, the update would always have 2 values for `triggers.key`,
even though we were destroying a resource later! This scenario required
two `terraform apply` operations.

This is what the CBD check is for in this PR. We do this to mimic the
behavior of the legacy graph.

The correct solution to implement one day is to allow splat references
(`null_resource.a.*.id`) to happen in parallel and only read up to
the `count` amount in the state. This requires some fairly significant
core work close to the 0.8 release date, so we defer it to later and
adopt the 0.7.x behavior for now.
2016-12-03 23:54:29 -08:00


package terraform

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)
// NodeApplyableResource represents a resource that is "applyable":
// it is ready to be applied and is represented by a diff.
type NodeApplyableResource struct {
	*NodeAbstractResource
}

// GraphNodeCreator
func (n *NodeApplyableResource) CreateAddr() *ResourceAddress {
	return n.NodeAbstractResource.Addr
}

// GraphNodeReferencer, overriding NodeAbstractResource
func (n *NodeApplyableResource) References() []string {
	result := n.NodeAbstractResource.References()

	// The "apply" side of a resource generally also depends on the
	// destruction of its dependencies. For example, if an LB references
	// a set of VMs with ${vm.foo.*.id}, then we must wait for the
	// destruction so we get the newly updated list of VMs.
	//
	// The exception here is CBD. When CBD is set, we don't do this since
	// it would create a cycle. By not creating a cycle, we require two
	// applies: the creation step of the first apply will use the OLD
	// values (pre-destroy) and the second apply will perform the update.
	//
	// This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x).
	// We mimic that behavior here now and can improve upon it in the future.
	//
	// This behavior is tested in graph_build_apply_test.go to test ordering.
	cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy
	if !cbd {
		// The "apply" side of a resource always depends on the destruction
		// of all its dependencies in addition to the creation.
		for _, v := range result {
			result = append(result, v+".destroy")
		}
	}

	return result
}
// GraphNodeEvalable
func (n *NodeApplyableResource) EvalTree() EvalNode {
	addr := n.NodeAbstractResource.Addr

	// stateId is the ID to put into the state
	stateId := addr.stateId()

	// Build the instance info. More of this will be populated during eval
	info := &InstanceInfo{
		Id:   stateId,
		Type: addr.Type,
	}

	// Build the resource for eval
	resource := &Resource{
		Name:       addr.Name,
		Type:       addr.Type,
		CountIndex: addr.Index,
	}
	if resource.CountIndex < 0 {
		resource.CountIndex = 0
	}

	// Determine the dependencies for the state. We use some older
	// code for this that we've used for a long time.
	var stateDeps []string
	{
		oldN := &graphNodeExpandedResource{
			Resource: n.Config,
			Index:    addr.Index,
		}

		stateDeps = oldN.StateDependencies()
	}

	// Eval info is different depending on what kind of resource this is
	switch n.Config.Mode {
	case config.ManagedResourceMode:
		return n.evalTreeManagedResource(
			stateId, info, resource, stateDeps,
		)
	case config.DataResourceMode:
		return n.evalTreeDataResource(
			stateId, info, resource, stateDeps)
	default:
		panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
	}
}
func (n *NodeApplyableResource) evalTreeDataResource(
	stateId string, info *InstanceInfo,
	resource *Resource, stateDeps []string) EvalNode {
	var provider ResourceProvider
	var config *ResourceConfig
	var diff *InstanceDiff
	var state *InstanceState

	return &EvalSequence{
		Nodes: []EvalNode{
			// Build the instance info
			&EvalInstanceInfo{
				Info: info,
			},

			// Get the saved diff for apply
			&EvalReadDiff{
				Name: stateId,
				Diff: &diff,
			},

			// Stop here if we don't actually have a diff
			&EvalIf{
				If: func(ctx EvalContext) (bool, error) {
					if diff == nil {
						return true, EvalEarlyExitError{}
					}

					if diff.GetAttributesLen() == 0 {
						return true, EvalEarlyExitError{}
					}

					return true, nil
				},
				Then: EvalNoop{},
			},

			// We need to re-interpolate the config here, rather than
			// just using the diff's values directly, because we've
			// potentially learned more variable values during the
			// apply pass that weren't known when the diff was produced.
			&EvalInterpolate{
				Config:   n.Config.RawConfig.Copy(),
				Resource: resource,
				Output:   &config,
			},
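			// Get the provider for this data source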
			&EvalGetProvider{
				Name:   n.ProvidedBy()[0],
				Output: &provider,
			},

			// Make a new diff with our newly-interpolated config.
			&EvalReadDataDiff{
				Info:     info,
				Config:   &config,
				Previous: &diff,
				Provider: &provider,
				Output:   &diff,
			},
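			// Perform the read, which is the "apply" for a data
			// resource, producing its final state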
			&EvalReadDataApply{
				Info:     info,
				Diff:     &diff,
				Provider: &provider,
				Output:   &state,
			},
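			// Save the result, along with its dependencies, to the state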
			&EvalWriteState{
				Name:         stateId,
				ResourceType: n.Config.Type,
				Provider:     n.Config.Provider,
				Dependencies: stateDeps,
				State:        &state,
			},

			// Clear the diff now that we've applied it, so
			// later nodes won't see a diff that's now a no-op.
			&EvalWriteDiff{
				Name: stateId,
				Diff: nil,
			},

			&EvalUpdateStateHook{},
		},
	}
}
func (n *NodeApplyableResource) evalTreeManagedResource(
	stateId string, info *InstanceInfo,
	resource *Resource, stateDeps []string) EvalNode {
	// Declare a bunch of variables that are used for state during
	// evaluation. Most of these are written to by address below.
	var provider ResourceProvider
	var diff, diffApply *InstanceDiff
	var state *InstanceState
	var resourceConfig *ResourceConfig
	var err error
	var createNew bool
	var createBeforeDestroyEnabled bool

	return &EvalSequence{
		Nodes: []EvalNode{
			// Build the instance info
			&EvalInstanceInfo{
				Info: info,
			},

			// Get the saved diff for apply
			&EvalReadDiff{
				Name: stateId,
				Diff: &diffApply,
			},

			// We don't want to do any destroys
			&EvalIf{
				If: func(ctx EvalContext) (bool, error) {
					if diffApply == nil {
						return true, EvalEarlyExitError{}
					}

					if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 {
						return true, EvalEarlyExitError{}
					}

					diffApply.SetDestroy(false)
					return true, nil
				},
				Then: EvalNoop{},
			},
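			// If this is a replacement with create_before_destroy set,
			// depose the existing state so the new instance can be
			// created before the old one is destroyed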
			&EvalIf{
				If: func(ctx EvalContext) (bool, error) {
					destroy := false
					if diffApply != nil {
						destroy = diffApply.GetDestroy() || diffApply.RequiresNew()
					}

					createBeforeDestroyEnabled =
						n.Config.Lifecycle.CreateBeforeDestroy &&
							destroy

					return createBeforeDestroyEnabled, nil
				},
				Then: &EvalDeposeState{
					Name: stateId,
				},
			},
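			// Re-interpolate the config with the latest known values,
			// then fetch the provider and the current state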
			&EvalInterpolate{
				Config:   n.Config.RawConfig.Copy(),
				Resource: resource,
				Output:   &resourceConfig,
			},

			&EvalGetProvider{
				Name:   n.ProvidedBy()[0],
				Output: &provider,
			},

			&EvalReadState{
				Name:   stateId,
				Output: &state,
			},

			// Re-run validation to catch any errors we missed, e.g. type
			// mismatches on computed values.
			&EvalValidateResource{
				Provider:       &provider,
				Config:         &resourceConfig,
				ResourceName:   n.Config.Name,
				ResourceType:   n.Config.Type,
				ResourceMode:   n.Config.Mode,
				IgnoreWarnings: true,
			},
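			// Re-diff against the freshly interpolated config so the
			// apply operates on up-to-date values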
			&EvalDiff{
				Info:       info,
				Config:     &resourceConfig,
				Resource:   n.Config,
				Provider:   &provider,
				Diff:       &diffApply,
				State:      &state,
				OutputDiff: &diffApply,
			},

			// Get the saved diff
			&EvalReadDiff{
				Name: stateId,
				Diff: &diff,
			},

			// Compare the diffs
			&EvalCompareDiff{
				Info: info,
				One:  &diff,
				Two:  &diffApply,
			},
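			// Fetch the provider and the current state once more for
			// the apply step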
			&EvalGetProvider{
				Name:   n.ProvidedBy()[0],
				Output: &provider,
			},

			&EvalReadState{
				Name:   stateId,
				Output: &state,
			},
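			// Perform the actual apply, producing the new state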
			&EvalApply{
				Info:      info,
				State:     &state,
				Diff:      &diffApply,
				Provider:  &provider,
				Output:    &state,
				Error:     &err,
				CreateNew: &createNew,
			},
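			// Write the new state immediately so it isn't lost if a
			// provisioner below fails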
			&EvalWriteState{
				Name:         stateId,
				ResourceType: n.Config.Type,
				Provider:     n.Config.Provider,
				Dependencies: stateDeps,
				State:        &state,
			},
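			// Run provisioners; these only execute when a new
			// instance was created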
			&EvalApplyProvisioners{
				Info:           info,
				State:          &state,
				Resource:       n.Config,
				InterpResource: resource,
				CreateNew:      &createNew,
				Error:          &err,
			},
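			// On failure with create_before_destroy, restore the
			// deposed instance; otherwise persist the final state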
			&EvalIf{
				If: func(ctx EvalContext) (bool, error) {
					return createBeforeDestroyEnabled && err != nil, nil
				},
				Then: &EvalUndeposeState{
					Name:  stateId,
					State: &state,
				},
				Else: &EvalWriteState{
					Name:         stateId,
					ResourceType: n.Config.Type,
					Provider:     n.Config.Provider,
					Dependencies: stateDeps,
					State:        &state,
				},
			},

			// We clear the diff out here so that future nodes
			// don't see a diff that is already complete. There
			// is no longer a diff!
			&EvalWriteDiff{
				Name: stateId,
				Diff: nil,
			},

			&EvalApplyPost{
				Info:  info,
				State: &state,
				Error: &err,
			},

			&EvalUpdateStateHook{},
		},
	}
}