core: Eliminate NodePlannableResource indirection

We previously did two levels of DynamicExpand to go from ConfigResource to
AbsResource and then from AbsResource to AbsResourceInstance.

We'll now do the full expansion from ConfigResource to AbsResourceInstance
in a single DynamicExpand step inside nodeExpandPlannableResource.

The new approach is essentially functionally equivalent to the old except
that it fixes a bug in the previous implementation: we will now call
checkState.ReportCheckableObjects only once for the entire set of
instances for a particular resource, which is what the checkable objects
infrastructure expects so that it can always mention all of the checkable
objects in the check report even if we bail out partway through due to
a downstream error.

This is essentially the same code but now turned into additional methods
on nodeExpandPlannableResource instead of having the extra graph node
type. This has the further advantage of this now being straight-through
code with standard control flow, instead of the unusual inversion of
control we were doing before bouncing in and out of different Execute and
DynamicExpand implementations to get this done.
This commit is contained in:
Martin Atkins 2022-09-23 12:29:48 -07:00
parent a9bd4099d3
commit 2e177cd632
4 changed files with 213 additions and 181 deletions

View File

@ -230,6 +230,28 @@ func (g *Graph) Connect(edge Edge) {
s.Add(source)
}
// Subsume imports all of the nodes and edges from the given graph into the
// receiver, leaving the given graph unchanged.
//
// If any of the nodes in the given graph are already present in the receiver
// then the existing node will be retained and any new edges from the given
// graph will be connected with it.
//
// If the given graph has edges in common with the receiver then they will be
// ignored, because each pair of nodes can only be connected once.
func (g *Graph) Subsume(other *Graph) {
// We're using Set.Filter just as a "visit each element" here, so we're
// not doing anything with the result (which will always be empty).
other.vertices.Filter(func(i interface{}) bool {
g.Add(i)
return false
})
other.edges.Filter(func(i interface{}) bool {
g.Connect(i.(Edge))
return false
})
}
// String outputs some human-friendly output for the graph structure.
func (g *Graph) StringWithNodeTypes() string {
var buf bytes.Buffer

View File

@ -401,6 +401,111 @@ resource "test_resource" "b" {
}
}
func TestContext2Plan_resourceChecksInExpandedModule(t *testing.T) {
// When a resource is in a nested module we have two levels of expansion
// to do: first expand the module the resource is declared in, and then
// expand the resource itself.
//
// In earlier versions of Terraform we did that expansion as two levels
// of DynamicExpand, which led to a bug where we didn't have any central
// location from which to register all of the instances of a checkable
// resource.
//
// We now handle the full expansion all in one graph node and one dynamic
// subgraph, which avoids the problem. This is a regression test for the
// earlier bug. If this test is panicking with "duplicate checkable objects
// report" then that suggests the bug is reintroduced and we're now back
// to reporting each module instance separately again, which is incorrect.
// An empty provider/resource schema is sufficient here because this test
// is only about instance expansion, not attribute handling.
p := testProvider("test")
p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
Provider: providers.Schema{
Block: &configschema.Block{},
},
ResourceTypes: map[string]providers.Schema{
"test": {
Block: &configschema.Block{},
},
},
}
// The mock provider just echoes back whatever it's given, so planning
// and applying are effectively no-ops at the provider level.
p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
resp.NewState = req.PriorState
return resp
}
p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
resp.PlannedState = cty.EmptyObjectVal
return resp
}
p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
resp.NewState = req.PlannedState
return resp
}
// Two instances of the child module, each declaring one checkable
// resource, gives us the multi-level expansion this test needs.
m := testModuleInline(t, map[string]string{
"main.tf": `
module "child" {
source = "./child"
count = 2 # must be at least 2 for this test to be valid
}
`,
"child/child.tf": `
locals {
a = "a"
}
resource "test" "test" {
lifecycle {
postcondition {
# It doesn't matter what this checks as long as it
# passes, because if we don't handle expansion properly
# then we'll crash before we even get to evaluating this.
condition = local.a == local.a
error_message = "Postcondition failed."
}
}
}
`,
})
ctx := testContext2(t, &ContextOpts{
Providers: map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
},
})
// Planning from an empty prior state means every expanded instance
// should come out as a Create change.
priorState := states.NewState()
plan, diags := ctx.Plan(m, priorState, DefaultPlanOpts)
assertNoErrors(t, diags)
resourceInsts := []addrs.AbsResourceInstance{
mustResourceInstanceAddr("module.child[0].test.test"),
mustResourceInstanceAddr("module.child[1].test.test"),
}
// Each expanded instance must have both a planned Create change and a
// passing check result registered with the checks subsystem.
for _, instAddr := range resourceInsts {
t.Run(fmt.Sprintf("results for %s", instAddr), func(t *testing.T) {
if rc := plan.Changes.ResourceInstance(instAddr); rc != nil {
if got, want := rc.Action, plans.Create; got != want {
t.Errorf("wrong action for %s\ngot: %s\nwant: %s", instAddr, got, want)
}
if got, want := rc.ActionReason, plans.ResourceInstanceChangeNoReason; got != want {
t.Errorf("wrong action reason for %s\ngot: %s\nwant: %s", instAddr, got, want)
}
} else {
t.Errorf("no planned change for %s", instAddr)
}
if checkResult := plan.Checks.GetObjectResult(instAddr); checkResult != nil {
if got, want := checkResult.Status, checks.StatusPass; got != want {
t.Errorf("wrong check status for %s\ngot: %s\nwant: %s", instAddr, got, want)
}
} else {
t.Errorf("no check result for %s", instAddr)
}
})
}
}
func TestContext2Plan_dataResourceChecksManagedResourceChange(t *testing.T) {
// This tests the situation where the remote system contains data that
// isn't valid per a data resource postcondition, but that the

View File

@ -11,10 +11,9 @@ import (
"github.com/hashicorp/terraform/internal/tfdiags"
)
// nodeExpandPlannableResource handles the first layer of resource
// expansion. We need this extra layer so DynamicExpand is called twice for
// the resource, the first to expand the Resource for each module instance, and
// the second to expand each ResourceInstance for the expanded Resources.
// nodeExpandPlannableResource represents an addrs.ConfigResource and implements
// DynamicExpand to a subgraph containing all of the addrs.AbsResourceInstance
// resulting from both the containing module and resource-specific expansion.
type nodeExpandPlannableResource struct {
*NodeAbstractResource
@ -94,23 +93,8 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, er
expander := ctx.InstanceExpander()
moduleInstances := expander.ExpandModule(n.Addr.Module)
// Add the current expanded resource to the graph
for _, module := range moduleInstances {
resAddr := n.Addr.Resource.Absolute(module)
g.Add(&NodePlannableResource{
NodeAbstractResource: n.NodeAbstractResource,
Addr: resAddr,
ForceCreateBeforeDestroy: n.ForceCreateBeforeDestroy,
dependencies: n.dependencies,
skipRefresh: n.skipRefresh,
skipPlanChanges: n.skipPlanChanges,
forceReplace: n.forceReplace,
})
}
// Lock the state while we inspect it
state := ctx.State().Lock()
defer ctx.State().Unlock()
var orphans []*states.Resource
for _, res := range state.Resources(n.Addr) {
@ -121,12 +105,18 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, er
break
}
}
// Address from state was not found in the current config
// The module instance of the resource in the state doesn't exist
// in the current config, so this whole resource is orphaned.
if !found {
orphans = append(orphans, res)
}
}
// We'll no longer use the state directly here, and the other functions
// we'll call below may use it so we'll release the lock.
state = nil
ctx.State().Unlock()
// The concrete resource factory we'll use for orphans
concreteResourceOrphan := func(a *NodeAbstractResourceInstance) *NodePlannableResourceInstanceOrphan {
// Add the config and state since we don't do that via transforms
@ -154,72 +144,68 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, er
}
}
return &g, nil
// The above dealt with the expansion of the containing module, so now
// we need to deal with the expansion of the resource itself across all
// instances of the module.
//
// We'll gather up all of the leaf instances we learn about along the way
// so that we can inform the checks subsystem of which instances it should
// be expecting check results for, below.
var diags tfdiags.Diagnostics
instAddrs := addrs.MakeSet[addrs.Checkable]()
for _, module := range moduleInstances {
resAddr := n.Addr.Resource.Absolute(module)
err := n.expandResourceInstances(ctx, resAddr, &g, instAddrs)
diags = diags.Append(err)
}
if diags.HasErrors() {
return nil, diags.ErrWithWarnings()
}
// If this is a resource that participates in custom condition checks
// (i.e. it has preconditions or postconditions) then the check state
// wants to know the addresses of the checkable objects so that it can
// treat them as unknown status if we encounter an error before actually
// visiting the checks.
if checkState := ctx.Checks(); checkState.ConfigHasChecks(n.NodeAbstractResource.Addr) {
checkState.ReportCheckableObjects(n.NodeAbstractResource.Addr, instAddrs)
}
return &g, diags.ErrWithWarnings()
}
// NodePlannableResource represents a resource that is "plannable":
// it is ready to be planned in order to create a diff.
type NodePlannableResource struct {
*NodeAbstractResource
Addr addrs.AbsResource
// ForceCreateBeforeDestroy might be set via our GraphNodeDestroyerCBD
// during graph construction, if dependencies require us to force this
// on regardless of what the configuration says.
ForceCreateBeforeDestroy *bool
// skipRefresh indicates that we should skip refreshing individual instances
skipRefresh bool
// skipPlanChanges indicates we should skip trying to plan change actions
// for any instances.
skipPlanChanges bool
// forceReplace are resource instance addresses where the user wants to
// force generating a replace action. This set isn't pre-filtered, so
// it might contain addresses that have nothing to do with the resource
// that this node represents, which the node itself must therefore ignore.
forceReplace []addrs.AbsResourceInstance
dependencies []addrs.ConfigResource
}
var (
_ GraphNodeModuleInstance = (*NodePlannableResource)(nil)
_ GraphNodeDestroyerCBD = (*NodePlannableResource)(nil)
_ GraphNodeDynamicExpandable = (*NodePlannableResource)(nil)
_ GraphNodeReferenceable = (*NodePlannableResource)(nil)
_ GraphNodeReferencer = (*NodePlannableResource)(nil)
_ GraphNodeConfigResource = (*NodePlannableResource)(nil)
_ GraphNodeAttachResourceConfig = (*NodePlannableResource)(nil)
)
func (n *NodePlannableResource) Path() addrs.ModuleInstance {
return n.Addr.Module
}
func (n *NodePlannableResource) Name() string {
return n.Addr.String()
}
// GraphNodeExecutable
func (n *NodePlannableResource) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics {
// expandResourceInstances calculates the dynamic expansion for the resource
// itself in the context of a particular module instance.
//
// It has several side-effects:
// - Adds a node to Graph g for each leaf resource instance it discovers, whether present or orphaned.
// - Registers the expansion of the resource in the "expander" object embedded inside EvalContext ctx.
// - Adds each present (non-orphaned) resource instance address to instAddrs (guaranteed to always be addrs.AbsResourceInstance, despite being declared as addrs.Checkable).
//
// After calling this for each of the module instances the resource appears
// within, the caller must register the final superset instAddrs with the
// checks subsystem so that it knows the fully expanded set of checkable
// object instances for this resource instance.
func (n *nodeExpandPlannableResource) expandResourceInstances(globalCtx EvalContext, resAddr addrs.AbsResource, g *Graph, instAddrs addrs.Set[addrs.Checkable]) error {
var diags tfdiags.Diagnostics
if n.Config == nil {
// Nothing to do, then.
log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", n.Name())
return diags
log.Printf("[TRACE] nodeExpandPlannableResource: no configuration present for %s", n.Name())
return diags.ErrWithWarnings()
}
// The rest of our work here needs to know which module instance it's
// working in, so that it can evaluate expressions in the appropriate scope.
moduleCtx := globalCtx.WithPath(resAddr.Module)
// writeResourceState is responsible for informing the expander of what
// repetition mode this resource has, which allows expander.ExpandResource
// to work below.
moreDiags := n.writeResourceState(ctx, n.Addr)
moreDiags := n.writeResourceState(moduleCtx, resAddr)
diags = diags.Append(moreDiags)
if moreDiags.HasErrors() {
return diags
return diags.ErrWithWarnings()
}
// Before we expand our resource into potentially many resource instances,
@ -227,8 +213,8 @@ func (n *NodePlannableResource) Execute(ctx EvalContext, op walkOperation) tfdia
// consistent with the repetition mode of the resource. In other words,
// we're aiming to catch a situation where naming a particular resource
// instance would require an instance key but the given address has none.
expander := ctx.InstanceExpander()
instanceAddrs := expander.ExpandResource(n.ResourceAddr().Absolute(ctx.Path()))
expander := moduleCtx.InstanceExpander()
instanceAddrs := expander.ExpandResource(resAddr)
// If there's a number of instances other than 1 then we definitely need
// an index.
@ -283,60 +269,42 @@ func (n *NodePlannableResource) Execute(ctx EvalContext, op walkOperation) tfdia
}
}
// NOTE: The actual interpretation of n.forceReplace to produce replace
// actions is in NodeAbstractResourceInstance.plan, because we must do so
// on a per-instance basis rather than for the whole resource.
// actions is in the per-instance function we're about to call, because
// we need to evaluate it on a per-instance basis.
return diags
}
// GraphNodeDestroyerCBD
func (n *NodePlannableResource) CreateBeforeDestroy() bool {
if n.ForceCreateBeforeDestroy != nil {
return *n.ForceCreateBeforeDestroy
for _, addr := range instanceAddrs {
// If this resource is participating in the "checks" mechanism then our
// caller will need to know all of our expanded instance addresses as
// checkable object instances.
// (NOTE: instAddrs probably already has other instance addresses in it
// from earlier calls to this function with different resource addresses,
// because its purpose is to aggregate them all together into a single set.)
instAddrs.Add(addr)
}
// If we have no config, we just assume no
if n.Config == nil || n.Config.Managed == nil {
return false
// Our graph builder mechanism expects to always be constructing new
// graphs rather than adding to existing ones, so we'll first
// construct a subgraph just for this individual module's instances and
// then we'll steal all of its nodes and edges to incorporate into our
// main graph which contains all of the resource instances together.
instG, err := n.resourceInstanceSubgraph(moduleCtx, resAddr, instanceAddrs)
if err != nil {
diags = diags.Append(err)
return diags.ErrWithWarnings()
}
g.Subsume(&instG.AcyclicGraph.Graph)
return n.Config.Managed.CreateBeforeDestroy
return diags.ErrWithWarnings()
}
// GraphNodeDestroyerCBD
func (n *NodePlannableResource) ModifyCreateBeforeDestroy(v bool) error {
n.ForceCreateBeforeDestroy = &v
return nil
}
// GraphNodeDynamicExpandable
func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext, addr addrs.AbsResource, instanceAddrs []addrs.AbsResourceInstance) (*Graph, error) {
var diags tfdiags.Diagnostics
// Our instance expander should already have been informed about the
// expansion of this resource and of all of its containing modules, so
// it can tell us which instance addresses we need to process.
expander := ctx.InstanceExpander()
instanceAddrs := expander.ExpandResource(n.ResourceAddr().Absolute(ctx.Path()))
// Our graph transformers require access to the full state, so we'll
// temporarily lock it while we work on this.
state := ctx.State().Lock()
defer ctx.State().Unlock()
// If this is a resource that participates in custom condition checks
// (i.e. it has preconditions or postconditions) then the check state
// wants to know the addresses of the checkable objects so that it can
// treat them as unknown status if we encounter an error before actually
// visiting the checks.
if checkState := ctx.Checks(); checkState.ConfigHasChecks(n.NodeAbstractResource.Addr) {
checkableAddrs := addrs.MakeSet[addrs.Checkable]()
for _, addr := range instanceAddrs {
checkableAddrs.Add(addr)
}
checkState.ReportCheckableObjects(n.NodeAbstractResource.Addr, checkableAddrs)
}
// The concrete resource factory we'll use
concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
// check if this node is being imported first
@ -401,7 +369,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
// Add the count/for_each orphans
&OrphanResourceInstanceCountTransformer{
Concrete: concreteResourceOrphan,
Addr: n.Addr,
Addr: addr,
InstanceAddrs: instanceAddrs,
State: state,
},
@ -422,8 +390,8 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
// Build the graph
b := &BasicGraphBuilder{
Steps: steps,
Name: "NodePlannableResource",
Name: "nodeExpandPlannableResource",
}
graph, diags := b.Build(ctx.Path())
graph, diags := b.Build(addr.Module)
return graph, diags.ErrWithWarnings()
}

View File

@ -1,63 +0,0 @@
package terraform
import (
"testing"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/configs"
"github.com/hashicorp/terraform/internal/instances"
"github.com/hashicorp/terraform/internal/states"
)
// TestNodePlannableResourceExecute exercises NodePlannableResource.Execute
// in two situations: with no configuration (which must leave the state
// empty) and with a simple managed resource configuration (which must
// record the resource in state under its address).
func TestNodePlannableResourceExecute(t *testing.T) {
state := states.NewState()
ctx := &MockEvalContext{
StateState: state.SyncWrapper(),
InstanceExpanderExpander: instances.NewExpander(),
}
// With Config nil, Execute should succeed without writing anything
// into the state.
t.Run("no config", func(t *testing.T) {
node := NodePlannableResource{
NodeAbstractResource: &NodeAbstractResource{
Config: nil,
},
Addr: mustAbsResourceAddr("test_instance.foo"),
}
diags := node.Execute(ctx, walkApply)
if diags.HasErrors() {
t.Fatalf("unexpected error: %s", diags.Err())
}
if !state.Empty() {
t.Fatalf("expected no state, got:\n %s", state.String())
}
})
// With a managed resource configured, Execute should record the
// resource so that it can be looked up in state by address afterwards.
t.Run("simple", func(t *testing.T) {
node := NodePlannableResource{
NodeAbstractResource: &NodeAbstractResource{
Config: &configs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_instance",
Name: "foo",
},
ResolvedProvider: addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: addrs.RootModule,
},
},
Addr: mustAbsResourceAddr("test_instance.foo"),
}
diags := node.Execute(ctx, walkApply)
if diags.HasErrors() {
t.Fatalf("unexpected error: %s", diags.Err())
}
if state.Empty() {
t.Fatal("expected resources in state, got empty state")
}
r := state.Resource(mustAbsResourceAddr("test_instance.foo"))
if r == nil {
t.Fatal("test_instance.foo not found in state")
}
})
}