Merge pull request #30832 from hashicorp/jbardin/data-readResourceInstanceState

remove the use of data source prior state from planning
James Bardin 2022-04-14 09:47:25 -04:00 committed by GitHub
commit d360a78771
6 changed files with 45 additions and 72 deletions

@@ -394,14 +394,6 @@ func (n *NodeAbstractResource) readResourceInstanceState(ctx EvalContext, addr a
obj, err := src.Decode(schema.ImpliedType())
if err != nil {
// In the case of a data source which contains incompatible state
// migrations, we can just ignore decoding errors and skip comparing
// the prior state.
if addr.Resource.Resource.Mode == addrs.DataResourceMode {
log.Printf("[DEBUG] readResourceInstanceState: data source schema change for %s prevents decoding: %s", addr, err)
return nil, diags
}
diags = diags.Append(err)
}
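
The branch removed here existed because a data source result stored in state may no longer decode once the provider's schema changes shape, and data resources have no explicit upgrade mechanism. A minimal standalone sketch of that failure mode using go-cty directly (the attribute names and schema below are illustrative, not taken from Terraform's code):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	// JSON state written under an older schema, where "endpoint" was a
	// plain string attribute.
	stored := []byte(`{"name":"app","endpoint":"10.0.0.1:8080"}`)

	// The current schema models "endpoint" as an object, so the implied
	// type no longer matches what was stored.
	currentType := cty.Object(map[string]cty.Type{
		"name": cty.String,
		"endpoint": cty.Object(map[string]cty.Type{
			"host": cty.String,
			"port": cty.Number,
		}),
	})

	if _, err := ctyjson.Unmarshal(stored, currentType); err != nil {
		// This is the class of decode error the removed branch used to
		// swallow for data sources; after this change the prior state is
		// simply not consulted when planning a data read.
		fmt.Println("decode error:", err)
	}
}
```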

@@ -1477,7 +1477,7 @@ func (n *NodeAbstractResourceInstance) providerMetas(ctx EvalContext) (cty.Value
// value, but it still matches the previous state, then we can record a NoOp
// change. If the states don't match then we record a Read change so that the
// new value is applied to the state.
func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentState *states.ResourceInstanceObject, checkRuleSeverity tfdiags.Severity) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) {
func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, checkRuleSeverity tfdiags.Severity) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
var keyData instances.RepetitionData
var configVal cty.Value
@@ -1500,9 +1500,6 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt
objTy := schema.ImpliedType()
priorVal := cty.NullVal(objTy)
if currentState != nil {
priorVal = currentState.Value
}
forEach, _ := evaluateForEachExpression(config.ForEach, ctx)
keyData = EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach)
@@ -1515,9 +1512,6 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt
)
diags = diags.Append(checkDiags)
if diags.HasErrors() {
diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostApply(n.Addr, states.CurrentGen, priorVal, diags.Err())
}))
return nil, nil, keyData, diags // failed preconditions prevent further evaluation
}
@@ -1529,9 +1523,6 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt
}
unmarkedConfigVal, configMarkPaths := configVal.UnmarkDeepWithPaths()
// We drop marks on the values used here as the result is only
// temporarily used for validation.
unmarkedPriorVal, _ := priorVal.UnmarkDeep()
configKnown := configVal.IsWhollyKnown()
// If our configuration contains any unknown values, or we depend on any
@@ -1581,28 +1572,6 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt
return nil, nil, keyData, diags
}
// if we have a prior value, we can check for any irregularities in the response
if !priorVal.IsNull() {
// While we don't propose planned changes for data sources, we can
// generate a proposed value for comparison to ensure the data source
// is returning a result following the rules of the provider contract.
proposedVal := objchange.ProposedNew(schema, unmarkedPriorVal, unmarkedConfigVal)
if errs := objchange.AssertObjectCompatible(schema, proposedVal, newVal); len(errs) > 0 {
// Resources have the LegacyTypeSystem field to signal when they are
// using an SDK which may not produce precise values. While data
// sources are read-only, they can still return a value which is not
// compatible with the config+schema. Since we can't detect the legacy
// type system, we can only warn about this for now.
var buf strings.Builder
fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s.",
n.ResolvedProvider, n.Addr)
for _, err := range errs {
fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err))
}
log.Print(buf.String())
}
}
plannedNewState := &states.ResourceInstanceObject{
Value: newVal,
Status: states.ObjectReady,
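
For orientation, the priorVal plumbing removed above fed the rule described in planDataSource's doc comment: when the configuration is wholly known the source is read during planning, a result that still matches the previous state can be recorded as a NoOp, and anything else is recorded as a Read (an unknown configuration instead defers the read to apply). A standalone sketch of that decision using go-cty values; the helper name and the returned action strings are illustrative, not Terraform's plans API:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// decideAction is an illustrative reduction of the rule described in the
// planDataSource comment: unknown configuration defers the read, a result
// identical to the prior state is a no-op, and anything else is a Read.
func decideAction(configVal, priorVal, newVal cty.Value) string {
	if !configVal.IsWhollyKnown() {
		// The real read happens at apply time; the planned result is an
		// unknown value of the schema's implied type.
		return "Read (deferred until apply)"
	}
	if !priorVal.IsNull() && newVal.RawEquals(priorVal) {
		return "NoOp"
	}
	return "Read"
}

func main() {
	objTy := cty.Object(map[string]cty.Type{"id": cty.String})

	prior := cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("i-123")})
	fresh := cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("i-456")})
	config := cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("i-123")})

	fmt.Println(decideAction(config, prior, prior))                     // NoOp
	fmt.Println(decideAction(config, prior, fresh))                     // Read
	fmt.Println(decideAction(cty.UnknownVal(objTy), prior, cty.NilVal)) // Read (deferred until apply)
}
```

The removed block that called objchange.AssertObjectCompatible only logged a warning when the provider's result fell outside what the prior state and configuration implied, so dropping priorVal removes that advisory check rather than changing any planned action.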

@@ -264,12 +264,6 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
return diags
}
state, readDiags = n.readResourceInstanceState(ctx, n.ResourceInstanceAddr())
diags = diags.Append(readDiags)
if diags.HasErrors() {
return diags
}
diffApply = reducePlan(addr, diffApply, false)
// reducePlan may have simplified our planned change
// into a NoOp if it only requires destroying, since destroying

@@ -1,10 +1,13 @@
package terraform
import (
"fmt"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/tfdiags"
"github.com/zclconf/go-cty/cty"
)
// NodePlanDestroyableResourceInstance represents a resource that is ready
@@ -39,6 +42,19 @@ func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceIn
func (n *NodePlanDestroyableResourceInstance) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
addr := n.ResourceInstanceAddr()
switch addr.Resource.Resource.Mode {
case addrs.ManagedResourceMode:
return n.managedResourceExecute(ctx, op)
case addrs.DataResourceMode:
return n.dataResourceExecute(ctx, op)
default:
panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
}
}
func (n *NodePlanDestroyableResourceInstance) managedResourceExecute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
addr := n.ResourceInstanceAddr()
// Declare a bunch of variables that are used for state during
// evaluation. These are written to by address in the EvalNodes we
// declare below.
@@ -85,3 +101,22 @@ func (n *NodePlanDestroyableResourceInstance) Execute(ctx EvalContext, op walkOp
diags = diags.Append(n.writeChange(ctx, change, ""))
return diags
}
func (n *NodePlanDestroyableResourceInstance) dataResourceExecute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
// We may not be able to read a prior data source from the state if the
// schema was upgraded and we are destroying before ever refreshing that
// data source. Regardless, a data source "destroy" is simply writing a
// null state, which we can do with a null prior state too.
change := &plans.ResourceInstanceChange{
Addr: n.ResourceInstanceAddr(),
PrevRunAddr: n.prevRunAddr(ctx),
Change: plans.Change{
Action: plans.Delete,
Before: cty.NullVal(cty.DynamicPseudoType),
After: cty.NullVal(cty.DynamicPseudoType),
},
ProviderAddr: n.ResolvedProvider,
}
return diags.Append(n.writeChange(ctx, change, ""))
}

@@ -71,26 +71,6 @@ func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (di
return diags
}
state, readDiags := n.readResourceInstanceState(ctx, addr)
diags = diags.Append(readDiags)
if diags.HasErrors() {
return diags
}
// We'll save a snapshot of what we just read from the state into the
// prevRunState which will capture the result read in the previous
// run, possibly tweaked by any upgrade steps that
// readResourceInstanceState might've made.
// However, note that we don't have any explicit mechanism for upgrading
// data resource results as we do for managed resources, and so the
// prevRunState might not conform to the current schema if the
// previous run was with a different provider version. In that case the
// snapshot will be null if we could not decode it at all.
diags = diags.Append(n.writeResourceInstanceState(ctx, state, prevRunState))
if diags.HasErrors() {
return diags
}
diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
if diags.HasErrors() {
return diags
@@ -101,7 +81,7 @@ func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (di
checkRuleSeverity = tfdiags.Warning
}
change, state, repeatData, planDiags := n.planDataSource(ctx, state, checkRuleSeverity)
change, state, repeatData, planDiags := n.planDataSource(ctx, checkRuleSeverity)
diags = diags.Append(planDiags)
if diags.HasErrors() {
return diags

@@ -21,6 +21,14 @@ import (
// If any errors occur during upgrade, error diagnostics are returned. In that
// case it is not safe to proceed with using the original state object.
func upgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Interface, src *states.ResourceInstanceObjectSrc, currentSchema *configschema.Block, currentVersion uint64) (*states.ResourceInstanceObjectSrc, tfdiags.Diagnostics) {
if addr.Resource.Resource.Mode != addrs.ManagedResourceMode {
// We only do state upgrading for managed resources.
// This was a part of the normal workflow in older versions and
// returned early, so we are only going to log the error for now.
log.Printf("[ERROR] data resource %s should not require state upgrade", addr)
return src, nil
}
// Remove any attributes from state that are not present in the schema.
// This was previously taken care of by the provider, but data sources do
// not go through the UpgradeResourceState process.
@@ -32,11 +40,6 @@ func upgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Int
src.AttrsJSON = stripRemovedStateAttributes(src.AttrsJSON, currentSchema.ImpliedType())
}
if addr.Resource.Resource.Mode != addrs.ManagedResourceMode {
// We only do state upgrading for managed resources.
return src, nil
}
stateIsFlatmap := len(src.AttrsJSON) == 0
// TODO: This should eventually use a proper FQN.
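
The surrounding comments describe a cleanup step applied before decoding: attributes present in stored state but no longer defined by the current schema are stripped so they cannot cause decode failures. A simplified sketch of that idea, limited to top-level attributes and using a hypothetical helper name rather than the real stripRemovedStateAttributes implementation:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// stripUnknownAttrs drops stored attributes that are no longer present in
// the schema, so decoding against the current implied type does not trip
// over them. Top-level attributes only; the real helper also walks nested
// blocks, and this name is hypothetical.
func stripUnknownAttrs(attrsJSON []byte, schemaAttrs map[string]struct{}) ([]byte, error) {
	var attrs map[string]json.RawMessage
	if err := json.Unmarshal(attrsJSON, &attrs); err != nil {
		return nil, err
	}
	for name := range attrs {
		if _, ok := schemaAttrs[name]; !ok {
			delete(attrs, name)
		}
	}
	return json.Marshal(attrs)
}

func main() {
	stored := []byte(`{"id":"abc","legacy_field":"removed from schema"}`)
	schema := map[string]struct{}{"id": {}}

	cleaned, err := stripUnknownAttrs(stored, schema)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(cleaned)) // {"id":"abc"}
}
```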