diff --git a/helper/resource/testing_config.go b/helper/resource/testing_config.go
index 8f31e38bed..04194dddf9 100644
--- a/helper/resource/testing_config.go
+++ b/helper/resource/testing_config.go
@@ -9,7 +9,6 @@ import (
 	"sort"
 	"strings"
 
-	"github.com/hashicorp/terraform/addrs"
 	"github.com/hashicorp/terraform/configs/hcl2shim"
 	"github.com/hashicorp/terraform/states"
 
@@ -61,28 +60,18 @@ func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep)
 		log.Printf("[WARN] Config warnings:\n%s", stepDiags)
 	}
 
-	// Refresh!
-	newState, stepDiags := ctx.Refresh()
-	// shim the state first so the test can check the state on errors
-
-	state, err = shimNewState(newState, step.providers)
-	if err != nil {
-		return nil, err
-	}
-	if stepDiags.HasErrors() {
-		return state, newOperationError("refresh", stepDiags)
-	}
-
 	// If this step is a PlanOnly step, skip over this first Plan and subsequent
 	// Apply, and use the follow up Plan that checks for perpetual diffs
 	if !step.PlanOnly {
 		// Plan!
-		if p, stepDiags := ctx.Plan(); stepDiags.HasErrors() {
+		p, stepDiags := ctx.Plan()
+		if stepDiags.HasErrors() {
 			return state, newOperationError("plan", stepDiags)
-		} else {
-			log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes))
 		}
 
+		newState := p.State
+		log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes))
+
 		// We need to keep a copy of the state prior to destroying
 		// such that destroy steps can verify their behavior in the check
 		// function
@@ -115,11 +104,21 @@ func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep)
 
 	// Now, verify that Plan is now empty and we don't have a perpetual diff issue
 	// We do this with TWO plans. One without a refresh.
-	var p *plans.Plan
-	if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() {
+	p, stepDiags := ctx.Plan()
+	if stepDiags.HasErrors() {
 		return state, newOperationError("follow-up plan", stepDiags)
 	}
-	if !p.Changes.Empty() {
+
+	// we don't technically need this any longer with plan handling refreshing,
+	// but run it anyway to ensure the context is working as expected.
+	p, stepDiags = ctx.Plan()
+	if stepDiags.HasErrors() {
+		return state, newOperationError("second follow-up plan", stepDiags)
+	}
+	empty := p.Changes.Empty()
+	newState := p.State
+
+	if !empty {
 		if step.ExpectNonEmptyPlan {
 			log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
 		} else {
@@ -128,39 +127,6 @@ func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep)
 		}
 	}
 
-	// And another after a Refresh.
-	if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) {
-		newState, stepDiags = ctx.Refresh()
-		if stepDiags.HasErrors() {
-			return state, newOperationError("follow-up refresh", stepDiags)
-		}
-
-		state, err = shimNewState(newState, step.providers)
-		if err != nil {
-			return nil, err
-		}
-	}
-	if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() {
-		return state, newOperationError("second follow-up refresh", stepDiags)
-	}
-	empty := p.Changes.Empty()
-
-	// Data resources are tricky because they legitimately get instantiated
-	// during refresh so that they will be already populated during the
-	// plan walk. Because of this, if we have any data resources in the
-	// config we'll end up wanting to destroy them again here. This is
-	// acceptable and expected, and we'll treat it as "empty" for the
-	// sake of this testing.
-	if step.Destroy && !empty {
-		empty = true
-		for _, change := range p.Changes.Resources {
-			if change.Addr.Resource.Resource.Mode != addrs.DataResourceMode {
-				empty = false
-				break
-			}
-		}
-	}
-
 	if !empty {
 		if step.ExpectNonEmptyPlan {
 			log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
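
The sketch below is not part of the patch; it is a minimal, self-contained Go illustration of the flow this diff moves to in testStep: rely on ctx.Plan() (which now also refreshes) and inspect the returned plan's Changes and State, rather than calling ctx.Refresh() and shimming its state separately. The planner interface, planResult and changeSet types, and verifyNoPerpetualDiff function are hypothetical stand-ins for the relevant parts of terraform.Context and plans.Plan, not Terraform's real API.

// Package resourcetest sketches the plan-driven verification flow adopted above.
package resourcetest

import (
	"errors"
	"fmt"
	"log"
)

// changeSet is a hypothetical stand-in for plans.Changes.
type changeSet struct {
	resources []string
}

// Empty reports whether the plan proposed any resource changes.
func (c changeSet) Empty() bool { return len(c.resources) == 0 }

// planResult is a hypothetical stand-in for *plans.Plan: because planning now
// refreshes as well, the refreshed state travels with the planned changes.
type planResult struct {
	Changes changeSet
	State   map[string]interface{}
}

// planner is a hypothetical stand-in for the part of terraform.Context used here.
type planner interface {
	Plan() (planResult, error)
}

// verifyNoPerpetualDiff mirrors the follow-up logic in testStep: plan twice
// (the second run only confirms the context keeps behaving consistently) and
// fail unless the final plan is empty or a non-empty plan was expected.
func verifyNoPerpetualDiff(ctx planner, expectNonEmpty bool) (map[string]interface{}, error) {
	if _, err := ctx.Plan(); err != nil {
		return nil, fmt.Errorf("follow-up plan: %w", err)
	}

	p, err := ctx.Plan()
	if err != nil {
		return nil, fmt.Errorf("second follow-up plan: %w", err)
	}

	if p.Changes.Empty() {
		return p.State, nil
	}
	if expectNonEmpty {
		log.Printf("[INFO] Got non-empty plan, as expected")
		return p.State, nil
	}
	return p.State, errors.New("perpetual diff: follow-up plan was not empty")
}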