mirror of https://github.com/opentofu/opentofu.git
89b05050ec
Previously terraform.Context was built in an unfortunate way where all of the data was provided up front in terraform.NewContext and then mutated directly by subsequent operations. That made the data flow hard to follow, commonly leading to bugs, and also meant that we were forced to take various actions too early in terraform.NewContext, rather than waiting until a more appropriate time during an operation.

This (enormous) commit changes terraform.Context so that its fields are broadly just unchanging data about the execution context (current workspace name, available plugins, etc) whereas the main data Terraform works with arrives via individual method arguments and is returned in return values. Specifically, this means that terraform.Context no longer "has-a" config, state, and "planned changes", instead holding on to those only temporarily during an operation. The caller is responsible for propagating the outcome of one step into the next step so that the data flow between operations is actually visible.

However, since that's a change to the main entry points in the "terraform" package, this commit also touches every file in the codebase which interacted with those APIs. Most of the noise here is in updating tests to take the same actions using the new API style, but this also affects the main-code callers in the backends and in the command package.

My goal here was to refactor without changing observable behavior, but in practice there are a couple externally-visible behavior variations here that seemed okay in service of the broader goal:

- The "terraform graph" command is no longer hooked directly into the core graph builders, because that's no longer part of the public API. However, I did include a couple new Context functions whose contract is to produce a UI-oriented graph, and _for now_ those continue to return the physical graph we use for those operations. There's no exported API for generating the "validate" and "eval" graphs, because neither is particularly interesting in its own right, and so "terraform graph" no longer supports those graph types.

- terraform.NewContext no longer has the responsibility for collecting all of the provider schemas up front. Instead, we wait until we need them. However, that means that some of our error messages now have a slightly different shape due to unwinding through a differently-shaped call stack. As of this commit we also end up reloading the schemas multiple times in some cases, which is functionally acceptable but likely represents a performance regression. I intend to rework this to use caching, but I'm saving that for a later commit because this one is big enough already.

The proximal reason for this change is to resolve the chicken/egg problem whereby there was previously no single point where we could apply "moved" statements to the previous run state before creating a plan. With this change in place, we can now do that as part of Context.Plan, prior to forking the input state into the three separate state artifacts we use during planning.

However, this is at least the third project in a row where the previous API design led to piling more functionality into terraform.NewContext and then working around the incorrect order of operations that produces, so I intend that by paying the cost/risk of this large diff now we can in turn reduce the cost/risk of future projects that relate to our main workflow actions.
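To make the new call pattern concrete, here is a minimal sketch of how a caller now threads data from one operation into the next, assuming config, prevRunState, and planOpts were loaded elsewhere (simplified, not the exact signatures):

    // The context now holds only unchanging execution context, such as
    // the available plugins and the current workspace.
    tfCtx, ctxDiags := terraform.NewContext(&terraform.ContextOpts{})

    // Each operation takes its inputs as arguments and hands back its
    // results, so the caller visibly propagates one step's outcome into
    // the next instead of mutating shared state inside the context.
    plan, planDiags := tfCtx.Plan(config, prevRunState, planOpts)
    newState, applyDiags := tfCtx.Apply(plan, config)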
166 lines
5.4 KiB
Go
package local

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/terraform/internal/backend"
	"github.com/hashicorp/terraform/internal/plans"
	"github.com/hashicorp/terraform/internal/plans/planfile"
	"github.com/hashicorp/terraform/internal/states/statefile"
	"github.com/hashicorp/terraform/internal/states/statemgr"
	"github.com/hashicorp/terraform/internal/terraform"
	"github.com/hashicorp/terraform/internal/tfdiags"
)

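// opPlan implements the "plan" operation for the local backend: it builds
// a local run for the given operation, produces a plan in a cancelable
// goroutine, optionally saves the plan to disk, and renders it via op.View.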
func (b *Local) opPlan(
	stopCtx context.Context,
	cancelCtx context.Context,
	op *backend.Operation,
	runningOp *backend.RunningOperation) {

	log.Printf("[INFO] backend/local: starting Plan operation")

	var diags tfdiags.Diagnostics

	if op.PlanFile != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Can't re-plan a saved plan",
			"The plan command was given a saved plan file as its input. This command generates "+
				"a new plan, and so it requires a configuration directory as its argument.",
		))
		op.ReportResult(runningOp, diags)
		return
	}

	// Local planning requires a config, unless we're planning to destroy.
	if op.PlanMode != plans.DestroyMode && !op.HasConfig() {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"No configuration files",
			"Plan requires configuration to be present. Planning without a configuration would "+
				"mark everything for destruction, which is normally not what is desired. If you "+
				"would like to destroy everything, run plan with the -destroy option. Otherwise, "+
				"create a Terraform configuration file (.tf file) and try again.",
		))
		op.ReportResult(runningOp, diags)
		return
	}

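	// localRun below builds the terraform.Context from b.ContextOpts, so
	// make sure we always have a non-nil value for it to work with.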
	if b.ContextOpts == nil {
		b.ContextOpts = new(terraform.ContextOpts)
	}

	// Get our context
	lr, configSnap, opState, ctxDiags := b.localRun(op)
	diags = diags.Append(ctxDiags)
	if ctxDiags.HasErrors() {
		op.ReportResult(runningOp, diags)
		return
	}
	// the state was locked during successful context creation; unlock the state
	// when the operation completes
	defer func() {
		diags := op.StateLocker.Unlock()
		if diags.HasErrors() {
			op.View.Diagnostics(diags)
			runningOp.Result = backend.OperationFailure
		}
	}()

	// Since planning doesn't immediately change the persisted state, the
	// resulting state is always just the input state.
	runningOp.State = lr.InputState

	// Perform the plan in a goroutine so we can be interrupted
	var plan *plans.Plan
	var planDiags tfdiags.Diagnostics
	doneCh := make(chan struct{})
	go func() {
		defer close(doneCh)
		log.Printf("[INFO] backend/local: plan calling Plan")
		plan, planDiags = lr.Core.Plan(lr.Config, lr.InputState, lr.PlanOpts)
	}()

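	// opWait blocks until the goroutine above finishes or the operation is
	// interrupted via stopCtx/cancelCtx, returning true if it was cancelled.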
	if b.opWait(doneCh, stopCtx, cancelCtx, lr.Core, opState, op.View) {
		// If we get in here then the operation was cancelled, which is always
		// considered to be a failure.
		log.Printf("[INFO] backend/local: plan operation was force-cancelled by interrupt")
		runningOp.Result = backend.OperationFailure
		return
	}
	log.Printf("[INFO] backend/local: plan operation completed")

	diags = diags.Append(planDiags)
	if planDiags.HasErrors() {
		op.ReportResult(runningOp, diags)
		return
	}

	// Record whether this plan includes any side-effects that could be applied.
	runningOp.PlanEmpty = !plan.CanApply()

	// Save the plan to disk
	if path := op.PlanOutPath; path != "" {
		if op.PlanOutBackend == nil {
			// This is always a bug in the operation caller; it's not valid
			// to set PlanOutPath without also setting PlanOutBackend.
			diags = diags.Append(fmt.Errorf(
				"PlanOutPath set without also setting PlanOutBackend (this is a bug in Terraform)"),
			)
			op.ReportResult(runningOp, diags)
			return
		}
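		// Record the target backend in the plan file so that a later apply
		// of this saved plan can verify it is running against that same
		// backend.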
		plan.Backend = *op.PlanOutBackend

		// We may have updated the state in the refresh step above, but we
		// will freeze that updated state in the plan file for now and
		// only write it if this plan is subsequently applied.
		plannedStateFile := statemgr.PlannedStateUpdate(opState, plan.PriorState)

		// We also include a file containing the state as it existed before
		// we took any action at all, but this one isn't intended to ever
		// be saved to the backend (an equivalent snapshot should already be
		// there) and so we just use a stub state file header in this case.
		// NOTE: This won't be exactly identical to the latest state snapshot
		// in the backend because it's still been subject to state upgrading
		// to make it consumable by the current Terraform version, and
		// intentionally doesn't preserve the header info.
		prevStateFile := &statefile.File{
			State: plan.PrevRunState,
		}

		log.Printf("[INFO] backend/local: writing plan output to: %s", path)
		err := planfile.Create(path, configSnap, prevStateFile, plannedStateFile, plan)
		if err != nil {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Failed to write plan file",
				fmt.Sprintf("The plan file could not be written: %s.", err),
			))
			op.ReportResult(runningOp, diags)
			return
		}
	}

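	// Rendering the plan requires the provider schemas to decode it; as
	// noted in the commit message above, these are now loaded on demand
	// rather than up front in terraform.NewContext.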
	// Render the plan
	schemas, moreDiags := lr.Core.Schemas(lr.Config, lr.InputState)
	diags = diags.Append(moreDiags)
	if moreDiags.HasErrors() {
		op.ReportResult(runningOp, diags)
		return
	}
	op.View.Plan(plan, schemas)

	// If we've accumulated any warnings along the way then we'll show them
	// here just before we show the summary and next steps. If we encountered
	// errors then we would've returned early at some other point above.
	op.View.Diagnostics(diags)

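	// Unless the plan contains no changes, tell the user what to do next
	// to apply it, mentioning the saved plan file if one was written.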
	if !runningOp.PlanEmpty {
		op.View.PlanNextStep(op.PlanOutPath)
	}
}