Mirror of https://github.com/opentofu/opentofu.git (synced 2024-12-28 18:01:01 -06:00)

Commit 89b05050ec
Previously terraform.Context was built in an unfortunate way where all of the data was provided up front in terraform.NewContext and then mutated directly by subsequent operations. That made the data flow hard to follow, commonly leading to bugs, and also meant that we were forced to take various actions too early in terraform.NewContext, rather than waiting until a more appropriate time during an operation.

This (enormous) commit changes terraform.Context so that its fields are broadly just unchanging data about the execution context (current workspace name, available plugins, etc) whereas the main data Terraform works with arrives via individual method arguments and is returned in return values.

Specifically, this means that terraform.Context no longer "has-a" config, state, and "planned changes", instead holding on to those only temporarily during an operation. The caller is responsible for propagating the outcome of one step into the next step so that the data flow between operations is actually visible.

However, since that's a change to the main entry points in the "terraform" package, this commit also touches every file in the codebase which interacted with those APIs. Most of the noise here is in updating tests to take the same actions using the new API style, but this also affects the main-code callers in the backends and in the command package.

My goal here was to refactor without changing observable behavior, but in practice there are a couple externally-visible behavior variations here that seemed okay in service of the broader goal:

- The "terraform graph" command is no longer hooked directly into the core graph builders, because that's no longer part of the public API. However, I did include a couple new Context functions whose contract is to produce a UI-oriented graph, and _for now_ those continue to return the physical graph we use for those operations. There's no exported API for generating the "validate" and "eval" graphs, because neither is particularly interesting in its own right, and so "terraform graph" no longer supports those graph types.

- terraform.NewContext no longer has the responsibility for collecting all of the provider schemas up front. Instead, we wait until we need them. However, that means that some of our error messages now have a slightly different shape due to unwinding through a differently-shaped call stack. As of this commit we also end up reloading the schemas multiple times in some cases, which is functionally acceptable but likely represents a performance regression. I intend to rework this to use caching, but I'm saving that for a later commit because this one is big enough already.

The proximal reason for this change is to resolve the chicken/egg problem whereby there was previously no single point where we could apply "moved" statements to the previous run state before creating a plan. With this change in place, we can now do that as part of Context.Plan, prior to forking the input state into the three separate state artifacts we use during planning.

However, this is at least the third project in a row where the previous API design led to piling more functionality into terraform.NewContext and then working around the incorrect order of operations that produces, so I intend that by paying the cost/risk of this large diff now we can in turn reduce the cost/risk of future projects that relate to our main workflow actions.
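The Go file below is one of the main-code callers in the backends that was updated for this new style. As a rough sketch of the data flow described above, assuming the Plan and Apply shapes used by that caller (lr.Core.Plan(config, state, opts) and lr.Core.Apply(plan, config)), with hypothetical names (applyFlow, tfCtx, prevRunState), assumed types from the internal configs/states packages, and all locking, UI, and persistence concerns omitted:

	// Sketch only: the caller now threads config, state, and plan between
	// steps explicitly instead of the Context mutating shared data internally.
	func applyFlow(tfCtx *terraform.Context, config *configs.Config, prevRunState *states.State, planOpts *terraform.PlanOpts) (*states.State, tfdiags.Diagnostics) {
		var diags tfdiags.Diagnostics

		// Plan receives the configuration and previous run state as arguments
		// and returns the plan as a value; the Context keeps none of them.
		plan, moreDiags := tfCtx.Plan(config, prevRunState, planOpts)
		diags = diags.Append(moreDiags)
		if diags.HasErrors() {
			return prevRunState, diags
		}

		// The caller passes the plan (and the same config) into the next step;
		// Apply returns the resulting state rather than updating it in place.
		newState, moreDiags := tfCtx.Apply(plan, config)
		diags = diags.Append(moreDiags)
		return newState, diags
	}

The opApply function in the file below follows this same pattern, with lr.Core playing the role of tfCtx and the surrounding locking, approval prompt, and state persistence steps added around it.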
276 lines
9.3 KiB
Go
package local

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/terraform/internal/backend"
	"github.com/hashicorp/terraform/internal/command/views"
	"github.com/hashicorp/terraform/internal/plans"
	"github.com/hashicorp/terraform/internal/states"
	"github.com/hashicorp/terraform/internal/states/statefile"
	"github.com/hashicorp/terraform/internal/states/statemgr"
	"github.com/hashicorp/terraform/internal/terraform"
	"github.com/hashicorp/terraform/internal/tfdiags"
)

func (b *Local) opApply(
	stopCtx context.Context,
	cancelCtx context.Context,
	op *backend.Operation,
	runningOp *backend.RunningOperation) {
	log.Printf("[INFO] backend/local: starting Apply operation")

	var diags, moreDiags tfdiags.Diagnostics

	// Applying without a saved plan and without any configuration (outside of
	// destroy mode) would plan to destroy everything, which is almost never
	// what the user wants, so we reject that combination here.
	if op.PlanFile == nil && op.PlanMode != plans.DestroyMode && !op.HasConfig() {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"No configuration files",
			"Apply requires configuration to be present. Applying without a configuration "+
				"would mark everything for destruction, which is normally not what is desired. "+
				"If you would like to destroy everything, run 'terraform destroy' instead.",
		))
		op.ReportResult(runningOp, diags)
		return
	}

	stateHook := new(StateHook)
	op.Hooks = append(op.Hooks, stateHook)

	// Get our context
	lr, _, opState, contextDiags := b.localRun(op)
	diags = diags.Append(contextDiags)
	if contextDiags.HasErrors() {
		op.ReportResult(runningOp, diags)
		return
	}
	// The state was locked during successful context creation; unlock the
	// state when the operation completes.
	defer func() {
		diags := op.StateLocker.Unlock()
		if diags.HasErrors() {
			op.View.Diagnostics(diags)
			runningOp.Result = backend.OperationFailure
		}
	}()

	// We'll start off with our result being the input state, and replace it
	// with the result state only if we eventually complete the apply
	// operation.
	runningOp.State = lr.InputState

	var plan *plans.Plan
	// If we weren't given a plan, then we refresh/plan
	if op.PlanFile == nil {
		// Perform the plan
		log.Printf("[INFO] backend/local: apply calling Plan")
		plan, moreDiags = lr.Core.Plan(lr.Config, lr.InputState, lr.PlanOpts)
		diags = diags.Append(moreDiags)
		if moreDiags.HasErrors() {
			op.ReportResult(runningOp, diags)
			return
		}

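		// The plan renderer below needs the full provider schemas; with the
		// refactored Context API these are loaded on demand here rather than
		// up front in terraform.NewContext.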
		schemas, moreDiags := lr.Core.Schemas(lr.Config, lr.InputState)
		diags = diags.Append(moreDiags)
		if moreDiags.HasErrors() {
			op.ReportResult(runningOp, diags)
			return
		}

		trivialPlan := !plan.CanApply()
		hasUI := op.UIOut != nil && op.UIIn != nil
		mustConfirm := hasUI && !op.AutoApprove && !trivialPlan
		op.View.Plan(plan, schemas)

		if mustConfirm {
			var desc, query string
			switch op.PlanMode {
			case plans.DestroyMode:
				if op.Workspace != "default" {
					query = "Do you really want to destroy all resources in workspace \"" + op.Workspace + "\"?"
				} else {
					query = "Do you really want to destroy all resources?"
				}
				desc = "Terraform will destroy all your managed infrastructure, as shown above.\n" +
					"There is no undo. Only 'yes' will be accepted to confirm."
			case plans.RefreshOnlyMode:
				if op.Workspace != "default" {
					query = "Would you like to update the Terraform state for \"" + op.Workspace + "\" to reflect these detected changes?"
				} else {
					query = "Would you like to update the Terraform state to reflect these detected changes?"
				}
				desc = "Terraform will write these changes to the state without modifying any real infrastructure.\n" +
					"There is no undo. Only 'yes' will be accepted to confirm."
			default:
				if op.Workspace != "default" {
					query = "Do you want to perform these actions in workspace \"" + op.Workspace + "\"?"
				} else {
					query = "Do you want to perform these actions?"
				}
				desc = "Terraform will perform the actions described above.\n" +
					"Only 'yes' will be accepted to approve."
			}

			// We'll show any accumulated warnings before we display the prompt,
			// so the user can consider them when deciding how to answer.
			if len(diags) > 0 {
				op.View.Diagnostics(diags)
				diags = nil // reset so we won't show the same diagnostics again later
			}

			v, err := op.UIIn.Input(stopCtx, &terraform.InputOpts{
				Id:          "approve",
				Query:       "\n" + query,
				Description: desc,
			})
			if err != nil {
				diags = diags.Append(fmt.Errorf("error asking for approval: %w", err))
				op.ReportResult(runningOp, diags)
				return
			}
			if v != "yes" {
				op.View.Cancelled(op.PlanMode)
				runningOp.Result = backend.OperationFailure
				return
			}
		}
	} else {
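		// A saved plan file was provided, so we use the plan it contains as-is
		// and show its non-no-op changes; applying a saved plan does not prompt
		// for interactive approval.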
		plan = lr.Plan
		for _, change := range plan.Changes.Resources {
			if change.Action != plans.NoOp {
				op.View.PlannedChange(change)
			}
		}
	}

	// Set up our hook for continuous state updates
	stateHook.StateMgr = opState

	// Start the apply in a goroutine so that we can be interrupted.
	var applyState *states.State
	var applyDiags tfdiags.Diagnostics
	doneCh := make(chan struct{})
	go func() {
		defer close(doneCh)
		log.Printf("[INFO] backend/local: apply calling Apply")
		applyState, applyDiags = lr.Core.Apply(plan, lr.Config)
	}()

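	// Wait for the apply goroutine to finish; opWait also watches stopCtx and
	// cancelCtx so the operation can be interrupted, and returns true when we
	// should give up and return early.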
	if b.opWait(doneCh, stopCtx, cancelCtx, lr.Core, opState, op.View) {
		return
	}
	diags = diags.Append(applyDiags)

	// Store the final state
	runningOp.State = applyState
	err := statemgr.WriteAndPersist(opState, applyState)
	if err != nil {
		// Export the state file from the state manager and assign the new
		// state. This is needed to preserve the existing serial and lineage.
		stateFile := statemgr.Export(opState)
		if stateFile == nil {
			stateFile = &statefile.File{}
		}
		stateFile.State = applyState

		diags = diags.Append(b.backupStateForError(stateFile, err, op.View))
		op.ReportResult(runningOp, diags)
		return
	}

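	// The state has been persisted above even if the apply itself failed, so
	// only now do we report any errors the apply produced.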
	if applyDiags.HasErrors() {
		op.ReportResult(runningOp, diags)
		return
	}

	// If we've accumulated any warnings along the way then we'll show them
	// here just before we show the summary and next steps. If we encountered
	// errors then we would've returned early at some other point above.
	op.View.Diagnostics(diags)
}

// backupStateForError is called in a scenario where we're unable to persist the
// state for some reason, and will attempt to save a backup copy of the state
// to local disk to help the user recover. This is a "last ditch effort" sort
// of thing, so we really don't want to end up in this codepath; we should do
// everything we possibly can to get the state saved _somewhere_.
func (b *Local) backupStateForError(stateFile *statefile.File, err error, view views.Operation) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics

	diags = diags.Append(tfdiags.Sourceless(
		tfdiags.Error,
		"Failed to save state",
		fmt.Sprintf("Error saving state: %s", err),
	))

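	// First try to save a copy of the state to "errored.tfstate" in the
	// current working directory, which the user can later push back to the
	// backend with "terraform state push".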
	local := statemgr.NewFilesystem("errored.tfstate")
	writeErr := local.WriteStateForMigration(stateFile, true)
	if writeErr != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to create local state file",
			fmt.Sprintf("Error creating local state file for recovery: %s", writeErr),
		))

		// To avoid leaving the user with no state at all, our last resort
		// is to print the JSON state out onto the terminal. This is an awful
		// UX, so we should definitely avoid doing this if at all possible,
		// but at least the user has _some_ path to recover if we end up
		// here for some reason.
		if dumpErr := view.EmergencyDumpState(stateFile); dumpErr != nil {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Failed to serialize state",
				fmt.Sprintf(stateWriteFatalErrorFmt, dumpErr),
			))
		}

		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to persist state to backend",
			stateWriteConsoleFallbackError,
		))
		return diags
	}

	diags = diags.Append(tfdiags.Sourceless(
		tfdiags.Error,
		"Failed to persist state to backend",
		stateWriteBackedUpError,
	))

	return diags
}

const stateWriteBackedUpError = `The error shown above has prevented Terraform from writing the updated state to the configured backend. To allow for recovery, the state has been written to the file "errored.tfstate" in the current working directory.

Running "terraform apply" again at this point will create a forked state, making it harder to recover.

To retry writing this state, use the following command:
    terraform state push errored.tfstate
`

const stateWriteConsoleFallbackError = `The errors shown above prevented Terraform from writing the updated state to
the configured backend and from creating a local backup file. As a fallback,
the raw state data is printed above as a JSON object.

To retry writing this state, copy the state data (from the first { to the last } inclusive) and save it into a local file called errored.tfstate, then run the following command:
    terraform state push errored.tfstate
`

const stateWriteFatalErrorFmt = `Failed to save state after apply.

Error serializing state: %s

A catastrophic error has prevented Terraform from persisting the state file or creating a backup. Unfortunately this means that the record of any resources created during this apply has been lost, and such resources may exist outside of Terraform's management.

For resources that support import, it is possible to recover by manually importing each resource using its id from the target system.

This is a serious bug in Terraform and should be reported.
`