package local

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"log"
	"strings"

	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/terraform/backend"
	"github.com/hashicorp/terraform/command/clistate"
	"github.com/hashicorp/terraform/config/module"
	"github.com/hashicorp/terraform/state"
	"github.com/hashicorp/terraform/terraform"
)
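
// opApply runs an apply for the local backend: it builds a context from the
// operation, optionally refreshes and plans when no saved plan is given,
// executes the apply in a goroutine so it can be interrupted, and finally
// writes and persists the resulting state.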
func (b *Local) opApply(
	ctx context.Context,
	op *backend.Operation,
	runningOp *backend.RunningOperation) {
	log.Printf("[INFO] backend/local: starting Apply operation")

	// Applying with neither a plan nor a configuration would destroy
	// everything, which is almost never what the user wants, so refuse
	// unless an explicit destroy was requested.
	if op.Plan == nil && op.Module == nil && !op.Destroy {
		runningOp.Err = fmt.Errorf(strings.TrimSpace(applyErrNoConfig))
		return
	}

	// If we have a nil module at this point, then set it to an empty tree
	// to avoid any potential crashes.
	if op.Module == nil {
		op.Module = module.NewEmptyTree()
	}

	// Set up our count hook that keeps track of resource changes, and the
	// state hook that continuously records intermediate state.
	countHook := new(CountHook)
	stateHook := new(StateHook)
	if b.ContextOpts == nil {
		b.ContextOpts = new(terraform.ContextOpts)
	}
	// Install the hooks for this operation only; the deferred restore puts
	// the original hook list back when the operation returns.
	old := b.ContextOpts.Hooks
	defer func() { b.ContextOpts.Hooks = old }()
	b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook, stateHook)

	// Get our context
	tfCtx, opState, err := b.context(op)
	if err != nil {
		runningOp.Err = err
		return
	}
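
	// Optionally hold a state lock for the whole operation so concurrent
	// runs can't corrupt the state. op.LockState is typically driven by the
	// CLI's -lock flag, and the timeout by -lock-timeout.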
	if op.LockState {
		lockCtx, cancel := context.WithTimeout(ctx, op.StateLockTimeout)
		defer cancel()

		lockInfo := state.NewLockInfo()
		lockInfo.Operation = op.Type.String()
		lockID, err := clistate.Lock(lockCtx, opState, lockInfo, b.CLI, b.Colorize())
		if err != nil {
			runningOp.Err = errwrap.Wrapf("Error locking state: {{err}}", err)
			return
		}

		// Unlock on the way out; an unlock failure is appended to any
		// existing operation error rather than replacing it.
		defer func() {
			if err := clistate.Unlock(opState, lockID, b.CLI, b.Colorize()); err != nil {
				runningOp.Err = multierror.Append(runningOp.Err, err)
			}
		}()
	}

	// Setup the state
	runningOp.State = tfCtx.State()

	// If we weren't given a plan, then we refresh/plan ourselves so the
	// apply has a diff to execute.
	if op.Plan == nil {
		// If we're refreshing before apply, perform that
		if op.PlanRefresh {
			log.Printf("[INFO] backend/local: apply calling Refresh")
			if _, err := tfCtx.Refresh(); err != nil {
				runningOp.Err = errwrap.Wrapf("Error refreshing state: {{err}}", err)
				return
			}
		}

		// Perform the plan
		log.Printf("[INFO] backend/local: apply calling Plan")
		if _, err := tfCtx.Plan(); err != nil {
			runningOp.Err = errwrap.Wrapf("Error running plan: {{err}}", err)
			return
		}
	}

	// Set up our hook for continuous state updates
	stateHook.State = opState

	// Start the apply in a goroutine so that we can be interrupted.
	var applyState *terraform.State
	var applyErr error
	doneCh := make(chan struct{})
	go func() {
		defer close(doneCh)
		_, applyErr = tfCtx.Apply()
		// we always want the state, even if apply failed
		applyState = tfCtx.State()

		/*
			// Record any shadow errors for later
			if err := ctx.ShadowError(); err != nil {
				shadowErr = multierror.Append(shadowErr, multierror.Prefix(
					err, "apply operation:"))
			}
		*/
	}()

	// Wait for the apply to finish or for us to be interrupted so
	// we can handle it properly.
	select {
	case <-ctx.Done():
		if b.CLI != nil {
			b.CLI.Output("stopping apply operation...")
		}

		// Try to force a PersistState just in case the process is terminated
		// before we can complete.
		if err := opState.PersistState(); err != nil {
			// We can't error out from here, but warn the user if there was an
			// error. If this isn't transient, we will catch it again below and
			// attempt to save the state another way.
			if b.CLI != nil {
				b.CLI.Error(fmt.Sprintf(earlyStateWriteErrorFmt, err))
			}
		}

		// Stop execution
		go tfCtx.Stop()

		// Wait for completion still
		<-doneCh
	case <-doneCh:
	}
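
	// Note: cancellation comes from the command layer via ctx (e.g. on
	// SIGINT). That takes the ctx.Done() path above: snapshot state early,
	// ask the core to stop, then still wait for the apply goroutine to
	// drain before writing the final state below.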

	// Store the final state
	runningOp.State = applyState

	// Persist the state to the backend; on failure, fall back to saving a
	// local recovery copy via backupStateForError.
	if err := opState.WriteState(applyState); err != nil {
		runningOp.Err = b.backupStateForError(applyState, err)
		return
	}
	if err := opState.PersistState(); err != nil {
		runningOp.Err = b.backupStateForError(applyState, err)
		return
	}

	if applyErr != nil {
		runningOp.Err = fmt.Errorf(
			"Error applying plan:\n\n"+
				"%s\n\n"+
				"Terraform does not automatically roll back in the face of errors.\n"+
				"Instead, your Terraform state file has been partially updated with\n"+
				"any resources that successfully completed. Please address the error\n"+
				"above and apply again to incrementally change your infrastructure.",
			multierror.Flatten(applyErr))
		return
	}

	// If we have a UI, output the results
	if b.CLI != nil {
		if op.Destroy {
			b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
				"[reset][bold][green]\n"+
					"Destroy complete! Resources: %d destroyed.",
				countHook.Removed)))
		} else {
			b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
				"[reset][bold][green]\n"+
					"Apply complete! Resources: %d added, %d changed, %d destroyed.",
				countHook.Added,
				countHook.Changed,
				countHook.Removed)))
		}

		if countHook.Added > 0 || countHook.Changed > 0 {
			b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
				"[reset]\n"+
					"The state of your infrastructure has been saved to the path\n"+
					"below. This state is required to modify and destroy your\n"+
					"infrastructure, so keep it safe. To inspect the complete state\n"+
					"use the `terraform show` command.\n\n"+
					"State path: %s",
				b.StateOutPath)))
		}
	}
}

// backupStateForError is called when we fail to persist the state for some
// reason, and attempts to save a backup copy of the state to local disk to
// help the user recover. This is a "last ditch effort" sort of thing, so we
// really don't want to end up in this codepath; we should do everything we
// possibly can to get the state saved _somewhere_.
func (b *Local) backupStateForError(applyState *terraform.State, err error) error {
	b.CLI.Error(fmt.Sprintf("Failed to save state: %s\n", err))

	local := &state.LocalState{Path: "errored.tfstate"}
	writeErr := local.WriteState(applyState)
	if writeErr != nil {
		b.CLI.Error(fmt.Sprintf(
			"Also failed to create local state file for recovery: %s\n\n", writeErr,
		))
		// To avoid leaving the user with no state at all, our last resort
		// is to print the JSON state out onto the terminal. This is an awful
		// UX, so we should definitely avoid doing this if at all possible,
		// but at least the user has _some_ path to recover if we end up
		// here for some reason.
		stateBuf := new(bytes.Buffer)
		jsonErr := terraform.WriteState(applyState, stateBuf)
		if jsonErr != nil {
			b.CLI.Error(fmt.Sprintf(
				"Also failed to JSON-serialize the state to print it: %s\n\n", jsonErr,
			))
			return errors.New(stateWriteFatalError)
		}

		b.CLI.Output(stateBuf.String())

		return errors.New(stateWriteConsoleFallbackError)
	}

	return errors.New(stateWriteBackedUpError)
}
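
// The fallback chain above is: configured backend, then a local
// "errored.tfstate" file, then raw JSON on the terminal, with a distinct
// error message for each level so the recovery instructions match what
// actually happened.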

const applyErrNoConfig = `
No configuration files found!

Apply requires configuration to be present. Applying without a configuration
would mark everything for destruction, which is normally not what is desired.

If you would like to destroy everything, please run 'terraform destroy'
instead, which does not require any configuration files.
`

const stateWriteBackedUpError = `Failed to persist state to backend.

The error shown above has prevented Terraform from writing the updated state
to the configured backend. To allow for recovery, the state has been written
to the file "errored.tfstate" in the current working directory.

Running "terraform apply" again at this point will create a forked state,
making it harder to recover.

To retry writing this state, use the following command:
    terraform state push errored.tfstate
`

const stateWriteConsoleFallbackError = `Failed to persist state to backend.

The errors shown above prevented Terraform from writing the updated state to
the configured backend and from creating a local backup file. As a fallback,
the raw state data is printed above as a JSON object.

To retry writing this state, copy the state data (from the first { to the
last } inclusive) and save it into a local file called errored.tfstate, then
run the following command:
    terraform state push errored.tfstate
`

const stateWriteFatalError = `Failed to save state after apply.

A catastrophic error has prevented Terraform from persisting the state file
or creating a backup. Unfortunately this means that the record of any resources
created during this apply has been lost, and such resources may exist outside
of Terraform's management.

For resources that support import, it is possible to recover by manually
importing each resource using its id from the target system.

This is a serious bug in Terraform and should be reported.
`

const earlyStateWriteErrorFmt = `Error saving current state: %s

Terraform encountered an error attempting to save the state before canceling
the current operation. Once the operation is complete another attempt will be
made to save the final state.
`
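
// Recovery sketch (hypothetical operator session, not part of this package):
// once the backend is reachable again, push the backed-up state with the
// command named in the messages above:
//
//	$ terraform state push errored.tfstate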