core: Track both previous run state and refresh state during planning
The "previous run state" is our record of what the previous run of Terraform considered to be its outcome, but in order to do anything useful with that we must ensure that the data inside conforms to the current resource type schemas, which might be different than the schemas that were current during the previous run if the relevant provider has since been upgraded. For that reason then, we'll start off with the previous run state set exactly equal to what was saved in the prior snapshot (modulo any changes that happened during a state file format upgrade) but then during our planning operation we'll overwrite individual resource instance objects with the result of upgrading, so that in a situation where we successfully run plan to completion the previous run state should always have a compatible schema with the "prior state" (the result of refreshing) for managed resources, and thus the caller can meaningfully compare the two in order to detect and describe any out-of-band changes that occurred since the previous run.
Parent: 7c6e78bcb0
Commit: 3443621227
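Because the previous run state is upgraded in place during planning, a caller can compare it byte-for-byte against the refreshed prior state once plan completes. The following is a minimal hypothetical sketch of such a comparison, not code from this commit; it uses only the Plan fields and state accessors that appear in the diff below, and the import paths assume this era's terraform module layout.

package driftcheck

import (
	"bytes"
	"fmt"

	"github.com/hashicorp/terraform/addrs"
	"github.com/hashicorp/terraform/plans"
)

// DetectDrift reports how one resource instance changed outside of Terraform
// between the previous run and the refresh step. The byte comparison is only
// meaningful because planning upgrades prevRunState to current schemas first.
func DetectDrift(plan *plans.Plan, addr addrs.AbsResourceInstance) string {
	prev := plan.PrevRunState.ResourceInstance(addr)
	prior := plan.PriorState.ResourceInstance(addr)
	switch {
	case prev == nil || prev.Current == nil:
		return "not tracked at the end of the previous run"
	case prior == nil || prior.Current == nil:
		return "deleted outside of Terraform since the previous run"
	case !bytes.Equal(prev.Current.AttrsJSON, prior.Current.AttrsJSON):
		return fmt.Sprintf("changed outside of Terraform:\n%s\n->\n%s",
			prev.Current.AttrsJSON, prior.Current.AttrsJSON)
	default:
		return "no out-of-band change detected"
	}
}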
@@ -97,8 +97,6 @@ type ContextMeta struct {
 type Context struct {
 	config       *configs.Config
 	changes      *plans.Changes
-	state        *states.State
-	refreshState *states.State
 	skipRefresh  bool
 	targets      []addrs.Targetable
 	forceReplace []addrs.AbsResourceInstance

@@ -106,6 +104,30 @@ type Context struct {
 	meta     *ContextMeta
 	planMode plans.Mode

+	// state, refreshState, and prevRunState simultaneously track three
+	// different incarnations of the Terraform state:
+	//
+	// "state" is always the most "up-to-date". During planning it represents
+	// our best approximation of the planned new state, and during applying
+	// it represents the results of all of the actions we've taken so far.
+	//
+	// "refreshState" is populated and relevant only during planning, where we
+	// update it to reflect a provider's sense of the current state of the
+	// remote object each resource instance is bound to but don't include
+	// any changes implied by the configuration.
+	//
+	// "prevRunState" is similar to refreshState except that it doesn't even
+	// include the result of the provider's refresh step, and instead reflects
+	// the state as we found it prior to any changes, although it does reflect
+	// the result of running the provider's schema upgrade actions so that the
+	// resource instance objects will all conform to the _current_ resource
+	// type schemas if planning is successful, so that in that case it will
+	// be meaningful to compare prevRunState to refreshState to detect changes
+	// made outside of Terraform.
+	state        *states.State
+	refreshState *states.State
+	prevRunState *states.State
+
 	hooks      []Hook
 	components contextComponentFactory
 	schemas    *Schemas
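As a concrete picture of the split, here is how the three incarnations end up for a single instance whose remote object has drifted, using the attribute values that the tests added later on this page assert against (the exact JSON is illustrative):

    prevRunState: {"arg":"upgraded"}  (schema upgrade only, no refresh)
    refreshState: {"arg":"current"}   (upgrade plus provider ReadResource)
    state:        {"arg":"after"}     (approximation of the planned new state)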
@@ -314,6 +336,7 @@ func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) {
 		config:       config,
 		state:        state,
 		refreshState: state.DeepCopy(),
+		prevRunState: state.DeepCopy(),
 		skipRefresh:  opts.SkipRefresh,
 		targets:      opts.Targets,
 		forceReplace: opts.ForceReplace,

@@ -388,12 +411,13 @@ func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags.

 	case GraphTypePlanDestroy:
 		return (&DestroyPlanGraphBuilder{
-			Config:     c.config,
-			State:      c.state,
-			Components: c.components,
-			Schemas:    c.schemas,
-			Targets:    c.targets,
-			Validate:   opts.Validate,
+			Config:      c.config,
+			State:       c.state,
+			Components:  c.components,
+			Schemas:     c.schemas,
+			Targets:     c.targets,
+			Validate:    opts.Validate,
+			skipRefresh: c.skipRefresh,
 		}).Build(addrs.RootModuleInstance)

 	case GraphTypePlanRefreshOnly:

@@ -631,8 +655,6 @@ The -target option is not for routine use, and is provided only for exceptional
 func (c *Context) plan() (*plans.Plan, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics

-	prevRunState := c.state.DeepCopy()
-
 	graph, graphDiags := c.Graph(GraphTypePlan, nil)
 	diags = diags.Append(graphDiags)
 	if graphDiags.HasErrors() {

@@ -650,7 +672,7 @@ func (c *Context) plan() (*plans.Plan, tfdiags.Diagnostics) {
 		UIMode:            plans.NormalMode,
 		Changes:           c.changes,
 		ForceReplaceAddrs: c.forceReplace,
-		PrevRunState:      prevRunState,
+		PrevRunState:      c.prevRunState.DeepCopy(),
 	}

 	c.refreshState.SyncWrapper().RemovePlannedResourceInstanceObjects()

@@ -668,8 +690,7 @@ func (c *Context) plan() (*plans.Plan, tfdiags.Diagnostics) {
 func (c *Context) destroyPlan() (*plans.Plan, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics
 	destroyPlan := &plans.Plan{
-		PrevRunState: c.state.DeepCopy(),
-		PriorState:   c.state.DeepCopy(),
+		PriorState: c.state.DeepCopy(),
 	}
 	c.changes = plans.NewChanges()

@@ -677,6 +698,13 @@ func (c *Context) destroyPlan() (*plans.Plan, tfdiags.Diagnostics) {
 	// sources, and remove missing managed resources. This is required because
 	// a "destroy plan" is only creating delete changes, and is essentially a
 	// local operation.
+	//
+	// NOTE: if skipRefresh _is_ set then we'll rely on the destroy-plan walk
+	// below to upgrade the prevRunState and priorState both to the latest
+	// resource type schemas, so NodePlanDestroyableResourceInstance.Execute
+	// must coordinate with this by taking that action only when c.skipRefresh
+	// _is_ set. This coupling between the two is unfortunate but necessary
+	// to work within our current structure.
 	if !c.skipRefresh {
 		refreshPlan, refreshDiags := c.plan()
 		diags = diags.Append(refreshDiags)

@@ -687,6 +715,7 @@ func (c *Context) destroyPlan() (*plans.Plan, tfdiags.Diagnostics) {
 		// insert the refreshed state into the destroy plan result, and discard
 		// the changes recorded from the refresh.
 		destroyPlan.PriorState = refreshPlan.PriorState.DeepCopy()
+		destroyPlan.PrevRunState = refreshPlan.PrevRunState.DeepCopy()
 		c.changes = plans.NewChanges()
 	}

@@ -704,6 +733,17 @@ func (c *Context) destroyPlan() (*plans.Plan, tfdiags.Diagnostics) {
 		return nil, diags
 	}

+	if c.skipRefresh {
+		// If we didn't do refreshing then both the previous run state and
+		// the prior state are the result of upgrading the previous run state,
+		// which we should've upgraded as part of the plan-destroy walk
+		// in NodePlanDestroyableResourceInstance.Execute, so they'll have the
+		// current schema but neither will reflect any out-of-band changes in
+		// the remote system.
+		destroyPlan.PrevRunState = c.prevRunState.DeepCopy()
+		destroyPlan.PriorState = c.prevRunState.DeepCopy()
+	}
+
 	destroyPlan.UIMode = plans.DestroyMode
 	destroyPlan.Changes = c.changes
 	return destroyPlan, diags
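To summarize the two destroy-plan codepaths that the hunks above establish, here is a condensed sketch; it restates the logic shown above rather than quoting it, with error handling and the walk itself elided:

// Condensed from Context.destroyPlan above (sketch, not verbatim).
func destroyPlanStatesSketch(c *Context) (*plans.Plan, tfdiags.Diagnostics) {
	destroyPlan := &plans.Plan{PriorState: c.state.DeepCopy()}
	if !c.skipRefresh {
		// A whole normal plan runs first, so schema upgrades and refresh
		// have already happened; copy its view of both states.
		refreshPlan, diags := c.plan()
		if diags.HasErrors() {
			return nil, diags
		}
		destroyPlan.PriorState = refreshPlan.PriorState.DeepCopy()
		destroyPlan.PrevRunState = refreshPlan.PrevRunState.DeepCopy()
	}
	// ... the destroy-plan walk runs here ...
	if c.skipRefresh {
		// Only the in-walk upgrade in NodePlanDestroyableResourceInstance
		// ran, so both states are the upgraded previous run state and
		// neither reflects out-of-band changes in the remote system.
		destroyPlan.PrevRunState = c.prevRunState.DeepCopy()
		destroyPlan.PriorState = c.prevRunState.DeepCopy()
	}
	destroyPlan.UIMode = plans.DestroyMode
	return destroyPlan, nil
}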
@@ -712,8 +752,6 @@ func (c *Context) destroyPlan() (*plans.Plan, tfdiags.Diagnostics) {
 func (c *Context) refreshOnlyPlan() (*plans.Plan, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics

-	prevRunState := c.state.DeepCopy()
-
 	graph, graphDiags := c.Graph(GraphTypePlanRefreshOnly, nil)
 	diags = diags.Append(graphDiags)
 	if graphDiags.HasErrors() {

@@ -730,7 +768,7 @@ func (c *Context) refreshOnlyPlan() (*plans.Plan, tfdiags.Diagnostics) {
 	plan := &plans.Plan{
 		UIMode:       plans.RefreshOnlyMode,
 		Changes:      c.changes,
-		PrevRunState: prevRunState,
+		PrevRunState: c.prevRunState.DeepCopy(),
 	}

 	// If the graph builder and graph nodes correctly obeyed our directive

@@ -923,6 +961,7 @@ func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalk
 func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker {
 	var state *states.SyncState
 	var refreshState *states.SyncState
+	var prevRunState *states.SyncState

 	switch operation {
 	case walkValidate:

@@ -930,12 +969,14 @@ func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker {
 		state = states.NewState().SyncWrapper()

 		// validate currently uses the plan graph, so we have to populate the
-		// refreshState.
+		// refreshState and the prevRunState.
 		refreshState = states.NewState().SyncWrapper()
+		prevRunState = states.NewState().SyncWrapper()

-	case walkPlan:
+	case walkPlan, walkPlanDestroy:
 		state = c.state.SyncWrapper()
 		refreshState = c.refreshState.SyncWrapper()
+		prevRunState = c.prevRunState.SyncWrapper()

 	default:
 		state = c.state.SyncWrapper()

@@ -945,6 +986,7 @@ func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker {
 		Context:          c,
 		State:            state,
 		RefreshState:     refreshState,
+		PrevRunState:     prevRunState,
 		Changes:          c.changes.SyncWrapper(),
 		InstanceExpander: instances.NewExpander(),
 		Operation:        operation,
@@ -16,9 +16,10 @@ import (
 )

 func TestContext2Plan_removedDuringRefresh(t *testing.T) {
-	// The resource was added to state but actually failed to create and was
-	// left tainted. This should be removed during plan and result in a Create
-	// action.
+	// This tests the situation where an object tracked in the previous run
+	// state has been deleted outside of Terraform, which we should detect
+	// during the refresh step and thus ultimately produce a plan to recreate
+	// the object, since it's still present in the configuration.
 	m := testModuleInline(t, map[string]string{
 		"main.tf": `
 resource "test_object" "a" {
@ -27,15 +28,41 @@ resource "test_object" "a" {
|
||||
})
|
||||
|
||||
p := simpleMockProvider()
|
||||
p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
|
||||
Provider: providers.Schema{Block: simpleTestSchema()},
|
||||
ResourceTypes: map[string]providers.Schema{
|
||||
"test_object": {
|
||||
Block: &configschema.Block{
|
||||
Attributes: map[string]*configschema.Attribute{
|
||||
"arg": {Type: cty.String, Optional: true},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
|
||||
resp.NewState = cty.NullVal(req.PriorState.Type())
|
||||
return resp
|
||||
}
|
||||
p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) {
|
||||
// We should've been given the prior state JSON as our input to upgrade.
|
||||
if !bytes.Contains(req.RawStateJSON, []byte("previous_run")) {
|
||||
t.Fatalf("UpgradeResourceState request doesn't contain the previous run object\n%s", req.RawStateJSON)
|
||||
}
|
||||
|
||||
// We'll put something different in "arg" as part of upgrading, just
|
||||
// so that we can verify below that PrevRunState contains the upgraded
|
||||
// (but NOT refreshed) version of the object.
|
||||
resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{
|
||||
"arg": cty.StringVal("upgraded"),
|
||||
})
|
||||
return resp
|
||||
}
|
||||
|
||||
addr := mustResourceInstanceAddr("test_object.a")
|
||||
state := states.BuildState(func(s *states.SyncState) {
|
||||
s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{
|
||||
AttrsJSON: []byte(`{"test_string":"foo"}`),
|
||||
AttrsJSON: []byte(`{"arg":"previous_run"}`),
|
||||
Status: states.ObjectTainted,
|
||||
}, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`))
|
||||
})
|
||||
@ -53,6 +80,37 @@ resource "test_object" "a" {
|
||||
t.Fatal(diags.Err())
|
||||
}
|
||||
|
||||
if !p.UpgradeResourceStateCalled {
|
||||
t.Errorf("Provider's UpgradeResourceState wasn't called; should've been")
|
||||
}
|
||||
if !p.ReadResourceCalled {
|
||||
t.Errorf("Provider's ReadResource wasn't called; should've been")
|
||||
}
|
||||
|
||||
// The object should be absent from the plan's prior state, because that
|
||||
// records the result of refreshing.
|
||||
if got := plan.PriorState.ResourceInstance(addr); got != nil {
|
||||
t.Errorf(
|
||||
"instance %s is in the prior state after planning; should've been removed\n%s",
|
||||
addr, spew.Sdump(got),
|
||||
)
|
||||
}
|
||||
|
||||
// However, the object should still be in the PrevRunState, because
|
||||
// that reflects what we believed to exist before refreshing.
|
||||
if got := plan.PrevRunState.ResourceInstance(addr); got == nil {
|
||||
t.Errorf(
|
||||
"instance %s is missing from the previous run state after planning; should've been preserved",
|
||||
addr,
|
||||
)
|
||||
} else {
|
||||
if !bytes.Contains(got.Current.AttrsJSON, []byte("upgraded")) {
|
||||
t.Fatalf("previous run state has non-upgraded object\n%s", got.Current.AttrsJSON)
|
||||
}
|
||||
}
|
||||
|
||||
// Because the configuration still mentions test_object.a, we should've
|
||||
// planned to recreate it in order to fix the drift.
|
||||
for _, c := range plan.Changes.Resources {
|
||||
if c.Action != plans.Create {
|
||||
t.Fatalf("expected Create action for missing %s, got %s", c.Addr, c.Action)
|
||||
@ -337,6 +395,127 @@ resource "test_resource" "b" {
|
||||
}
|
||||
}
|
||||
|
||||
func TestContext2Plan_destroyWithRefresh(t *testing.T) {
|
||||
m := testModuleInline(t, map[string]string{
|
||||
"main.tf": `
|
||||
resource "test_object" "a" {
|
||||
}
|
||||
`,
|
||||
})
|
||||
|
||||
p := simpleMockProvider()
|
||||
p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
|
||||
Provider: providers.Schema{Block: simpleTestSchema()},
|
||||
ResourceTypes: map[string]providers.Schema{
|
||||
"test_object": {
|
||||
Block: &configschema.Block{
|
||||
Attributes: map[string]*configschema.Attribute{
|
||||
"arg": {Type: cty.String, Optional: true},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
|
||||
newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) {
|
||||
if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "arg"}) {
|
||||
return cty.StringVal("current"), nil
|
||||
}
|
||||
return v, nil
|
||||
})
|
||||
if err != nil {
|
||||
// shouldn't get here
|
||||
t.Fatalf("ReadResourceFn transform failed")
|
||||
return providers.ReadResourceResponse{}
|
||||
}
|
||||
return providers.ReadResourceResponse{
|
||||
NewState: newVal,
|
||||
}
|
||||
}
|
||||
p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) {
|
||||
t.Logf("UpgradeResourceState %s", req.RawStateJSON)
|
||||
|
||||
// In the destroy-with-refresh codepath we end up calling
|
||||
// UpgradeResourceState twice, because we do so once during refreshing
|
||||
// (as part making a normal plan) and then again during the plan-destroy
|
||||
// walk. The second call recieves the result of the earlier refresh,
|
||||
// so we need to tolerate both "before" and "current" as possible
|
||||
// inputs here.
|
||||
if !bytes.Contains(req.RawStateJSON, []byte("before")) {
|
||||
if !bytes.Contains(req.RawStateJSON, []byte("current")) {
|
||||
t.Fatalf("UpgradeResourceState request doesn't contain the 'before' object or the 'current' object\n%s", req.RawStateJSON)
|
||||
}
|
||||
}
|
||||
|
||||
// We'll put something different in "arg" as part of upgrading, just
|
||||
// so that we can verify below that PrevRunState contains the upgraded
|
||||
// (but NOT refreshed) version of the object.
|
||||
resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{
|
||||
"arg": cty.StringVal("upgraded"),
|
||||
})
|
||||
return resp
|
||||
}
|
||||
|
||||
addr := mustResourceInstanceAddr("test_object.a")
|
||||
state := states.BuildState(func(s *states.SyncState) {
|
||||
s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{
|
||||
AttrsJSON: []byte(`{"arg":"before"}`),
|
||||
Status: states.ObjectReady,
|
||||
}, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`))
|
||||
})
|
||||
|
||||
ctx := testContext2(t, &ContextOpts{
|
||||
Config: m,
|
||||
State: state,
|
||||
Providers: map[addrs.Provider]providers.Factory{
|
||||
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
|
||||
},
|
||||
PlanMode: plans.DestroyMode,
|
||||
SkipRefresh: false,
|
||||
})
|
||||
|
||||
plan, diags := ctx.Plan()
|
||||
if diags.HasErrors() {
|
||||
t.Fatal(diags.Err())
|
||||
}
|
||||
|
||||
if !p.UpgradeResourceStateCalled {
|
||||
t.Errorf("Provider's UpgradeResourceState wasn't called; should've been")
|
||||
}
|
||||
if !p.ReadResourceCalled {
|
||||
t.Errorf("Provider's ReadResource wasn't called; should've been")
|
||||
}
|
||||
|
||||
if plan.PriorState == nil {
|
||||
t.Fatal("missing plan state")
|
||||
}
|
||||
|
||||
for _, c := range plan.Changes.Resources {
|
||||
if c.Action != plans.Delete {
|
||||
t.Errorf("unexpected %s change for %s", c.Action, c.Addr)
|
||||
}
|
||||
}
|
||||
|
||||
if instState := plan.PrevRunState.ResourceInstance(addr); instState == nil {
|
||||
t.Errorf("%s has no previous run state at all after plan", addr)
|
||||
} else {
|
||||
if instState.Current == nil {
|
||||
t.Errorf("%s has no current object in the previous run state", addr)
|
||||
} else if got, want := instState.Current.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) {
|
||||
t.Errorf("%s has wrong previous run state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want)
|
||||
}
|
||||
}
|
||||
if instState := plan.PriorState.ResourceInstance(addr); instState == nil {
|
||||
t.Errorf("%s has no prior state at all after plan", addr)
|
||||
} else {
|
||||
if instState.Current == nil {
|
||||
t.Errorf("%s has no current object in the prior state", addr)
|
||||
} else if got, want := instState.Current.AttrsJSON, `"current"`; !bytes.Contains(got, []byte(want)) {
|
||||
t.Errorf("%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestContext2Plan_destroySkipRefresh(t *testing.T) {
|
||||
m := testModuleInline(t, map[string]string{
|
||||
"main.tf": `
|
||||
@ -346,11 +525,44 @@ resource "test_object" "a" {
|
||||
})
|
||||
|
||||
p := simpleMockProvider()
|
||||
p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
|
||||
Provider: providers.Schema{Block: simpleTestSchema()},
|
||||
ResourceTypes: map[string]providers.Schema{
|
||||
"test_object": {
|
||||
Block: &configschema.Block{
|
||||
Attributes: map[string]*configschema.Attribute{
|
||||
"arg": {Type: cty.String, Optional: true},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
|
||||
t.Helper()
|
||||
t.Errorf("unexpected call to ReadResource")
|
||||
resp.NewState = req.PriorState
|
||||
return resp
|
||||
}
|
||||
p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) {
|
||||
t.Logf("UpgradeResourceState %s", req.RawStateJSON)
|
||||
// We should've been given the prior state JSON as our input to upgrade.
|
||||
if !bytes.Contains(req.RawStateJSON, []byte("before")) {
|
||||
t.Fatalf("UpgradeResourceState request doesn't contain the 'before' object\n%s", req.RawStateJSON)
|
||||
}
|
||||
|
||||
// We'll put something different in "arg" as part of upgrading, just
|
||||
// so that we can verify below that PrevRunState contains the upgraded
|
||||
// (but NOT refreshed) version of the object.
|
||||
resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{
|
||||
"arg": cty.StringVal("upgraded"),
|
||||
})
|
||||
return resp
|
||||
}
|
||||
|
||||
addr := mustResourceInstanceAddr("test_object.a")
|
||||
state := states.BuildState(func(s *states.SyncState) {
|
||||
s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{
|
||||
AttrsJSON: []byte(`{"test_string":"foo"}`),
|
||||
AttrsJSON: []byte(`{"arg":"before"}`),
|
||||
Status: states.ObjectReady,
|
||||
}, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`))
|
||||
})
|
||||
@ -370,6 +582,13 @@ resource "test_object" "a" {
|
||||
t.Fatal(diags.Err())
|
||||
}
|
||||
|
||||
if !p.UpgradeResourceStateCalled {
|
||||
t.Errorf("Provider's UpgradeResourceState wasn't called; should've been")
|
||||
}
|
||||
if p.ReadResourceCalled {
|
||||
t.Errorf("Provider's ReadResource was called; shouldn't have been")
|
||||
}
|
||||
|
||||
if plan.PriorState == nil {
|
||||
t.Fatal("missing plan state")
|
||||
}
|
||||
@ -379,6 +598,27 @@ resource "test_object" "a" {
|
||||
t.Errorf("unexpected %s change for %s", c.Action, c.Addr)
|
||||
}
|
||||
}
|
||||
|
||||
if instState := plan.PrevRunState.ResourceInstance(addr); instState == nil {
|
||||
t.Errorf("%s has no previous run state at all after plan", addr)
|
||||
} else {
|
||||
if instState.Current == nil {
|
||||
t.Errorf("%s has no current object in the previous run state", addr)
|
||||
} else if got, want := instState.Current.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) {
|
||||
t.Errorf("%s has wrong previous run state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want)
|
||||
}
|
||||
}
|
||||
if instState := plan.PriorState.ResourceInstance(addr); instState == nil {
|
||||
t.Errorf("%s has no prior state at all after plan", addr)
|
||||
} else {
|
||||
if instState.Current == nil {
|
||||
t.Errorf("%s has no current object in the prior state", addr)
|
||||
} else if got, want := instState.Current.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) {
|
||||
// NOTE: The prior state should still have been _upgraded_, even
|
||||
// though we skipped running refresh after upgrading it.
|
||||
t.Errorf("%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestContext2Plan_unmarkingSensitiveAttributeForOutput(t *testing.T) {
|
||||
@ -492,27 +732,39 @@ provider "test" {
|
||||
func TestContext2Plan_refreshOnlyMode(t *testing.T) {
|
||||
addr := mustResourceInstanceAddr("test_object.a")
|
||||
|
||||
p := simpleMockProvider()
|
||||
|
||||
// The configuration, the prior state, and the refresh result intentionally
|
||||
// have different values for "test_string" so we can observe that the
|
||||
// refresh took effect but the configuration change wasn't considered.
|
||||
m := testModuleInline(t, map[string]string{
|
||||
"main.tf": `
|
||||
resource "test_object" "a" {
|
||||
test_string = "after"
|
||||
arg = "after"
|
||||
}
|
||||
`,
|
||||
})
|
||||
state := states.BuildState(func(s *states.SyncState) {
|
||||
s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{
|
||||
AttrsJSON: []byte(`{"test_string":"before"}`),
|
||||
AttrsJSON: []byte(`{"arg":"before"}`),
|
||||
Status: states.ObjectReady,
|
||||
}, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`))
|
||||
})
|
||||
|
||||
p := simpleMockProvider()
|
||||
p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
|
||||
Provider: providers.Schema{Block: simpleTestSchema()},
|
||||
ResourceTypes: map[string]providers.Schema{
|
||||
"test_object": {
|
||||
Block: &configschema.Block{
|
||||
Attributes: map[string]*configschema.Attribute{
|
||||
"arg": {Type: cty.String, Optional: true},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse {
|
||||
newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) {
|
||||
if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "test_string"}) {
|
||||
if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "arg"}) {
|
||||
return cty.StringVal("current"), nil
|
||||
}
|
||||
return v, nil
|
||||
@@ -526,6 +778,20 @@ func TestContext2Plan_refreshOnlyMode(t *testing.T) {
 			NewState: newVal,
 		}
 	}
+	p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) {
+		// We should've been given the prior state JSON as our input to upgrade.
+		if !bytes.Contains(req.RawStateJSON, []byte("before")) {
+			t.Fatalf("UpgradeResourceState request doesn't contain the 'before' object\n%s", req.RawStateJSON)
+		}
+
+		// We'll put something different in "arg" as part of upgrading, just
+		// so that we can verify below that PrevRunState contains the upgraded
+		// (but NOT refreshed) version of the object.
+		resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{
+			"arg": cty.StringVal("upgraded"),
+		})
+		return resp
+	}

 	ctx := testContext2(t, &ContextOpts{
 		Config: m,
@@ -541,20 +807,36 @@ func TestContext2Plan_refreshOnlyMode(t *testing.T) {
 		t.Fatalf("unexpected errors\n%s", diags.Err().Error())
 	}

-	if got, want := len(plan.Changes.Resources), 0; got != want {
-		t.Fatalf("plan contains resource changes; want none\n%s", spew.Sdump(plan.Changes.Resources))
+	if !p.UpgradeResourceStateCalled {
+		t.Errorf("Provider's UpgradeResourceState wasn't called; should've been")
+	}
+	if !p.ReadResourceCalled {
+		t.Errorf("Provider's ReadResource wasn't called; should've been")
+	}
+
+	if got, want := len(plan.Changes.Resources), 0; got != want {
+		t.Errorf("plan contains resource changes; want none\n%s", spew.Sdump(plan.Changes.Resources))
 	}

-	state = plan.PriorState
-	instState := state.ResourceInstance(addr)
-	if instState == nil {
-		t.Fatalf("%s has no state at all after plan", addr)
-	}
-	if instState.Current == nil {
-		t.Fatalf("%s has no current object after plan", addr)
-	}
-	if got, want := instState.Current.AttrsJSON, `"current"`; !bytes.Contains(got, []byte(want)) {
-		t.Fatalf("%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want)
-	}
+	if instState := plan.PriorState.ResourceInstance(addr); instState == nil {
+		t.Errorf("%s has no prior state at all after plan", addr)
+	} else {
+		if instState.Current == nil {
+			t.Errorf("%s has no current object after plan", addr)
+		} else if got, want := instState.Current.AttrsJSON, `"current"`; !bytes.Contains(got, []byte(want)) {
+			// Should've saved the result of refreshing
+			t.Errorf("%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want)
+		}
+	}
+	if instState := plan.PrevRunState.ResourceInstance(addr); instState == nil {
+		t.Errorf("%s has no previous run state at all after plan", addr)
+	} else {
+		if instState.Current == nil {
+			t.Errorf("%s has no current object in the previous run state", addr)
+		} else if got, want := instState.Current.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) {
+			// Should've saved the result of upgrading
+			t.Errorf("%s has wrong previous run state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want)
+		}
+	}
 }
@@ -151,6 +151,12 @@ type EvalContext interface {
 	// values.
 	RefreshState() *states.SyncState

+	// PrevRunState returns a wrapper object that provides safe concurrent
+	// access to the state which represents the result of the previous run,
+	// updated only so that object data conforms to current schemas for
+	// meaningful comparison with RefreshState.
+	PrevRunState() *states.SyncState
+
 	// InstanceExpander returns a helper object for tracking the expansion of
 	// graph nodes during the plan phase in response to "count" and "for_each"
 	// arguments.
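A sketch of how a plan-phase node could record a schema-upgraded object through this new accessor; absAddr, upgradedObjSrc, and providerAddr are placeholders, and real nodes go through the writeResourceInstanceState helper shown later in this commit rather than writing to the SyncState directly:

// Save the upgraded (but deliberately not refreshed) object, so the previous
// run state ends up schema-compatible with the refresh state.
prevRun := ctx.PrevRunState()
prevRun.SetResourceInstanceCurrent(absAddr, upgradedObjSrc, providerAddr)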
@@ -72,6 +72,7 @@ type BuiltinEvalContext struct {
 	ChangesValue          *plans.ChangesSync
 	StateValue            *states.SyncState
 	RefreshStateValue     *states.SyncState
+	PrevRunStateValue     *states.SyncState
 	InstanceExpanderValue *instances.Expander
 }

@@ -359,6 +360,10 @@ func (ctx *BuiltinEvalContext) RefreshState() *states.SyncState {
 	return ctx.RefreshStateValue
 }

+func (ctx *BuiltinEvalContext) PrevRunState() *states.SyncState {
+	return ctx.PrevRunStateValue
+}
+
 func (ctx *BuiltinEvalContext) InstanceExpander() *instances.Expander {
 	return ctx.InstanceExpanderValue
 }
@@ -125,6 +125,9 @@ type MockEvalContext struct {
 	RefreshStateCalled bool
 	RefreshStateState  *states.SyncState

+	PrevRunStateCalled bool
+	PrevRunStateState  *states.SyncState
+
 	InstanceExpanderCalled   bool
 	InstanceExpanderExpander *instances.Expander
 }

@@ -339,6 +342,11 @@ func (c *MockEvalContext) RefreshState() *states.SyncState {
 	return c.RefreshStateState
 }

+func (c *MockEvalContext) PrevRunState() *states.SyncState {
+	c.PrevRunStateCalled = true
+	return c.PrevRunStateState
+}
+
 func (c *MockEvalContext) InstanceExpander() *instances.Expander {
 	c.InstanceExpanderCalled = true
 	return c.InstanceExpanderExpander
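For tests, the new field means a mock context should carry three independent state copies, so writes through one wrapper cannot alias another. A small sketch (the helper name is invented; the pattern matches the orphan-node test updated at the end of this commit):

func newThreeStateMockEvalContext(state *states.State) *MockEvalContext {
	// Each incarnation gets an independent copy so writes can't alias.
	return &MockEvalContext{
		StateState:        state.SyncWrapper(),
		RefreshStateState: state.DeepCopy().SyncWrapper(),
		PrevRunStateState: state.DeepCopy().SyncWrapper(),
	}
}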
@@ -36,6 +36,11 @@ type DestroyPlanGraphBuilder struct {

 	// Validate will do structural validation of the graph.
 	Validate bool
+
+	// If set, skipRefresh will cause us to skip refreshing any existing
+	// resource instances as part of our planning. This will cause us to fail
+	// to detect if an object has already been deleted outside of Terraform.
+	skipRefresh bool
 }

 // See GraphBuilder

@@ -52,6 +57,7 @@ func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
 	concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex {
 		return &NodePlanDestroyableResourceInstance{
 			NodeAbstractResourceInstance: a,
+			skipRefresh:                  b.skipRefresh,
 		}
 	}
 	concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex {
@@ -25,6 +25,7 @@ type ContextGraphWalker struct {
 	Context          *Context
 	State            *states.SyncState   // Used for safe concurrent access to state
 	RefreshState     *states.SyncState   // Used for safe concurrent access to state
+	PrevRunState     *states.SyncState   // Used for safe concurrent access to state
 	Changes          *plans.ChangesSync  // Used for safe concurrent writes to changes
 	InstanceExpander *instances.Expander // Tracks our gradual expansion of module and resource instances
 	Operation        walkOperation

@@ -95,6 +96,7 @@ func (w *ContextGraphWalker) EvalContext() EvalContext {
 		ChangesValue:       w.Changes,
 		StateValue:         w.State,
 		RefreshStateValue:  w.RefreshState,
+		PrevRunStateValue:  w.PrevRunState,
 		Evaluator:          evaluator,
 		VariableValues:     w.variableValues,
 		VariableValuesLock: &w.variableValuesLock,
@@ -256,8 +256,11 @@ type phaseState int
 const (
 	workingState phaseState = iota
 	refreshState
+	prevRunState
 )

+//go:generate go run golang.org/x/tools/cmd/stringer -type phaseState
+
 // writeResourceInstanceState saves the given object as the current object for
 // the selected resource instance.
 //

@@ -276,11 +279,23 @@ func (n *NodeAbstractResourceInstance) writeResourceInstanceState(ctx EvalContex

 	var state *states.SyncState
 	switch targetState {
 	case workingState:
 		state = ctx.State()
 	case refreshState:
 		log.Printf("[TRACE] writeResourceInstanceState: using RefreshState for %s", absAddr)
 		state = ctx.RefreshState()
+	case prevRunState:
+		state = ctx.PrevRunState()
 	default:
-		state = ctx.State()
+		panic(fmt.Sprintf("unsupported phaseState value %#v", targetState))
 	}
+	if state == nil {
+		// Should not happen, because we shouldn't ever try to write to
+		// a state that isn't applicable to the current operation.
+		// (We can also get in here for unit tests which are using
+		// EvalContextMock but not populating PrevRunStateState with
+		// a suitable state object.)
+		return fmt.Errorf("state of type %s is not applicable to the current operation; this is a bug in Terraform", targetState)
+	}

 	if obj == nil || obj.Value.IsNull() {
@@ -430,6 +445,7 @@ func (n *NodeAbstractResourceInstance) writeChange(ctx EvalContext, change *plan
 func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, state *states.ResourceInstanceObject) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics
 	absAddr := n.Addr
+	log.Printf("[TRACE] NodeAbstractResourceInstance.refresh for %s", absAddr)
 	provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
 	if err != nil {
 		return state, diags.Append(err)
@@ -354,6 +354,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {

 		return &NodePlannableResourceInstanceOrphan{
 			NodeAbstractResourceInstance: a,
+			skipRefresh:                  n.skipRefresh,
 		}
 	}
@@ -11,6 +11,9 @@ import (
 // to be planned for destruction.
 type NodePlanDestroyableResourceInstance struct {
 	*NodeAbstractResourceInstance

+	// skipRefresh indicates that we should skip refreshing
+	skipRefresh bool
 }

 var (

@@ -48,6 +51,26 @@ func (n *NodePlanDestroyableResourceInstance) Execute(ctx EvalContext, op walkOp
 		return diags
 	}

+	// If we are in the "skip refresh" mode then we will have skipped over our
+	// usual opportunity to update the previous run state and refresh state
+	// with the result of any provider schema upgrades, so we'll compensate
+	// by doing that here.
+	//
+	// NOTE: this is coupled with logic in Context.destroyPlan which skips
+	// running a normal plan walk when refresh is disabled. These two
+	// conditionals must agree (be exactly opposite) in order to get the
+	// correct behavior in both cases.
+	if n.skipRefresh {
+		diags = diags.Append(n.writeResourceInstanceState(ctx, state, prevRunState))
+		if diags.HasErrors() {
+			return diags
+		}
+		diags = diags.Append(n.writeResourceInstanceState(ctx, state, refreshState))
+		if diags.HasErrors() {
+			return diags
+		}
+	}
+
 	change, destroyPlanDiags := n.planDestroy(ctx, state, "")
 	diags = diags.Append(destroyPlanDiags)
 	if diags.HasErrors() {
@@ -77,6 +77,19 @@ func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (di
 		return diags
 	}

+	// We'll save a snapshot of what we just read from the state into the
+	// prevRunState, which will capture the result read in the previous
+	// run, possibly tweaked by any upgrade steps that
+	// readResourceInstanceState might've made.
+	// However, note that we don't have any explicit mechanism for upgrading
+	// data resource results as we do for managed resources, and so the
+	// prevRunState might not conform to the current schema if the
+	// previous run was with a different provider version.
+	diags = diags.Append(n.writeResourceInstanceState(ctx, state, prevRunState))
+	if diags.HasErrors() {
+		return diags
+	}
+
 	diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
 	if diags.HasErrors() {
 		return diags

@@ -127,6 +140,22 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
 		return diags
 	}

+	// We'll save a snapshot of what we just read from the state into the
+	// prevRunState before we do anything else, since this will capture the
+	// result of any schema upgrading that readResourceInstanceState just did,
+	// but not include any out-of-band changes we might detect in the
+	// refresh step below.
+	diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, prevRunState))
+	if diags.HasErrors() {
+		return diags
+	}
+	// Also the refreshState, because that should still reflect schema upgrades
+	// even if it doesn't reflect upstream changes.
+	diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
+	if diags.HasErrors() {
+		return diags
+	}
+
 	// In 0.13 we could be refreshing a resource with no config.
 	// We should be operating on managed resource, but check here to be certain
 	if n.Config == nil || n.Config.Managed == nil {
@@ -78,14 +78,25 @@ func (n *NodePlannableResourceInstanceOrphan) dataResourceExecute(ctx EvalContex
 func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
 	addr := n.ResourceInstanceAddr()

-	// Declare a bunch of variables that are used for state during
-	// evaluation. These are written to by-address below.
 	oldState, readDiags := n.readResourceInstanceState(ctx, addr)
 	diags = diags.Append(readDiags)
 	if diags.HasErrors() {
 		return diags
 	}

+	// Note any upgrades that readResourceInstanceState might've done in the
+	// prevRunState, so that it'll conform to current schema.
+	diags = diags.Append(n.writeResourceInstanceState(ctx, oldState, prevRunState))
+	if diags.HasErrors() {
+		return diags
+	}
+	// Also the refreshState, because that should still reflect schema upgrades
+	// even if not refreshing.
+	diags = diags.Append(n.writeResourceInstanceState(ctx, oldState, refreshState))
+	if diags.HasErrors() {
+		return diags
+	}
+
 	if !n.skipRefresh {
 		// Refresh this instance even though it is going to be destroyed, in
 		// order to catch missing resources. If this is a normal plan,
@@ -33,7 +33,8 @@ func TestNodeResourcePlanOrphanExecute(t *testing.T) {
 	p := simpleMockProvider()
 	ctx := &MockEvalContext{
 		StateState:               state.SyncWrapper(),
-		RefreshStateState:        state.SyncWrapper(),
+		RefreshStateState:        state.DeepCopy().SyncWrapper(),
+		PrevRunStateState:        state.DeepCopy().SyncWrapper(),
 		InstanceExpanderExpander: instances.NewExpander(),
 		ProviderProvider:         p,
 		ProviderSchemaSchema: &ProviderSchema{
terraform/phasestate_string.go (new file, 25 lines)
@@ -0,0 +1,25 @@
+// Code generated by "stringer -type phaseState"; DO NOT EDIT.
+
+package terraform
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[workingState-0]
+	_ = x[refreshState-1]
+	_ = x[prevRunState-2]
+}
+
+const _phaseState_name = "workingStaterefreshStateprevRunState"
+
+var _phaseState_index = [...]uint8{0, 12, 24, 36}
+
+func (i phaseState) String() string {
+	if i < 0 || i >= phaseState(len(_phaseState_index)-1) {
+		return "phaseState(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _phaseState_name[_phaseState_index[i]:_phaseState_index[i+1]]
+}
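This generated String method is what makes phaseState values print readably, for example in the fmt.Errorf call added to writeResourceInstanceState above. A trivial illustration (a hypothetical test, not part of the commit):

func TestPhaseStateString(t *testing.T) {
	// The generated index slices "workingStaterefreshStateprevRunState"
	// back into the original constant names.
	if got, want := prevRunState.String(), "prevRunState"; got != want {
		t.Errorf("wrong String() result: got %q, want %q", got, want)
	}
}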