mirror of
https://github.com/opentofu/opentofu.git
synced 2025-01-26 16:36:26 -06:00
core: Record move result information in the plan
Here we wire through the "move results" into the graph walk data structures so that all of the nodes which produce plans.ResourceInstanceChange values can capture the "PrevRunAddr" for each resource instance. This doesn't quite work yet, because the logic in Context.Plan isn't actually correct and so the updated state from refactoring.ApplyMoves isn't actually visible as the "previous run state". For that reason, the context test in this commit is currently skipped, with the intent of re-enabling it once the updated state is properly propagating into the plan graph walk and thus we can actually react to the result of the move while choosing actions for those addresses.
This commit is contained in:
parent
22b36d1f4c
commit
4faac6ee43
@ -2,9 +2,11 @@ package refactoring
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform/internal/addrs"
|
||||
"github.com/hashicorp/terraform/internal/dag"
|
||||
"github.com/hashicorp/terraform/internal/logging"
|
||||
"github.com/hashicorp/terraform/internal/states"
|
||||
)
|
||||
|
||||
@ -53,6 +55,13 @@ func ApplyMoves(stmts []MoveStatement, state *states.State) map[addrs.UniqueKey]
|
||||
}
|
||||
}
|
||||
|
||||
if startNodes.Len() == 0 {
|
||||
log.Println("[TRACE] refactoring.ApplyMoves: No 'moved' statements to consider in this configuration")
|
||||
return results
|
||||
}
|
||||
|
||||
log.Printf("[TRACE] refactoring.ApplyMoves: Processing 'moved' statements in the configuration\n%s", logging.Indent(g.String()))
|
||||
|
||||
g.ReverseDepthFirstWalk(startNodes, func(v dag.Vertex, depth int) error {
|
||||
stmt := v.(*MoveStatement)
|
||||
|
||||
@ -69,6 +78,7 @@ func ApplyMoves(stmts []MoveStatement, state *states.State) map[addrs.UniqueKey]
|
||||
// For a module endpoint we just try the module address
|
||||
// directly.
|
||||
if newAddr, matches := modAddr.MoveDestination(stmt.From, stmt.To); matches {
|
||||
log.Printf("[TRACE] refactoring.ApplyMoves: %s has moved to %s", modAddr, newAddr)
|
||||
// We need to visit all of the resource instances in the
|
||||
// module and record them individually as results.
|
||||
for _, rs := range ms.Resources {
|
||||
@ -94,6 +104,7 @@ func ApplyMoves(stmts []MoveStatement, state *states.State) map[addrs.UniqueKey]
|
||||
for _, rs := range ms.Resources {
|
||||
rAddr := rs.Addr
|
||||
if newAddr, matches := rAddr.MoveDestination(stmt.From, stmt.To); matches {
|
||||
log.Printf("[TRACE] refactoring.ApplyMoves: resource %s has moved to %s", rAddr, newAddr)
|
||||
for key := range rs.Instances {
|
||||
oldInst := rAddr.Instance(key)
|
||||
newInst := newAddr.Instance(key)
|
||||
@ -110,6 +121,7 @@ func ApplyMoves(stmts []MoveStatement, state *states.State) map[addrs.UniqueKey]
|
||||
for key := range rs.Instances {
|
||||
iAddr := rAddr.Instance(key)
|
||||
if newAddr, matches := iAddr.MoveDestination(stmt.From, stmt.To); matches {
|
||||
log.Printf("[TRACE] refactoring.ApplyMoves: resource instance %s has moved to %s", iAddr, newAddr)
|
||||
result := MoveResult{From: iAddr, To: newAddr}
|
||||
results[iAddr.UniqueKey()] = result
|
||||
results[newAddr.UniqueKey()] = result
|
||||
|
@ -129,6 +129,20 @@ type Context struct {
|
||||
refreshState *states.State
|
||||
prevRunState *states.State
|
||||
|
||||
// NOTE: If you're considering adding something new here, consider first
|
||||
// whether it'd work to add it to type graphWalkOpts instead, possibly by
|
||||
// adding new arguments to one of the exported operation methods, to scope
|
||||
// it only to a particular operation rather than having it survive from one
|
||||
// operation to the next as global mutable state.
|
||||
//
|
||||
// Historically we used fields here as a bit of a dumping ground for
|
||||
// data that needed to ambiently pass between methods of Context, but
|
||||
// that has tended to cause surprising misbehavior when data from one
|
||||
// walk inadvertently bleeds into another walk against the same context.
|
||||
// Perhaps one day we'll move changes, state, refreshState, and prevRunState
|
||||
// to graphWalkOpts too. Ideally there shouldn't be anything in here which
|
||||
// changes after NewContext returns.
|
||||
|
||||
hooks []Hook
|
||||
components contextComponentFactory
|
||||
schemas *Schemas
|
||||
@ -491,7 +505,7 @@ func (c *Context) Eval(path addrs.ModuleInstance) (*lang.Scope, tfdiags.Diagnost
|
||||
diags = diags.Append(graphDiags)
|
||||
if !diags.HasErrors() {
|
||||
var walkDiags tfdiags.Diagnostics
|
||||
walker, walkDiags = c.walk(graph, walkEval)
|
||||
walker, walkDiags = c.walk(graph, walkEval, &graphWalkOpts{})
|
||||
diags = diags.Append(walker.NonFatalDiagnostics)
|
||||
diags = diags.Append(walkDiags)
|
||||
}
|
||||
@ -500,7 +514,7 @@ func (c *Context) Eval(path addrs.ModuleInstance) (*lang.Scope, tfdiags.Diagnost
|
||||
// If we skipped walking the graph (due to errors) then we'll just
|
||||
// use a placeholder graph walker here, which'll refer to the
|
||||
// unmodified state.
|
||||
walker = c.graphWalker(walkEval)
|
||||
walker = c.graphWalker(walkEval, &graphWalkOpts{})
|
||||
}
|
||||
|
||||
// This is a bit weird since we don't normally evaluate outside of
|
||||
@ -545,7 +559,7 @@ func (c *Context) Apply() (*states.State, tfdiags.Diagnostics) {
|
||||
}
|
||||
|
||||
// Walk the graph
|
||||
walker, walkDiags := c.walk(graph, operation)
|
||||
walker, walkDiags := c.walk(graph, operation, &graphWalkOpts{})
|
||||
diags = diags.Append(walker.NonFatalDiagnostics)
|
||||
diags = diags.Append(walkDiags)
|
||||
|
||||
@ -656,7 +670,7 @@ The -target option is not for routine use, and is provided only for exceptional
|
||||
func (c *Context) plan() (*plans.Plan, tfdiags.Diagnostics) {
|
||||
var diags tfdiags.Diagnostics
|
||||
|
||||
moveStmts, _ := c.prePlanFindAndApplyMoves()
|
||||
moveStmts, moveResults := c.prePlanFindAndApplyMoves()
|
||||
|
||||
graph, graphDiags := c.Graph(GraphTypePlan, nil)
|
||||
diags = diags.Append(graphDiags)
|
||||
@ -665,7 +679,9 @@ func (c *Context) plan() (*plans.Plan, tfdiags.Diagnostics) {
|
||||
}
|
||||
|
||||
// Do the walk
|
||||
walker, walkDiags := c.walk(graph, walkPlan)
|
||||
walker, walkDiags := c.walk(graph, walkPlan, &graphWalkOpts{
|
||||
MoveResults: moveResults,
|
||||
})
|
||||
diags = diags.Append(walker.NonFatalDiagnostics)
|
||||
diags = diags.Append(walkDiags)
|
||||
if walkDiags.HasErrors() {
|
||||
@ -700,7 +716,7 @@ func (c *Context) destroyPlan() (*plans.Plan, tfdiags.Diagnostics) {
|
||||
}
|
||||
c.changes = plans.NewChanges()
|
||||
|
||||
moveStmts, _ := c.prePlanFindAndApplyMoves()
|
||||
moveStmts, moveResults := c.prePlanFindAndApplyMoves()
|
||||
|
||||
// A destroy plan starts by running Refresh to read any pending data
|
||||
// sources, and remove missing managed resources. This is required because
|
||||
@ -734,7 +750,9 @@ func (c *Context) destroyPlan() (*plans.Plan, tfdiags.Diagnostics) {
|
||||
}
|
||||
|
||||
// Do the walk
|
||||
walker, walkDiags := c.walk(graph, walkPlanDestroy)
|
||||
walker, walkDiags := c.walk(graph, walkPlanDestroy, &graphWalkOpts{
|
||||
MoveResults: moveResults,
|
||||
})
|
||||
diags = diags.Append(walker.NonFatalDiagnostics)
|
||||
diags = diags.Append(walkDiags)
|
||||
if walkDiags.HasErrors() {
|
||||
@ -764,7 +782,7 @@ func (c *Context) destroyPlan() (*plans.Plan, tfdiags.Diagnostics) {
|
||||
func (c *Context) refreshOnlyPlan() (*plans.Plan, tfdiags.Diagnostics) {
|
||||
var diags tfdiags.Diagnostics
|
||||
|
||||
moveStmts, _ := c.prePlanFindAndApplyMoves()
|
||||
moveStmts, moveResults := c.prePlanFindAndApplyMoves()
|
||||
|
||||
graph, graphDiags := c.Graph(GraphTypePlanRefreshOnly, nil)
|
||||
diags = diags.Append(graphDiags)
|
||||
@ -773,7 +791,9 @@ func (c *Context) refreshOnlyPlan() (*plans.Plan, tfdiags.Diagnostics) {
|
||||
}
|
||||
|
||||
// Do the walk
|
||||
walker, walkDiags := c.walk(graph, walkPlan)
|
||||
walker, walkDiags := c.walk(graph, walkPlan, &graphWalkOpts{
|
||||
MoveResults: moveResults,
|
||||
})
|
||||
diags = diags.Append(walker.NonFatalDiagnostics)
|
||||
diags = diags.Append(walkDiags)
|
||||
if walkDiags.HasErrors() {
|
||||
@ -918,7 +938,7 @@ func (c *Context) Validate() tfdiags.Diagnostics {
|
||||
}
|
||||
|
||||
// Walk
|
||||
walker, walkDiags := c.walk(graph, walkValidate)
|
||||
walker, walkDiags := c.walk(graph, walkValidate, &graphWalkOpts{})
|
||||
diags = diags.Append(walker.NonFatalDiagnostics)
|
||||
diags = diags.Append(walkDiags)
|
||||
if walkDiags.HasErrors() {
|
||||
@ -991,10 +1011,18 @@ func (c *Context) releaseRun() {
|
||||
c.runContext = nil
|
||||
}
|
||||
|
||||
func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, tfdiags.Diagnostics) {
|
||||
// graphWalkOpts is an assortment of options and inputs we need when
|
||||
// constructing a graph walker.
|
||||
type graphWalkOpts struct {
|
||||
// MoveResults is a table of the results of applying move statements prior
|
||||
// to a plan walk. Irrelevant and totally ignored for non-plan walks.
|
||||
MoveResults map[addrs.UniqueKey]refactoring.MoveResult
|
||||
}
|
||||
|
||||
func (c *Context) walk(graph *Graph, operation walkOperation, opts *graphWalkOpts) (*ContextGraphWalker, tfdiags.Diagnostics) {
|
||||
log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
|
||||
|
||||
walker := c.graphWalker(operation)
|
||||
walker := c.graphWalker(operation, opts)
|
||||
|
||||
// Watch for a stop so we can call the provider Stop() API.
|
||||
watchStop, watchWait := c.watchStop(walker)
|
||||
@ -1009,7 +1037,7 @@ func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalk
|
||||
return walker, diags
|
||||
}
|
||||
|
||||
func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker {
|
||||
func (c *Context) graphWalker(operation walkOperation, opts *graphWalkOpts) *ContextGraphWalker {
|
||||
var state *states.SyncState
|
||||
var refreshState *states.SyncState
|
||||
var prevRunState *states.SyncState
|
||||
@ -1040,6 +1068,7 @@ func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker {
|
||||
PrevRunState: prevRunState,
|
||||
Changes: c.changes.SyncWrapper(),
|
||||
InstanceExpander: instances.NewExpander(),
|
||||
MoveResults: opts.MoveResults,
|
||||
Operation: operation,
|
||||
StopContext: c.runContext,
|
||||
RootVariableValues: c.variables,
|
||||
|
@ -60,7 +60,7 @@ func (c *Context) Import(opts *ImportOpts) (*states.State, tfdiags.Diagnostics)
|
||||
}
|
||||
|
||||
// Walk it
|
||||
_, walkDiags := c.walk(graph, walkImport)
|
||||
_, walkDiags := c.walk(graph, walkImport, &graphWalkOpts{})
|
||||
diags = diags.Append(walkDiags)
|
||||
if walkDiags.HasErrors() {
|
||||
return c.state, diags
|
||||
|
@ -730,6 +730,80 @@ provider "test" {
|
||||
}
|
||||
}
|
||||
|
||||
func TestContext2Plan_movedResourceBasic(t *testing.T) {
|
||||
t.Skip("Context.Plan doesn't properly propagate moves into the prior state yet")
|
||||
|
||||
addrA := mustResourceInstanceAddr("test_object.a")
|
||||
addrB := mustResourceInstanceAddr("test_object.b")
|
||||
m := testModuleInline(t, map[string]string{
|
||||
"main.tf": `
|
||||
resource "test_object" "b" {
|
||||
}
|
||||
|
||||
moved {
|
||||
from = test_object.a
|
||||
to = test_object.b
|
||||
}
|
||||
|
||||
terraform {
|
||||
experiments = [config_driven_move]
|
||||
}
|
||||
`,
|
||||
})
|
||||
|
||||
state := states.BuildState(func(s *states.SyncState) {
|
||||
// The prior state tracks test_object.a, which we should treat as
|
||||
// test_object.b because of the "moved" block in the config.
|
||||
s.SetResourceInstanceCurrent(addrA, &states.ResourceInstanceObjectSrc{
|
||||
AttrsJSON: []byte(`{}`),
|
||||
Status: states.ObjectReady,
|
||||
}, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`))
|
||||
})
|
||||
|
||||
p := simpleMockProvider()
|
||||
ctx := testContext2(t, &ContextOpts{
|
||||
Config: m,
|
||||
State: state,
|
||||
Providers: map[addrs.Provider]providers.Factory{
|
||||
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
|
||||
},
|
||||
ForceReplace: []addrs.AbsResourceInstance{
|
||||
addrA,
|
||||
},
|
||||
})
|
||||
|
||||
plan, diags := ctx.Plan()
|
||||
if diags.HasErrors() {
|
||||
t.Fatalf("unexpected errors\n%s", diags.Err().Error())
|
||||
}
|
||||
|
||||
t.Run(addrA.String(), func(t *testing.T) {
|
||||
instPlan := plan.Changes.ResourceInstance(addrA)
|
||||
if instPlan != nil {
|
||||
t.Fatalf("unexpected plan for %s; should've moved to %s", addrA, addrB)
|
||||
}
|
||||
})
|
||||
t.Run(addrB.String(), func(t *testing.T) {
|
||||
instPlan := plan.Changes.ResourceInstance(addrB)
|
||||
if instPlan == nil {
|
||||
t.Fatalf("no plan for %s at all", addrB)
|
||||
}
|
||||
|
||||
if got, want := instPlan.Addr, addrB; !got.Equal(want) {
|
||||
t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want)
|
||||
}
|
||||
if got, want := instPlan.PrevRunAddr, addrA; !got.Equal(want) {
|
||||
t.Errorf("wrong previous run address\ngot: %s\nwant: %s", got, want)
|
||||
}
|
||||
if got, want := instPlan.Action, plans.NoOp; got != want {
|
||||
t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want)
|
||||
}
|
||||
if got, want := instPlan.ActionReason, plans.ResourceInstanceChangeNoReason; got != want {
|
||||
t.Errorf("wrong action reason\ngot: %s\nwant: %s", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestContext2Plan_refreshOnlyMode(t *testing.T) {
|
||||
addr := mustResourceInstanceAddr("test_object.a")
|
||||
|
||||
|
@ -1305,7 +1305,7 @@ func TestContext2Validate_PlanGraphBuilder(t *testing.T) {
|
||||
t.Fatalf("errors from PlanGraphBuilder: %s", diags.Err())
|
||||
}
|
||||
defer c.acquireRun("validate-test")()
|
||||
walker, diags := c.walk(graph, walkValidate)
|
||||
walker, diags := c.walk(graph, walkValidate, &graphWalkOpts{})
|
||||
if diags.HasErrors() {
|
||||
t.Fatal(diags.Err())
|
||||
}
|
||||
|
@ -9,6 +9,7 @@ import (
|
||||
"github.com/hashicorp/terraform/internal/plans"
|
||||
"github.com/hashicorp/terraform/internal/providers"
|
||||
"github.com/hashicorp/terraform/internal/provisioners"
|
||||
"github.com/hashicorp/terraform/internal/refactoring"
|
||||
"github.com/hashicorp/terraform/internal/states"
|
||||
"github.com/hashicorp/terraform/internal/tfdiags"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
@ -165,6 +166,16 @@ type EvalContext interface {
|
||||
// EvalContext objects for a given configuration.
|
||||
InstanceExpander() *instances.Expander
|
||||
|
||||
// MoveResults returns a map describing the results of handling any
|
||||
// resource instance move statements prior to the graph walk, so that
|
||||
// the graph walk can then record that information appropriately in other
|
||||
// artifacts produced by the graph walk.
|
||||
//
|
||||
// This data structure is created prior to the graph walk and read-only
|
||||
// thereafter, so callers must not modify the returned map or any other
|
||||
// objects accessible through it.
|
||||
MoveResults() map[addrs.UniqueKey]refactoring.MoveResult
|
||||
|
||||
// WithPath returns a copy of the context with the internal path set to the
|
||||
// path argument.
|
||||
WithPath(path addrs.ModuleInstance) EvalContext
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
"github.com/hashicorp/terraform/internal/plans"
|
||||
"github.com/hashicorp/terraform/internal/providers"
|
||||
"github.com/hashicorp/terraform/internal/provisioners"
|
||||
"github.com/hashicorp/terraform/internal/refactoring"
|
||||
"github.com/hashicorp/terraform/version"
|
||||
|
||||
"github.com/hashicorp/terraform/internal/states"
|
||||
@ -74,6 +75,7 @@ type BuiltinEvalContext struct {
|
||||
RefreshStateValue *states.SyncState
|
||||
PrevRunStateValue *states.SyncState
|
||||
InstanceExpanderValue *instances.Expander
|
||||
MoveResultsValue map[addrs.UniqueKey]refactoring.MoveResult
|
||||
}
|
||||
|
||||
// BuiltinEvalContext implements EvalContext
|
||||
@ -367,3 +369,7 @@ func (ctx *BuiltinEvalContext) PrevRunState() *states.SyncState {
|
||||
func (ctx *BuiltinEvalContext) InstanceExpander() *instances.Expander {
|
||||
return ctx.InstanceExpanderValue
|
||||
}
|
||||
|
||||
func (ctx *BuiltinEvalContext) MoveResults() map[addrs.UniqueKey]refactoring.MoveResult {
|
||||
return ctx.MoveResultsValue
|
||||
}
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
"github.com/hashicorp/terraform/internal/plans"
|
||||
"github.com/hashicorp/terraform/internal/providers"
|
||||
"github.com/hashicorp/terraform/internal/provisioners"
|
||||
"github.com/hashicorp/terraform/internal/refactoring"
|
||||
"github.com/hashicorp/terraform/internal/states"
|
||||
"github.com/hashicorp/terraform/internal/tfdiags"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
@ -128,6 +129,9 @@ type MockEvalContext struct {
|
||||
PrevRunStateCalled bool
|
||||
PrevRunStateState *states.SyncState
|
||||
|
||||
MoveResultsCalled bool
|
||||
MoveResultsResults map[addrs.UniqueKey]refactoring.MoveResult
|
||||
|
||||
InstanceExpanderCalled bool
|
||||
InstanceExpanderExpander *instances.Expander
|
||||
}
|
||||
@ -347,6 +351,11 @@ func (c *MockEvalContext) PrevRunState() *states.SyncState {
|
||||
return c.PrevRunStateState
|
||||
}
|
||||
|
||||
func (c *MockEvalContext) MoveResults() map[addrs.UniqueKey]refactoring.MoveResult {
|
||||
c.MoveResultsCalled = true
|
||||
return c.MoveResultsResults
|
||||
}
|
||||
|
||||
func (c *MockEvalContext) InstanceExpander() *instances.Expander {
|
||||
c.InstanceExpanderCalled = true
|
||||
return c.InstanceExpanderExpander
|
||||
|
@ -12,6 +12,7 @@ import (
|
||||
"github.com/hashicorp/terraform/internal/plans"
|
||||
"github.com/hashicorp/terraform/internal/providers"
|
||||
"github.com/hashicorp/terraform/internal/provisioners"
|
||||
"github.com/hashicorp/terraform/internal/refactoring"
|
||||
"github.com/hashicorp/terraform/internal/states"
|
||||
"github.com/hashicorp/terraform/internal/tfdiags"
|
||||
)
|
||||
@ -23,11 +24,12 @@ type ContextGraphWalker struct {
|
||||
|
||||
// Configurable values
|
||||
Context *Context
|
||||
State *states.SyncState // Used for safe concurrent access to state
|
||||
RefreshState *states.SyncState // Used for safe concurrent access to state
|
||||
PrevRunState *states.SyncState // Used for safe concurrent access to state
|
||||
Changes *plans.ChangesSync // Used for safe concurrent writes to changes
|
||||
InstanceExpander *instances.Expander // Tracks our gradual expansion of module and resource instances
|
||||
State *states.SyncState // Used for safe concurrent access to state
|
||||
RefreshState *states.SyncState // Used for safe concurrent access to state
|
||||
PrevRunState *states.SyncState // Used for safe concurrent access to state
|
||||
Changes *plans.ChangesSync // Used for safe concurrent writes to changes
|
||||
InstanceExpander *instances.Expander // Tracks our gradual expansion of module and resource instances
|
||||
MoveResults map[addrs.UniqueKey]refactoring.MoveResult // Read-only record of earlier processing of move statements
|
||||
Operation walkOperation
|
||||
StopContext context.Context
|
||||
RootVariableValues InputValues
|
||||
@ -88,6 +90,7 @@ func (w *ContextGraphWalker) EvalContext() EvalContext {
|
||||
InstanceExpanderValue: w.InstanceExpander,
|
||||
Components: w.Context.components,
|
||||
Schemas: w.Context.schemas,
|
||||
MoveResultsValue: w.MoveResults,
|
||||
ProviderCache: w.providerCache,
|
||||
ProviderInputConfig: w.Context.providerInputConfig,
|
||||
ProviderLock: &w.providerLock,
|
||||
|
@ -393,7 +393,7 @@ func (n *NodeAbstractResourceInstance) planDestroy(ctx EvalContext, currentState
|
||||
// vs. that something being entirely excluded e.g. due to -target.
|
||||
noop := &plans.ResourceInstanceChange{
|
||||
Addr: absAddr,
|
||||
PrevRunAddr: absAddr, // TODO-PrevRunAddr: If this instance was moved/renamed in this run, record its old address
|
||||
PrevRunAddr: n.prevRunAddr(ctx),
|
||||
DeposedKey: deposedKey,
|
||||
Change: plans.Change{
|
||||
Action: plans.NoOp,
|
||||
@ -421,7 +421,7 @@ func (n *NodeAbstractResourceInstance) planDestroy(ctx EvalContext, currentState
|
||||
// help for this one.
|
||||
plan := &plans.ResourceInstanceChange{
|
||||
Addr: absAddr,
|
||||
PrevRunAddr: absAddr, // TODO-PrevRunAddr: If this instance was moved/renamed in this run, record its old address
|
||||
PrevRunAddr: n.prevRunAddr(ctx),
|
||||
DeposedKey: deposedKey,
|
||||
Change: plans.Change{
|
||||
Action: plans.Delete,
|
||||
@ -1066,7 +1066,7 @@ func (n *NodeAbstractResourceInstance) plan(
|
||||
// Update our return plan
|
||||
plan = &plans.ResourceInstanceChange{
|
||||
Addr: n.Addr,
|
||||
PrevRunAddr: n.Addr, // TODO-PrevRunAddr: If this instance was moved/renamed in this run, record its old address
|
||||
PrevRunAddr: n.prevRunAddr(ctx),
|
||||
Private: plannedPrivate,
|
||||
ProviderAddr: n.ResolvedProvider,
|
||||
Change: plans.Change{
|
||||
@ -1528,7 +1528,7 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt
|
||||
// value containing unknowns from PlanDataResourceObject.
|
||||
plannedChange := &plans.ResourceInstanceChange{
|
||||
Addr: n.Addr,
|
||||
PrevRunAddr: n.Addr, // data resources are not refactorable
|
||||
PrevRunAddr: n.prevRunAddr(ctx),
|
||||
ProviderAddr: n.ResolvedProvider,
|
||||
Change: plans.Change{
|
||||
Action: plans.Read,
|
||||
@ -2267,3 +2267,20 @@ func (n *NodeAbstractResourceInstance) apply(
|
||||
return nil, diags
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NodeAbstractResourceInstance) prevRunAddr(ctx EvalContext) addrs.AbsResourceInstance {
|
||||
return resourceInstancePrevRunAddr(ctx, n.Addr)
|
||||
}
|
||||
|
||||
func resourceInstancePrevRunAddr(ctx EvalContext, currentAddr addrs.AbsResourceInstance) addrs.AbsResourceInstance {
|
||||
table := ctx.MoveResults()
|
||||
|
||||
result, ok := table[currentAddr.UniqueKey()]
|
||||
if !ok {
|
||||
// If there's no entry in the table then we'll assume it didn't move
|
||||
// at all, and so its previous address is the same as the current one.
|
||||
return currentAddr
|
||||
}
|
||||
|
||||
return result.From
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user