package terraform

import (
"fmt"
|
2016-09-19 11:28:24 -05:00
|
|
|
"log"
|
2018-08-27 14:03:20 -05:00
|
|
|
|
|
|
|
"github.com/hashicorp/terraform/addrs"
|
|
|
|
"github.com/hashicorp/terraform/configs"
|
2016-09-19 11:28:24 -05:00
|
|
|
)
|
|
|
|
|
|
|
|
// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state
|
|
|
|
// when there is a resource count with zero/one boundary, i.e. fixing
|
|
|
|
// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
|
|
|
|
//
|
|
|
|
// This works on the global state.
|
2018-08-27 14:03:20 -05:00
|
|
|
type EvalCountFixZeroOneBoundaryGlobal struct {
|
|
|
|
Config *configs.Config
|
|
|
|
}
|
2016-09-19 11:28:24 -05:00
|
|
|
|
|
|
|
// TODO: test
|
|
|
|
func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) {
	// We'll temporarily lock the state to grab the modules, then work on each
	// one separately while taking a lock again for each separate resource.
	// This means that if another caller concurrently adds a module here while
	// we're working then we won't update it, but that's no worse than the
	// concurrent writer blocking for our entire fixup process and _then_
	// adding a new module, and in practice the graph node associated with
	// this eval depends on everything else in the graph anyway, so there
	// should not be concurrent writers.
	state := ctx.State().Lock()
	moduleAddrs := make([]addrs.ModuleInstance, 0, len(state.Modules))
	for _, m := range state.Modules {
		moduleAddrs = append(moduleAddrs, m.Addr)
	}
	ctx.State().Unlock()
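	// Now fix up each module in turn. As noted in the comment above, the
	// state is re-locked for each individual resource during the fixup rather
	// than being held for the entire walk.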
	for _, addr := range moduleAddrs {
		cfg := n.Config.DescendentForInstance(addr)
		if cfg == nil {
			log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr)
			continue
		}
		if err := n.fixModule(ctx, addr); err != nil {
			return nil, err
		}
	}

	return nil, nil
}

// fixModule applies the zero/one count boundary fixup to each of the
// resources recorded in the state for the module at the given address.
func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(ctx EvalContext, moduleAddr addrs.ModuleInstance) error {
	ms := ctx.State().Module(moduleAddr)
	cfg := n.Config.DescendentForInstance(moduleAddr)
	if ms == nil {
		// Theoretically possible for a concurrent writer to delete a module
		// while we're running, but in practice the graph node that called us
		// depends on everything else in the graph and so there can never
		// be a concurrent writer.
		return fmt.Errorf("[WARN] no state found for %s while trying to fix up EachModes", moduleAddr)
	}
	if cfg == nil {
		return fmt.Errorf("[WARN] no config found for %s while trying to fix up EachModes", moduleAddr)
	}

	for _, r := range ms.Resources {
		addr := r.Addr.Absolute(moduleAddr)
		rCfg := cfg.Module.ResourceByAddr(r.Addr)
		if rCfg == nil {
			log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr)
			continue
		}
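		// Whether the configuration sets "count" decides which addressing form
		// the instances in state should use ("aws_instance.foo" vs.
		// "aws_instance.foo.0"); fixResourceCountSetTransition, defined
		// elsewhere in this package, adjusts the state to match.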
		hasCount := rCfg.Count != nil
		fixResourceCountSetTransition(ctx, addr, hasCount)
	}

	return nil
}