// opentofu/internal/terraform/evaluate_test.go

package terraform

import (
	"sync"
	"testing"

	"github.com/davecgh/go-spew/spew"
	"github.com/hashicorp/terraform/internal/addrs"
	"github.com/hashicorp/terraform/internal/configs"
	"github.com/hashicorp/terraform/internal/configs/configschema"
	"github.com/hashicorp/terraform/internal/lang/marks"
	"github.com/hashicorp/terraform/internal/plans"
	"github.com/hashicorp/terraform/internal/states"
	"github.com/hashicorp/terraform/internal/tfdiags"
	"github.com/zclconf/go-cty/cty"
)
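
// TestEvaluatorGetTerraformAttr verifies that terraform.workspace resolves
// to the environment name carried in ContextMeta.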
func TestEvaluatorGetTerraformAttr(t *testing.T) {
	evaluator := &Evaluator{
		Meta: &ContextMeta{
			Env: "foo",
		},
	}
	data := &evaluationStateData{
		Evaluator: evaluator,
	}
	scope := evaluator.Scope(data, nil)

	t.Run("workspace", func(t *testing.T) {
		want := cty.StringVal("foo")
		got, diags := scope.Data.GetTerraformAttr(addrs.TerraformAttr{
			Name: "workspace",
		}, tfdiags.SourceRange{})
		if len(diags) != 0 {
			t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
		}
		if !got.RawEquals(want) {
			t.Errorf("wrong result %q; want %q", got, want)
		}
	})
}
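
// TestEvaluatorGetPathAttr verifies that path.module and path.root both
// resolve to the module's source directory when evaluating in the root
// module.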
func TestEvaluatorGetPathAttr(t *testing.T) {
	evaluator := &Evaluator{
		Meta: &ContextMeta{
			Env: "foo",
		},
		Config: &configs.Config{
			Module: &configs.Module{
				SourceDir: "bar/baz",
			},
		},
	}
	data := &evaluationStateData{
		Evaluator: evaluator,
	}
	scope := evaluator.Scope(data, nil)

	t.Run("module", func(t *testing.T) {
		want := cty.StringVal("bar/baz")
		got, diags := scope.Data.GetPathAttr(addrs.PathAttr{
			Name: "module",
		}, tfdiags.SourceRange{})
		if len(diags) != 0 {
			t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
		}
		if !got.RawEquals(want) {
			t.Errorf("wrong result %#v; want %#v", got, want)
		}
	})

	t.Run("root", func(t *testing.T) {
		want := cty.StringVal("bar/baz")
		got, diags := scope.Data.GetPathAttr(addrs.PathAttr{
			Name: "root",
		}, tfdiags.SourceRange{})
		if len(diags) != 0 {
			t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
		}
		if !got.RawEquals(want) {
			t.Errorf("wrong result %#v; want %#v", got, want)
		}
	})
}

// This particularly tests that a sensitive attribute in config
// results in a value that has a "sensitive" cty Mark.
func TestEvaluatorGetInputVariable(t *testing.T) {
	evaluator := &Evaluator{
		Meta: &ContextMeta{
			Env: "foo",
		},
		Config: &configs.Config{
			Module: &configs.Module{
				Variables: map[string]*configs.Variable{
					"some_var": {
						Name:           "some_var",
						Sensitive:      true,
						Default:        cty.StringVal("foo"),
						Type:           cty.String,
						ConstraintType: cty.String,
					},
					// Avoid double marking a value
					"some_other_var": {
						Name:           "some_other_var",
						Sensitive:      true,
						Default:        cty.StringVal("bar"),
						Type:           cty.String,
						ConstraintType: cty.String,
					},
				},
			},
		},
		VariableValues: map[string]map[string]cty.Value{
			"": {
				"some_var":       cty.StringVal("bar"),
				"some_other_var": cty.StringVal("boop").Mark(marks.Sensitive),
			},
		},
		VariableValuesLock: &sync.Mutex{},
	}

	data := &evaluationStateData{
		Evaluator: evaluator,
	}
	scope := evaluator.Scope(data, nil)

	want := cty.StringVal("bar").Mark(marks.Sensitive)
	got, diags := scope.Data.GetInputVariable(addrs.InputVariable{
		Name: "some_var",
	}, tfdiags.SourceRange{})

	if len(diags) != 0 {
		t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
	}
	if !got.RawEquals(want) {
		t.Errorf("wrong result %#v; want %#v", got, want)
	}

	want = cty.StringVal("boop").Mark(marks.Sensitive)
	got, diags = scope.Data.GetInputVariable(addrs.InputVariable{
		Name: "some_other_var",
	}, tfdiags.SourceRange{})

	if len(diags) != 0 {
		t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
	}
	if !got.RawEquals(want) {
		t.Errorf("wrong result %#v; want %#v", got, want)
	}
}
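
// TestSensitiveMarkRoundTrip is a minimal standalone sketch of the cty mark
// mechanics the test above relies on; it exercises only go-cty and the marks
// package, not the Evaluator itself.
func TestSensitiveMarkRoundTrip(t *testing.T) {
	v := cty.StringVal("secret").Mark(marks.Sensitive)
	if !v.IsMarked() {
		t.Fatal("expected value to carry a mark")
	}

	// Unmark splits a value from its set of marks.
	unmarked, ms := v.Unmark()
	if !unmarked.RawEquals(cty.StringVal("secret")) {
		t.Errorf("wrong unmarked value %#v", unmarked)
	}
	if _, ok := ms[marks.Sensitive]; !ok {
		t.Error("expected the sensitive mark in the mark set")
	}
}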

func TestEvaluatorGetResource(t *testing.T) {
	stateSync := states.BuildState(func(ss *states.SyncState) {
		ss.SetResourceInstanceCurrent(
			addrs.Resource{
				Mode: addrs.ManagedResourceMode,
				Type: "test_resource",
				Name: "foo",
			}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
			&states.ResourceInstanceObjectSrc{
				Status:    states.ObjectReady,
				AttrsJSON: []byte(`{"id":"foo", "nesting_list": [{"sensitive_value":"abc"}], "nesting_map": {"foo":{"foo":"x"}}, "nesting_set": [{"baz":"abc"}], "nesting_single": {"boop":"abc"}, "nesting_nesting": {"nesting_list":[{"sensitive_value":"abc"}]}, "value":"hello"}`),
			},
			addrs.AbsProviderConfig{
				Provider: addrs.NewDefaultProvider("test"),
				Module:   addrs.RootModule,
			},
		)
	}).SyncWrapper()

	rc := &configs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "test_resource",
		Name: "foo",
		Config: configs.SynthBody("", map[string]cty.Value{
			"id": cty.StringVal("foo"),
		}),
		Provider: addrs.Provider{
			Hostname:  addrs.DefaultProviderRegistryHost,
			Namespace: "hashicorp",
			Type:      "test",
		},
	}

	evaluator := &Evaluator{
		Meta: &ContextMeta{
			Env: "foo",
		},
		Changes: plans.NewChanges().SyncWrapper(),
		Config: &configs.Config{
			Module: &configs.Module{
				ManagedResources: map[string]*configs.Resource{
					"test_resource.foo": rc,
				},
			},
		},
		State: stateSync,
		Plugins: schemaOnlyProvidersForTesting(map[addrs.Provider]*ProviderSchema{
			addrs.NewDefaultProvider("test"): {
				Provider: &configschema.Block{},
				ResourceTypes: map[string]*configschema.Block{
					"test_resource": {
						Attributes: map[string]*configschema.Attribute{
							"id": {
								Type:     cty.String,
								Computed: true,
							},
							"value": {
								Type:      cty.String,
								Computed:  true,
								Sensitive: true,
							},
						},
						BlockTypes: map[string]*configschema.NestedBlock{
							"nesting_list": {
								Block: configschema.Block{
									Attributes: map[string]*configschema.Attribute{
										"value":           {Type: cty.String, Optional: true},
										"sensitive_value": {Type: cty.String, Optional: true, Sensitive: true},
									},
								},
								Nesting: configschema.NestingList,
							},
							"nesting_map": {
								Block: configschema.Block{
									Attributes: map[string]*configschema.Attribute{
										"foo": {Type: cty.String, Optional: true, Sensitive: true},
									},
								},
								Nesting: configschema.NestingMap,
							},
							"nesting_set": {
								Block: configschema.Block{
									Attributes: map[string]*configschema.Attribute{
										"baz": {Type: cty.String, Optional: true, Sensitive: true},
									},
								},
								Nesting: configschema.NestingSet,
							},
							"nesting_single": {
								Block: configschema.Block{
									Attributes: map[string]*configschema.Attribute{
										"boop": {Type: cty.String, Optional: true, Sensitive: true},
									},
								},
								Nesting: configschema.NestingSingle,
							},
							"nesting_nesting": {
								Block: configschema.Block{
									BlockTypes: map[string]*configschema.NestedBlock{
										"nesting_list": {
											Block: configschema.Block{
												Attributes: map[string]*configschema.Attribute{
													"value":           {Type: cty.String, Optional: true},
													"sensitive_value": {Type: cty.String, Optional: true, Sensitive: true},
												},
											},
											Nesting: configschema.NestingList,
										},
									},
								},
								Nesting: configschema.NestingSingle,
							},
						},
					},
				},
			},
		}),
	}

	data := &evaluationStateData{
		Evaluator: evaluator,
	}
	scope := evaluator.Scope(data, nil)

	want := cty.ObjectVal(map[string]cty.Value{
		"id": cty.StringVal("foo"),
		"nesting_list": cty.ListVal([]cty.Value{
			cty.ObjectVal(map[string]cty.Value{
				"sensitive_value": cty.StringVal("abc").Mark(marks.Sensitive),
				"value":           cty.NullVal(cty.String),
			}),
		}),
		"nesting_map": cty.MapVal(map[string]cty.Value{
			"foo": cty.ObjectVal(map[string]cty.Value{"foo": cty.StringVal("x").Mark(marks.Sensitive)}),
		}),
		"nesting_nesting": cty.ObjectVal(map[string]cty.Value{
			"nesting_list": cty.ListVal([]cty.Value{
				cty.ObjectVal(map[string]cty.Value{
					"sensitive_value": cty.StringVal("abc").Mark(marks.Sensitive),
					"value":           cty.NullVal(cty.String),
				}),
			}),
		}),
		"nesting_set": cty.SetVal([]cty.Value{
			cty.ObjectVal(map[string]cty.Value{
				"baz": cty.StringVal("abc").Mark(marks.Sensitive),
			}),
		}),
		"nesting_single": cty.ObjectVal(map[string]cty.Value{
			"boop": cty.StringVal("abc").Mark(marks.Sensitive),
		}),
		"value": cty.StringVal("hello").Mark(marks.Sensitive),
	})

	addr := addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "test_resource",
		Name: "foo",
	}
	got, diags := scope.Data.GetResource(addr, tfdiags.SourceRange{})

	if len(diags) != 0 {
		t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
	}
	if !got.RawEquals(want) {
		t.Errorf("wrong result:\ngot: %#v\nwant: %#v", got, want)
	}
}
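
// TestElementMarksStayOnElements is a small illustrative companion to the
// test above: the expected value marks individual nested attributes rather
// than whole collections, because cty keeps each mark attached to the value
// it was applied to, so a collection reports unmarked while still containing
// marked elements.
func TestElementMarksStayOnElements(t *testing.T) {
	list := cty.ListVal([]cty.Value{
		cty.ObjectVal(map[string]cty.Value{
			"sensitive_value": cty.StringVal("abc").Mark(marks.Sensitive),
		}),
	})
	if list.IsMarked() {
		t.Error("the list itself should not be marked")
	}
	if !list.ContainsMarked() {
		t.Error("the list should contain a marked element")
	}
}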

// GetResource will return a planned object's After value
// if there is a change for that resource instance.
func TestEvaluatorGetResource_changes(t *testing.T) {
	// Set up existing state
	stateSync := states.BuildState(func(ss *states.SyncState) {
		ss.SetResourceInstanceCurrent(
			addrs.Resource{
				Mode: addrs.ManagedResourceMode,
				Type: "test_resource",
				Name: "foo",
			}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
			&states.ResourceInstanceObjectSrc{
				Status:    states.ObjectPlanned,
				AttrsJSON: []byte(`{"id":"foo", "to_mark_val":"tacos", "sensitive_value":"abc"}`),
			},
			addrs.AbsProviderConfig{
				Provider: addrs.NewDefaultProvider("test"),
				Module:   addrs.RootModule,
			},
		)
	}).SyncWrapper()

	// Create a change for the existing state resource,
	// to exercise retrieving the After value of the change
	changesSync := plans.NewChanges().SyncWrapper()
	change := &plans.ResourceInstanceChange{
		Addr: mustResourceInstanceAddr("test_resource.foo"),
		ProviderAddr: addrs.AbsProviderConfig{
			Module:   addrs.RootModule,
			Provider: addrs.NewDefaultProvider("test"),
		},
		Change: plans.Change{
			Action: plans.Update,
			// Provide an After value that contains a marked value
			After: cty.ObjectVal(map[string]cty.Value{
				"id":              cty.StringVal("foo"),
				"to_mark_val":     cty.StringVal("pizza").Mark(marks.Sensitive),
				"sensitive_value": cty.StringVal("abc"),
				"sensitive_collection": cty.MapVal(map[string]cty.Value{
					"boop": cty.StringVal("beep"),
				}),
			}),
		},
	}

	// Set up our schemas
	schemas := &Schemas{
		Providers: map[addrs.Provider]*ProviderSchema{
			addrs.NewDefaultProvider("test"): {
				Provider: &configschema.Block{},
				ResourceTypes: map[string]*configschema.Block{
					"test_resource": {
						Attributes: map[string]*configschema.Attribute{
							"id": {
								Type:     cty.String,
								Computed: true,
							},
							"to_mark_val": {
								Type:     cty.String,
								Computed: true,
							},
							"sensitive_value": {
								Type:      cty.String,
								Computed:  true,
								Sensitive: true,
							},
							"sensitive_collection": {
								Type:      cty.Map(cty.String),
								Computed:  true,
								Sensitive: true,
							},
						},
					},
				},
			},
		},
	}

	// The resource we'll inspect
	addr := addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "test_resource",
		Name: "foo",
	}
	schema, _ := schemas.ResourceTypeConfig(addrs.NewDefaultProvider("test"), addr.Mode, addr.Type)
	// This encoding separates out the After's marks into its AfterValMarks
	csrc, _ := change.Encode(schema.ImpliedType())
	changesSync.AppendResourceInstanceChange(csrc)

	evaluator := &Evaluator{
		Meta: &ContextMeta{
			Env: "foo",
		},
		Changes: changesSync,
		Config: &configs.Config{
			Module: &configs.Module{
				ManagedResources: map[string]*configs.Resource{
					"test_resource.foo": {
						Mode: addrs.ManagedResourceMode,
						Type: "test_resource",
						Name: "foo",
						Provider: addrs.Provider{
							Hostname:  addrs.DefaultProviderRegistryHost,
							Namespace: "hashicorp",
							Type:      "test",
						},
					},
				},
			},
		},
		State:   stateSync,
		Plugins: schemaOnlyProvidersForTesting(schemas.Providers),
	}

	data := &evaluationStateData{
		Evaluator: evaluator,
	}
	scope := evaluator.Scope(data, nil)

	want := cty.ObjectVal(map[string]cty.Value{
		"id":              cty.StringVal("foo"),
		"to_mark_val":     cty.StringVal("pizza").Mark(marks.Sensitive),
		"sensitive_value": cty.StringVal("abc").Mark(marks.Sensitive),
		"sensitive_collection": cty.MapVal(map[string]cty.Value{
			"boop": cty.StringVal("beep"),
		}).Mark(marks.Sensitive),
	})

	got, diags := scope.Data.GetResource(addr, tfdiags.SourceRange{})

	if len(diags) != 0 {
		t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
	}
	if !got.RawEquals(want) {
		t.Errorf("wrong result:\ngot: %#v\nwant: %#v", got, want)
	}
}
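
// TestUnmarkDeepWithPaths is an illustrative sketch using only go-cty: the
// mark handling that change.Encode performs above has the same shape as
// cty's deep-unmark helper, which strips every mark from a value and
// records the path at which each one was found.
func TestUnmarkDeepWithPaths(t *testing.T) {
	v := cty.ObjectVal(map[string]cty.Value{
		"plain":  cty.StringVal("ok"),
		"secret": cty.StringVal("hush").Mark(marks.Sensitive),
	})

	unmarked, pvms := v.UnmarkDeepWithPaths()
	if unmarked.ContainsMarked() {
		t.Error("expected all marks to be stripped")
	}
	if len(pvms) != 1 {
		t.Errorf("expected one marked path, got %d", len(pvms))
	}
}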

func TestEvaluatorGetModule(t *testing.T) {
	// Create a new evaluator with an existing state
	stateSync := states.BuildState(func(ss *states.SyncState) {
		ss.SetOutputValue(
			addrs.OutputValue{Name: "out"}.Absolute(addrs.ModuleInstance{addrs.ModuleInstanceStep{Name: "mod"}}),
			cty.StringVal("bar"),
			true,
		)
	}).SyncWrapper()
	evaluator := evaluatorForModule(stateSync, plans.NewChanges().SyncWrapper())
	data := &evaluationStateData{
		Evaluator: evaluator,
	}
	scope := evaluator.Scope(data, nil)

	want := cty.ObjectVal(map[string]cty.Value{"out": cty.StringVal("bar").Mark(marks.Sensitive)})
	got, diags := scope.Data.GetModule(addrs.ModuleCall{
		Name: "mod",
	}, tfdiags.SourceRange{})

	if len(diags) != 0 {
		t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
	}
	if !got.RawEquals(want) {
		t.Errorf("wrong result %#v; want %#v", got, want)
	}

	// Changes should override the state value
	changesSync := plans.NewChanges().SyncWrapper()
	change := &plans.OutputChange{
		Addr:      addrs.OutputValue{Name: "out"}.Absolute(addrs.ModuleInstance{addrs.ModuleInstanceStep{Name: "mod"}}),
		Sensitive: true,
		Change: plans.Change{
			After: cty.StringVal("baz"),
		},
	}
	cs, _ := change.Encode()
	changesSync.AppendOutputChange(cs)
	evaluator = evaluatorForModule(stateSync, changesSync)
	data = &evaluationStateData{
		Evaluator: evaluator,
	}
	scope = evaluator.Scope(data, nil)

	want = cty.ObjectVal(map[string]cty.Value{"out": cty.StringVal("baz").Mark(marks.Sensitive)})
	got, diags = scope.Data.GetModule(addrs.ModuleCall{
		Name: "mod",
	}, tfdiags.SourceRange{})

	if len(diags) != 0 {
		t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
	}
	if !got.RawEquals(want) {
		t.Errorf("wrong result %#v; want %#v", got, want)
	}

	// Test changes with empty state
	evaluator = evaluatorForModule(states.NewState().SyncWrapper(), changesSync)
	data = &evaluationStateData{
		Evaluator: evaluator,
	}
	scope = evaluator.Scope(data, nil)

	want = cty.ObjectVal(map[string]cty.Value{"out": cty.StringVal("baz").Mark(marks.Sensitive)})
	got, diags = scope.Data.GetModule(addrs.ModuleCall{
		Name: "mod",
	}, tfdiags.SourceRange{})

	if len(diags) != 0 {
		t.Errorf("unexpected diagnostics %s", spew.Sdump(diags))
	}
	if !got.RawEquals(want) {
		t.Errorf("wrong result %#v; want %#v", got, want)
	}
}
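
// evaluatorForModule constructs an Evaluator whose configuration declares a
// single child module call "mod" with one sensitive output "out", wired to
// the given state and changes. It is shared by the TestEvaluatorGetModule
// cases above.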
func evaluatorForModule(stateSync *states.SyncState, changesSync *plans.ChangesSync) *Evaluator {
	return &Evaluator{
		Meta: &ContextMeta{
			Env: "foo",
		},
		Config: &configs.Config{
			Module: &configs.Module{
				ModuleCalls: map[string]*configs.ModuleCall{
					"mod": {
						Name: "mod",
					},
				},
			},
			Children: map[string]*configs.Config{
				"mod": {
					Path: addrs.Module{"module.mod"},
					Module: &configs.Module{
						Outputs: map[string]*configs.Output{
							"out": {
								Name:      "out",
								Sensitive: true,
							},
						},
					},
				},
			},
		},
		State:   stateSync,
		Changes: changesSync,
	}
}