Adds a warning if tests don't provide a valid variable (#2057)

Signed-off-by: Andrew Hayes <andrew.hayes@harness.io>
Andy Hayes 2024-10-15 09:20:11 +01:00 committed by GitHub
parent 44ac3d5eb4
commit 7215ee2ed8
9 changed files with 202 additions and 20 deletions

View File

@@ -7,6 +7,7 @@ UPGRADE NOTES:
NEW FEATURES:
ENHANCEMENTS:
* Added user input prompt for static variables. ([#1792](https://github.com/opentofu/opentofu/issues/1792))
* Added `-show-sensitive` flag to tofu plan, apply, state show, and output commands to display sensitive data in output. ([#1554](https://github.com/opentofu/opentofu/pull/1554))
* Improved performance for large graphs when debug logs are not enabled. ([#1810](https://github.com/opentofu/opentofu/pull/1810))
* Improved performance for large graphs with many submodules. ([#1809](https://github.com/opentofu/opentofu/pull/1809))

View File

@@ -530,8 +530,9 @@ func (runner *TestFileRunner) ExecuteTestRun(run *moduletest.Run, file *modulete
planCtx, plan, planDiags := runner.plan(config, state, run, file)
if run.Config.Command == configs.PlanTestCommand {
expectedFailures, sourceRanges := run.BuildExpectedFailuresAndSourceMaps()
// Then we want to assess our conditions and diagnostics differently.
planDiags = run.ValidateExpectedFailures(planDiags)
planDiags = run.ValidateExpectedFailures(expectedFailures, sourceRanges, planDiags)
run.Diagnostics = run.Diagnostics.Append(planDiags)
if planDiags.HasErrors() {
run.Status = moduletest.Error
@@ -576,6 +577,10 @@ func (runner *TestFileRunner) ExecuteTestRun(run *moduletest.Run, file *modulete
return state, false
}
expectedFailures, sourceRanges := run.BuildExpectedFailuresAndSourceMaps()
planDiags = checkProblematicPlanErrors(expectedFailures, planDiags)
// Otherwise, any error during planning prevents our apply from
// continuing, which is an error.
run.Diagnostics = run.Diagnostics.Append(planDiags)
@@ -601,7 +606,7 @@ func (runner *TestFileRunner) ExecuteTestRun(run *moduletest.Run, file *modulete
applyCtx, updated, applyDiags := runner.apply(plan, state, config, run, file)
// Remove expected diagnostics, and add diagnostics in case anything that should have failed didn't.
applyDiags = run.ValidateExpectedFailures(applyDiags)
applyDiags = run.ValidateExpectedFailures(expectedFailures, sourceRanges, applyDiags)
run.Diagnostics = run.Diagnostics.Append(applyDiags)
if applyDiags.HasErrors() {
@@ -1266,3 +1271,26 @@ func (runner *TestFileRunner) prepareInputVariablesForAssertions(config *configs
config.Module.Variables = currentVars
}, diags
}
// checkProblematicPlanErrors checks for plan errors that are also "expected" by the tests. In some cases we expect an error,
// but what causes the error might not be what we expected, so we try to warn about that here.
func checkProblematicPlanErrors(expectedFailures addrs.Map[addrs.Referenceable, bool], planDiags tfdiags.Diagnostics) tfdiags.Diagnostics {
for _, diag := range planDiags {
rule, ok := addrs.DiagnosticOriginatesFromCheckRule(diag)
if !ok {
continue
}
if rule.Container.CheckableKind() != addrs.CheckableInputVariable {
continue
}
addr, ok := rule.Container.(addrs.AbsInputVariableInstance)
if ok && expectedFailures.Has(addr.Variable) {
planDiags = planDiags.Append(tfdiags.Sourceless(
tfdiags.Warning,
"Invalid Variable in test file",
fmt.Sprintf("Variable %s, has an invalid value within the test. Although this was an expected failure, it has meant the apply stage was unable to run so the overall test will fail.", rule.Container.String())))
}
}
return planDiags
}
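
Taken together, the hunks above give a condensed view of the apply path in ExecuteTestRun (a sketch assembled from this diff, not a verbatim excerpt; the error handling between steps is elided):

// Collect the checkable objects named in expect_failures, and the source
// range of each reference in the test file.
expectedFailures, sourceRanges := run.BuildExpectedFailuresAndSourceMaps()

// New in this commit: if an expected failure on an input variable already
// fired at plan time, warn that the apply stage cannot run at all.
planDiags = checkProblematicPlanErrors(expectedFailures, planDiags)
run.Diagnostics = run.Diagnostics.Append(planDiags)

// ... the apply runs ...

// Remove the failures we expected, and add errors for anything that
// should have failed but didn't.
applyDiags = run.ValidateExpectedFailures(expectedFailures, sourceRanges, applyDiags)
run.Diagnostics = run.Diagnostics.Append(applyDiags)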

View File

@@ -1241,6 +1241,100 @@ Success! 1 passed, 0 failed.
}
}
func TestTest_InvalidLocalVariables(t *testing.T) {
tcs := map[string]struct {
contains []string
notContains []string
code int
skip bool
}{
"invalid_variable_warning_expected": {
contains: []string{
"Warning: Invalid Variable in test file",
"Error: Invalid value for variable",
"This was checked by the validation rule at main.tf:5,3-13.",
"This was checked by the validation rule at main.tf:14,3-13.",
},
code: 1,
},
"invalid_variable_warning_no_expected": {
contains: []string{
"Error: Invalid value for variable",
"This was checked by the validation rule at main.tf:5,3-13.",
"This was checked by the validation rule at main.tf:14,3-13.",
},
notContains: []string{
"Warning: Invalid Variable in test file",
},
code: 1,
},
}
for name, tc := range tcs {
t.Run(name, func(t *testing.T) {
if tc.skip {
t.Skip()
}
file := name
td := t.TempDir()
testCopyDir(t, testFixturePath(path.Join("test", file)), td)
defer testChdir(t, td)()
provider := testing_command.NewProvider(nil)
providerSource, providerClose := newMockProviderSource(t, map[string][]string{
"test": {"1.0.0"},
})
defer providerClose()
streams, done := terminal.StreamsForTesting(t)
view := views.NewView(streams)
ui := new(cli.MockUi)
meta := Meta{
testingOverrides: metaOverridesForProvider(provider.Provider),
Ui: ui,
View: view,
Streams: streams,
ProviderSource: providerSource,
}
init := &InitCommand{
Meta: meta,
}
if code := init.Run(nil); code != 0 {
t.Fatalf("expected status code 0 but got %d: %s", code, ui.ErrorWriter)
}
command := &TestCommand{
Meta: meta,
}
code := command.Run([]string{"-verbose", "-no-color"})
output := done(t)
if code != tc.code {
t.Errorf("expected status code %d but got %d: %s", tc.code, code, output.All())
}
actual := output.All()
for _, containsString := range tc.contains {
if !strings.Contains(actual, containsString) {
t.Errorf("expected '%s' in output but didn't find it: \n%s", containsString, output.All())
}
}
for _, notContainsString := range tc.notContains {
if strings.Contains(actual, notContainsString) {
t.Errorf("expected not to find '%s' in output: \n%s", notContainsString, output.All())
}
}
})
}
}
func TestTest_RunBlock(t *testing.T) {
tcs := map[string]struct {
expected string

View File

@@ -0,0 +1,19 @@
variable "variable1" {
type = string
validation {
condition = var.variable1 == "foobar"
error_message = "The variable1 value must be: foobar"
}
}
variable "variable2" {
type = string
validation {
condition = var.variable2 == "barfoo"
error_message = "The variable2 value must be: barfoo"
}
}

View File

@@ -0,0 +1,10 @@
run "invalid_object" {
variables {
variable1 = "lalalala"
variable2 = "lalalala"
}
expect_failures = [
var.variable1,
]
}
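
This fixture drives the invalid_variable_warning_expected case above: variable1's validation failure is listed in expect_failures, so when it fires at plan time the new "Invalid Variable in test file" warning explains that the apply stage cannot run, while variable2's identical failure is unexpected and fails the run outright.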

View File

@@ -0,0 +1,19 @@
variable "variable1" {
type = string
validation {
condition = var.variable1 == "foobar"
error_message = "The variable1 value must be: foobar"
}
}
variable "variable2" {
type = string
validation {
condition = var.variable2 == "barfoo"
error_message = "The variable2 value must be: barfoo"
}
}

View File

@@ -0,0 +1,9 @@
run "invalid_object" {
variables {
variable1 = "lalalala"
variable2 = "lalalala"
}
expect_failures = [
]
}
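
This companion fixture leaves expect_failures empty, exercising the invalid_variable_warning_no_expected case: both validation errors are reported as ordinary failures, and the notContains assertion confirms the new warning is not emitted.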

View File

@@ -130,25 +130,9 @@ func (run *Run) GetReferences() ([]*addrs.Reference, tfdiags.Diagnostics) {
// diagnostics were generated by custom conditions. OpenTofu adds the
// addrs.CheckRule that generated each diagnostic to the diagnostic itself so we
// can tell which diagnostics can be expected.
func (run *Run) ValidateExpectedFailures(originals tfdiags.Diagnostics) tfdiags.Diagnostics {
// We're going to capture all the checkable objects that are referenced
// from the expected failures.
expectedFailures := addrs.MakeMap[addrs.Referenceable, bool]()
sourceRanges := addrs.MakeMap[addrs.Referenceable, tfdiags.SourceRange]()
for _, traversal := range run.Config.ExpectFailures {
// Ignore the diagnostics returned from the reference parsing, these
// references will have been checked earlier in the process by the
// validate stage so we don't need to do that again here.
reference, _ := addrs.ParseRefFromTestingScope(traversal)
expectedFailures.Put(reference.Subject, false)
sourceRanges.Put(reference.Subject, reference.SourceRange)
}
func (run *Run) ValidateExpectedFailures(expectedFailures addrs.Map[addrs.Referenceable, bool], sourceRanges addrs.Map[addrs.Referenceable, tfdiags.SourceRange], originals tfdiags.Diagnostics) tfdiags.Diagnostics { //nolint:funlen,gocognit,cyclop // legacy code triggers these; this change actually reduces the statement count
var diags tfdiags.Diagnostics
for _, diag := range originals {
if rule, ok := addrs.DiagnosticOriginatesFromCheckRule(diag); ok {
switch rule.Container.CheckableKind() {
case addrs.CheckableOutputValue:
@@ -340,3 +324,20 @@ func (run *Run) ValidateExpectedFailures(originals tfdiags.Diagnostics) tfdiags.
return diags
}
// BuildExpectedFailuresAndSourceMaps captures all the checkable objects that are referenced
// from the expected failures.
func (run *Run) BuildExpectedFailuresAndSourceMaps() (addrs.Map[addrs.Referenceable, bool], addrs.Map[addrs.Referenceable, tfdiags.SourceRange]) {
expectedFailures := addrs.MakeMap[addrs.Referenceable, bool]()
sourceRanges := addrs.MakeMap[addrs.Referenceable, tfdiags.SourceRange]()
for _, traversal := range run.Config.ExpectFailures {
// Ignore the diagnostics returned from the reference parsing; these
// references were already checked earlier in the process by the
// validate stage, so we don't need to do that again here.
reference, _ := addrs.ParseRefFromTestingScope(traversal)
expectedFailures.Put(reference.Subject, false)
sourceRanges.Put(reference.Subject, reference.SourceRange)
}
return expectedFailures, sourceRanges
}
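
Both ValidateExpectedFailures and the new checkProblematicPlanErrors rely on the mechanism described in the doc comment above: every diagnostic produced by a custom condition carries the addrs.CheckRule that generated it. A minimal sketch of that shared matching pattern, assembled from calls visible in this diff (simplified, not part of the commit):

for _, diag := range diags {
	// Diagnostics that did not come from a custom condition carry no
	// check rule and are passed through untouched.
	rule, ok := addrs.DiagnosticOriginatesFromCheckRule(diag)
	if !ok {
		continue
	}
	// The rule's container identifies the kind of object that failed...
	if rule.Container.CheckableKind() != addrs.CheckableInputVariable {
		continue
	}
	// ...and, for input variables, which instance it was, so it can be
	// matched against the expect_failures map built above.
	if addr, ok := rule.Container.(addrs.AbsInputVariableInstance); ok && expectedFailures.Has(addr.Variable) {
		// this failure was expected by the test
	}
}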

View File

@@ -767,8 +767,9 @@ func TestRun_ValidateExpectedFailures(t *testing.T) {
ExpectFailures: traversals,
},
}
expectedFailures, sourceRanges := run.BuildExpectedFailuresAndSourceMaps()
out := run.ValidateExpectedFailures(tc.Input)
out := run.ValidateExpectedFailures(expectedFailures, sourceRanges, tc.Input)
ix := 0
for ; ix < len(tc.Output); ix++ {
expected := tc.Output[ix]