test framework: include transformed config before validating config (#33608)

This commit is contained in:
Liam Cervante 2023-08-01 09:47:00 +02:00 committed by GitHub
parent 84edaaed57
commit 4560a83721
13 changed files with 442 additions and 226 deletions
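
In outline, the change below stops validating the raw configuration up front and instead validates the configuration after the test file's customizations have been applied via TransformForTest. A minimal sketch of the new shape, condensed from the diff below (TransformForTest's exact signature is inferred from its call sites here, and the function name validateTransformed is illustrative only):

// Sketch only; not the verbatim implementation from this commit.
func validateTransformed(config *configs.Config, file *moduletest.File, tfCtx *terraform.Context) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics

	// Apply the test file's provider blocks to the configuration under
	// test; the returned reset restores the original configuration.
	reset, transformDiags := config.TransformForTest(nil, file.Config)
	defer reset()

	diags = diags.Append(transformDiags)
	if transformDiags.HasErrors() {
		return diags
	}

	// Only now validate: this is the configuration the tests will run.
	return diags.Append(tfCtx.Validate(config))
}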

View File

@ -233,23 +233,6 @@ func (c *TestCommand) Run(rawArgs []string) int {
defer stop()
defer cancel()
// Validate the main config first.
validateDiags := runner.Validate()
// Print out any warnings or errors from the validation.
view.Diagnostics(nil, nil, validateDiags)
if validateDiags.HasErrors() {
// Don't try to run the tests if the validation actually failed.
// We'll also leave the test status as pending, since we made no
// effort to run the tests.
return
}
if runner.Stopped || runner.Cancelled {
suite.Status = moduletest.Error
return
}
runner.Start(variables)
}()
@ -328,64 +311,6 @@ type TestRunner struct {
Verbose bool
}
func (runner *TestRunner) Validate() tfdiags.Diagnostics {
log.Printf("[TRACE] TestRunner: Validating configuration.")
var diags tfdiags.Diagnostics
diags = diags.Append(runner.validateConfig(runner.Config))
if runner.Cancelled || runner.Stopped {
return diags
}
// We've validated the main configuration under test. We now need to
// validate any other modules that are being executed by the test files.
//
// We only validate modules that are sourced locally; we assume that
// any remote modules were properly vetted and tested before being
// used in our tests.
validatedModules := make(map[string]bool)
for _, file := range runner.Suite.Files {
for _, run := range file.Runs {
if runner.Cancelled || runner.Stopped {
return diags
}
// While we're here, also do a quick validation of the config of the
// actual run block.
diags = diags.Append(run.Config.Validate())
// If the run block is executing another local module, we should
// validate that before we try to run it.
if run.Config.ConfigUnderTest != nil {
if _, ok := run.Config.Module.Source.(addrs.ModuleSourceLocal); !ok {
// If it's not a local module, we're not going to validate
// it. The idea here is that if we're retrieving this module
// from the registry it's not the job of this run of the
// testing framework to test it. We should assume it's
// working correctly.
continue
}
if validated := validatedModules[run.Config.Module.Source.String()]; validated {
// We've validated this local module before, so don't do
// it again.
continue
}
validatedModules[run.Config.Module.Source.String()] = true
diags = diags.Append(runner.validateConfig(run.Config.ConfigUnderTest))
}
}
}
return diags
}
func (runner *TestRunner) Start(globals map[string]backend.UnparsedVariableValue) {
var files []string
for name := range runner.Suite.Files {
@ -408,21 +333,32 @@ func (runner *TestRunner) Start(globals map[string]backend.UnparsedVariableValue
func (runner *TestRunner) ExecuteTestFile(file *moduletest.File, globals map[string]backend.UnparsedVariableValue) {
log.Printf("[TRACE] TestRunner: executing test file %s", file.Name)
printAll := func() {
runner.View.File(file)
for _, run := range file.Runs {
runner.View.Run(run, file)
}
}
mgr := new(TestStateManager)
mgr.runner = runner
mgr.State = states.NewState()
// We're going to check if the cleanupStates function call will actually
// work before we start the test.
diags := mgr.prepare(file, globals)
if diags.HasErrors() || runner.Cancelled || runner.Stopped {
file.Status = moduletest.Error
runner.View.File(file)
runner.View.Diagnostics(nil, file, diags)
for _, run := range file.Runs {
run.Status = moduletest.Skip
runner.View.Run(run, file)
mgr.prepare(file, globals)
if runner.Cancelled {
return // Don't print anything; just stop.
}
if file.Diagnostics.HasErrors() || runner.Stopped {
// We can't run this file, but we still want to print the file and
// its runs nicely.
for _, run := range file.Runs {
// The prepare function doesn't touch the run blocks, so we update
// their statuses here so the output makes sense.
run.Status = moduletest.Skip
}
printAll()
return
}
@ -468,11 +404,7 @@ func (runner *TestRunner) ExecuteTestFile(file *moduletest.File, globals map[str
file.Status = file.Status.Merge(run.Status)
}
runner.View.File(file)
runner.View.Diagnostics(nil, file, diags) // Print out any warnings from the preparation.
for _, run := range file.Runs {
runner.View.Run(run, file)
}
printAll()
}
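
The gate at the top of ExecuteTestFile can be read as a three-way branch: cancellation suppresses all output, a failed or stopped prepare marks every run as skipped before printing, and otherwise execution proceeds. A condensed sketch of that control flow (names as they appear in the hunk above; not verbatim):

	mgr.prepare(file, globals)
	switch {
	case runner.Cancelled:
		return // don't print anything; just stop
	case file.Diagnostics.HasErrors() || runner.Stopped:
		for _, run := range file.Runs {
			run.Status = moduletest.Skip // prepare never touches run blocks
		}
		printAll()
		return
	}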
func (runner *TestRunner) ExecuteTestRun(mgr *TestStateManager, run *moduletest.Run, file *moduletest.File, state *states.State, config *configs.Config, globals map[string]backend.UnparsedVariableValue) *states.State {
@ -519,7 +451,12 @@ func (runner *TestRunner) ExecuteTestRun(mgr *TestStateManager, run *moduletest.
SkipRefresh: !run.Config.Options.Refresh,
ExternalReferences: references,
}, run.Config.Command, globals)
if plan != nil {
// If the returned plan is nil, then something went wrong before we
// could even attempt the plan or apply operations, so we skip
// validating the expected failures.
diags = run.ValidateExpectedFailures(diags)
}
run.Diagnostics = run.Diagnostics.Append(diags)
if runner.Cancelled {
@ -586,21 +523,32 @@ func (runner *TestRunner) ExecuteTestRun(mgr *TestStateManager, run *moduletest.
return state
}
func (runner *TestRunner) validateConfig(config *configs.Config) tfdiags.Diagnostics {
log.Printf("[TRACE] TestRunner: validating specific config %s", config.Path)
func (runner *TestRunner) validateFile(file *moduletest.File) {
log.Printf("[TRACE] TestRunner: validating config for %s", file.Name)
var diags tfdiags.Diagnostics
config := runner.Config
reset, transformDiags := config.TransformForTest(nil, file.Config)
defer reset()
file.Diagnostics = file.Diagnostics.Append(transformDiags)
if transformDiags.HasErrors() {
file.Status = moduletest.Error
return
}
tfCtxOpts, err := runner.command.contextOpts()
diags = diags.Append(err)
file.Diagnostics = file.Diagnostics.Append(err)
if err != nil {
return diags
file.Status = moduletest.Error
return
}
tfCtx, ctxDiags := terraform.NewContext(tfCtxOpts)
diags = diags.Append(ctxDiags)
file.Diagnostics = file.Diagnostics.Append(ctxDiags)
if ctxDiags.HasErrors() {
return diags
file.Status = moduletest.Error
return
}
runningCtx, done := context.WithCancel(context.Background())
@ -609,17 +557,22 @@ func (runner *TestRunner) validateConfig(config *configs.Config) tfdiags.Diagnos
go func() {
defer logging.PanicHandler()
defer done()
validateDiags = tfCtx.Validate(config)
}()
// We don't need to pass in any metadata here, as we're only
// validating, so it doesn't matter if something is cancelled. We only
// pass in the metadata so we can print context around a cancellation,
// which we don't need to do in this case.
waitDiags, _ := runner.wait(tfCtx, runningCtx, nil, nil, nil, nil)
diags = diags.Append(validateDiags)
diags = diags.Append(waitDiags)
return diags
log.Printf("[DEBUG] TestRunner: starting validate for %s", file.Name)
validateDiags = tfCtx.Validate(config)
log.Printf("[DEBUG] TestRunner: completed validate for %s", file.Name)
}()
// We don't pass in a manager or any created resources here since we are
// only validating. If something goes wrong, there will be no state we need
// to worry about cleaning up manually. So the manager and created resources
// can be empty.
waitDiags, _ := runner.wait(tfCtx, runningCtx, nil, nil, file, nil)
file.Diagnostics = file.Diagnostics.Append(validateDiags)
file.Diagnostics = file.Diagnostics.Append(waitDiags)
if validateDiags.HasErrors() || waitDiags.HasErrors() {
file.Status = moduletest.Error
}
}
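
validateFile runs the validate operation in a goroutine so the runner can react to interrupts while it executes; wait then blocks until the goroutine signals completion or a cancellation arrives. A self-contained sketch of that pattern using only the standard library (time.Sleep stands in for tfCtx.Validate, and the real wait also watches interrupt channels):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	runningCtx, done := context.WithCancel(context.Background())

	var result string
	go func() {
		defer done()                       // signal completion, success or failure
		time.Sleep(100 * time.Millisecond) // stands in for tfCtx.Validate(config)
		result = "validated"
	}()

	<-runningCtx.Done() // stands in for runner.wait(...)
	fmt.Println(result)
}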
// execute executes Terraform plan and apply operations for the given arguments.
@ -639,9 +592,19 @@ func (runner *TestRunner) execute(mgr *TestStateManager, run *moduletest.Run, fi
return nil, nil, state, nil
}
// First, transform the config for the given test run and test file.
var diags tfdiags.Diagnostics
// First, do a quick validation of the run block's config.
if run != nil {
diags = diags.Append(run.Config.Validate())
if diags.HasErrors() {
return nil, nil, state, diags
}
}
// Second, transform the config for the given test run and test file.
if run == nil {
reset, cfgDiags := config.TransformForTest(nil, file.Config)
defer reset()
@ -655,16 +618,7 @@ func (runner *TestRunner) execute(mgr *TestStateManager, run *moduletest.Run, fi
return nil, nil, state, diags
}
// Second, gather any variables and give them to the plan options.
variables, variableDiags := buildInputVariablesForTest(run, file, config, globals)
diags = diags.Append(variableDiags)
if variableDiags.HasErrors() {
return nil, nil, state, diags
}
opts.SetVariables = variables
// Third, execute planning stage.
// Third, do a full validation of the now transformed config.
tfCtxOpts, err := runner.command.contextOpts()
diags = diags.Append(err)
@ -680,6 +634,52 @@ func (runner *TestRunner) execute(mgr *TestStateManager, run *moduletest.Run, fi
runningCtx, done := context.WithCancel(context.Background())
var validateDiags tfdiags.Diagnostics
go func() {
defer logging.PanicHandler()
defer done()
log.Printf("[DEBUG] TestRunner: starting validate for %s", identifier)
validateDiags = tfCtx.Validate(config)
log.Printf("[DEBUG] TestRunner: completed validate for %s", identifier)
}()
waitDiags, cancelled := runner.wait(tfCtx, runningCtx, mgr, run, file, nil)
validateDiags = validateDiags.Append(waitDiags)
diags = diags.Append(validateDiags)
if validateDiags.HasErrors() {
// Validation failed, so just return what we have: the diagnostics
// from validating and the unchanged state. There's no point planning
// a configuration that doesn't validate.
return tfCtx, nil, state, diags
}
if cancelled {
log.Printf("[DEBUG] TestRunner: skipping plan and apply stage for %s due to cancellation", identifier)
// If the execution was cancelled during validation, we'll exit here
// to stop the plan and apply stages from running and using more time.
return tfCtx, nil, state, diags
}
// Fourth, gather any variables and give them to the plan options.
variables, variableDiags := buildInputVariablesForTest(run, file, config, globals)
diags = diags.Append(variableDiags)
if variableDiags.HasErrors() {
return nil, nil, state, diags
}
opts.SetVariables = variables
// Fifth, execute planning stage.
tfCtx, ctxDiags = terraform.NewContext(tfCtxOpts)
diags = diags.Append(ctxDiags)
if ctxDiags.HasErrors() {
return nil, nil, state, diags
}
runningCtx, done = context.WithCancel(context.Background())
var plan *plans.Plan
var planDiags tfdiags.Diagnostics
go func() {
@ -690,7 +690,7 @@ func (runner *TestRunner) execute(mgr *TestStateManager, run *moduletest.Run, fi
plan, planDiags = tfCtx.Plan(config, state, opts)
log.Printf("[DEBUG] TestRunner: completed plan for %s", identifier)
}()
waitDiags, cancelled := runner.wait(tfCtx, runningCtx, mgr, run, file, nil)
waitDiags, cancelled = runner.wait(tfCtx, runningCtx, mgr, run, file, nil)
planDiags = planDiags.Append(waitDiags)
diags = diags.Append(planDiags)
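
At this point the re-staged flow of execute is fully visible: quick run-block validation, transform, full validate, variables, plan, and (in the next hunk) apply. A condensed skeleton of the six stages (repo-internal types; error handling, cancellation checks, and the run == nil variant elided; the first argument to TransformForTest for a non-nil run is assumed from the nil case shown above):

// Condensed skeleton; not the verbatim implementation.
func executeStages(runner *TestRunner, run *moduletest.Run, file *moduletest.File,
	config *configs.Config, state *states.State, opts *terraform.PlanOpts,
	globals map[string]backend.UnparsedVariableValue) tfdiags.Diagnostics {

	var diags tfdiags.Diagnostics

	// First, a quick validation of the run block's own config.
	diags = diags.Append(run.Config.Validate())

	// Second, transform the config for this run and test file.
	reset, cfgDiags := config.TransformForTest(run.Config, file.Config)
	defer reset()
	diags = diags.Append(cfgDiags)

	// Third, a full validation of the now-transformed config.
	tfCtx, ctxDiags := terraform.NewContext(&terraform.ContextOpts{})
	diags = diags.Append(ctxDiags)
	diags = diags.Append(tfCtx.Validate(config))

	// Fourth, gather variables and hand them to the plan options.
	variables, varDiags := buildInputVariablesForTest(run, file, config, globals)
	diags = diags.Append(varDiags)
	opts.SetVariables = variables

	// Fifth and sixth, plan and then apply, each in a fresh context.
	plan, planDiags := tfCtx.Plan(config, state, opts)
	diags = diags.Append(planDiags)
	_ = plan // the apply stage consumes this plan (elided here)

	return diags
}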
@ -721,7 +721,7 @@ func (runner *TestRunner) execute(mgr *TestStateManager, run *moduletest.Run, fi
}
diags = filteredDiags
// Fourth, execute apply stage.
// Sixth, execute apply stage.
tfCtx, ctxDiags = terraform.NewContext(tfCtxOpts)
diags = diags.Append(ctxDiags)
if ctxDiags.HasErrors() {
@ -879,15 +879,22 @@ type TestModuleState struct {
// successfully execute all our run blocks and then find we cannot perform any
// cleanup. We want to use this function to check that our cleanup can happen
// using only the information available within the file.
func (manager *TestStateManager) prepare(file *moduletest.File, globals map[string]backend.UnparsedVariableValue) tfdiags.Diagnostics {
func (manager *TestStateManager) prepare(file *moduletest.File, globals map[string]backend.UnparsedVariableValue) {
// First, we're going to check we have definitions for variables at the
// file level.
// For now, the only thing we care about is making sure all the required
// variables have values.
_, diags := buildInputVariablesForTest(nil, file, manager.runner.Config, globals)
// Return the sum of diagnostics that might indicate a problem for any
// future attempted cleanup.
return diags
file.Diagnostics = file.Diagnostics.Append(diags)
if diags.HasErrors() {
file.Status = moduletest.Error
}
// Second, we'll validate that the default provider configurations actually
// pass a validate operation.
manager.runner.validateFile(file)
}
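
Note the error-reporting convention this refactor adopts: prepare and validateFile no longer return diagnostics; they record them on the File alongside a status, and callers inspect those fields. A minimal sketch of that convention (recordError is a hypothetical helper, not in the diff):

// Hypothetical helper illustrating the record-on-File convention.
func recordError(file *moduletest.File, diags tfdiags.Diagnostics) bool {
	file.Diagnostics = file.Diagnostics.Append(diags)
	if diags.HasErrors() {
		file.Status = moduletest.Error
		return true
	}
	return false
}

Each stage above follows this shape: append what it learned, flag the file on error, and return early.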
func (manager *TestStateManager) cleanupStates(file *moduletest.File, globals map[string]backend.UnparsedVariableValue) {

View File

@ -120,6 +120,10 @@ func TestTest(t *testing.T) {
expected: "0 passed, 1 failed.",
code: 1,
},
"no_providers_in_main": {
expected: "1 passed, 0 failed",
code: 0,
},
}
for name, tc := range tcs {
t.Run(name, func(t *testing.T) {
@ -500,31 +504,17 @@ Success! 2 passed, 0 failed.
}
func TestTest_ValidatesBeforeExecution(t *testing.T) {
td := t.TempDir()
testCopyDir(t, testFixturePath(path.Join("test", "invalid")), td)
defer testChdir(t, td)()
tcs := map[string]struct {
expectedOut string
expectedErr string
}{
"invalid": {
expectedOut: `main.tftest.hcl... fail
run "invalid"... fail
provider := testing_command.NewProvider(nil)
view, done := testView(t)
c := &TestCommand{
Meta: Meta{
testingOverrides: metaOverridesForProvider(provider.Provider),
View: view,
},
}
code := c.Run([]string{"-verbose", "-no-color"})
output := done(t)
if code != 1 {
t.Errorf("expected status code 1 but got %d", code)
}
expectedOut := `
Executed 0 tests.
`
expectedErr := `
Failure! 0 passed, 1 failed.
`,
expectedErr: `
Error: Invalid ` + "`expect_failures`" + ` reference
on main.tftest.hcl line 5, in run "invalid":
@ -533,27 +523,84 @@ Error: Invalid ` + "`expect_failures`" + ` reference
You cannot expect failures from local.my_value. You can only expect failures
from checkable objects such as input variables, output values, check blocks,
managed resources and data sources.
`
`,
},
"invalid-module": {
expectedOut: `main.tftest.hcl... fail
run "invalid"... fail
run "test"... skip
actualOut := output.Stdout()
actualErr := output.Stderr()
Failure! 0 passed, 1 failed, 1 skipped.
`,
expectedErr: `
Error: Reference to undeclared input variable
if diff := cmp.Diff(actualOut, expectedOut); len(diff) > 0 {
t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedOut, actualOut, diff)
on setup/main.tf line 3, in resource "test_resource" "setup":
3: value = var.not_real // Oh no!
An input variable with the name "not_real" has not been declared. This
variable can be declared with a variable "not_real" {} block.
`,
},
"missing-provider": {
expectedOut: `main.tftest.hcl... fail
run "passes_validation"... skip
Failure! 0 passed, 0 failed, 1 skipped.
`,
expectedErr: `
Error: Provider configuration not present
To work with test_resource.secondary its original provider configuration at
provider["registry.terraform.io/hashicorp/test"].secondary is required, but
it has been removed. This occurs when a provider configuration is removed
while objects created by that provider still exist in the state. Re-add the
provider configuration to destroy test_resource.secondary, after which you
can remove the provider configuration again.
`,
},
"missing-provider-in-run-block": {
expectedOut: `main.tftest.hcl... fail
run "passes_validation"... fail
Failure! 0 passed, 1 failed.
`,
expectedErr: `
Error: Provider configuration not present
To work with test_resource.secondary its original provider configuration at
provider["registry.terraform.io/hashicorp/test"].secondary is required, but
it has been removed. This occurs when a provider configuration is removed
while objects created by that provider still exist in the state. Re-add the
provider configuration to destroy test_resource.secondary, after which you
can remove the provider configuration again.
`,
},
"missing-provider-in-test-module": {
expectedOut: `main.tftest.hcl... fail
run "passes_validation_primary"... pass
run "passes_validation_secondary"... fail
Failure! 1 passed, 1 failed.
`,
expectedErr: `
Error: Provider configuration not present
To work with test_resource.secondary its original provider configuration at
provider["registry.terraform.io/hashicorp/test"].secondary is required, but
it has been removed. This occurs when a provider configuration is removed
while objects created by that provider still exist in the state. Re-add the
provider configuration to destroy test_resource.secondary, after which you
can remove the provider configuration again.
`,
},
}
if diff := cmp.Diff(actualErr, expectedErr); len(diff) > 0 {
t.Errorf("error didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedErr, actualErr, diff)
}
for file, tc := range tcs {
t.Run(file, func(t *testing.T) {
if provider.ResourceCount() > 0 {
t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString())
}
}
func TestTest_ValidatesLocalModulesBeforeExecution(t *testing.T) {
td := t.TempDir()
testCopyDir(t, testFixturePath(path.Join("test", "invalid-module")), td)
testCopyDir(t, testFixturePath(path.Join("test", file)), td)
defer testChdir(t, td)()
provider := testing_command.NewProvider(nil)
@ -583,32 +630,19 @@ func TestTest_ValidatesLocalModulesBeforeExecution(t *testing.T) {
t.Fatalf("expected status code 0 but got %d: %s", code, ui.ErrorWriter)
}
command := &TestCommand{
c := &TestCommand{
Meta: meta,
}
code := command.Run([]string{"-no-color"})
code := c.Run([]string{"-no-color"})
output := done(t)
if code != 1 {
t.Errorf("expected status code 1 but got %d", code)
}
expectedOut := `
Executed 0 tests.
`
expectedErr := `
Error: Reference to undeclared input variable
on setup/main.tf line 3, in resource "test_resource" "setup":
3: value = var.not_real // Oh no!
An input variable with the name "not_real" has not been declared. This
variable can be declared with a variable "not_real" {} block.
`
actualOut := output.Stdout()
actualErr := output.Stderr()
actualOut, expectedOut := output.Stdout(), tc.expectedOut
actualErr, expectedErr := output.Stderr(), tc.expectedErr
if diff := cmp.Diff(actualOut, expectedOut); len(diff) > 0 {
t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedOut, actualOut, diff)
@ -621,9 +655,7 @@ variable can be declared with a variable "not_real" {} block.
if provider.ResourceCount() > 0 {
t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString())
}
if provider.ResourceCount() > 0 {
t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString())
})
}
}

View File

@ -0,0 +1,17 @@
terraform {
required_providers {
test = {
source = "hashicorp/test"
configuration_aliases = [ test.secondary ]
}
}
}
resource "test_resource" "primary" {
value = "foo"
}
resource "test_resource" "secondary" {
provider = test.secondary
value = "bar"
}

View File

@ -0,0 +1,23 @@
provider "test" {}
provider "test" {
alias = "secondary"
}
run "passes_validation" {
providers = {
test = test
}
assert {
condition = test_resource.primary.value == "foo"
error_message = "primary contains invalid value"
}
assert {
condition = test_resource.secondary.value == "bar"
error_message = "secondary contains invalid value"
}
}

View File

@ -0,0 +1,3 @@
resource "test_resource" "primary" {
value = "foo"
}

View File

@ -0,0 +1,40 @@
provider "test" {}
provider "test" {
alias = "secondary"
}
run "passes_validation_primary" {
providers = {
test = test
}
assert {
condition = test_resource.primary.value == "foo"
error_message = "primary contains invalid value"
}
}
run "passes_validation_secondary" {
providers = {
test = test
}
module {
source = "./setup"
}
assert {
condition = test_resource.primary.value == "foo"
error_message = "primary contains invalid value"
}
assert {
condition = test_resource.secondary.value == "bar"
error_message = "secondary contains invalid value"
}
}

View File

@ -0,0 +1,17 @@
terraform {
required_providers {
test = {
source = "hashicorp/test"
configuration_aliases = [ test.secondary ]
}
}
}
resource "test_resource" "primary" {
value = "foo"
}
resource "test_resource" "secondary" {
provider = test.secondary
value = "bar"
}

View File

@ -0,0 +1,17 @@
terraform {
required_providers {
test = {
source = "hashicorp/test"
configuration_aliases = [ test.secondary ]
}
}
}
resource "test_resource" "primary" {
value = "foo"
}
resource "test_resource" "secondary" {
provider = test.secondary
value = "bar"
}

View File

@ -0,0 +1,14 @@
provider "test" {}
run "passes_validation" {
assert {
condition = test_resource.primary.value == "foo"
error_message = "primary contains invalid value"
}
assert {
condition = test_resource.secondary.value == "bar"
error_message = "secondary contains invalid value"
}
}

View File

@ -0,0 +1,19 @@
terraform {
required_providers {
test = {
source = "hashicorp/test"
configuration_aliases = [test.primary, test.secondary]
}
}
}
resource "test_resource" "primary" {
provider = test.primary
value = "foo"
}
resource "test_resource" "secondary" {
provider = test.secondary
value = "bar"
}

View File

@ -0,0 +1,20 @@
provider "test" {
alias = "primary"
}
provider "test" {
alias = "secondary"
}
run "passes_validation" {
assert {
condition = test_resource.primary.value == "foo"
error_message = "primary contains invalid value"
}
assert {
condition = test_resource.secondary.value == "bar"
error_message = "secondary contains invalid value"
}
}

View File

@ -130,6 +130,7 @@ func (t *TestHuman) Conclusion(suite *moduletest.Suite) {
func (t *TestHuman) File(file *moduletest.File) {
t.view.streams.Printf("%s... %s\n", file.Name, colorizeTestStatus(file.Status, t.view.colorize))
t.Diagnostics(nil, file, file.Diagnostics)
}
func (t *TestHuman) Run(run *moduletest.Run, file *moduletest.File) {
@ -388,6 +389,7 @@ func (t *TestJSON) File(file *moduletest.File) {
"type", json.MessageTestFile,
json.MessageTestFile, json.TestFileStatus{file.Name, json.ToTestStatus(file.Status)},
"@testfile", file.Name)
t.Diagnostics(nil, file, file.Diagnostics)
}
func (t *TestJSON) Run(run *moduletest.Run, file *moduletest.File) {

View File

@ -1,6 +1,9 @@
package moduletest
import "github.com/hashicorp/terraform/internal/configs"
import (
"github.com/hashicorp/terraform/internal/configs"
"github.com/hashicorp/terraform/internal/tfdiags"
)
type File struct {
Config *configs.TestFile
@ -9,4 +12,6 @@ type File struct {
Status Status
Runs []*Run
Diagnostics tfdiags.Diagnostics
}
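
Read together with the hunk context, the File struct now carries its own diagnostics. Reconstructed from the diff (fields elided by the hunks, such as Name, are assumed from their use elsewhere in this commit):

type File struct {
	Config *configs.TestFile

	Name   string
	Status Status

	Runs []*Run

	Diagnostics tfdiags.Diagnostics
}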