test framework: include transformed config before validating config (#33608)

Liam Cervante 2023-08-01 09:47:00 +02:00 committed by GitHub
parent 84edaaed57
commit 4560a83721
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 442 additions and 226 deletions
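In short, validation now happens per test file, after config.TransformForTest has applied the file's provider blocks to the configuration under test, and any problems are recorded on file.Diagnostics (printed by the views alongside the file's status) instead of in a single up-front runner.Validate() pass. The sketch below is a simplified outline of that flow, not the committed code: the package name, function name, and the opts parameter are placeholders, error handling is condensed, and it only compiles inside the Terraform module.

// Simplified sketch of the per-file validation flow this commit introduces
// (condensed from the internal/command changes below; placeholders noted).
package sketch

import (
	"github.com/hashicorp/terraform/internal/configs"
	"github.com/hashicorp/terraform/internal/moduletest"
	"github.com/hashicorp/terraform/internal/terraform"
)

// validateFileSketch is a hypothetical helper name; opts stands in for the
// runner's real context options.
func validateFileSketch(config *configs.Config, file *moduletest.File, opts *terraform.ContextOpts) {
	// Apply the provider blocks from the .tftest.hcl file to the module under
	// test before validating, so providers declared only in the test file are
	// visible to the validate walk.
	reset, transformDiags := config.TransformForTest(nil, file.Config)
	defer reset()

	file.Diagnostics = file.Diagnostics.Append(transformDiags)
	if transformDiags.HasErrors() {
		file.Status = moduletest.Error
		return
	}

	tfCtx, ctxDiags := terraform.NewContext(opts)
	file.Diagnostics = file.Diagnostics.Append(ctxDiags)
	if ctxDiags.HasErrors() {
		file.Status = moduletest.Error
		return
	}

	// Validate the transformed configuration and attach the results to the
	// file; the test views print file.Diagnostics when reporting the file.
	validateDiags := tfCtx.Validate(config)
	file.Diagnostics = file.Diagnostics.Append(validateDiags)
	if validateDiags.HasErrors() {
		file.Status = moduletest.Error
	}
}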

View File

@@ -233,23 +233,6 @@ func (c *TestCommand) Run(rawArgs []string) int {
 		defer stop()
 		defer cancel()
 
-		// Validate the main config first.
-		validateDiags := runner.Validate()
-
-		// Print out any warnings or errors from the validation.
-		view.Diagnostics(nil, nil, validateDiags)
-		if validateDiags.HasErrors() {
-			// Don't try and run the tests if the validation actually failed.
-			// We'll also leave the test status as pending as we actually made
-			// no effort to run the tests.
-			return
-		}
-
-		if runner.Stopped || runner.Cancelled {
-			suite.Status = moduletest.Error
-			return
-		}
-
 		runner.Start(variables)
 	}()
@@ -328,64 +311,6 @@ type TestRunner struct {
 	Verbose bool
 }
 
-func (runner *TestRunner) Validate() tfdiags.Diagnostics {
-	log.Printf("[TRACE] TestRunner: Validating configuration.")
-
-	var diags tfdiags.Diagnostics
-	diags = diags.Append(runner.validateConfig(runner.Config))
-
-	if runner.Cancelled || runner.Stopped {
-		return diags
-	}
-
-	// We've validated the main configuration under test. We now need to
-	// validate any other modules that are being executed by the test files.
-	//
-	// We only validate modules that are sourced locally, we're making an
-	// assumption that any remote modules were properly vetted and tested before
-	// being used in our tests.
-
-	validatedModules := make(map[string]bool)
-	for _, file := range runner.Suite.Files {
-		for _, run := range file.Runs {
-			if runner.Cancelled || runner.Stopped {
-				return diags
-			}
-
-			// While we're here, also do a quick validation of the config of the
-			// actual run block.
-			diags = diags.Append(run.Config.Validate())
-
-			// If the run block is executing another local module, we should
-			// validate that before we try and run it.
-			if run.Config.ConfigUnderTest != nil {
-				if _, ok := run.Config.Module.Source.(addrs.ModuleSourceLocal); !ok {
-					// If it's not a local module, we're not going to validate
-					// it. The idea here is that if we're retrieving this module
-					// from the registry it's not the job of this run of the
-					// testing framework to test it. We should assume it's
-					// working correctly.
-					continue
-				}
-
-				if validated := validatedModules[run.Config.Module.Source.String()]; validated {
-					// We've validated this local module before, so don't do
-					// it again.
-					continue
-				}
-				validatedModules[run.Config.Module.Source.String()] = true
-
-				diags = diags.Append(runner.validateConfig(run.Config.ConfigUnderTest))
-			}
-		}
-	}
-
-	return diags
-}
-
 func (runner *TestRunner) Start(globals map[string]backend.UnparsedVariableValue) {
 	var files []string
 	for name := range runner.Suite.Files {
@@ -408,21 +333,32 @@ func (runner *TestRunner) Start(globals map[string]backend.UnparsedVariableValue
 func (runner *TestRunner) ExecuteTestFile(file *moduletest.File, globals map[string]backend.UnparsedVariableValue) {
 	log.Printf("[TRACE] TestRunner: executing test file %s", file.Name)
 
+	printAll := func() {
+		runner.View.File(file)
+		for _, run := range file.Runs {
+			runner.View.Run(run, file)
+		}
+	}
+
 	mgr := new(TestStateManager)
 	mgr.runner = runner
 	mgr.State = states.NewState()
 
 	// We're going to check if the cleanupStates function call will actually
 	// work before we start the test.
-	diags := mgr.prepare(file, globals)
-	if diags.HasErrors() || runner.Cancelled || runner.Stopped {
-		file.Status = moduletest.Error
-		runner.View.File(file)
-		runner.View.Diagnostics(nil, file, diags)
+	mgr.prepare(file, globals)
+	if runner.Cancelled {
+		return // Don't print anything just stop.
+	}
+
+	if file.Diagnostics.HasErrors() || runner.Stopped {
+		// We can't run this file, but we still want to do nice printing.
 		for _, run := range file.Runs {
+			// The prepare function doesn't touch the run blocks, so we'll
+			// update those so they make sense.
 			run.Status = moduletest.Skip
-			runner.View.Run(run, file)
 		}
+		printAll()
 		return
 	}
@@ -468,11 +404,7 @@ func (runner *TestRunner) ExecuteTestFile(file *moduletest.File, globals map[str
 		file.Status = file.Status.Merge(run.Status)
 	}
 
-	runner.View.File(file)
-	runner.View.Diagnostics(nil, file, diags) // Print out any warnings from the preparation.
-
-	for _, run := range file.Runs {
-		runner.View.Run(run, file)
-	}
+	printAll()
 }

func (runner *TestRunner) ExecuteTestRun(mgr *TestStateManager, run *moduletest.Run, file *moduletest.File, state *states.State, config *configs.Config, globals map[string]backend.UnparsedVariableValue) *states.State {
@@ -519,7 +451,12 @@ func (runner *TestRunner) ExecuteTestRun(mgr *TestStateManager, run *moduletest.
 		SkipRefresh:        !run.Config.Options.Refresh,
 		ExternalReferences: references,
 	}, run.Config.Command, globals)
-	diags = run.ValidateExpectedFailures(diags)
+	if plan != nil {
+		// If the returned plan is nil, then something went wrong before
+		// we could even attempt to plan or apply the expected failures, so we
+		// won't validate them if the plan is nil.
+		diags = run.ValidateExpectedFailures(diags)
+	}
 	run.Diagnostics = run.Diagnostics.Append(diags)
 
 	if runner.Cancelled {
@@ -586,21 +523,32 @@ func (runner *TestRunner) ExecuteTestRun(mgr *TestStateManager, run *moduletest.
 	return state
 }
 
-func (runner *TestRunner) validateConfig(config *configs.Config) tfdiags.Diagnostics {
-	log.Printf("[TRACE] TestRunner: validating specific config %s", config.Path)
-	var diags tfdiags.Diagnostics
+func (runner *TestRunner) validateFile(file *moduletest.File) {
+	log.Printf("[TRACE] TestRunner: validating config for %s", file.Name)
+
+	config := runner.Config
+
+	reset, transformDiags := config.TransformForTest(nil, file.Config)
+	defer reset()
+
+	file.Diagnostics = file.Diagnostics.Append(transformDiags)
+	if transformDiags.HasErrors() {
+		file.Status = moduletest.Error
+		return
+	}
 
 	tfCtxOpts, err := runner.command.contextOpts()
-	diags = diags.Append(err)
+	file.Diagnostics = file.Diagnostics.Append(err)
 	if err != nil {
-		return diags
+		file.Status = moduletest.Error
+		return
 	}
 
 	tfCtx, ctxDiags := terraform.NewContext(tfCtxOpts)
-	diags = diags.Append(ctxDiags)
+	file.Diagnostics = file.Diagnostics.Append(ctxDiags)
 	if ctxDiags.HasErrors() {
-		return diags
+		file.Status = moduletest.Error
+		return
 	}
 
 	runningCtx, done := context.WithCancel(context.Background())
@@ -609,17 +557,22 @@ func (runner *TestRunner) validateConfig(config *configs.Config) tfdiags.Diagnos
 	go func() {
 		defer logging.PanicHandler()
 		defer done()
-		validateDiags = tfCtx.Validate(config)
+
+		log.Printf("[DEBUG] TestRunner: starting validate for %s", file.Name)
+		validateDiags = tfCtx.Validate(config)
+		log.Printf("[DEBUG] TestRunner: completed validate for %s", file.Name)
 	}()
 
-	// We don't need to pass in any metadata here, as we're only validating
-	// so if something is cancelled it doesn't matter. We only pass in the
-	// metadata so we can print context around the cancellation which we don't
-	// need to do in this case.
-	waitDiags, _ := runner.wait(tfCtx, runningCtx, nil, nil, nil, nil)
-
-	diags = diags.Append(validateDiags)
-	diags = diags.Append(waitDiags)
-	return diags
+	// We don't pass in a manager or any created resources here since we are
+	// only validating. If something goes wrong, there will be no state we need
+	// to worry about cleaning up manually. So the manager and created resources
+	// can be empty.
+	waitDiags, _ := runner.wait(tfCtx, runningCtx, nil, nil, file, nil)
+
+	file.Diagnostics = file.Diagnostics.Append(validateDiags)
+	file.Diagnostics = file.Diagnostics.Append(waitDiags)
+
+	if validateDiags.HasErrors() || waitDiags.HasErrors() {
+		file.Status = moduletest.Error
+	}
 }
// execute executes Terraform plan and apply operations for the given arguments.
@@ -639,9 +592,19 @@ func (runner *TestRunner) execute(mgr *TestStateManager, run *moduletest.Run, fi
 		return nil, nil, state, nil
 	}
 
-	// First, transform the config for the given test run and test file.
 	var diags tfdiags.Diagnostics
+
+	// First, do a quick validation of the run blocks config.
+	if run != nil {
+		diags = diags.Append(run.Config.Validate())
+		if diags.HasErrors() {
+			return nil, nil, state, diags
+		}
+	}
+
+	// Second, transform the config for the given test run and test file.
 	if run == nil {
 		reset, cfgDiags := config.TransformForTest(nil, file.Config)
 		defer reset()
@@ -655,16 +618,7 @@ func (runner *TestRunner) execute(mgr *TestStateManager, run *moduletest.Run, fi
 		return nil, nil, state, diags
 	}
 
-	// Second, gather any variables and give them to the plan options.
-	variables, variableDiags := buildInputVariablesForTest(run, file, config, globals)
-	diags = diags.Append(variableDiags)
-	if variableDiags.HasErrors() {
-		return nil, nil, state, diags
-	}
-	opts.SetVariables = variables
-
-	// Third, execute planning stage.
+	// Third, do a full validation of the now transformed config.
 	tfCtxOpts, err := runner.command.contextOpts()
 	diags = diags.Append(err)
@@ -680,6 +634,52 @@ func (runner *TestRunner) execute(mgr *TestStateManager, run *moduletest.Run, fi
 
 	runningCtx, done := context.WithCancel(context.Background())
 
+	var validateDiags tfdiags.Diagnostics
+	go func() {
+		defer logging.PanicHandler()
+		defer done()
+
+		log.Printf("[DEBUG] TestRunner: starting validate for %s", identifier)
+		validateDiags = tfCtx.Validate(config)
+		log.Printf("[DEBUG] TestRunner: completed validate for %s", identifier)
+	}()
+	waitDiags, cancelled := runner.wait(tfCtx, runningCtx, mgr, run, file, nil)
+	validateDiags = validateDiags.Append(waitDiags)
+	diags = diags.Append(validateDiags)
+	if validateDiags.HasErrors() {
+		// Either the plan errored, or we only wanted to see the plan. Either
+		// way, just return what we have: The plan and diagnostics from making
+		// it and the unchanged state.
+		return tfCtx, nil, state, diags
+	}
+
+	if cancelled {
+		log.Printf("[DEBUG] TestRunner: skipping plan and apply stage for %s due to cancellation", identifier)
+		// If the execution was cancelled during the plan, we'll exit here to
+		// stop the plan being applied and using more time.
+		return tfCtx, nil, state, diags
+	}
+
+	// Fourth, gather any variables and give them to the plan options.
+	variables, variableDiags := buildInputVariablesForTest(run, file, config, globals)
+	diags = diags.Append(variableDiags)
+	if variableDiags.HasErrors() {
+		return nil, nil, state, diags
+	}
+	opts.SetVariables = variables
+
+	// Fifth, execute planning stage.
+	tfCtx, ctxDiags = terraform.NewContext(tfCtxOpts)
+	diags = diags.Append(ctxDiags)
+	if ctxDiags.HasErrors() {
+		return nil, nil, state, diags
+	}
+
+	runningCtx, done = context.WithCancel(context.Background())
+
 	var plan *plans.Plan
 	var planDiags tfdiags.Diagnostics
 	go func() {
@@ -690,7 +690,7 @@ func (runner *TestRunner) execute(mgr *TestStateManager, run *moduletest.Run, fi
 		plan, planDiags = tfCtx.Plan(config, state, opts)
 		log.Printf("[DEBUG] TestRunner: completed plan for %s", identifier)
 	}()
-	waitDiags, cancelled := runner.wait(tfCtx, runningCtx, mgr, run, file, nil)
+	waitDiags, cancelled = runner.wait(tfCtx, runningCtx, mgr, run, file, nil)
 	planDiags = planDiags.Append(waitDiags)
 	diags = diags.Append(planDiags)
 
@@ -721,7 +721,7 @@ func (runner *TestRunner) execute(mgr *TestStateManager, run *moduletest.Run, fi
 	}
 	diags = filteredDiags
 
-	// Fourth, execute apply stage.
+	// Sixth, execute apply stage.
 	tfCtx, ctxDiags = terraform.NewContext(tfCtxOpts)
 	diags = diags.Append(ctxDiags)
 	if ctxDiags.HasErrors() {
@@ -879,15 +879,22 @@ type TestModuleState struct {
 // successfully execute all our run blocks and then find we cannot perform any
 // cleanup. We want to use this function to check that our cleanup can happen
 // using only the information available within the file.
-func (manager *TestStateManager) prepare(file *moduletest.File, globals map[string]backend.UnparsedVariableValue) tfdiags.Diagnostics {
-	// For now, the only thing we care about is making sure all the required
-	// variables have values.
+func (manager *TestStateManager) prepare(file *moduletest.File, globals map[string]backend.UnparsedVariableValue) {
+	// First, we're going to check we have definitions for variables at the
+	// file level.
 	_, diags := buildInputVariablesForTest(nil, file, manager.runner.Config, globals)
-
-	// Return the sum of diagnostics that might indicate a problem for any
-	// future attempted cleanup.
-	return diags
+	file.Diagnostics = file.Diagnostics.Append(diags)
+	if diags.HasErrors() {
+		file.Status = moduletest.Error
+	}
+
+	// Second, we'll validate that the default provider configurations actually
+	// pass a validate operation.
+	manager.runner.validateFile(file)
 }
 
 func (manager *TestStateManager) cleanupStates(file *moduletest.File, globals map[string]backend.UnparsedVariableValue) {

View File

@@ -120,6 +120,10 @@ func TestTest(t *testing.T) {
 			expected: "0 passed, 1 failed.",
 			code:     1,
 		},
+		"no_providers_in_main": {
+			expected: "1 passed, 0 failed",
+			code:     0,
+		},
 	}
 	for name, tc := range tcs {
 		t.Run(name, func(t *testing.T) {
@@ -500,31 +504,17 @@ Success! 2 passed, 0 failed.
 }
 
 func TestTest_ValidatesBeforeExecution(t *testing.T) {
-	td := t.TempDir()
-	testCopyDir(t, testFixturePath(path.Join("test", "invalid")), td)
-	defer testChdir(t, td)()
-
-	provider := testing_command.NewProvider(nil)
-	view, done := testView(t)
-
-	c := &TestCommand{
-		Meta: Meta{
-			testingOverrides: metaOverridesForProvider(provider.Provider),
-			View:             view,
-		},
-	}
-
-	code := c.Run([]string{"-verbose", "-no-color"})
-	output := done(t)
-
-	if code != 1 {
-		t.Errorf("expected status code 1 but got %d", code)
-	}
-
-	expectedOut := `
-Executed 0 tests.
-`
-	expectedErr := `
+	tcs := map[string]struct {
+		expectedOut string
+		expectedErr string
+	}{
+		"invalid": {
+			expectedOut: `main.tftest.hcl... fail
+  run "invalid"... fail
+
+Failure! 0 passed, 1 failed.
+`,
+			expectedErr: `
 Error: Invalid ` + "`expect_failures`" + ` reference
 
   on main.tftest.hcl line 5, in run "invalid":
@@ -533,71 +523,16 @@ Error: Invalid ` + "`expect_failures`" + ` reference
 You cannot expect failures from local.my_value. You can only expect failures
 from checkable objects such as input variables, output values, check blocks,
 managed resources and data sources.
-`
-
-	actualOut := output.Stdout()
-	actualErr := output.Stderr()
-
-	if diff := cmp.Diff(actualOut, expectedOut); len(diff) > 0 {
-		t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedOut, actualOut, diff)
-	}
-
-	if diff := cmp.Diff(actualErr, expectedErr); len(diff) > 0 {
-		t.Errorf("error didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedErr, actualErr, diff)
-	}
-
-	if provider.ResourceCount() > 0 {
-		t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString())
-	}
-}
-
-func TestTest_ValidatesLocalModulesBeforeExecution(t *testing.T) {
-	td := t.TempDir()
-	testCopyDir(t, testFixturePath(path.Join("test", "invalid-module")), td)
-	defer testChdir(t, td)()
-
-	provider := testing_command.NewProvider(nil)
-	providerSource, close := newMockProviderSource(t, map[string][]string{
-		"test": {"1.0.0"},
-	})
-	defer close()
-
-	streams, done := terminal.StreamsForTesting(t)
-	view := views.NewView(streams)
-	ui := new(cli.MockUi)
-
-	meta := Meta{
-		testingOverrides: metaOverridesForProvider(provider.Provider),
-		Ui:               ui,
-		View:             view,
-		Streams:          streams,
-		ProviderSource:   providerSource,
-	}
-
-	init := &InitCommand{
-		Meta: meta,
-	}
-
-	if code := init.Run(nil); code != 0 {
-		t.Fatalf("expected status code 0 but got %d: %s", code, ui.ErrorWriter)
-	}
-
-	command := &TestCommand{
-		Meta: meta,
-	}
-
-	code := command.Run([]string{"-no-color"})
-	output := done(t)
-
-	if code != 1 {
-		t.Errorf("expected status code 1 but got %d", code)
-	}
-
-	expectedOut := `
-Executed 0 tests.
-`
-	expectedErr := `
+`,
+		},
+		"invalid-module": {
+			expectedOut: `main.tftest.hcl... fail
+  run "invalid"... fail
+  run "test"... skip
+
+Failure! 0 passed, 1 failed, 1 skipped.
+`,
+			expectedErr: `
 Error: Reference to undeclared input variable
 
   on setup/main.tf line 3, in resource "test_resource" "setup":
@@ -605,25 +540,122 @@ Error: Reference to undeclared input variable
 An input variable with the name "not_real" has not been declared. This
 variable can be declared with a variable "not_real" {} block.
-`
-
-	actualOut := output.Stdout()
-	actualErr := output.Stderr()
-
-	if diff := cmp.Diff(actualOut, expectedOut); len(diff) > 0 {
-		t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedOut, actualOut, diff)
-	}
-
-	if diff := cmp.Diff(actualErr, expectedErr); len(diff) > 0 {
-		t.Errorf("error didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedErr, actualErr, diff)
-	}
-
-	if provider.ResourceCount() > 0 {
-		t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString())
-	}
-	if provider.ResourceCount() > 0 {
-		t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString())
-	}
-}
+`,
+		},
+		"missing-provider": {
+			expectedOut: `main.tftest.hcl... fail
+  run "passes_validation"... skip
+
+Failure! 0 passed, 0 failed, 1 skipped.
+`,
+			expectedErr: `
+Error: Provider configuration not present
+
+To work with test_resource.secondary its original provider configuration at
+provider["registry.terraform.io/hashicorp/test"].secondary is required, but
+it has been removed. This occurs when a provider configuration is removed
+while objects created by that provider still exist in the state. Re-add the
+provider configuration to destroy test_resource.secondary, after which you
+can remove the provider configuration again.
+`,
+		},
+		"missing-provider-in-run-block": {
+			expectedOut: `main.tftest.hcl... fail
+  run "passes_validation"... fail
+
+Failure! 0 passed, 1 failed.
+`,
+			expectedErr: `
+Error: Provider configuration not present
+
+To work with test_resource.secondary its original provider configuration at
+provider["registry.terraform.io/hashicorp/test"].secondary is required, but
+it has been removed. This occurs when a provider configuration is removed
+while objects created by that provider still exist in the state. Re-add the
+provider configuration to destroy test_resource.secondary, after which you
+can remove the provider configuration again.
+`,
		},
+		"missing-provider-in-test-module": {
+			expectedOut: `main.tftest.hcl... fail
+  run "passes_validation_primary"... pass
+  run "passes_validation_secondary"... fail
+
+Failure! 1 passed, 1 failed.
+`,
+			expectedErr: `
+Error: Provider configuration not present
+
+To work with test_resource.secondary its original provider configuration at
+provider["registry.terraform.io/hashicorp/test"].secondary is required, but
+it has been removed. This occurs when a provider configuration is removed
+while objects created by that provider still exist in the state. Re-add the
+provider configuration to destroy test_resource.secondary, after which you
+can remove the provider configuration again.
+`,
+		},
+	}
+
+	for file, tc := range tcs {
+		t.Run(file, func(t *testing.T) {
+
+			td := t.TempDir()
+			testCopyDir(t, testFixturePath(path.Join("test", file)), td)
+			defer testChdir(t, td)()
+
+			provider := testing_command.NewProvider(nil)
+
+			providerSource, close := newMockProviderSource(t, map[string][]string{
+				"test": {"1.0.0"},
+			})
+			defer close()
+
+			streams, done := terminal.StreamsForTesting(t)
+			view := views.NewView(streams)
+			ui := new(cli.MockUi)
+
+			meta := Meta{
+				testingOverrides: metaOverridesForProvider(provider.Provider),
+				Ui:               ui,
+				View:             view,
+				Streams:          streams,
+				ProviderSource:   providerSource,
+			}
+
+			init := &InitCommand{
+				Meta: meta,
+			}
+
+			if code := init.Run(nil); code != 0 {
+				t.Fatalf("expected status code 0 but got %d: %s", code, ui.ErrorWriter)
+			}
+
+			c := &TestCommand{
+				Meta: meta,
+			}
+
+			code := c.Run([]string{"-no-color"})
+			output := done(t)
+
+			if code != 1 {
+				t.Errorf("expected status code 1 but got %d", code)
+			}
+
+			actualOut, expectedOut := output.Stdout(), tc.expectedOut
+			actualErr, expectedErr := output.Stderr(), tc.expectedErr
+
+			if diff := cmp.Diff(actualOut, expectedOut); len(diff) > 0 {
+				t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedOut, actualOut, diff)
+			}
+
+			if diff := cmp.Diff(actualErr, expectedErr); len(diff) > 0 {
+				t.Errorf("error didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedErr, actualErr, diff)
+			}
+
+			if provider.ResourceCount() > 0 {
+				t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString())
+			}
+		})
+	}
+}

View File

@@ -0,0 +1,17 @@
terraform {
required_providers {
test = {
source = "hashicorp/test"
configuration_aliases = [ test.secondary ]
}
}
}
resource "test_resource" "primary" {
value = "foo"
}
resource "test_resource" "secondary" {
provider = test.secondary
value = "bar"
}

View File

@@ -0,0 +1,23 @@
provider "test" {}
provider "test" {
alias = "secondary"
}
run "passes_validation" {
providers = {
test = test
}
assert {
condition = test_resource.primary.value == "foo"
error_message = "primary contains invalid value"
}
assert {
condition = test_resource.secondary.value == "bar"
error_message = "secondary contains invalid value"
}
}

View File

@@ -0,0 +1,3 @@
resource "test_resource" "primary" {
value = "foo"
}

View File

@@ -0,0 +1,40 @@
provider "test" {}
provider "test" {
alias = "secondary"
}
run "passes_validation_primary" {
providers = {
test = test
}
assert {
condition = test_resource.primary.value == "foo"
error_message = "primary contains invalid value"
}
}
run "passes_validation_secondary" {
providers = {
test = test
}
module {
source = "./setup"
}
assert {
condition = test_resource.primary.value == "foo"
error_message = "primary contains invalid value"
}
assert {
condition = test_resource.secondary.value == "bar"
error_message = "secondary contains invalid value"
}
}

View File

@@ -0,0 +1,17 @@
terraform {
required_providers {
test = {
source = "hashicorp/test"
configuration_aliases = [ test.secondary ]
}
}
}
resource "test_resource" "primary" {
value = "foo"
}
resource "test_resource" "secondary" {
provider = test.secondary
value = "bar"
}

View File

@@ -0,0 +1,17 @@
terraform {
required_providers {
test = {
source = "hashicorp/test"
configuration_aliases = [ test.secondary ]
}
}
}
resource "test_resource" "primary" {
value = "foo"
}
resource "test_resource" "secondary" {
provider = test.secondary
value = "bar"
}

View File

@@ -0,0 +1,14 @@
provider "test" {}
run "passes_validation" {
assert {
condition = test_resource.primary.value == "foo"
error_message = "primary contains invalid value"
}
assert {
condition = test_resource.secondary.value == "bar"
error_message = "secondary contains invalid value"
}
}

View File

@@ -0,0 +1,19 @@
terraform {
required_providers {
test = {
source = "hashicorp/test"
configuration_aliases = [test.primary, test.secondary]
}
}
}
resource "test_resource" "primary" {
provider = test.primary
value = "foo"
}
resource "test_resource" "secondary" {
provider = test.secondary
value = "bar"
}

View File

@@ -0,0 +1,20 @@
provider "test" {
alias = "primary"
}
provider "test" {
alias = "secondary"
}
run "passes_validation" {
assert {
condition = test_resource.primary.value == "foo"
error_message = "primary contains invalid value"
}
assert {
condition = test_resource.secondary.value == "bar"
error_message = "secondary contains invalid value"
}
}

View File

@@ -130,6 +130,7 @@ func (t *TestHuman) Conclusion(suite *moduletest.Suite) {
 
 func (t *TestHuman) File(file *moduletest.File) {
 	t.view.streams.Printf("%s... %s\n", file.Name, colorizeTestStatus(file.Status, t.view.colorize))
+	t.Diagnostics(nil, file, file.Diagnostics)
 }
 
 func (t *TestHuman) Run(run *moduletest.Run, file *moduletest.File) {
@@ -388,6 +389,7 @@ func (t *TestJSON) File(file *moduletest.File) {
 		"type", json.MessageTestFile,
 		json.MessageTestFile, json.TestFileStatus{file.Name, json.ToTestStatus(file.Status)},
 		"@testfile", file.Name)
+	t.Diagnostics(nil, file, file.Diagnostics)
 }
 
 func (t *TestJSON) Run(run *moduletest.Run, file *moduletest.File) {
func (t *TestJSON) Run(run *moduletest.Run, file *moduletest.File) { func (t *TestJSON) Run(run *moduletest.Run, file *moduletest.File) {

View File

@@ -1,6 +1,9 @@
 package moduletest
 
-import "github.com/hashicorp/terraform/internal/configs"
+import (
+	"github.com/hashicorp/terraform/internal/configs"
+	"github.com/hashicorp/terraform/internal/tfdiags"
+)
 
 type File struct {
 	Config *configs.TestFile
@@ -9,4 +12,6 @@ type File struct {
 	Status Status
 
 	Runs []*Run
+
+	Diagnostics tfdiags.Diagnostics
 }