testing framework: introduce interrupts for stopping tests (#33477)

* [testing framework] prepare for beta phase of development

* [Testing Framework] Add module block to test run blocks

* [testing framework] allow tests to define and override providers

* testing framework: introduce interrupts for stopping tests

* remove panic handling, will do it properly later
Liam Cervante 2023-07-10 15:53:13 +02:00 committed by GitHub
parent 4b34902fab
commit 4862812c94
9 changed files with 701 additions and 192 deletions
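
At its core, the change wires a two-stage interrupt into `terraform test`: the first signal asks the runner to stop gracefully (remaining runs are marked skipped and state cleanup still happens), while a second signal cancels outright (cleanup is skipped and the leftover resources are reported). Below is a minimal, self-contained sketch of that pattern. It mirrors the runningCtx/stopCtx/cancelCtx trio set up in Run() further down, but the fake workload and all names are illustrative, not part of the commit:

package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"time"
)

func main() {
	// runningCtx closes when the work finishes; stopCtx and cancelCtx play
	// the roles of StoppedCtx and CancelledCtx in the commit.
	runningCtx, done := context.WithCancel(context.Background())
	stopCtx, stop := context.WithCancel(runningCtx)
	cancelCtx, cancel := context.WithCancel(context.Background())
	defer cancel()
	defer stop()

	shutdownCh := make(chan os.Signal, 1)
	signal.Notify(shutdownCh, os.Interrupt)

	go func() {
		defer done()
		for i := 0; i < 5; i++ {
			select {
			case <-cancelCtx.Done():
				return // hard stop: abandon the work immediately
			case <-stopCtx.Done():
				fmt.Println("stopping gracefully, cleaning up")
				return
			case <-time.After(1 * time.Second):
				fmt.Println("working...")
			}
		}
	}()

	select {
	case <-shutdownCh:
		fmt.Println("interrupt received, stopping gracefully")
		stop()
		select {
		case <-shutdownCh:
			fmt.Println("second interrupt, cancelling hard")
			cancel()
			<-runningCtx.Done()
		case <-runningCtx.Done():
		}
	case <-runningCtx.Done():
	}
}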

View File

@@ -1,16 +1,18 @@
package command
import (
"context"
"fmt"
"sort"
"strings"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/backend"
"github.com/hashicorp/terraform/internal/command/arguments"
"github.com/hashicorp/terraform/internal/command/views"
"github.com/hashicorp/terraform/internal/configs"
"github.com/hashicorp/terraform/internal/logging"
"github.com/hashicorp/terraform/internal/moduletest"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/states"
@@ -85,8 +87,72 @@ func (c *TestCommand) Run(rawArgs []string) int {
}(),
}
runningCtx, done := context.WithCancel(context.Background())
stopCtx, stop := context.WithCancel(runningCtx)
cancelCtx, cancel := context.WithCancel(context.Background())
runner := &TestRunner{
command: c,
Suite: &suite,
Config: config,
View: view,
CancelledCtx: cancelCtx,
StoppedCtx: stopCtx,
// Just to be explicit, we'll set the following fields even though they
// default to these values.
Cancelled: false,
Stopped: false,
}
view.Abstract(&suite)
c.ExecuteTestSuite(&suite, config, view)
go func() {
defer logging.PanicHandler()
defer done() // We completed successfully.
defer stop()
defer cancel()
runner.Start()
}()
// Wait for the operation to complete, or for an interrupt to occur.
select {
case <-c.ShutdownCh:
// A graceful stop was requested.
view.Interrupted()
runner.Stopped = true
stop()
select {
case <-c.ShutdownCh:
// The user pressed it again, so now we have to get it to stop as
// fast as possible.
view.FatalInterrupt()
runner.Cancelled = true
cancel()
// TODO(liamcervante): Should we add a timer here? That would mean
// after 5 seconds we just give up and don't even print out the
// lists of resources left behind?
<-runningCtx.Done() // Nothing left to do now but wait.
case <-runningCtx.Done():
// The application finished nicely after the request was stopped.
}
case <-runningCtx.Done():
// tests finished normally with no interrupts.
}
if runner.Cancelled {
// Don't print out the conclusion if the test was cancelled.
return 1
}
view.Conclusion(&suite)
if suite.Status != moduletest.Pass {
@@ -95,50 +161,74 @@ func (c *TestCommand) Run(rawArgs []string) int {
return 0
}
func (c *TestCommand) ExecuteTestSuite(suite *moduletest.Suite, config *configs.Config, view views.Test) {
var diags tfdiags.Diagnostics
// test runner
opts, err := c.contextOpts()
diags = diags.Append(err)
if err != nil {
suite.Status = suite.Status.Merge(moduletest.Error)
view.Diagnostics(nil, nil, diags)
return
}
type TestRunner struct {
command *TestCommand
ctx, ctxDiags := terraform.NewContext(opts)
diags = diags.Append(ctxDiags)
if ctxDiags.HasErrors() {
suite.Status = suite.Status.Merge(moduletest.Error)
view.Diagnostics(nil, nil, diags)
return
}
view.Diagnostics(nil, nil, diags) // Print out any warnings from the setup.
Suite *moduletest.Suite
Config *configs.Config
View views.Test
// Stopped and Cancelled track whether the user requested the testing
// process to be interrupted. Stopped is a graceful exit: we'll still
// tidy up any state that was created and mark the tests with relevant
// `skipped` status updates. Cancelled is a hard, stop-right-now exit: we
// won't attempt to clean up any state left hanging, and tests will just
// be left showing `pending` as the status. We will still print out the
// destroy summary diagnostics that tell the user what state has been
// left behind and needs manual cleanup.
Stopped bool
Cancelled bool
// StoppedCtx and CancelledCtx allow in progress Terraform operations to
// respond to external calls from the test command.
StoppedCtx context.Context
CancelledCtx context.Context
}
func (runner *TestRunner) Start() {
var files []string
for name := range suite.Files {
for name := range runner.Suite.Files {
files = append(files, name)
}
sort.Strings(files) // execute the files in alphabetical order
suite.Status = moduletest.Pass
runner.Suite.Status = moduletest.Pass
for _, name := range files {
file := suite.Files[name]
c.ExecuteTestFile(ctx, file, config, view)
if runner.Cancelled {
return
}
suite.Status = suite.Status.Merge(file.Status)
file := runner.Suite.Files[name]
runner.ExecuteTestFile(file)
runner.Suite.Status = runner.Suite.Status.Merge(file.Status)
}
}
func (c *TestCommand) ExecuteTestFile(ctx *terraform.Context, file *moduletest.File, config *configs.Config, view views.Test) {
func (runner *TestRunner) ExecuteTestFile(file *moduletest.File) {
mgr := new(TestStateManager)
mgr.c = c
mgr.runner = runner
mgr.State = states.NewState()
defer mgr.cleanupStates(ctx, view, file, config)
defer mgr.cleanupStates(file)
file.Status = file.Status.Merge(moduletest.Pass)
for _, run := range file.Runs {
if runner.Cancelled {
// This means a hard stop has been requested; in this case we don't
// even stop to mark future tests as having been skipped. They'll
// just show up as pending in the printed summary.
return
}
if runner.Stopped {
// Then the test was requested to be stopped, so we just mark each
// following test as skipped and move on.
run.Status = moduletest.Skip
continue
}
if file.Status == moduletest.Error {
// If the overall test file has errored, we don't keep trying to
// execute tests. Instead, we mark all remaining run blocks as
@@ -150,95 +240,52 @@ func (c *TestCommand) ExecuteTestFile(ctx *terraform.Context, file *moduletest.F
if run.Config.ConfigUnderTest != nil {
// Then we want to execute a different module under a kind of
// sandbox.
state := c.ExecuteTestRun(ctx, run, file, states.NewState(), run.Config.ConfigUnderTest)
state := runner.ExecuteTestRun(run, file, states.NewState(), run.Config.ConfigUnderTest)
mgr.States = append(mgr.States, &TestModuleState{
State: state,
Run: run,
})
} else {
mgr.State = c.ExecuteTestRun(ctx, run, file, mgr.State, config)
mgr.State = runner.ExecuteTestRun(run, file, mgr.State, runner.Config)
}
file.Status = file.Status.Merge(run.Status)
}
view.File(file)
runner.View.File(file)
for _, run := range file.Runs {
view.Run(run, file)
runner.View.Run(run, file)
}
}
func (c *TestCommand) ExecuteTestRun(ctx *terraform.Context, run *moduletest.Run, file *moduletest.File, state *states.State, config *configs.Config) *states.State {
// Since we don't want to modify the actual plan and apply operations for
// tests where possible, we insert provider blocks directly into the config
// under test for each test run.
//
// This function transforms the config under test by inserting relevant
// provider blocks. It returns a reset function which restores the config
// back to the original state.
cfgReset, cfgDiags := config.TransformForTest(run.Config, file.Config)
defer cfgReset()
run.Diagnostics = run.Diagnostics.Append(cfgDiags)
if cfgDiags.HasErrors() {
run.Status = moduletest.Error
func (runner *TestRunner) ExecuteTestRun(run *moduletest.Run, file *moduletest.File, state *states.State, config *configs.Config) *states.State {
if runner.Cancelled {
// Don't do anything, just give up and return immediately.
// The surrounding functions should stop this from even being called,
// but in case of race conditions or similar we can still verify this.
return state
}
var targets []addrs.Targetable
for _, target := range run.Config.Options.Target {
addr, diags := addrs.ParseTarget(target)
run.Diagnostics = run.Diagnostics.Append(diags)
if diags.HasErrors() {
run.Status = moduletest.Error
return state
}
targets = append(targets, addr.Subject)
if runner.Stopped {
// Basically the same as above, except we'll be a bit nicer and mark
// the run as skipped rather than leaving it pending.
run.Status = moduletest.Skip
return state
}
var replaces []addrs.AbsResourceInstance
for _, replace := range run.Config.Options.Replace {
addr, diags := addrs.ParseAbsResourceInstance(replace)
run.Diagnostics = run.Diagnostics.Append(diags)
if diags.HasErrors() {
run.Status = moduletest.Error
return state
}
if addr.Resource.Resource.Mode != addrs.ManagedResourceMode {
run.Diagnostics = run.Diagnostics.Append(hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "can only target managed resources for forced replacements",
Detail: addr.String(),
Subject: replace.SourceRange().Ptr(),
})
return state
}
replaces = append(replaces, addr)
}
variables, diags := c.GetInputValues(run.Config.Variables, file.Config.Variables, config)
targets, diags := run.GetTargets()
run.Diagnostics = run.Diagnostics.Append(diags)
replaces, diags := run.GetReplaces()
run.Diagnostics = run.Diagnostics.Append(diags)
references, diags := run.GetReferences()
run.Diagnostics = run.Diagnostics.Append(diags)
if diags.HasErrors() {
run.Status = moduletest.Error
return state
}
var references []*addrs.Reference
for _, assert := range run.Config.CheckRules {
for _, variable := range assert.Condition.Variables() {
reference, diags := addrs.ParseRef(variable)
run.Diagnostics = run.Diagnostics.Append(diags)
references = append(references, reference)
}
}
if run.Diagnostics.HasErrors() {
run.Status = moduletest.Error
return state
}
plan, diags := ctx.Plan(config, state, &terraform.PlanOpts{
ctx, plan, state, diags := runner.execute(run, file, config, state, &terraform.PlanOpts{
Mode: func() plans.Mode {
switch run.Config.Options.Mode {
case configs.RefreshOnlyTestMode:
@@ -247,12 +294,32 @@ func (c *TestCommand) ExecuteTestRun(ctx *terraform.Context, run *moduletest.Run
return plans.NormalMode
}
}(),
SetVariables: variables,
Targets: targets,
ForceReplace: replaces,
SkipRefresh: !run.Config.Options.Refresh,
ExternalReferences: references,
})
}, run.Config.Command)
run.Diagnostics = run.Diagnostics.Append(diags)
if runner.Cancelled {
// Print out the diagnostics from the run now. Since the run was
// cancelled, the normal set of diagnostics will not be printed otherwise.
runner.View.Diagnostics(run, file, run.Diagnostics)
run.Status = moduletest.Error
return state
}
if diags.HasErrors() {
run.Status = moduletest.Error
return state
}
if runner.Stopped {
run.Status = moduletest.Skip
return state
}
variables, diags := buildInputVariablesForAssertions(run, file, config)
run.Diagnostics = run.Diagnostics.Append(diags)
if diags.HasErrors() {
run.Status = moduletest.Error
@@ -260,13 +327,6 @@ func (c *TestCommand) ExecuteTestRun(ctx *terraform.Context, run *moduletest.Run
}
if run.Config.Command == configs.ApplyTestCommand {
state, diags = ctx.Apply(plan, config)
run.Diagnostics = run.Diagnostics.Append(diags)
if diags.HasErrors() {
run.Status = moduletest.Error
return state
}
ctx.TestContext(config, state, plan, variables).EvaluateAgainstState(run)
return state
}
@@ -275,98 +335,185 @@ func (c *TestCommand) ExecuteTestRun(ctx *terraform.Context, run *moduletest.Run
return state
}
func (c *TestCommand) GetInputValues(locals map[string]hcl.Expression, globals map[string]hcl.Expression, config *configs.Config) (terraform.InputValues, tfdiags.Diagnostics) {
variables := make(map[string]hcl.Expression)
for name := range config.Module.Variables {
if expr, exists := locals[name]; exists {
// Local variables take precedence.
variables[name] = expr
continue
}
if expr, exists := globals[name]; exists {
// If it's not set locally, it may be set globally.
variables[name] = expr
continue
}
// If it's not set at all, that might be okay: the variable may be
// optional, so we'll just not add anything to the map.
// execute executes Terraform plan and apply operations for the given arguments.
//
// The command argument decides whether it executes only a plan or also
// applies the plan it creates.
func (runner *TestRunner) execute(run *moduletest.Run, file *moduletest.File, config *configs.Config, state *states.State, opts *terraform.PlanOpts, command configs.TestCommand) (*terraform.Context, *plans.Plan, *states.State, tfdiags.Diagnostics) {
if opts.Mode == plans.DestroyMode && state.Empty() {
// Nothing to do!
return nil, nil, state, nil
}
unparsed := make(map[string]backend.UnparsedVariableValue)
for key, value := range variables {
unparsed[key] = unparsedVariableValueExpression{
expr: value,
sourceType: terraform.ValueFromConfig,
}
}
return backend.ParseVariableValues(unparsed, config.Module.Variables)
}
func (c *TestCommand) cleanupState(ctx *terraform.Context, view views.Test, run *moduletest.Run, file *moduletest.File, config *configs.Config, state *states.State) {
if state.Empty() {
// Nothing to do.
return
}
var locals, globals map[string]hcl.Expression
identifier := file.Name
if run != nil {
locals = run.Config.Variables
}
if file != nil {
globals = file.Config.Variables
identifier = fmt.Sprintf("%s/%s", identifier, run.Name)
}
var cfgDiags tfdiags.Diagnostics
// First, transform the config for the given test run and test file.
var diags tfdiags.Diagnostics
if run == nil {
cfgReset, diags := config.TransformForTest(nil, file.Config)
defer cfgReset()
cfgDiags = cfgDiags.Append(diags)
reset, cfgDiags := config.TransformForTest(nil, file.Config)
defer reset()
diags = diags.Append(cfgDiags)
} else {
cfgReset, diags := config.TransformForTest(run.Config, file.Config)
defer cfgReset()
cfgDiags = cfgDiags.Append(diags)
reset, cfgDiags := config.TransformForTest(run.Config, file.Config)
defer reset()
diags = diags.Append(cfgDiags)
}
if cfgDiags.HasErrors() {
// This shouldn't really trigger, as we will have applied this transform
// earlier and it will have worked, so a problem now would be strange.
// To be safe, we'll handle it anyway.
view.DestroySummary(cfgDiags, run, file, state)
return
if diags.HasErrors() {
return nil, nil, state, diags
}
c.View.Diagnostics(cfgDiags)
variables, variableDiags := c.GetInputValues(locals, globals, config)
// Second, gather any variables and give them to the plan options.
variables, variableDiags := buildInputVariablesForTest(run, file, config)
diags = diags.Append(variableDiags)
if variableDiags.HasErrors() {
// This shouldn't really trigger, as we will have created something
// using these variables at an earlier stage, so for them to have a
// problem now would be strange. But just to be safe, we'll handle this.
view.DestroySummary(variableDiags, run, file, state)
return
return nil, nil, state, diags
}
view.Diagnostics(nil, file, variableDiags)
opts.SetVariables = variables
plan, planDiags := ctx.Plan(config, state, &terraform.PlanOpts{
Mode: plans.DestroyMode,
SetVariables: variables,
})
if planDiags.HasErrors() {
// This is bad: we need to tell the user that we couldn't clean up,
// and they need to go and manually delete some resources.
view.DestroySummary(planDiags, run, file, state)
return
// Third, execute planning stage.
tfCtxOpts, err := runner.command.contextOpts()
diags = diags.Append(err)
if err != nil {
return nil, nil, state, diags
}
view.Diagnostics(nil, file, planDiags) // Print out any warnings from the destroy plan.
finalState, applyDiags := ctx.Apply(plan, config)
view.DestroySummary(applyDiags, run, file, finalState)
tfCtx, ctxDiags := terraform.NewContext(tfCtxOpts)
diags = diags.Append(ctxDiags)
if ctxDiags.HasErrors() {
return nil, nil, state, diags
}
runningCtx, done := context.WithCancel(context.Background())
var plan *plans.Plan
var planDiags tfdiags.Diagnostics
go func() {
defer done()
plan, planDiags = tfCtx.Plan(config, state, opts)
}()
waitDiags, cancelled := runner.wait(tfCtx, runningCtx, opts, identifier)
planDiags = planDiags.Append(waitDiags)
diags = diags.Append(planDiags)
if planDiags.HasErrors() || command == configs.PlanTestCommand {
// Either the plan errored, or we only wanted to see the plan. Either
// way, just return what we have: the plan and diagnostics from making
// it, and the unchanged state.
return tfCtx, plan, state, diags
}
if cancelled {
// If the execution was cancelled during the plan, we'll exit here to
// stop the plan from being applied and taking more time.
return tfCtx, plan, state, diags
}
// Fourth, execute apply stage.
tfCtx, ctxDiags = terraform.NewContext(tfCtxOpts)
diags = diags.Append(ctxDiags)
if ctxDiags.HasErrors() {
return nil, nil, state, diags
}
runningCtx, done = context.WithCancel(context.Background())
var updated *states.State
var applyDiags tfdiags.Diagnostics
go func() {
defer done()
updated, applyDiags = tfCtx.Apply(plan, config)
}()
waitDiags, _ = runner.wait(tfCtx, runningCtx, opts, identifier)
applyDiags = applyDiags.Append(waitDiags)
diags = diags.Append(applyDiags)
return tfCtx, plan, updated, diags
}
func (runner *TestRunner) wait(ctx *terraform.Context, runningCtx context.Context, opts *terraform.PlanOpts, identifier string) (diags tfdiags.Diagnostics, cancelled bool) {
select {
case <-runner.StoppedCtx.Done():
if opts.Mode != plans.DestroyMode {
// It takes more impetus from the user to cancel the cleanup
// operations, so we only do this during the actual tests.
cancelled = true
go ctx.Stop()
}
select {
case <-runner.CancelledCtx.Done():
// If the user still really wants to cancel, then we'll oblige
// even during the destroy mode at this point.
if opts.Mode == plans.DestroyMode {
cancelled = true
go ctx.Stop()
}
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Terraform Test Interrupted",
fmt.Sprintf("Terraform test was interrupted while executing %s. This means resources that were created during the test may have been left active, please monitor the rest of the output closely as any dangling resources will be listed.", identifier)))
// It would actually be quite disastrous if we exited early at this
// point, as it would mean we'd have created resources that we
// haven't tracked at all. So for now, we won't ever actually
// forcibly terminate the test. When cancelled, we make the
// cleanup faster by not performing it, but we should still
// always manage to give an accurate list of the resources left
// alive.
// TODO(liamcervante): Consider adding a timer here, so that we
// exit early even if that means some resources are just lost
// forever.
<-runningCtx.Done() // Just wait for things to finish now.
case <-runningCtx.Done():
// The operation exited nicely when asked!
}
case <-runner.CancelledCtx.Done():
// This shouldn't really happen, as we'd expect to see the StoppedCtx
// being triggered first. But, just in case.
cancelled = true
go ctx.Stop()
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Terraform Test Interrupted",
fmt.Sprintf("Terraform test was interrupted while executing %s. This means resources that were created during the test may have been left active, please monitor the rest of the output closely as any dangling resources will be listed.", identifier)))
// It would actually be quite disastrous if we exited early at this
// point, as it would mean we'd have created resources that we
// haven't tracked at all. So for now, we won't ever actually
// forcibly terminate the test. When cancelled, we make the
// cleanup faster by not performing it, but we should still
// always manage to give an accurate list of the resources left
// alive.
// TODO(liamcervante): Consider adding a timer here, so that we
// exit early even if that means some resources are just lost
// forever.
<-runningCtx.Done() // Just wait for things to finish now.
case <-runningCtx.Done():
// The operation exited normally.
}
return diags, cancelled
}
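The execute/wait pairing above reduces to a small reusable shape: start the blocking operation in a goroutine that closes a running context when it returns, then select on the stop, cancel, and running contexts, asking the operation to wind down when interrupted (the real code calls tfCtx.Stop()). A stripped-down, hedged sketch of that shape, omitting the destroy-mode special casing and using illustrative names:

package sketch

import "context"

// runInterruptibly is an illustrative reduction of execute() plus wait():
// op blocks until the operation completes, and stopOp asks it to wind down
// early, standing in for tfCtx.Stop().
func runInterruptibly(stoppedCtx, cancelledCtx context.Context, op, stopOp func()) (cancelled bool) {
	runningCtx, done := context.WithCancel(context.Background())
	go func() {
		defer done()
		op() // e.g. tfCtx.Plan or tfCtx.Apply
	}()

	select {
	case <-stoppedCtx.Done():
		cancelled = true
		go stopOp()
		// Still wait for the operation: abandoning it would lose track of
		// any resources it has already created.
		<-runningCtx.Done()
	case <-cancelledCtx.Done():
		cancelled = true
		go stopOp()
		<-runningCtx.Done()
	case <-runningCtx.Done():
		// The operation finished on its own.
	}
	return cancelled
}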
// state management
// TestStateManager is a helper struct to maintain the various state objects
// that a test file has to keep track of.
type TestStateManager struct {
c *TestCommand
runner *TestRunner
// State is the main state of the module under test during a single test
// file execution. This state will be updated by every run block without
@@ -387,22 +534,132 @@ type TestModuleState struct {
// State is the state after the module executed.
State *states.State
// File is the config for the file containing the Run.
File *moduletest.File
// Run is the config for the given run block, that contains the config
// under test and the variable values.
Run *moduletest.Run
}
func (manager *TestStateManager) cleanupStates(ctx *terraform.Context, view views.Test, file *moduletest.File, config *configs.Config) {
func (manager *TestStateManager) cleanupStates(file *moduletest.File) {
if manager.runner.Cancelled {
// We are still going to print out the resources that we have left
// even though the user asked for an immediate exit.
var diags tfdiags.Diagnostics
diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Test cleanup skipped due to immediate exit", "Terraform could not clean up the state left behind due to an immediate interrupt."))
manager.runner.View.DestroySummary(diags, nil, file, manager.State)
for _, module := range manager.States {
manager.runner.View.DestroySummary(diags, module.Run, file, module.State)
}
return
}
// First, we'll clean up the main state.
manager.c.cleanupState(ctx, view, nil, file, config, manager.State)
_, _, state, diags := manager.runner.execute(nil, file, manager.runner.Config, manager.State, &terraform.PlanOpts{
Mode: plans.DestroyMode,
}, configs.ApplyTestCommand)
manager.runner.View.DestroySummary(diags, nil, file, state)
// Then we'll clean up the additional states for custom modules in reverse
// order.
for ix := len(manager.States); ix > 0; ix-- {
state := manager.States[ix-1]
manager.c.cleanupState(ctx, view, state.Run, file, state.Run.Config.ConfigUnderTest, state.State)
module := manager.States[ix-1]
if manager.runner.Cancelled {
// In case the cancellation came while a previous state was being
// destroyed.
manager.runner.View.DestroySummary(diags, module.Run, file, module.State)
continue
}
_, _, state, diags := manager.runner.execute(module.Run, file, module.Run.Config.ConfigUnderTest, module.State, &terraform.PlanOpts{
Mode: plans.DestroyMode,
}, configs.ApplyTestCommand)
manager.runner.View.DestroySummary(diags, module.Run, file, state)
}
}
// helper functions
// buildInputVariablesForTest creates a terraform.InputValues mapping for
// variable values that are relevant to the config being tested.
//
// Crucially, it differs from buildInputVariablesForAssertions in that it only
// includes variables that are referenced by the config and not everything that
// is defined within the test run block and test file.
func buildInputVariablesForTest(run *moduletest.Run, file *moduletest.File, config *configs.Config) (terraform.InputValues, tfdiags.Diagnostics) {
variables := make(map[string]hcl.Expression)
for name := range config.Module.Variables {
if run != nil {
if expr, exists := run.Config.Variables[name]; exists {
// Local variables take precedence.
variables[name] = expr
continue
}
}
if file != nil {
if expr, exists := file.Config.Variables[name]; exists {
// If it's not set locally, it may be set globally.
variables[name] = expr
continue
}
}
// If it's not set at all, that might be okay: the variable may be
// optional, so we'll just not add anything to the map.
}
unparsed := make(map[string]backend.UnparsedVariableValue)
for key, value := range variables {
unparsed[key] = unparsedVariableValueExpression{
expr: value,
sourceType: terraform.ValueFromConfig,
}
}
return backend.ParseVariableValues(unparsed, config.Module.Variables)
}
// buildInputVariablesForAssertions creates a terraform.InputValues mapping that
// contains all the variables defined for a given run and file, alongside any
// unset variables that have defaults within the provided config.
//
// Crucially, it differs from buildInputVariablesForTest in that the returned
// input values include all variables available even if they are not defined
// within the config.
//
// This does mean the returned diags might contain warnings about variables not
// defined within the config. We might want to remove these warnings in the
// future, since it is actually okay for test files to have variables defined
// outside the configuration.
func buildInputVariablesForAssertions(run *moduletest.Run, file *moduletest.File, config *configs.Config) (terraform.InputValues, tfdiags.Diagnostics) {
merged := make(map[string]hcl.Expression)
if run != nil {
for name, expr := range run.Config.Variables {
merged[name] = expr
}
}
if file != nil {
for name, expr := range file.Config.Variables {
if _, exists := merged[name]; exists {
// Then this variable was defined at the run level and we want
// that value to take precedence.
continue
}
merged[name] = expr
}
}
unparsed := make(map[string]backend.UnparsedVariableValue)
for key, value := range merged {
unparsed[key] = unparsedVariableValueExpression{
expr: value,
sourceType: terraform.ValueFromConfig,
}
}
return backend.ParseVariableValues(unparsed, config.Module.Variables)
}
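To make the contrast between the two helpers concrete, here is a simplified, hedged sketch of the two merge strategies using plain string maps. The real functions operate on hcl.Expression values and feed the result through backend.ParseVariableValues; everything below is illustrative:

package main

import "fmt"

// mergeForTest mirrors buildInputVariablesForTest: only variables declared
// in the config are considered, with run-level values taking precedence
// over file-level ones.
func mergeForTest(declared []string, runVars, fileVars map[string]string) map[string]string {
	out := make(map[string]string)
	for _, name := range declared {
		if v, ok := runVars[name]; ok {
			out[name] = v
			continue
		}
		if v, ok := fileVars[name]; ok {
			out[name] = v
		}
	}
	return out
}

// mergeForAssertions mirrors buildInputVariablesForAssertions: every run and
// file variable is included, whether or not the config declares it.
func mergeForAssertions(runVars, fileVars map[string]string) map[string]string {
	out := make(map[string]string)
	for name, v := range fileVars {
		out[name] = v
	}
	for name, v := range runVars { // run-level values win
		out[name] = v
	}
	return out
}

func main() {
	declared := []string{"interrupts"}
	runVars := map[string]string{"interrupts": "2"}
	fileVars := map[string]string{"interrupts": "0", "extra": "only-for-assertions"}
	fmt.Println(mergeForTest(declared, runVars, fileVars)) // map[interrupts:2]
	fmt.Println(mergeForAssertions(runVars, fileVars))     // map[extra:only-for-assertions interrupts:2]
}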

View File

@@ -137,6 +137,72 @@ func TestTest(t *testing.T) {
}
}
func TestTest_Interrupt(t *testing.T) {
td := t.TempDir()
testCopyDir(t, testFixturePath(path.Join("test", "with_interrupt")), td)
defer testChdir(t, td)()
provider := testing_command.NewProvider(nil)
view, done := testView(t)
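// The same channel serves as both the provider's interrupt trigger and the
// command's ShutdownCh, so the provider can simulate the user pressing
// ctrl+c at a deterministic point mid-apply.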
interrupt := make(chan struct{})
provider.Interrupt = interrupt
c := &TestCommand{
Meta: Meta{
testingOverrides: metaOverridesForProvider(provider.Provider),
View: view,
ShutdownCh: interrupt,
},
}
c.Run(nil)
output := done(t).All()
if !strings.Contains(output, "Interrupt received") {
t.Errorf("output didn't produce the right output:\n\n%s", output)
}
if provider.ResourceCount() > 0 {
// we asked for a nice stop in this one, so it should still have tidied everything up.
t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString())
}
}
func TestTest_DoubleInterrupt(t *testing.T) {
td := t.TempDir()
testCopyDir(t, testFixturePath(path.Join("test", "with_double_interrupt")), td)
defer testChdir(t, td)()
provider := testing_command.NewProvider(nil)
view, done := testView(t)
interrupt := make(chan struct{})
provider.Interrupt = interrupt
c := &TestCommand{
Meta: Meta{
testingOverrides: metaOverridesForProvider(provider.Provider),
View: view,
ShutdownCh: interrupt,
},
}
c.Run(nil)
output := done(t).All()
if !strings.Contains(output, "Terraform Test Interrupted") {
t.Errorf("output didn't produce the right output:\n\n%s", output)
}
// This time the test command shouldn't have cleaned up the resource because
// of the hard interrupt.
if provider.ResourceCount() != 3 {
// we asked for a nice stop in this one, so it should still have tidied everything up.
t.Errorf("should not have deleted all resources on completion but left %v", provider.ResourceString())
}
}
func TestTest_ProviderAlias(t *testing.T) {
td := t.TempDir()
testCopyDir(t, testFixturePath(path.Join("test", "with_provider_alias")), td)

View File

@@ -0,0 +1,25 @@
variable "interrupts" {
type = number
}
resource "test_resource" "primary" {
value = "primary"
}
resource "test_resource" "secondary" {
value = "secondary"
interrupt_count = var.interrupts
depends_on = [
test_resource.primary
]
}
resource "test_resource" "tertiary" {
value = "tertiary"
depends_on = [
test_resource.secondary
]
}

View File

@@ -0,0 +1,17 @@
variables {
interrupts = 0
}
run "primary" {
}
run "secondary" {
variables {
interrupts = 2
}
}
run "tertiary" {
}

View File

@@ -0,0 +1,25 @@
variable "interrupts" {
type = number
}
resource "test_resource" "primary" {
value = "primary"
}
resource "test_resource" "secondary" {
value = "secondary"
interrupt_count = var.interrupts
depends_on = [
test_resource.primary
]
}
resource "test_resource" "tertiary" {
value = "tertiary"
depends_on = [
test_resource.secondary
]
}

View File

@@ -0,0 +1,17 @@
variables {
interrupts = 0
}
run "primary" {
}
run "secondary" {
variables {
interrupts = 1
}
}
run "tertiary" {
}

View File

@@ -28,8 +28,9 @@ var (
"test_resource": {
Block: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Optional: true, Computed: true},
"value": {Type: cty.String, Optional: true},
"id": {Type: cty.String, Optional: true, Computed: true},
"value": {Type: cty.String, Optional: true},
"interrupt_count": {Type: cty.Number, Optional: true},
},
},
},
@@ -38,8 +39,9 @@ var (
"test_data_source": {
Block: &configschema.Block{
Attributes: map[string]*configschema.Attribute{
"id": {Type: cty.String, Required: true},
"value": {Type: cty.String, Computed: true},
"id": {Type: cty.String, Required: true},
"value": {Type: cty.String, Computed: true},
"interrupt_count": {Type: cty.Number, Computed: true},
},
},
},
@@ -54,6 +56,8 @@ type TestProvider struct {
data, resource cty.Value
Interrupt chan<- struct{}
Store *ResourceStore
}
@@ -207,6 +211,14 @@ func (provider *TestProvider) ApplyResourceChange(request providers.ApplyResourc
resource = cty.ObjectVal(vals)
}
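// If the planned resource sets interrupt_count, fire that many interrupts
// through the shared channel so tests can simulate the user pressing
// ctrl+c at this exact point during the apply.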
interrupts := resource.GetAttr("interrupt_count")
if !interrupts.IsNull() && interrupts.IsKnown() && provider.Interrupt != nil {
count, _ := interrupts.AsBigFloat().Int64()
for ix := 0; ix < int(count); ix++ {
provider.Interrupt <- struct{}{}
}
}
provider.Store.Put(provider.GetResourceKey(id.AsString()), resource)
return providers.ApplyResourceChangeResponse{
NewState: resource,

View File

@@ -38,6 +38,14 @@ type Test interface {
// Diagnostics prints out the provided diagnostics.
Diagnostics(run *moduletest.Run, file *moduletest.File, diags tfdiags.Diagnostics)
// Interrupted prints out a message stating that an interrupt has been
// received and testing will stop.
Interrupted()
// FatalInterrupt prints out a message stating that a hard interrupt has
// been received, so testing will stop immediately and cleanup will be skipped.
FatalInterrupt()
}
func NewTest(vt arguments.ViewType, view *View) Test {
@@ -139,13 +147,21 @@ func (t *TestHuman) Diagnostics(_ *moduletest.Run, _ *moduletest.File, diags tfd
t.view.Diagnostics(diags)
}
func (t *TestHuman) Interrupted() {
t.view.streams.Print(interrupted)
}
func (t *TestHuman) FatalInterrupt() {
t.view.streams.Print(fatalInterrupt)
}
type TestJSON struct {
view *JSONView
}
var _ Test = (*TestJSON)(nil)
func (t TestJSON) Abstract(suite *moduletest.Suite) {
func (t *TestJSON) Abstract(suite *moduletest.Suite) {
var fileCount, runCount int
abstract := json.TestSuiteAbstract{}
@@ -176,7 +192,7 @@ func (t TestJSON) Abstract(suite *moduletest.Suite) {
json.MessageTestAbstract, abstract)
}
func (t TestJSON) Conclusion(suite *moduletest.Suite) {
func (t *TestJSON) Conclusion(suite *moduletest.Suite) {
summary := json.TestSuiteSummary{
Status: json.ToTestStatus(suite.Status),
}
@@ -225,7 +241,7 @@ func (t TestJSON) Conclusion(suite *moduletest.Suite) {
json.MessageTestSummary, summary)
}
func (t TestJSON) File(file *moduletest.File) {
func (t *TestJSON) File(file *moduletest.File) {
t.view.log.Info(
fmt.Sprintf("%s... %s", file.Name, testStatus(file.Status)),
"type", json.MessageTestFile,
@@ -233,7 +249,7 @@ func (t TestJSON) File(file *moduletest.File) {
"@testfile", file.Name)
}
func (t TestJSON) Run(run *moduletest.Run, file *moduletest.File) {
func (t *TestJSON) Run(run *moduletest.Run, file *moduletest.File) {
t.view.log.Info(
fmt.Sprintf(" %q... %s", run.Name, testStatus(run.Status)),
"type", json.MessageTestRun,
@@ -244,7 +260,7 @@ func (t TestJSON) Run(run *moduletest.Run, file *moduletest.File) {
t.Diagnostics(run, file, run.Diagnostics)
}
func (t TestJSON) DestroySummary(diags tfdiags.Diagnostics, run *moduletest.Run, file *moduletest.File, state *states.State) {
func (t *TestJSON) DestroySummary(diags tfdiags.Diagnostics, run *moduletest.Run, file *moduletest.File, state *states.State) {
if state.HasManagedResourceInstanceObjects() {
cleanup := json.TestFileCleanup{}
for _, resource := range state.AllResourceInstanceObjectAddrs() {
@@ -274,7 +290,7 @@ func (t TestJSON) DestroySummary(diags tfdiags.Diagnostics, run *moduletest.Run,
t.Diagnostics(run, file, diags)
}
func (t TestJSON) Diagnostics(run *moduletest.Run, file *moduletest.File, diags tfdiags.Diagnostics) {
func (t *TestJSON) Diagnostics(run *moduletest.Run, file *moduletest.File, diags tfdiags.Diagnostics) {
var metadata []interface{}
if file != nil {
metadata = append(metadata, "@testfile", file.Name)
@@ -285,6 +301,14 @@ func (t TestJSON) Diagnostics(run *moduletest.Run, file *moduletest.File, diags
t.view.Diagnostics(diags, metadata...)
}
func (t *TestJSON) Interrupted() {
t.view.Log(interrupted)
}
func (t *TestJSON) FatalInterrupt() {
t.view.Log(fatalInterrupt)
}
func colorizeTestStatus(status moduletest.Status, color *colorstring.Colorize) string {
switch status {
case moduletest.Error, moduletest.Fail:

View File

@@ -19,6 +19,72 @@ type Run struct {
Diagnostics tfdiags.Diagnostics
}
func (run *Run) GetTargets() ([]addrs.Targetable, tfdiags.Diagnostics) {
var diagnostics tfdiags.Diagnostics
var targets []addrs.Targetable
for _, target := range run.Config.Options.Target {
addr, diags := addrs.ParseTarget(target)
diagnostics = diagnostics.Append(diags)
if addr != nil {
targets = append(targets, addr.Subject)
}
}
return targets, diagnostics
}
func (run *Run) GetReplaces() ([]addrs.AbsResourceInstance, tfdiags.Diagnostics) {
var diagnostics tfdiags.Diagnostics
var replaces []addrs.AbsResourceInstance
for _, replace := range run.Config.Options.Replace {
addr, diags := addrs.ParseAbsResourceInstance(replace)
diagnostics = diagnostics.Append(diags)
if diags.HasErrors() {
continue
}
if addr.Resource.Resource.Mode != addrs.ManagedResourceMode {
diagnostics = diagnostics.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Can only target managed resources for forced replacements.",
Detail: addr.String(),
Subject: replace.SourceRange().Ptr(),
})
continue
}
replaces = append(replaces, addr)
}
return replaces, diagnostics
}
func (run *Run) GetReferences() ([]*addrs.Reference, tfdiags.Diagnostics) {
var diagnostics tfdiags.Diagnostics
var references []*addrs.Reference
for _, rule := range run.Config.CheckRules {
for _, variable := range rule.Condition.Variables() {
reference, diags := addrs.ParseRef(variable)
diagnostics = diagnostics.Append(diags)
if reference != nil {
references = append(references, reference)
}
}
for _, variable := range rule.ErrorMessage.Variables() {
reference, diags := addrs.ParseRef(variable)
diagnostics = diagnostics.Append(diags)
if reference != nil {
references = append(references, reference)
}
}
}
return references, diagnostics
}
// ValidateExpectedFailures steps through the provided diagnostics (which should
// be the result of a plan or an apply operation), and does 3 things:
// 1. Removes diagnostics that match the expected failures from the config.
@@ -35,8 +101,8 @@ type Run struct {
// already processing the diagnostics from check blocks in here anyway.
//
// The way the function works out which diagnostics are relevant to expected
// failures is by using the tfdiags.ValuedDiagnostic functionality to detect
// which diagnostics were generated by custom conditions. Terraform adds the
// failures is by using the tfdiags Extra functionality to detect which
// diagnostics were generated by custom conditions. Terraform adds the
// addrs.CheckRule that generated each diagnostic to the diagnostic itself so we
// can tell which diagnostics can be expected.
func (run *Run) ValidateExpectedFailures(originals tfdiags.Diagnostics) tfdiags.Diagnostics {