2024-02-08 03:48:59 -06:00
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
2023-06-28 02:37:42 -05:00
package command
import (
2024-12-10 08:34:25 -06:00
"bytes"
2023-07-10 08:53:13 -05:00
"context"
"fmt"
2023-07-26 03:11:27 -05:00
"log"
2023-07-19 03:07:46 -05:00
"path"
2023-06-28 02:37:42 -05:00
"sort"
"strings"
2023-07-19 03:31:32 -05:00
"time"
2023-06-28 02:37:42 -05:00
2023-08-07 07:42:07 -05:00
"github.com/hashicorp/hcl/v2"
2023-08-01 02:59:29 -05:00
"github.com/zclconf/go-cty/cty"
2023-08-07 07:42:07 -05:00
"golang.org/x/exp/slices"
2023-08-01 02:59:29 -05:00
2023-09-20 06:35:35 -05:00
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/backend"
"github.com/opentofu/opentofu/internal/command/arguments"
"github.com/opentofu/opentofu/internal/command/views"
"github.com/opentofu/opentofu/internal/configs"
2024-03-07 07:55:57 -06:00
"github.com/opentofu/opentofu/internal/encryption"
2023-09-20 06:35:35 -05:00
"github.com/opentofu/opentofu/internal/logging"
"github.com/opentofu/opentofu/internal/moduletest"
"github.com/opentofu/opentofu/internal/plans"
"github.com/opentofu/opentofu/internal/states"
2024-12-10 08:34:25 -06:00
"github.com/opentofu/opentofu/internal/states/statefile"
2023-09-20 06:35:35 -05:00
"github.com/opentofu/opentofu/internal/tfdiags"
2023-09-20 07:16:53 -05:00
"github.com/opentofu/opentofu/internal/tofu"
2023-06-28 02:37:42 -05:00
)
2023-08-07 07:42:07 -05:00
const (
	// MainStateIdentifier is the key under which the state of the main
	// configuration under test is tracked. Run blocks that test an alternate
	// module use that module's source string as their key instead (see
	// ExecuteTestFile), so the empty string is reserved for the main state.
	MainStateIdentifier = ""
)
2023-06-28 02:37:42 -05:00
// TestCommand is the implementation of the "tofu test" command. It embeds
// Meta for the shared CLI plumbing (views, variable handling, config loading).
type TestCommand struct {
	Meta
}
// Help returns the long-form usage text for the "tofu test" command.
//
// Fixes relative to the previous revision: the -var and -var-file entries
// were listed twice (once before and once after -verbose); the duplicate
// block has been removed. The misspelling "consolodation" has also been
// corrected to "consolidation" in both -consolidate-* descriptions.
func (c *TestCommand) Help() string {
	helpText := `
Usage: tofu [global options] test [options]

  Executes automated integration tests against the current OpenTofu
  configuration.

  OpenTofu will search for .tftest.hcl files within the current configuration
  and testing directories. OpenTofu will then execute the testing run blocks
  within any testing files in order, and verify conditional checks and
  assertions against the created infrastructure.

  This command creates real infrastructure and will attempt to clean up the
  testing infrastructure on completion. Monitor the output carefully to ensure
  this cleanup process is successful.

Options:

  -compact-warnings       If OpenTofu produces any warnings that are not
                          accompanied by errors, show them in a more compact
                          form that includes only the summary messages.

  -consolidate-warnings   If OpenTofu produces any warnings, no consolidation
                          will be performed. All locations, for all warnings
                          will be listed. Enabled by default.

  -consolidate-errors     If OpenTofu produces any errors, no consolidation
                          will be performed. All locations, for all errors
                          will be listed. Disabled by default

  -filter=testfile        If specified, OpenTofu will only execute the test files
                          specified by this flag. You can use this option multiple
                          times to execute more than one test file.

  -json                   If specified, machine readable output will be printed in
                          JSON format

  -no-color               If specified, output won't contain any color.

  -test-directory=path    Set the OpenTofu test directory, defaults to "tests". When set, the
                          test command will search for test files in the current directory and
                          in the one specified by the flag.

  -var 'foo=bar'          Set a value for one of the input variables in the root
                          module of the configuration. Use this option more than
                          once to set more than one variable.

  -var-file=filename      Load variable values from the given file, in addition
                          to the default files terraform.tfvars and *.auto.tfvars.
                          Use this option more than once to include more than one
                          variables file.

  -verbose                Print the plan or state for each test run block as it
                          executes.
`
	return strings.TrimSpace(helpText)
}
func ( c * TestCommand ) Synopsis ( ) string {
2023-09-21 07:38:46 -05:00
return "Execute integration tests for OpenTofu modules"
2023-06-28 02:37:42 -05:00
}
func ( c * TestCommand ) Run ( rawArgs [ ] string ) int {
var diags tfdiags . Diagnostics
2024-11-12 17:19:45 -06:00
ctx := c . CommandContext ( )
2023-06-28 02:37:42 -05:00
2023-07-19 03:07:46 -05:00
common , rawArgs := arguments . ParseView ( rawArgs )
2023-06-28 02:37:42 -05:00
c . View . Configure ( common )
2023-07-19 03:07:46 -05:00
args , diags := arguments . ParseTest ( rawArgs )
if diags . HasErrors ( ) {
c . View . Diagnostics ( diags )
c . View . HelpPrompt ( "test" )
return 1
}
2023-06-28 02:37:42 -05:00
2023-07-19 03:07:46 -05:00
view := views . NewTest ( args . ViewType , c . View )
2024-06-24 08:13:07 -05:00
// Users can also specify variables via the command line, so we'll parse
// all that here.
var items [ ] rawFlag
for _ , variable := range args . Vars . All ( ) {
items = append ( items , rawFlag {
Name : variable . Name ,
Value : variable . Value ,
} )
}
c . variableArgs = rawFlags { items : & items }
2024-10-02 13:11:42 -05:00
variables , variableDiags := c . collectVariableValuesWithTests ( args . TestDirectory )
2024-06-24 08:13:07 -05:00
diags = diags . Append ( variableDiags )
if variableDiags . HasErrors ( ) {
view . Diagnostics ( nil , nil , diags )
return 1
}
2023-07-19 03:07:46 -05:00
config , configDiags := c . loadConfigWithTests ( "." , args . TestDirectory )
2023-06-28 02:37:42 -05:00
diags = diags . Append ( configDiags )
if configDiags . HasErrors ( ) {
2023-07-06 08:53:18 -05:00
view . Diagnostics ( nil , nil , diags )
2023-06-28 02:37:42 -05:00
return 1
}
2023-07-26 03:11:27 -05:00
runCount := 0
fileCount := 0
2023-07-19 03:07:46 -05:00
var fileDiags tfdiags . Diagnostics
2023-06-28 02:37:42 -05:00
suite := moduletest . Suite {
Files : func ( ) map [ string ] * moduletest . File {
files := make ( map [ string ] * moduletest . File )
2023-07-19 03:07:46 -05:00
if len ( args . Filter ) > 0 {
for _ , name := range args . Filter {
file , ok := config . Module . Tests [ name ]
if ! ok {
// If the filter is invalid, we'll simply skip this
// entry and print a warning. But we could still execute
// any other tests within the filter.
fileDiags . Append ( tfdiags . Sourceless (
tfdiags . Warning ,
"Unknown test file" ,
fmt . Sprintf ( "The specified test file, %s, could not be found." , name ) ) )
continue
}
2023-07-26 03:11:27 -05:00
fileCount ++
2023-07-19 03:07:46 -05:00
var runs [ ] * moduletest . Run
for ix , run := range file . Runs {
runs = append ( runs , & moduletest . Run {
Config : run ,
Index : ix ,
Name : run . Name ,
} )
}
2023-07-26 03:11:27 -05:00
runCount += len ( runs )
2023-07-19 03:07:46 -05:00
files [ name ] = & moduletest . File {
Config : file ,
Name : name ,
Runs : runs ,
}
}
return files
}
// Otherwise, we'll just do all the tests in the directory!
2023-06-28 02:37:42 -05:00
for name , file := range config . Module . Tests {
2023-07-26 03:11:27 -05:00
fileCount ++
2023-06-28 02:37:42 -05:00
var runs [ ] * moduletest . Run
2023-07-19 03:07:46 -05:00
for ix , run := range file . Runs {
2023-06-28 02:37:42 -05:00
runs = append ( runs , & moduletest . Run {
Config : run ,
2023-07-19 03:07:46 -05:00
Index : ix ,
2023-06-28 02:37:42 -05:00
Name : run . Name ,
} )
}
2023-07-26 03:11:27 -05:00
runCount += len ( runs )
2023-06-28 02:37:42 -05:00
files [ name ] = & moduletest . File {
Config : file ,
Name : name ,
Runs : runs ,
}
}
return files
} ( ) ,
}
2023-07-26 03:11:27 -05:00
log . Printf ( "[DEBUG] TestCommand: found %d files with %d run blocks" , fileCount , runCount )
2023-07-19 03:07:46 -05:00
diags = diags . Append ( fileDiags )
if fileDiags . HasErrors ( ) {
view . Diagnostics ( nil , nil , diags )
return 1
}
2023-08-07 07:42:07 -05:00
opts , err := c . contextOpts ( )
if err != nil {
diags = diags . Append ( err )
view . Diagnostics ( nil , nil , diags )
return 1
}
2024-03-07 07:55:57 -06:00
// Don't use encryption during testing
opts . Encryption = encryption . Disabled ( )
2023-07-26 03:11:27 -05:00
// Print out all the diagnostics we have from the setup. These will just be
// warnings, and we want them out of the way before we start the actual
// testing.
view . Diagnostics ( nil , nil , diags )
2023-07-19 03:31:32 -05:00
// We have two levels of interrupt here. A 'stop' and a 'cancel'. A 'stop'
// is a soft request to stop. We'll finish the current test, do the tidy up,
// but then skip all remaining tests and run blocks. A 'cancel' is a hard
// request to stop now. We'll cancel the current operation immediately
// even if it's a delete operation, and we won't clean up any infrastructure
// if we're halfway through a test. We'll print details explaining what was
// stopped so the user can do their best to recover from it.
2024-11-12 17:19:45 -06:00
runningCtx , done := context . WithCancel ( context . WithoutCancel ( ctx ) )
2023-07-10 08:53:13 -05:00
stopCtx , stop := context . WithCancel ( runningCtx )
2024-11-12 17:19:45 -06:00
cancelCtx , cancel := context . WithCancel ( context . WithoutCancel ( ctx ) )
2023-07-10 08:53:13 -05:00
2023-08-07 07:42:07 -05:00
runner := & TestSuiteRunner {
2023-07-10 08:53:13 -05:00
command : c ,
Suite : & suite ,
Config : config ,
View : view ,
2023-08-07 07:42:07 -05:00
GlobalVariables : variables ,
Opts : opts ,
2023-07-10 08:53:13 -05:00
CancelledCtx : cancelCtx ,
StoppedCtx : stopCtx ,
// Just to be explicit, we'll set the following fields even though they
// default to these values.
Cancelled : false ,
Stopped : false ,
2023-07-19 03:07:46 -05:00
Verbose : args . Verbose ,
2023-07-10 08:53:13 -05:00
}
2023-06-28 02:37:42 -05:00
view . Abstract ( & suite )
2023-07-10 08:53:13 -05:00
2024-03-26 06:41:16 -05:00
panicHandler := logging . PanicHandlerWithTraceFn ( )
2023-07-10 08:53:13 -05:00
go func ( ) {
2024-03-26 06:41:16 -05:00
defer panicHandler ( )
2023-07-19 03:31:32 -05:00
defer done ( )
2023-07-10 08:53:13 -05:00
defer stop ( )
defer cancel ( )
2024-11-12 17:19:45 -06:00
runner . Start ( ctx )
2023-07-10 08:53:13 -05:00
} ( )
// Wait for the operation to complete, or for an interrupt to occur.
select {
case <- c . ShutdownCh :
// Nice request to be cancelled.
view . Interrupted ( )
runner . Stopped = true
stop ( )
select {
case <- c . ShutdownCh :
// The user pressed it again, now we have to get it to stop as
// fast as possible.
view . FatalInterrupt ( )
runner . Cancelled = true
cancel ( )
2023-07-19 03:31:32 -05:00
// We'll wait 5 seconds for this operation to finish now, regardless
// of whether it finishes successfully or not.
select {
case <- runningCtx . Done ( ) :
case <- time . After ( 5 * time . Second ) :
}
2023-07-10 08:53:13 -05:00
case <- runningCtx . Done ( ) :
// The application finished nicely after the request was stopped.
}
case <- runningCtx . Done ( ) :
// tests finished normally with no interrupts.
}
if runner . Cancelled {
// Don't print out the conclusion if the test was cancelled.
return 1
}
2023-06-28 02:37:42 -05:00
view . Conclusion ( & suite )
if suite . Status != moduletest . Pass {
return 1
}
return 0
}
2023-07-10 08:53:13 -05:00
// test runner
2023-06-28 02:37:42 -05:00
2023-08-07 07:42:07 -05:00
// TestSuiteRunner executes an entire test suite, file by file, tracking the
// overall suite status and reacting to stop/cancel requests from the command.
type TestSuiteRunner struct {
	// command is the TestCommand that created this runner.
	command *TestCommand

	// Suite is the set of test files (and their run blocks) to execute.
	Suite *moduletest.Suite
	// Config is the main configuration under test.
	Config *configs.Config

	// GlobalVariables holds the unparsed variable values collected from the
	// command line and variable files, shared across all test files.
	GlobalVariables map[string]backend.UnparsedVariableValue
	// Opts are the context options used to build each tofu.Context.
	Opts *tofu.ContextOpts

	// View renders test progress and results.
	View views.Test

	// Stopped and Cancelled track whether the user requested the testing
	// process to be interrupted. Stopped is a nice graceful exit, we'll still
	// tidy up any state that was created and mark the tests with relevant
	// `skipped` status updates. Cancelled is a hard stop right now exit, we
	// won't attempt to clean up any state left hanging, and tests will just
	// be left showing `pending` as the status. We will still print out the
	// destroy summary diagnostics that tell the user what state has been left
	// behind and needs manual clean up.
	Stopped   bool
	Cancelled bool

	// StoppedCtx and CancelledCtx allow in progress OpenTofu operations to
	// respond to external calls from the test command.
	StoppedCtx   context.Context
	CancelledCtx context.Context

	// Verbose tells the runner to print out plan files during each test run.
	Verbose bool
}
2024-11-12 17:19:45 -06:00
func ( runner * TestSuiteRunner ) Start ( ctx context . Context ) {
2023-06-28 02:37:42 -05:00
var files [ ] string
2023-07-10 08:53:13 -05:00
for name := range runner . Suite . Files {
2023-06-28 02:37:42 -05:00
files = append ( files , name )
}
sort . Strings ( files ) // execute the files in alphabetical order
2023-07-10 08:53:13 -05:00
runner . Suite . Status = moduletest . Pass
2023-06-28 02:37:42 -05:00
for _ , name := range files {
2023-07-10 08:53:13 -05:00
if runner . Cancelled {
return
}
2023-06-28 02:37:42 -05:00
2023-07-10 08:53:13 -05:00
file := runner . Suite . Files [ name ]
2023-07-26 03:11:27 -05:00
2023-08-07 07:42:07 -05:00
fileRunner := & TestFileRunner {
Suite : runner ,
States : map [ string ] * TestFileState {
MainStateIdentifier : {
Run : nil ,
State : states . NewState ( ) ,
} ,
} ,
2023-08-01 02:47:00 -05:00
}
2023-08-07 07:42:07 -05:00
2024-11-12 17:19:45 -06:00
fileRunner . ExecuteTestFile ( ctx , file )
fileRunner . Cleanup ( ctx , file )
2023-08-07 07:42:07 -05:00
runner . Suite . Status = runner . Suite . Status . Merge ( file . Status )
2023-08-01 02:47:00 -05:00
}
2023-08-07 07:42:07 -05:00
}
2023-08-01 02:47:00 -05:00
2023-08-07 07:42:07 -05:00
// TestFileRunner executes a single test file's run blocks, tracking the
// state(s) produced so they can be cleaned up afterwards.
type TestFileRunner struct {
	// Suite is the parent suite runner, providing shared config, options,
	// variables, view, and the stop/cancel flags.
	Suite *TestSuiteRunner

	// States maps a state key (MainStateIdentifier for the main config, or a
	// module source string for alternate modules under test) to the latest
	// state and the run block that most recently updated it.
	States map[string]*TestFileState
}
2023-08-01 02:47:00 -05:00
2023-08-07 07:42:07 -05:00
// TestFileState pairs a tracked state with the run block that last updated
// it; that run's configuration is used when cleaning the state up.
type TestFileState struct {
	// Run is the run block that most recently updated State (nil until a run
	// actually updates it).
	Run *moduletest.Run
	// State is the most recent state for this key.
	State *states.State
}
2023-07-26 03:24:25 -05:00
2024-11-12 17:19:45 -06:00
// ExecuteTestFile runs every run block in file in order, merging each run's
// status into the file's status, then reports the file and its runs through
// the suite view. Runs are skipped (not executed) once the file has errored
// or a soft stop was requested; a hard cancel returns immediately, leaving
// remaining runs pending.
func (runner *TestFileRunner) ExecuteTestFile(ctx context.Context, file *moduletest.File) {
	log.Printf("[TRACE] TestFileRunner: executing test file %s", file.Name)

	file.Status = file.Status.Merge(moduletest.Pass)
	for _, run := range file.Runs {
		if runner.Suite.Cancelled {
			// This means a hard stop has been requested, in this case we don't
			// even stop to mark future tests as having been skipped. They'll
			// just show up as pending in the printed summary.
			return
		}

		if runner.Suite.Stopped {
			// Then the test was requested to be stopped, so we just mark each
			// following test as skipped and move on.
			run.Status = moduletest.Skip
			continue
		}

		if file.Status == moduletest.Error {
			// If the overall test file has errored, we don't keep trying to
			// execute tests. Instead, we mark all remaining run blocks as
			// skipped.
			run.Status = moduletest.Skip
			continue
		}

		// By default a run block executes against the main configuration and
		// its state; a run block with ConfigUnderTest targets an alternate
		// module, which gets its own state keyed by the module source.
		key := MainStateIdentifier
		config := runner.Suite.Config
		if run.Config.ConfigUnderTest != nil {
			config = run.Config.ConfigUnderTest

			// Then we need to load an alternate state and not the main one.
			key = run.Config.Module.Source.String()
			if key == MainStateIdentifier {
				// This is bad. It means somehow the module we're loading has
				// the same key as main state and we're about to corrupt things.
				run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid module source",
					Detail:   fmt.Sprintf("The source for the selected module evaluated to %s which should not be possible. This is a bug in OpenTofu - please report it!", key),
					Subject:  run.Config.Module.DeclRange.Ptr(),
				})

				run.Status = moduletest.Error
				file.Status = moduletest.Error
				continue // Abort!
			}

			// Lazily create the tracked state for this alternate module the
			// first time a run block references it.
			if _, exists := runner.States[key]; !exists {
				runner.States[key] = &TestFileState{
					Run:   nil,
					State: states.NewState(),
				}
			}
		}

		state, updatedState := runner.ExecuteTestRun(ctx, run, file, runner.States[key].State, config)
		if updatedState {
			var err error
			// We need to simulate state serialization between multiple runs
			// due to its side effects. One of such side effects is removal
			// of destroyed non-root module outputs. This is not handled
			// during graph walk since those values are not stored in the
			// state file. This is more of a weird workaround instead of a
			// proper fix, unfortunately.
			state, err = simulateStateSerialization(state)
			if err != nil {
				run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Failure during state serialization",
					Detail:   err.Error(),
				})

				// We cannot reuse state later so that's a hard stop.
				return
			}

			// Only update the most recent run and state if the state was
			// actually updated by this change. We want to use the run that
			// most recently updated the tracked state as the cleanup
			// configuration.
			runner.States[key].State = state
			runner.States[key].Run = run
		}

		file.Status = file.Status.Merge(run.Status)
	}

	// Report the file result and then each individual run result.
	runner.Suite.View.File(file)
	for _, run := range file.Runs {
		runner.Suite.View.Run(run, file)
	}
}
2024-11-12 17:19:45 -06:00
// ExecuteTestRun executes a single run block: validate, plan, and (for apply
// commands) apply, then evaluates the run's assertions against the resulting
// plan or state. It returns the state to track for this run's key and whether
// that state was actually updated (true only once an apply has been
// attempted, even a failed one, since partial updates may have occurred).
func (runner *TestFileRunner) ExecuteTestRun(ctx context.Context, run *moduletest.Run, file *moduletest.File, state *states.State, config *configs.Config) (*states.State, bool) {
	log.Printf("[TRACE] TestFileRunner: executing run block %s/%s", file.Name, run.Name)

	if runner.Suite.Cancelled {
		// Don't do anything, just give up and return immediately.
		// The surrounding functions should stop this even being called, but in
		// case of race conditions or something we can still verify this.
		return state, false
	}

	if runner.Suite.Stopped {
		// Basically the same as above, except we'll be a bit nicer.
		run.Status = moduletest.Skip
		return state, false
	}

	// Validate the file-level and run-level configuration before doing any
	// real work; either failing aborts this run.
	run.Diagnostics = run.Diagnostics.Append(file.Config.Validate())
	if run.Diagnostics.HasErrors() {
		run.Status = moduletest.Error
		return state, false
	}

	run.Diagnostics = run.Diagnostics.Append(run.Config.Validate())
	if run.Diagnostics.HasErrors() {
		run.Status = moduletest.Error
		return state, false
	}

	// TransformForTest temporarily rewrites the configuration for this run
	// block; resetConfig undoes that on exit.
	resetConfig, configDiags := config.TransformForTest(run.Config, file.Config)
	defer resetConfig()

	run.Diagnostics = run.Diagnostics.Append(configDiags)
	if configDiags.HasErrors() {
		run.Status = moduletest.Error
		return state, false
	}

	validateDiags := runner.validate(ctx, config, run, file)
	run.Diagnostics = run.Diagnostics.Append(validateDiags)
	if validateDiags.HasErrors() {
		run.Status = moduletest.Error
		return state, false
	}

	planCtx, plan, planDiags := runner.plan(ctx, config, state, run, file)
	if run.Config.Command == configs.PlanTestCommand {
		expectedFailures, sourceRanges := run.BuildExpectedFailuresAndSourceMaps()
		// Then we want to assess our conditions and diagnostics differently.
		planDiags = run.ValidateExpectedFailures(expectedFailures, sourceRanges, planDiags)
		run.Diagnostics = run.Diagnostics.Append(planDiags)
		if planDiags.HasErrors() {
			run.Status = moduletest.Error
			return state, false
		}

		variables, resetVariables, variableDiags := runner.prepareInputVariablesForAssertions(config, run, file, runner.Suite.GlobalVariables)
		defer resetVariables()

		run.Diagnostics = run.Diagnostics.Append(variableDiags)
		if variableDiags.HasErrors() {
			run.Status = moduletest.Error
			return state, false
		}

		if runner.Suite.Verbose {
			schemas, diags := planCtx.Schemas(config, plan.PlannedState)

			// If we're going to fail to render the plan, let's not fail the overall
			// test. It can still have succeeded. So we'll add the diagnostics, but
			// still report the test status as a success.
			if diags.HasErrors() {
				// This is very unlikely.
				diags = diags.Append(tfdiags.Sourceless(
					tfdiags.Warning,
					"Failed to print verbose output",
					fmt.Sprintf("OpenTofu failed to print the verbose output for %s, other diagnostics will contain more details as to why.", path.Join(file.Name, run.Name))))
			} else {
				run.Verbose = &moduletest.Verbose{
					Plan:         plan,
					State:        plan.PlannedState,
					Config:       config,
					Providers:    schemas.Providers,
					Provisioners: schemas.Provisioners,
				}
			}

			run.Diagnostics = run.Diagnostics.Append(diags)
		}

		// Plan-mode runs evaluate assertions against the planned state and
		// never touch the tracked state, so return updated=false.
		planCtx.TestContext(config, plan.PlannedState, plan, variables).EvaluateAgainstPlan(run)
		return state, false
	}

	expectedFailures, sourceRanges := run.BuildExpectedFailuresAndSourceMaps()
	planDiags = checkProblematicPlanErrors(expectedFailures, planDiags)

	// Otherwise any error during the planning prevents our apply from
	// continuing which is an error.
	run.Diagnostics = run.Diagnostics.Append(planDiags)
	if planDiags.HasErrors() {
		run.Status = moduletest.Error
		return state, false
	}

	// Since we're carrying on an executing the apply operation as well, we're
	// just going to do some post processing of the diagnostics. We remove the
	// warnings generated from check blocks, as the apply operation will either
	// reproduce them or fix them and we don't want fixed diagnostics to be
	// reported and we don't want duplicates either.
	var filteredDiags tfdiags.Diagnostics
	for _, diag := range run.Diagnostics {
		if rule, ok := addrs.DiagnosticOriginatesFromCheckRule(diag); ok && rule.Container.CheckableKind() == addrs.CheckableCheck {
			continue
		}
		filteredDiags = filteredDiags.Append(diag)
	}
	run.Diagnostics = filteredDiags

	applyCtx, updated, applyDiags := runner.apply(ctx, plan, state, config, run, file)

	// Remove expected diagnostics, and add diagnostics in case anything that should have failed didn't.
	applyDiags = run.ValidateExpectedFailures(expectedFailures, sourceRanges, applyDiags)

	run.Diagnostics = run.Diagnostics.Append(applyDiags)
	if applyDiags.HasErrors() {
		run.Status = moduletest.Error
		// Even though the apply operation failed, the graph may have done
		// partial updates and the returned state should reflect this.
		return updated, true
	}

	variables, resetVariables, variableDiags := runner.prepareInputVariablesForAssertions(config, run, file, runner.Suite.GlobalVariables)
	defer resetVariables()

	run.Diagnostics = run.Diagnostics.Append(variableDiags)
	if variableDiags.HasErrors() {
		run.Status = moduletest.Error
		return updated, true
	}

	if runner.Suite.Verbose {
		schemas, diags := planCtx.Schemas(config, plan.PlannedState)

		// If we're going to fail to render the plan, let's not fail the overall
		// test. It can still have succeeded. So we'll add the diagnostics, but
		// still report the test status as a success.
		if diags.HasErrors() {
			// This is very unlikely.
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Warning,
				"Failed to print verbose output",
				fmt.Sprintf("OpenTofu failed to print the verbose output for %s, other diagnostics will contain more details as to why.", path.Join(file.Name, run.Name))))
		} else {
			run.Verbose = &moduletest.Verbose{
				Plan:         plan,
				State:        updated,
				Config:       config,
				Providers:    schemas.Providers,
				Provisioners: schemas.Provisioners,
			}
		}

		run.Diagnostics = run.Diagnostics.Append(diags)
	}

	// Apply-mode runs evaluate assertions against the post-apply state.
	applyCtx.TestContext(config, updated, plan, variables).EvaluateAgainstState(run)
	return updated, true
}
2024-11-12 18:02:54 -06:00
// validate runs tofu validate against config for the given run block. The
// actual validation happens in a goroutine so that runner.wait can watch for
// stop/cancel requests; an interrupt adds an explicit error diagnostic on top
// of whatever validation produced.
func (runner *TestFileRunner) validate(ctx context.Context, config *configs.Config, run *moduletest.Run, file *moduletest.File) tfdiags.Diagnostics {
	log.Printf("[TRACE] TestFileRunner: called validate for %s/%s", file.Name, run.Name)

	var diags tfdiags.Diagnostics

	tfCtx, ctxDiags := tofu.NewContext(runner.Suite.Opts)
	diags = diags.Append(ctxDiags)
	if ctxDiags.HasErrors() {
		return diags
	}

	// runningCtx only signals completion of the goroutine below; it is
	// detached from ctx's cancellation so wait controls the shutdown.
	runningCtx, done := context.WithCancel(context.WithoutCancel(ctx))

	var validateDiags tfdiags.Diagnostics
	panicHandler := logging.PanicHandlerWithTraceFn()
	go func() {
		defer panicHandler()
		defer done()

		log.Printf("[DEBUG] TestFileRunner: starting validate for %s/%s", file.Name, run.Name)
		validateDiags = tfCtx.Validate(ctx, config)
		log.Printf("[DEBUG] TestFileRunner: completed validate for %s/%s", file.Name, run.Name)
	}()

	waitDiags, cancelled := runner.wait(tfCtx, runningCtx, run, file, nil)

	if cancelled {
		diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Test interrupted", "The test operation could not be completed due to an interrupt signal. Please read the remaining diagnostics carefully for any sign of failed state cleanup or dangling resources."))
	}

	// validateDiags is only safe to read after wait observed runningCtx done.
	diags = diags.Append(waitDiags)
	diags = diags.Append(validateDiags)

	return diags
}
2024-11-12 17:19:45 -06:00
// destroy plans and applies a destroy operation for the given state, using
// run's variables, returning the updated (ideally empty) state. An empty
// input state is returned unchanged. The destroy plan runs in a goroutine so
// runner.wait can observe stop/cancel requests.
func (runner *TestFileRunner) destroy(ctx context.Context, config *configs.Config, state *states.State, run *moduletest.Run, file *moduletest.File) (*states.State, tfdiags.Diagnostics) {
	log.Printf("[TRACE] TestFileRunner: called destroy for %s/%s", file.Name, run.Name)

	if state.Empty() {
		// Nothing to do!
		return state, nil
	}

	var diags tfdiags.Diagnostics

	evalCtx, ctxDiags := getEvalContextForTest(runner.States, config, runner.Suite.GlobalVariables)
	diags = diags.Append(ctxDiags)

	variables, variableDiags := buildInputVariablesForTest(run, file, config, runner.Suite.GlobalVariables, evalCtx)
	diags = diags.Append(variableDiags)

	if diags.HasErrors() {
		return state, diags
	}

	planOpts := &tofu.PlanOpts{
		Mode:         plans.DestroyMode,
		SetVariables: variables,
	}

	tfCtx, ctxDiags := tofu.NewContext(runner.Suite.Opts)
	diags = diags.Append(ctxDiags)
	if ctxDiags.HasErrors() {
		return state, diags
	}

	// runningCtx only signals completion of the planning goroutine below; it
	// is detached from ctx's cancellation so wait controls the shutdown.
	runningCtx, done := context.WithCancel(context.WithoutCancel(ctx))

	var plan *plans.Plan
	var planDiags tfdiags.Diagnostics
	panicHandler := logging.PanicHandlerWithTraceFn()
	go func() {
		defer panicHandler()
		defer done()

		log.Printf("[DEBUG] TestFileRunner: starting destroy plan for %s/%s", file.Name, run.Name)
		plan, planDiags = tfCtx.Plan(ctx, config, state, planOpts)
		log.Printf("[DEBUG] TestFileRunner: completed destroy plan for %s/%s", file.Name, run.Name)
	}()

	waitDiags, cancelled := runner.wait(tfCtx, runningCtx, run, file, nil)

	if cancelled {
		diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Test interrupted", "The test operation could not be completed due to an interrupt signal. Please read the remaining diagnostics carefully for any sign of failed state cleanup or dangling resources."))
	}

	diags = diags.Append(waitDiags)
	diags = diags.Append(planDiags)
	if diags.HasErrors() {
		return state, diags
	}

	// Apply the destroy plan; the returned state reflects whatever the apply
	// managed to destroy, even on partial failure.
	_, updated, applyDiags := runner.apply(ctx, plan, state, config, run, file)
	diags = diags.Append(applyDiags)
	return updated, diags
}
2024-11-12 17:19:45 -06:00
// plan computes a plan for the given run block using state as the starting
// state. It returns the tofu.Context that executed the plan (so callers can
// reuse it for a subsequent apply), the plan itself (nil on error or
// cancellation), and any diagnostics gathered along the way.
func (runner *TestFileRunner) plan(ctx context.Context, config *configs.Config, state *states.State, run *moduletest.Run, file *moduletest.File) (*tofu.Context, *plans.Plan, tfdiags.Diagnostics) {
	log.Printf("[TRACE] TestFileRunner: called plan for %s/%s", file.Name, run.Name)

	var diags tfdiags.Diagnostics

	targets, targetDiags := run.GetTargets()
	diags = diags.Append(targetDiags)

	replaces, replaceDiags := run.GetReplaces()
	diags = diags.Append(replaceDiags)

	references, referenceDiags := run.GetReferences()
	diags = diags.Append(referenceDiags)

	// Build the HCL evaluation context (run-block outputs and variable
	// values) so run-level variable expressions can reference them.
	evalCtx, ctxDiags := getEvalContextForTest(runner.States, config, runner.Suite.GlobalVariables)
	diags = diags.Append(ctxDiags)

	variables, variableDiags := buildInputVariablesForTest(run, file, config, runner.Suite.GlobalVariables, evalCtx)
	diags = diags.Append(variableDiags)

	if diags.HasErrors() {
		return nil, nil, diags
	}

	planOpts := &tofu.PlanOpts{
		// The run block's mode option selects between a refresh-only and a
		// normal plan.
		Mode: func() plans.Mode {
			switch run.Config.Options.Mode {
			case configs.RefreshOnlyTestMode:
				return plans.RefreshOnlyMode
			default:
				return plans.NormalMode
			}
		}(),
		Targets:            targets,
		ForceReplace:       replaces,
		SkipRefresh:        !run.Config.Options.Refresh,
		SetVariables:       variables,
		ExternalReferences: references,
	}

	tfCtx, ctxDiags := tofu.NewContext(runner.Suite.Opts)
	diags = diags.Append(ctxDiags)
	if ctxDiags.HasErrors() {
		return nil, nil, diags
	}

	// runningCtx is cancelled (via done) when the plan goroutine finishes.
	// We detach from the caller's cancellation with WithoutCancel because
	// interrupts are observed through runner.wait instead.
	runningCtx, done := context.WithCancel(context.WithoutCancel(ctx))

	var plan *plans.Plan
	var planDiags tfdiags.Diagnostics

	panicHandler := logging.PanicHandlerWithTraceFn()
	go func() {
		defer panicHandler()
		defer done()

		log.Printf("[DEBUG] TestFileRunner: starting plan for %s/%s", file.Name, run.Name)
		plan, planDiags = tfCtx.Plan(ctx, config, state, planOpts)
		log.Printf("[DEBUG] TestFileRunner: completed plan for %s/%s", file.Name, run.Name)
	}()

	// Block until the plan finishes or the user interrupts the test run.
	waitDiags, cancelled := runner.wait(tfCtx, runningCtx, run, file, nil)

	if cancelled {
		diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Test interrupted", "The test operation could not be completed due to an interrupt signal. Please read the remaining diagnostics carefully for any sign of failed state cleanup or dangling resources."))
	}

	diags = diags.Append(waitDiags)
	diags = diags.Append(planDiags)

	return tfCtx, plan, diags
}
2024-11-12 17:27:55 -06:00
// apply executes the given plan against state and returns the tofu.Context
// that ran the apply, the updated state, and any diagnostics. The set of
// to-be-created resources is snapshotted before the apply starts so it can
// be reported if the user interrupts execution part-way through.
func (runner *TestFileRunner) apply(ctx context.Context, plan *plans.Plan, state *states.State, config *configs.Config, run *moduletest.Run, file *moduletest.File) (*tofu.Context, *states.State, tfdiags.Diagnostics) {
	log.Printf("[TRACE] TestFileRunner: called apply for %s/%s", file.Name, run.Name)

	var diags tfdiags.Diagnostics

	// If things get cancelled while we are executing the apply operation below
	// we want to print out all the objects that we were creating so the user
	// can verify we managed to tidy everything up possibly.
	//
	// Unfortunately, this creates a race condition as the apply operation can
	// edit the plan (by removing changes once they are applied) while at the
	// same time our cancellation process will try to read the plan.
	//
	// We take a quick copy of the changes we care about here, which will then
	// be used in place of the plan when we print out the objects to be created
	// as part of the cancellation process.
	var created []*plans.ResourceInstanceChangeSrc
	for _, change := range plan.Changes.Resources {
		if change.Action != plans.Create {
			continue
		}
		created = append(created, change)
	}

	tfCtx, ctxDiags := tofu.NewContext(runner.Suite.Opts)
	diags = diags.Append(ctxDiags)
	if ctxDiags.HasErrors() {
		return nil, state, diags
	}

	// runningCtx is cancelled (via done) when the apply goroutine finishes;
	// we detach from the caller's cancellation because interrupts are routed
	// through runner.wait instead.
	runningCtx, done := context.WithCancel(context.WithoutCancel(ctx))

	var updated *states.State
	var applyDiags tfdiags.Diagnostics

	panicHandler := logging.PanicHandlerWithTraceFn()
	go func() {
		defer panicHandler()
		defer done()
		log.Printf("[DEBUG] TestFileRunner: starting apply for %s/%s", file.Name, run.Name)
		updated, applyDiags = tfCtx.Apply(ctx, plan, config)
		log.Printf("[DEBUG] TestFileRunner: completed apply for %s/%s", file.Name, run.Name)
	}()

	// Block until the apply finishes or the user interrupts the test run,
	// passing the created-resources snapshot for any fatal interrupt summary.
	waitDiags, cancelled := runner.wait(tfCtx, runningCtx, run, file, created)
	if cancelled {
		diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Test interrupted", "The test operation could not be completed due to an interrupt signal. Please read the remaining diagnostics carefully for any sign of failed state cleanup or dangling resources."))
	}

	diags = diags.Append(waitDiags)
	diags = diags.Append(applyDiags)

	return tfCtx, updated, diags
}
2023-09-20 07:16:53 -05:00
// wait blocks until the operation guarded by runningCtx completes, or until
// the user interrupts the test run. It returns any diagnostics gathered
// while waiting, and whether the operation was hard-cancelled.
//
// ctx is the tofu.Context executing the current operation (Stop is issued on
// it for a hard cancel); created lists the resource changes that were about
// to be created so they can be included in a fatal interrupt summary.
func (runner *TestFileRunner) wait(ctx *tofu.Context, runningCtx context.Context, run *moduletest.Run, file *moduletest.File, created []*plans.ResourceInstanceChangeSrc) (diags tfdiags.Diagnostics, cancelled bool) {
	// Build an identifier for log messages: "validate", "<file>", or
	// "<file>/<run>" depending on what information is available.
	var identifier string
	if file == nil {
		identifier = "validate"
	} else {
		identifier = file.Name
		if run != nil {
			identifier = fmt.Sprintf("%s/%s", identifier, run.Name)
		}
	}
	log.Printf("[TRACE] TestFileRunner: waiting for execution during %s", identifier)

	// This function handles what happens when the user presses the second
	// interrupt. This is a "hard cancel", we are going to stop doing whatever
	// it is we're doing. This means even if we're halfway through creating or
	// destroying infrastructure we just give up.
	handleCancelled := func() {
		log.Printf("[DEBUG] TestFileRunner: test execution cancelled during %s", identifier)

		// Gather every tracked state keyed by its run block (nil for the
		// main state) so the view can summarise what may be left behind.
		states := make(map[*moduletest.Run]*states.State)
		states[nil] = runner.States[MainStateIdentifier].State
		for key, module := range runner.States {
			if key == MainStateIdentifier {
				continue
			}
			states[module.Run] = module.State
		}
		runner.Suite.View.FatalInterruptSummary(run, file, states, created)

		cancelled = true
		go ctx.Stop()

		// Just wait for things to finish now, the overall test execution will
		// exit early if this takes too long.
		<-runningCtx.Done()
	}

	// This function handles what happens when the user presses the first
	// interrupt. This is essentially a "soft cancel", we're not going to do
	// anything but just wait for things to finish safely. But, we do listen
	// for the crucial second interrupt which will prompt a hard stop / cancel.
	handleStopped := func() {
		log.Printf("[DEBUG] TestFileRunner: test execution stopped during %s", identifier)

		select {
		case <-runner.Suite.CancelledCtx.Done():
			// We've been asked again. This time we stop whatever we're doing
			// and abandon all attempts to do anything reasonable.
			handleCancelled()
		case <-runningCtx.Done():
			// Do nothing, we finished safely and skipping the remaining tests
			// will be handled elsewhere.
		}
	}

	select {
	case <-runner.Suite.StoppedCtx.Done():
		handleStopped()
	case <-runner.Suite.CancelledCtx.Done():
		handleCancelled()
	case <-runningCtx.Done():
		// The operation exited normally.
	}

	return diags, cancelled
}
2024-11-12 17:19:45 -06:00
// Cleanup destroys any remaining infrastructure recorded in the runner's
// states for the given test file. States are destroyed in reverse run-block
// order (larger indices first) so later runs are torn down before the runs
// they may depend on. Cleanup is skipped entirely — and aborted mid-way —
// if the suite has been cancelled.
func (runner *TestFileRunner) Cleanup(ctx context.Context, file *moduletest.File) {
	log.Printf("[TRACE] TestStateManager: cleaning up state for %s", file.Name)

	if runner.Suite.Cancelled {
		// Don't try and clean anything up if the execution has been cancelled.
		log.Printf("[DEBUG] TestStateManager: skipping state cleanup for %s due to cancellation", file.Name)
		return
	}

	// Collect the states that actually need destroying, reporting any
	// non-empty state that has no associated run block (which we cannot
	// clean up).
	var states []*TestFileState
	for key, state := range runner.States {
		if state.Run == nil {
			if state.State.Empty() {
				// We can see a run block being empty when the state is empty if
				// a module was only used to execute plan commands. So this is
				// okay, and means we have nothing to cleanup so we'll just
				// skip it.
				continue
			}

			if key == MainStateIdentifier {
				log.Printf("[ERROR] TestFileRunner: found inconsistent run block and state file in %s", file.Name)
			} else {
				log.Printf("[ERROR] TestFileRunner: found inconsistent run block and state file in %s for module %s", file.Name, key)
			}

			// Otherwise something bad has happened, and we have no way to
			// recover from it. This shouldn't happen in reality, but we'll
			// print a diagnostic instead of panicking later.
			var diags tfdiags.Diagnostics
			diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Inconsistent state", fmt.Sprintf("Found inconsistent state while cleaning up %s. This is a bug in OpenTofu - please report it", file.Name)))
			runner.Suite.View.DestroySummary(diags, nil, file, state.State)
			continue
		}

		states = append(states, state)
	}

	slices.SortFunc(states, func(a, b *TestFileState) int {
		// We want to clean up later run blocks first. So, we'll sort this in
		// reverse according to index. This means larger indices first.
		return b.Run.Index - a.Run.Index
	})

	// Clean up all the states (for main and custom modules) in reverse order.
	for _, state := range states {
		log.Printf("[DEBUG] TestStateManager: cleaning up state for %s/%s", file.Name, state.Run.Name)

		if runner.Suite.Cancelled {
			// In case the cancellation came while a previous state was being
			// destroyed.
			log.Printf("[DEBUG] TestStateManager: skipping state cleanup for %s/%s due to cancellation", file.Name, state.Run.Name)
			return
		}

		var diags tfdiags.Diagnostics

		// Runs without their own module destroy against the main suite
		// config; runs with a module destroy against the config under test.
		var runConfig *configs.Config
		isMainState := state.Run.Config.Module == nil
		if isMainState {
			runConfig = runner.Suite.Config
		} else {
			runConfig = state.Run.Config.ConfigUnderTest
		}

		// Apply the test-specific config transformations for the duration of
		// the destroy; reset() below undoes them.
		reset, configDiags := runConfig.TransformForTest(state.Run.Config, file.Config)
		diags = diags.Append(configDiags)

		updated := state.State
		if !diags.HasErrors() {
			var destroyDiags tfdiags.Diagnostics
			updated, destroyDiags = runner.destroy(ctx, runConfig, state.State, state.Run, file)
			diags = diags.Append(destroyDiags)
		}
		runner.Suite.View.DestroySummary(diags, state.Run, file, updated)

		// If anything survived the destroy, save the state so the user can
		// recover the dangling resources manually.
		if updated.HasManagedResourceInstanceObjects() {
			views.SaveErroredTestStateFile(updated, state.Run, file, runner.Suite.View)
		}

		reset()
	}
}
// Helper functions used by the test runner implementations above.
2023-09-20 07:16:53 -05:00
// buildInputVariablesForTest creates a tofu.InputValues mapping for
2023-07-10 08:53:13 -05:00
// variable values that are relevant to the config being tested.
//
2023-08-01 02:59:29 -05:00
// Crucially, it differs from prepareInputVariablesForAssertions in that it only
2023-07-10 08:53:13 -05:00
// includes variables that are reference by the config and not everything that
// is defined within the test run block and test file.
2024-06-03 08:14:05 -05:00
func buildInputVariablesForTest ( run * moduletest . Run , file * moduletest . File , config * configs . Config , globals map [ string ] backend . UnparsedVariableValue , evalCtx * hcl . EvalContext ) ( tofu . InputValues , tfdiags . Diagnostics ) {
2023-07-19 03:07:46 -05:00
variables := make ( map [ string ] backend . UnparsedVariableValue )
2023-07-10 08:53:13 -05:00
for name := range config . Module . Variables {
if run != nil {
if expr , exists := run . Config . Variables [ name ] ; exists {
// Local variables take precedence.
2024-02-19 04:18:56 -06:00
variables [ name ] = testVariableValueExpression {
2023-07-19 03:07:46 -05:00
expr : expr ,
2023-09-20 07:16:53 -05:00
sourceType : tofu . ValueFromConfig ,
2024-02-19 04:18:56 -06:00
ctx : evalCtx ,
2023-07-19 03:07:46 -05:00
}
2023-07-10 08:53:13 -05:00
continue
}
}
if file != nil {
if expr , exists := file . Config . Variables [ name ] ; exists {
2023-07-19 03:07:46 -05:00
// If it's not set locally, it maybe set for the entire file.
2024-06-03 08:14:05 -05:00
variables [ name ] = testVariableValueExpression {
2023-07-19 03:07:46 -05:00
expr : expr ,
2023-09-20 07:16:53 -05:00
sourceType : tofu . ValueFromConfig ,
2024-06-03 08:14:05 -05:00
ctx : evalCtx ,
2023-07-19 03:07:46 -05:00
}
2023-07-10 08:53:13 -05:00
continue
}
}
2023-07-19 03:07:46 -05:00
if globals != nil {
// If it's not set locally or at the file level, maybe it was
// defined globally.
if variable , exists := globals [ name ] ; exists {
variables [ name ] = variable
}
}
2023-07-10 08:53:13 -05:00
// If it's not set at all that might be okay if the variable is optional
// so we'll just not add anything to the map.
2023-06-28 02:37:42 -05:00
}
2023-07-10 08:53:13 -05:00
2023-07-19 03:07:46 -05:00
return backend . ParseVariableValues ( variables , config . Module . Variables )
2023-07-10 08:53:13 -05:00
}
2024-06-03 08:14:05 -05:00
// getEvalContextForTest constructs an hcl.EvalContext based on the provided map of
// TestFileState instances, configuration and global variables.
// It extracts the relevant information from the input parameters to create a
// context suitable for HCL evaluation.
func getEvalContextForTest ( states map [ string ] * TestFileState , config * configs . Config , globals map [ string ] backend . UnparsedVariableValue ) ( * hcl . EvalContext , tfdiags . Diagnostics ) {
var diags tfdiags . Diagnostics
2024-02-19 04:18:56 -06:00
runCtx := make ( map [ string ] cty . Value )
for _ , state := range states {
if state . Run == nil {
continue
}
outputs := make ( map [ string ] cty . Value )
mod := state . State . Modules [ "" ] // Empty string is what is used by the module in the test runner
for outName , out := range mod . OutputValues {
outputs [ outName ] = out . Value
}
runCtx [ state . Run . Name ] = cty . ObjectVal ( outputs )
}
2024-06-03 08:14:05 -05:00
// If the variable is referenced in the tfvars file or TF_VAR_ environment variable, then lookup the value
// in global variables; otherwise, assign the default value.
inputValues , diags := parseAndApplyDefaultValues ( globals , config . Module . Variables )
diags . Append ( diags )
varCtx := make ( map [ string ] cty . Value )
for name , val := range inputValues {
varCtx [ name ] = val . Value
}
ctx := & hcl . EvalContext {
Variables : map [ string ] cty . Value {
"run" : cty . ObjectVal ( runCtx ) ,
"var" : cty . ObjectVal ( varCtx ) ,
} ,
}
return ctx , diags
2024-02-19 04:18:56 -06:00
}
2024-01-17 05:57:14 -06:00
// testVariableValueExpression is a backend.UnparsedVariableValue
// implementation that evaluates a stored HCL expression against a supplied
// evaluation context when the value is parsed.
type testVariableValueExpression struct {
	expr       hcl.Expression       // the variable value expression from the test configuration
	sourceType tofu.ValueSourceType // recorded as the origin of the parsed value
	ctx        *hcl.EvalContext     // context the expression is evaluated within
}
// ParseVariableValue evaluates the wrapped expression within its evaluation
// context and packages the result as a tofu.InputValue, carrying the
// expression's source range and the configured source type.
func (v testVariableValueExpression) ParseVariableValue(mode configs.VariableParsingMode) (*tofu.InputValue, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	value, evalDiags := v.expr.Value(v.ctx)
	diags = diags.Append(evalDiags)

	return &tofu.InputValue{
		Value:       value,
		SourceType:  v.sourceType,
		SourceRange: tfdiags.SourceRangeFromHCL(v.expr.Range()),
	}, diags
}
2024-06-03 08:14:05 -05:00
// parseAndApplyDefaultValues parses the given unparsed variables into tofu.InputValues
// and applies default values from the configuration variables where applicable.
// This ensures all variables are correctly initialized and returns the resulting tofu.InputValues.
func parseAndApplyDefaultValues ( unparsedVariables map [ string ] backend . UnparsedVariableValue , configVariables map [ string ] * configs . Variable ) ( tofu . InputValues , tfdiags . Diagnostics ) {
var diags tfdiags . Diagnostics
inputs := make ( tofu . InputValues , len ( unparsedVariables ) )
for name , variable := range unparsedVariables {
value , valueDiags := variable . ParseVariableValue ( configs . VariableParseLiteral )
diags = diags . Append ( valueDiags )
2024-12-04 08:48:08 -06:00
// Even so the variable is declared, some of the fields could
// be empty and filled in via type default values.
if confVariable , ok := configVariables [ name ] ; ok && confVariable . TypeDefaults != nil {
value . Value = confVariable . TypeDefaults . Apply ( value . Value )
}
2024-06-03 08:14:05 -05:00
inputs [ name ] = value
}
// Now, we're going to apply any default values from the configuration.
// We do this after the conversion into tofu.InputValues, as the
// defaults have already been converted into cty.Value objects.
for name , variable := range configVariables {
if _ , exists := unparsedVariables [ name ] ; exists {
// Then we don't want to apply the default for this variable as we
// already have a value.
continue
}
if variable . Default != cty . NilVal {
inputs [ name ] = & tofu . InputValue {
Value : variable . Default ,
SourceType : tofu . ValueFromConfig ,
SourceRange : tfdiags . SourceRangeFromHCL ( variable . DeclRange ) ,
}
}
}
return inputs , diags
}
2023-09-20 07:16:53 -05:00
// prepareInputVariablesForAssertions creates a tofu.InputValues mapping
2023-08-01 02:59:29 -05:00
// that contains all the variables defined for a given run and file, alongside
// any unset variables that have defaults within the provided config.
2023-07-10 08:53:13 -05:00
//
// Crucially, it differs from buildInputVariablesForTest in that the returned
// input values include all variables available even if they are not defined
2023-07-26 03:24:25 -05:00
// within the config. This allows the assertions to refer to variables defined
// solely within the test file, and not only those within the configuration.
2023-08-01 02:59:29 -05:00
//
2024-01-17 05:57:14 -06:00
// It also allows references to previously run test module's outputs as variable
// expressions. This relies upon the evaluation order and will not sort the test cases
// to run in the dependent order.
//
2023-08-01 02:59:29 -05:00
// In addition, it modifies the provided config so that any variables that are
// available are also defined in the config. It returns a function that resets
// the config which must be called so the config can be reused going forward.
2024-01-17 05:57:14 -06:00
func ( runner * TestFileRunner ) prepareInputVariablesForAssertions ( config * configs . Config , run * moduletest . Run , file * moduletest . File , globals map [ string ] backend . UnparsedVariableValue ) ( tofu . InputValues , func ( ) , tfdiags . Diagnostics ) {
2024-06-03 08:14:05 -05:00
var diags tfdiags . Diagnostics
ctx , ctxDiags := getEvalContextForTest ( runner . States , config , globals )
diags = diags . Append ( ctxDiags )
2024-01-17 05:57:14 -06:00
2023-07-19 03:07:46 -05:00
variables := make ( map [ string ] backend . UnparsedVariableValue )
2023-07-10 08:53:13 -05:00
if run != nil {
for name , expr := range run . Config . Variables {
2024-01-17 05:57:14 -06:00
variables [ name ] = testVariableValueExpression {
2023-07-19 03:07:46 -05:00
expr : expr ,
2023-09-20 07:16:53 -05:00
sourceType : tofu . ValueFromConfig ,
2024-01-17 05:57:14 -06:00
ctx : ctx ,
2023-07-19 03:07:46 -05:00
}
2023-07-10 08:53:13 -05:00
}
}
if file != nil {
for name , expr := range file . Config . Variables {
2023-07-19 03:07:46 -05:00
if _ , exists := variables [ name ] ; exists {
2023-07-10 08:53:13 -05:00
// Then this variable was defined at the run level and we want
// that value to take precedence.
continue
}
2024-01-17 05:57:14 -06:00
variables [ name ] = testVariableValueExpression {
2023-07-19 03:07:46 -05:00
expr : expr ,
2023-09-20 07:16:53 -05:00
sourceType : tofu . ValueFromConfig ,
2024-01-17 05:57:14 -06:00
ctx : ctx ,
2023-07-19 03:07:46 -05:00
}
2023-07-10 08:53:13 -05:00
}
}
2023-07-19 03:07:46 -05:00
for name , variable := range globals {
if _ , exists := variables [ name ] ; exists {
// Then this value was already defined at either the run level
// or the file level, and we want those values to take
// precedence.
continue
2023-07-10 08:53:13 -05:00
}
2023-07-19 03:07:46 -05:00
variables [ name ] = variable
2023-07-10 08:53:13 -05:00
}
2023-07-19 03:07:46 -05:00
2023-08-01 02:59:29 -05:00
// We've gathered all the values we have, let's convert them into
2023-12-13 10:35:41 -06:00
// tofu.InputValues so they can be passed into the OpenTofu graph.
2024-06-03 08:14:05 -05:00
// Also, apply default values from the configuration variables where applicable.
inputs , valDiags := parseAndApplyDefaultValues ( variables , config . Module . Variables )
diags . Append ( valDiags )
2023-08-01 02:59:29 -05:00
// Finally, we're going to do a some modifications to the config.
// If we have got variable values from the test file we need to make sure
// they have an equivalent entry in the configuration. We're going to do
// that dynamically here.
// First, take a backup of the existing configuration so we can easily
// restore it later.
currentVars := make ( map [ string ] * configs . Variable )
for name , variable := range config . Module . Variables {
currentVars [ name ] = variable
}
// Next, let's go through our entire inputs and add any that aren't already
// defined into the config.
for name , value := range inputs {
if _ , exists := config . Module . Variables [ name ] ; exists {
continue
}
config . Module . Variables [ name ] = & configs . Variable {
Name : name ,
Type : value . Value . Type ( ) ,
ConstraintType : value . Value . Type ( ) ,
DeclRange : value . SourceRange . ToHCL ( ) ,
}
}
// We return our input values, a function that will reset the variables
// within the config so it can be used again, and any diagnostics reporting
// variables that we couldn't parse.
return inputs , func ( ) {
config . Module . Variables = currentVars
} , diags
2023-06-28 02:37:42 -05:00
}
2024-10-15 03:20:11 -05:00
// checkProblematicPlanErrors checks for plan errors that are also "expected" by
// the tests. In some cases we expect an error, however, what causes the error
// might not be what we expected. So we try to warn about that here.
func checkProblematicPlanErrors(expectedFailures addrs.Map[addrs.Referenceable, bool], planDiags tfdiags.Diagnostics) tfdiags.Diagnostics {
	for _, diag := range planDiags {
		rule, fromCheckRule := addrs.DiagnosticOriginatesFromCheckRule(diag)
		if !fromCheckRule {
			continue
		}
		if rule.Container.CheckableKind() != addrs.CheckableInputVariable {
			continue
		}

		// Only warn when the failing check belongs to an input variable whose
		// failure the test declared as expected.
		instance, isVarInstance := rule.Container.(addrs.AbsInputVariableInstance)
		if !isVarInstance || !expectedFailures.Has(instance.Variable) {
			continue
		}

		planDiags = planDiags.Append(tfdiags.Sourceless(
			tfdiags.Warning,
			"Invalid Variable in test file",
			fmt.Sprintf("Variable %s, has an invalid value within the test. Although this was an expected failure, it has meant the apply stage was unable to run so the overall test will fail.", rule.Container.String())))
	}
	return planDiags
}
2024-12-10 08:34:25 -06:00
// simulateStateSerialization takes a state, serializes it, deserializes it
// and then returns. This is useful for state writing side effects without
// actually writing a state file.
func simulateStateSerialization(state *states.State) (*states.State, error) {
	var buf bytes.Buffer

	// Round-trip the state through an in-memory buffer with encryption
	// disabled, mimicking a write followed by a read of a state file.
	if err := statefile.Write(statefile.New(state, "", 0), &buf, encryption.StateEncryptionDisabled()); err != nil {
		return nil, fmt.Errorf("writing state to buffer: %w", err)
	}

	file, err := statefile.Read(&buf, encryption.StateEncryptionDisabled())
	if err != nil {
		return nil, fmt.Errorf("reading state from buffer: %w", err)
	}
	return file.State, nil
}