Mirror of https://github.com/opentofu/opentofu.git (synced 2025-01-26 16:36:26 -06:00)
[Testing Framework] Add test structure to views package for rendering test output (#33324)
* Add test structure to views package for rendering test output
* address comments
This commit is contained in:
parent ce8790c939
commit ce8fd2943d
116
internal/command/views/test.go
Normal file
@@ -0,0 +1,116 @@
package views

import (
    "fmt"

    "github.com/mitchellh/colorstring"

    "github.com/hashicorp/terraform/internal/command/arguments"
    "github.com/hashicorp/terraform/internal/moduletest"
)

// Test renders outputs for test executions.
type Test interface {
    // Abstract should print an early summary of the tests that will be
    // executed. This will be called before the tests have been executed, so
    // the status for everything within suite will be test.Pending.
    //
    // This should be used to state what is going to be tested.
    Abstract(suite *moduletest.Suite)

    // Conclusion should print out a summary of the tests including their
    // completed status.
    Conclusion(suite *moduletest.Suite)

    // File prints out the summary for an entire test file.
    File(file *moduletest.File)

    // Run prints out the summary for a single test run block.
    Run(run *moduletest.Run)
}

func NewTest(vt arguments.ViewType, view *View) Test {
    switch vt {
    case arguments.ViewJSON:
        // TODO(liamcervante): Add support for JSON outputs.
        panic("not supported yet")
    case arguments.ViewHuman:
        return &TestHuman{
            view: view,
        }
    default:
        panic(fmt.Sprintf("unknown view type %v", vt))
    }
}

type TestHuman struct {
    view *View
}

var _ Test = (*TestHuman)(nil)

func (t *TestHuman) Abstract(_ *moduletest.Suite) {
    // Do nothing, we don't print an abstract for the human view.
}

func (t *TestHuman) Conclusion(suite *moduletest.Suite) {
    t.view.streams.Println()

    counts := make(map[moduletest.Status]int)
    for _, file := range suite.Files {
        for _, run := range file.Runs {
            count := counts[run.Status]
            counts[run.Status] = count + 1
        }
    }

    if suite.Status <= moduletest.Skip {
        // Then no tests were executed.
        t.view.streams.Printf("Executed 0 tests")
        if counts[moduletest.Skip] > 0 {
            t.view.streams.Printf(", %d skipped.\n", counts[moduletest.Skip])
        } else {
            t.view.streams.Println(".")
        }
        return
    }

    if suite.Status == moduletest.Pass {
        t.view.streams.Print(t.view.colorize.Color("[green]Success![reset]"))
    } else {
        t.view.streams.Print(t.view.colorize.Color("[red]Failure![reset]"))
    }

    t.view.streams.Printf(" %d passed, %d failed", counts[moduletest.Pass], counts[moduletest.Fail]+counts[moduletest.Error])
    if counts[moduletest.Skip] > 0 {
        t.view.streams.Printf(", %d skipped.\n", counts[moduletest.Skip])
    } else {
        t.view.streams.Println(".")
    }
}

func (t *TestHuman) File(file *moduletest.File) {
    t.view.streams.Printf("%s... %s\n", file.Name, colorizeTestStatus(file.Status, t.view.colorize))
}

func (t *TestHuman) Run(run *moduletest.Run) {
    t.view.streams.Printf(" run %q... %s\n", run.Name, colorizeTestStatus(run.Status, t.view.colorize))

    // Finally we'll print out a summary of the diagnostics from the run.
    t.view.Diagnostics(run.Diagnostics)
}

func colorizeTestStatus(status moduletest.Status, color *colorstring.Colorize) string {
    switch status {
    case moduletest.Error, moduletest.Fail:
        return color.Color("[red]fail[reset]")
    case moduletest.Pass:
        return color.Color("[green]pass[reset]")
    case moduletest.Skip:
        return color.Color("[light_gray]skip[reset]")
    case moduletest.Pending:
        return color.Color("[light_gray]pending[reset]")
    default:
        panic("unrecognized status: " + status.String())
    }
}
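For orientation, here is a minimal sketch of how this view is constructed and driven. It mirrors the wiring used in the test file below; a real command would pass the CLI's terminal streams rather than the test helper:

    // Sketch only: wire up the human test view the way the tests below do.
    streams, done := terminal.StreamsForTesting(t) // test helper from internal/terminal
    view := NewTest(arguments.ViewHuman, NewView(streams))

    view.Conclusion(&moduletest.Suite{}) // an empty suite renders "\nExecuted 0 tests.\n"
    output := done(t).Stdout()
    _ = output // a real test would assert on output

Note that the suite.Status <= moduletest.Skip check in Conclusion relies on the Status iota ordering defined in internal/moduletest/status.go further down.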
548
internal/command/views/test_test.go
Normal file
@@ -0,0 +1,548 @@
package views

import (
    "testing"

    "github.com/google/go-cmp/cmp"

    "github.com/hashicorp/terraform/internal/command/arguments"
    "github.com/hashicorp/terraform/internal/moduletest"
    "github.com/hashicorp/terraform/internal/terminal"
    "github.com/hashicorp/terraform/internal/tfdiags"
)

func TestTestHuman_Conclusion(t *testing.T) {
    tcs := map[string]struct {
        Suite    *moduletest.Suite
        Expected string
    }{
        "no tests": {
            Suite:    &moduletest.Suite{},
            Expected: "\nExecuted 0 tests.\n",
        },

        "only skipped tests": {
            Suite: &moduletest.Suite{
                Status: moduletest.Skip,
                Files: map[string]*moduletest.File{
                    "descriptive_test_name.tftest": {
                        Name:   "descriptive_test_name.tftest",
                        Status: moduletest.Skip,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Skip,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Skip,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Skip,
                            },
                        },
                    },
                    "other_descriptive_test_name.tftest": {
                        Name:   "other_descriptive_test_name.tftest",
                        Status: moduletest.Skip,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Skip,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Skip,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Skip,
                            },
                        },
                    },
                },
            },
            Expected: "\nExecuted 0 tests, 6 skipped.\n",
        },

        "only passed tests": {
            Suite: &moduletest.Suite{
                Status: moduletest.Pass,
                Files: map[string]*moduletest.File{
                    "descriptive_test_name.tftest": {
                        Name:   "descriptive_test_name.tftest",
                        Status: moduletest.Pass,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Pass,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Pass,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Pass,
                            },
                        },
                    },
                    "other_descriptive_test_name.tftest": {
                        Name:   "other_descriptive_test_name.tftest",
                        Status: moduletest.Pass,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Pass,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Pass,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Pass,
                            },
                        },
                    },
                },
            },
            Expected: "\nSuccess! 6 passed, 0 failed.\n",
        },

        "passed and skipped tests": {
            Suite: &moduletest.Suite{
                Status: moduletest.Pass,
                Files: map[string]*moduletest.File{
                    "descriptive_test_name.tftest": {
                        Name:   "descriptive_test_name.tftest",
                        Status: moduletest.Pass,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Pass,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Skip,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Pass,
                            },
                        },
                    },
                    "other_descriptive_test_name.tftest": {
                        Name:   "other_descriptive_test_name.tftest",
                        Status: moduletest.Pass,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Skip,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Pass,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Pass,
                            },
                        },
                    },
                },
            },
            Expected: "\nSuccess! 4 passed, 0 failed, 2 skipped.\n",
        },

        "only failed tests": {
            Suite: &moduletest.Suite{
                Status: moduletest.Fail,
                Files: map[string]*moduletest.File{
                    "descriptive_test_name.tftest": {
                        Name:   "descriptive_test_name.tftest",
                        Status: moduletest.Fail,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Fail,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Fail,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Fail,
                            },
                        },
                    },
                    "other_descriptive_test_name.tftest": {
                        Name:   "other_descriptive_test_name.tftest",
                        Status: moduletest.Fail,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Fail,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Fail,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Fail,
                            },
                        },
                    },
                },
            },
            Expected: "\nFailure! 0 passed, 6 failed.\n",
        },

        "failed and skipped tests": {
            Suite: &moduletest.Suite{
                Status: moduletest.Fail,
                Files: map[string]*moduletest.File{
                    "descriptive_test_name.tftest": {
                        Name:   "descriptive_test_name.tftest",
                        Status: moduletest.Fail,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Fail,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Skip,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Fail,
                            },
                        },
                    },
                    "other_descriptive_test_name.tftest": {
                        Name:   "other_descriptive_test_name.tftest",
                        Status: moduletest.Fail,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Fail,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Fail,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Skip,
                            },
                        },
                    },
                },
            },
            Expected: "\nFailure! 0 passed, 4 failed, 2 skipped.\n",
        },

        "failed, passed and skipped tests": {
            Suite: &moduletest.Suite{
                Status: moduletest.Fail,
                Files: map[string]*moduletest.File{
                    "descriptive_test_name.tftest": {
                        Name:   "descriptive_test_name.tftest",
                        Status: moduletest.Fail,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Fail,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Pass,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Skip,
                            },
                        },
                    },
                    "other_descriptive_test_name.tftest": {
                        Name:   "other_descriptive_test_name.tftest",
                        Status: moduletest.Fail,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Skip,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Fail,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Pass,
                            },
                        },
                    },
                },
            },
            Expected: "\nFailure! 2 passed, 2 failed, 2 skipped.\n",
        },

        "failed and errored tests": {
            Suite: &moduletest.Suite{
                Status: moduletest.Error,
                Files: map[string]*moduletest.File{
                    "descriptive_test_name.tftest": {
                        Name:   "descriptive_test_name.tftest",
                        Status: moduletest.Error,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Fail,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Error,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Fail,
                            },
                        },
                    },
                    "other_descriptive_test_name.tftest": {
                        Name:   "other_descriptive_test_name.tftest",
                        Status: moduletest.Error,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Fail,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Error,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Error,
                            },
                        },
                    },
                },
            },
            Expected: "\nFailure! 0 passed, 6 failed.\n",
        },

        "failed, errored, passed, and skipped tests": {
            Suite: &moduletest.Suite{
                Status: moduletest.Error,
                Files: map[string]*moduletest.File{
                    "descriptive_test_name.tftest": {
                        Name:   "descriptive_test_name.tftest",
                        Status: moduletest.Fail,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Pass,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Pass,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Fail,
                            },
                        },
                    },
                    "other_descriptive_test_name.tftest": {
                        Name:   "other_descriptive_test_name.tftest",
                        Status: moduletest.Error,
                        Runs: []*moduletest.Run{
                            {
                                Name:   "test_one",
                                Status: moduletest.Error,
                            },
                            {
                                Name:   "test_two",
                                Status: moduletest.Skip,
                            },
                            {
                                Name:   "test_three",
                                Status: moduletest.Skip,
                            },
                        },
                    },
                },
            },
            Expected: "\nFailure! 2 passed, 2 failed, 2 skipped.\n",
        },
    }
    for name, tc := range tcs {
        t.Run(name, func(t *testing.T) {
            streams, done := terminal.StreamsForTesting(t)
            view := NewTest(arguments.ViewHuman, NewView(streams))

            view.Conclusion(tc.Suite)

            actual := done(t).Stdout()
            expected := tc.Expected
            if diff := cmp.Diff(expected, actual); len(diff) > 0 {
                t.Fatalf("expected:\n%s\nactual:\n%s\ndiff:\n%s", expected, actual, diff)
            }
        })
    }
}

func TestTestHuman_File(t *testing.T) {
    tcs := map[string]struct {
        File     *moduletest.File
        Expected string
    }{
        "pass": {
            File:     &moduletest.File{Name: "main.tf", Status: moduletest.Pass},
            Expected: "main.tf... pass\n",
        },

        "pending": {
            File:     &moduletest.File{Name: "main.tf", Status: moduletest.Pending},
            Expected: "main.tf... pending\n",
        },

        "skip": {
            File:     &moduletest.File{Name: "main.tf", Status: moduletest.Skip},
            Expected: "main.tf... skip\n",
        },

        "fail": {
            File:     &moduletest.File{Name: "main.tf", Status: moduletest.Fail},
            Expected: "main.tf... fail\n",
        },

        "error": {
            File:     &moduletest.File{Name: "main.tf", Status: moduletest.Error},
            Expected: "main.tf... fail\n",
        },
    }
    for name, tc := range tcs {
        t.Run(name, func(t *testing.T) {
            streams, done := terminal.StreamsForTesting(t)
            view := NewTest(arguments.ViewHuman, NewView(streams))

            view.File(tc.File)

            actual := done(t).Stdout()
            expected := tc.Expected
            if diff := cmp.Diff(expected, actual); len(diff) > 0 {
                t.Fatalf("expected:\n%s\nactual:\n%s\ndiff:\n%s", expected, actual, diff)
            }
        })
    }
}

func TestTestHuman_Run(t *testing.T) {
    tcs := map[string]struct {
        Run    *moduletest.Run
        StdOut string
        StdErr string
    }{
        "pass": {
            Run:    &moduletest.Run{Name: "run_block", Status: moduletest.Pass},
            StdOut: " run \"run_block\"... pass\n",
        },

        "pass_with_diags": {
            Run: &moduletest.Run{
                Name:        "run_block",
                Status:      moduletest.Pass,
                Diagnostics: tfdiags.Diagnostics{tfdiags.Sourceless(tfdiags.Warning, "a warning occurred", "some warning happened during this test")},
            },
            StdOut: ` run "run_block"... pass

Warning: a warning occurred

some warning happened during this test
`,
        },

        "pending": {
            Run:    &moduletest.Run{Name: "run_block", Status: moduletest.Pending},
            StdOut: " run \"run_block\"... pending\n",
        },

        "skip": {
            Run:    &moduletest.Run{Name: "run_block", Status: moduletest.Skip},
            StdOut: " run \"run_block\"... skip\n",
        },

        "fail": {
            Run:    &moduletest.Run{Name: "run_block", Status: moduletest.Fail},
            StdOut: " run \"run_block\"... fail\n",
        },

        "fail_with_diags": {
            Run: &moduletest.Run{
                Name:   "run_block",
                Status: moduletest.Fail,
                Diagnostics: tfdiags.Diagnostics{
                    tfdiags.Sourceless(tfdiags.Error, "a comparison failed", "details details details"),
                    tfdiags.Sourceless(tfdiags.Error, "a second comparison failed", "other details"),
                },
            },
            StdOut: " run \"run_block\"... fail\n",
            StdErr: `
Error: a comparison failed

details details details

Error: a second comparison failed

other details
`,
        },

        "error": {
            Run:    &moduletest.Run{Name: "run_block", Status: moduletest.Error},
            StdOut: " run \"run_block\"... fail\n",
        },

        "error_with_diags": {
            Run: &moduletest.Run{
                Name:        "run_block",
                Status:      moduletest.Error,
                Diagnostics: tfdiags.Diagnostics{tfdiags.Sourceless(tfdiags.Error, "an error occurred", "something bad happened during this test")},
            },
            StdOut: " run \"run_block\"... fail\n",
            StdErr: `
Error: an error occurred

something bad happened during this test
`,
        },
    }
    for name, tc := range tcs {
        t.Run(name, func(t *testing.T) {
            streams, done := terminal.StreamsForTesting(t)
            view := NewTest(arguments.ViewHuman, NewView(streams))

            view.Run(tc.Run)

            output := done(t)
            actual, expected := output.Stdout(), tc.StdOut
            if diff := cmp.Diff(expected, actual); len(diff) > 0 {
                t.Errorf("expected:\n%s\nactual:\n%s\ndiff:\n%s", expected, actual, diff)
            }

            actual, expected = output.Stderr(), tc.StdErr
            if diff := cmp.Diff(expected, actual); len(diff) > 0 {
                t.Errorf("expected:\n%s\nactual:\n%s\ndiff:\n%s", expected, actual, diff)
            }
        })
    }
}
8
internal/moduletest/file.go
Normal file
@@ -0,0 +1,8 @@
package moduletest

type File struct {
    Name   string
    Status Status

    Runs []*Run
}
12
internal/moduletest/run.go
Normal file
@@ -0,0 +1,12 @@
package moduletest

import (
    "github.com/hashicorp/terraform/internal/tfdiags"
)

type Run struct {
    Name   string
    Status Status

    Diagnostics tfdiags.Diagnostics
}
41
internal/moduletest/status.go
Normal file
@@ -0,0 +1,41 @@
package moduletest

// Status represents the status of a test case, and is defined as an iota
// within this file.
//
// The order of the definitions matters, as different statuses naturally take
// precedence over others. A test suite that has a mix of pass and fail
// statuses has failed overall, and therefore the fail status is of higher
// precedence than the pass status.
//
// See the Status.Merge function for this requirement being used in action.
//
//go:generate go run golang.org/x/tools/cmd/stringer -type=Status status.go
type Status int

const (
    Pending Status = iota
    Skip
    Pass
    Fail
    Error
)

// Merge compares two statuses and returns a status that best represents the
// two together.
//
// This should be used to collate the overall status of a test file or test
// suite from the collection of test runs that have been executed.
//
// Essentially, if a test suite has a mix of failures and passes, the overall
// status would be failure. If a test suite has all passes, then the test
// suite would be pass overall.
//
// The implementation always returns the higher of the two, which means the
// order in which the statuses are defined within the iota matters.
func (status Status) Merge(next Status) Status {
    if next > status {
        return next
    }
    return status
}
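The precedence rule above is easy to miss when reading Merge in isolation, so here is a short illustrative sketch (not part of the commit):

    // Sketch only: Merge keeps whichever status has the higher precedence,
    // so a single Fail outweighs any number of Pass or Skip results.
    overall := moduletest.Pending
    for _, s := range []moduletest.Status{moduletest.Pass, moduletest.Skip, moduletest.Fail} {
        overall = overall.Merge(s)
    }
    // overall is now moduletest.Fail.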
27
internal/moduletest/status_string.go
Normal file
@@ -0,0 +1,27 @@
// Code generated by "stringer -type=Status status.go"; DO NOT EDIT.

package moduletest

import "strconv"

func _() {
    // An "invalid array index" compiler error signifies that the constant values have changed.
    // Re-run the stringer command to generate them again.
    var x [1]struct{}
    _ = x[Pending-0]
    _ = x[Skip-1]
    _ = x[Pass-2]
    _ = x[Fail-3]
    _ = x[Error-4]
}

const _Status_name = "PendingSkipPassFailError"

var _Status_index = [...]uint8{0, 7, 11, 15, 19, 24}

func (i Status) String() string {
    if i < 0 || i >= Status(len(_Status_index)-1) {
        return "Status(" + strconv.FormatInt(int64(i), 10) + ")"
    }
    return _Status_name[_Status_index[i]:_Status_index[i+1]]
}
7
internal/moduletest/suite.go
Normal file
@@ -0,0 +1,7 @@
package moduletest

type Suite struct {
    Status Status

    Files map[string]*File
}
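Suite is the root of the data model: a test runner would populate Files and then collate statuses upward with Status.Merge. A hedged sketch of that collation follows; collateStatuses is a hypothetical helper, not something this commit defines:

    // Sketch only: derive file and suite statuses from individual run
    // results using Status.Merge, as the Merge doc comment suggests.
    // collateStatuses is hypothetical; no such helper exists in this commit.
    func collateStatuses(suite *moduletest.Suite) {
        suite.Status = moduletest.Pending
        for _, file := range suite.Files {
            file.Status = moduletest.Pending
            for _, run := range file.Runs {
                file.Status = file.Status.Merge(run.Status)
            }
            suite.Status = suite.Status.Merge(file.Status)
        }
    }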