Mirror of https://github.com/opentofu/opentofu.git (synced 2025-01-12 09:01:58 -06:00)
checks: hide check diagnostics during plans that will not wait for approval (#32938)
* checks: filter out check diagnostics during certain plans
* wrap diagnostics produced by check blocks in a dedicated check block diagnostic
* address comments
parent: c993569591
commit: 9c87006c34
@@ -180,6 +180,32 @@ func (b *Local) opApply(
				runningOp.Result = backend.OperationFailure
				return
			}
		} else {
			// If we didn't ask for confirmation from the user, and they have
			// included any failing checks in their configuration, then they
			// will see a very confusing output after the apply operation
			// completes. This is because all the diagnostics from the plan
			// operation will now be shown alongside the diagnostics from the
			// apply operation. For check diagnostics, the plan output is
			// irrelevant and simple noise after the same set of checks have
			// been executed again during the apply stage. As such, we are going
			// to remove all diagnostics marked as check diagnostics at this
			// stage, so we will only show the user the check results from the
			// apply operation.
			//
			// Note, if we did ask for approval then we would have displayed the
			// plan check results at that point which is useful as the user can
			// use them to make a decision about whether to apply the changes.
			// It's just that if we didn't ask for approval then showing the
			// user the checks from the plan alongside the checks from the apply
			// is needlessly confusing.
			var filteredDiags tfdiags.Diagnostics
			for _, diag := range diags {
				if !tfdiags.IsFromCheckBlock(diag) {
					filteredDiags = filteredDiags.Append(diag)
				}
			}
			diags = filteredDiags
		}
	} else {
		plan = lr.Plan
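The filtering above is nothing more than a type test over the diagnostics slice: any diagnostic that was wrapped as a check-block diagnostic is dropped before the combined plan-and-apply output is rendered. A minimal standalone sketch of the same pattern, using stand-in names (diagnostic, plainDiag, checkBlockDiag) rather than the real tfdiags types:

    package main

    import "fmt"

    // diagnostic is a stand-in for tfdiags.Diagnostic, reduced to the one
    // method this sketch needs.
    type diagnostic interface {
    	Summary() string
    }

    type plainDiag struct{ summary string }

    func (d plainDiag) Summary() string { return d.summary }

    // checkBlockDiag marks a diagnostic as having come from a check block,
    // playing the role of tfdiags.CheckBlockDiagnostic.
    type checkBlockDiag struct{ diagnostic }

    // filterCheckDiags drops every diagnostic that carries the check-block
    // marker, the same shape as the loop in opApply above.
    func filterCheckDiags(diags []diagnostic) []diagnostic {
    	var filtered []diagnostic
    	for _, d := range diags {
    		if _, fromCheck := d.(checkBlockDiag); !fromCheck {
    			filtered = append(filtered, d)
    		}
    	}
    	return filtered
    }

    func main() {
    	diags := []diagnostic{
    		plainDiag{"Provider produced an invalid plan"},
    		checkBlockDiag{plainDiag{"Check block assertion known after apply"}},
    	}
    	for _, d := range filterCheckDiags(diags) {
    		fmt.Println(d.Summary()) // only the non-check diagnostic remains
    	}
    }

Because the marker is the wrapper type itself, no extra metadata has to travel with the diagnostic to identify where it came from.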
@@ -72,6 +72,50 @@ test_instance.foo:
		t.Fatalf("unexpected error output:\n%s", errOutput)
	}
}

func TestLocal_applyCheck(t *testing.T) {
	b := TestLocal(t)

	p := TestLocalProvider(t, b, "test", applyFixtureSchema())
	p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{
		"id":  cty.StringVal("yes"),
		"ami": cty.StringVal("bar"),
	})}

	op, configCleanup, done := testOperationApply(t, "./testdata/apply-check")
	defer configCleanup()

	run, err := b.Operation(context.Background(), op)
	if err != nil {
		t.Fatalf("bad: %s", err)
	}
	<-run.Done()
	if run.Result != backend.OperationSuccess {
		t.Fatal("operation failed")
	}

	if p.ReadResourceCalled {
		t.Fatal("ReadResource should not be called")
	}

	if !p.PlanResourceChangeCalled {
		t.Fatal("diff should be called")
	}

	if !p.ApplyResourceChangeCalled {
		t.Fatal("apply should be called")
	}

	d := done(t)
	if errOutput := d.Stderr(); errOutput != "" {
		t.Fatalf("unexpected error output:\n%s", errOutput)
	}

	if stdOutput := d.Stdout(); strings.Contains(stdOutput, "Check block assertion known after apply") {
		// As we are running an auto approved plan the warning that was
		// generated during the plan should have been hidden.
		t.Fatalf("std output contained unexpected check output:\n%s", stdOutput)
	}
}

func TestLocal_applyEmptyDir(t *testing.T) {
	b := TestLocal(t)
internal/backend/local/testdata/apply-check/main.tf (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
resource "test_instance" "foo" {
  ami = "bar"
}

check "test_instance_exists" {
  assert {
    condition     = test_instance.foo.id != null
    error_message = "value should have been computed"
  }
}
@@ -127,14 +127,14 @@ func evalCheckRule(typ addrs.CheckRuleType, rule *configs.CheckRule, ctx EvalCon

	// Check assertions warn if a status is unknown.
	if typ == addrs.CheckAssertion {
		diags = diags.Append(&hcl.Diagnostic{
		diags = diags.Append(tfdiags.AsCheckBlockDiagnostic(&hcl.Diagnostic{
			Severity:    hcl.DiagWarning,
			Summary:     fmt.Sprintf("%s known after apply", typ.Description()),
			Detail:      "The condition could not be evaluated at this time, a result will be known when this plan is applied.",
			Subject:     rule.Condition.Range().Ptr(),
			Expression:  rule.Condition,
			EvalContext: hclCtx,
		})
		}))
	}

	// We'll wait until we've learned more, then.
@@ -186,7 +186,7 @@ func evalCheckRule(typ addrs.CheckRuleType, rule *configs.CheckRule, ctx EvalCon
	if errorMessageForDiags == "" {
		errorMessageForDiags = "This check failed, but has an invalid error message as described in the other accompanying messages."
	}
	diags = diags.Append(&hcl.Diagnostic{
	diag := &hcl.Diagnostic{
		// The caller gets to choose the severity of this one, because we
		// treat condition failures as warnings in the presence of
		// certain special planning options.
@@ -196,7 +196,13 @@ func evalCheckRule(typ addrs.CheckRuleType, rule *configs.CheckRule, ctx EvalCon
		Subject:     rule.Condition.Range().Ptr(),
		Expression:  rule.Condition,
		EvalContext: hclCtx,
	})
	}

	if typ == addrs.CheckAssertion {
		diags = diags.Append(tfdiags.AsCheckBlockDiagnostic(diag))
	} else {
		diags = diags.Append(diag)
	}

	return checkResult{
		Status: status,
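The restructuring above builds the hcl.Diagnostic into a local variable first and only wraps it when the rule is a check assertion; preconditions, postconditions, and other rule types keep the plain diagnostic and their original severity. A rough sketch of that branching, where ruleType and checkBlockDiag are illustrative stand-ins for addrs.CheckRuleType and tfdiags.CheckBlockDiagnostic:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl/v2"
    )

    // ruleType stands in for addrs.CheckRuleType; only the assertion case
    // matters for the wrapping decision shown above.
    type ruleType int

    const (
    	checkAssertion ruleType = iota
    	resourcePrecondition
    )

    // checkBlockDiag stands in for tfdiags.CheckBlockDiagnostic: it simply
    // marks a diagnostic as having come from a check block.
    type checkBlockDiag struct{ wrapped *hcl.Diagnostic }

    // appendRuleDiag mirrors the branch in evalCheckRule: build the diagnostic
    // first, then wrap it only for check assertions so that every other rule
    // type keeps its plain diagnostic and original severity.
    func appendRuleDiag(diags []any, typ ruleType, diag *hcl.Diagnostic) []any {
    	if typ == checkAssertion {
    		return append(diags, checkBlockDiag{diag})
    	}
    	return append(diags, diag)
    }

    func main() {
    	diag := &hcl.Diagnostic{
    		Severity: hcl.DiagError,
    		Summary:  "Check block assertion failed",
    		Detail:   "value should have been computed",
    	}
    	fmt.Printf("%T\n", appendRuleDiag(nil, checkAssertion, diag)[0])       // main.checkBlockDiag
    	fmt.Printf("%T\n", appendRuleDiag(nil, resourcePrecondition, diag)[0]) // *hcl.Diagnostic
    }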
@@ -1685,7 +1685,6 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, checkRule
	// We have a complete configuration with no dependencies to wait on, so we
	// can read the data source into the state.
	newVal, readDiags := n.readDataSource(ctx, configVal)
	diags = diags.Append(readDiags)

	// Now we've loaded the data, and diags tells us whether we were successful
	// or not, we are going to create our plannedChange and our
@@ -1709,7 +1708,7 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, checkRule
	// and handle the error. We'll also create the plannedChange if
	// appropriate.

	if diags.HasErrors() {
	if readDiags.HasErrors() {
		// If we had errors, then we can cover that up by marking the new
		// state as unknown.
		unmarkedConfigVal, configMarkPaths := configVal.UnmarkDeepWithPaths()
@@ -1718,13 +1717,14 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, checkRule

		// We still want to report the check as failed even if we are still
		// letting it run again during the apply stage.
		ctx.Checks().ReportCheckFailure(check.Addr().Absolute(n.Addr.Module), addrs.CheckDataResource, 0, diags.Err().Error())

		// Also, let's hide the errors so that execution can continue as
		// normal.
		diags = tfdiags.WithErrorsAsWarnings(diags)
		ctx.Checks().ReportCheckFailure(check.Addr().Absolute(n.Addr.Module), addrs.CheckDataResource, 0, readDiags.Err().Error())
	}

	// Any warning or error diagnostics we'll wrap with some special checks
	// diagnostics. This is so we can identify them later, and so they'll
	// only report as warnings.
	readDiags = tfdiags.AsCheckBlockDiagnostics(readDiags)

	if !skipPlanChanges {
		// refreshOnly plans cannot produce planned changes, so we only do
		// this if skipPlanChanges is false.
@@ -1740,9 +1740,9 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, checkRule
				ActionReason: plans.ResourceInstanceReadBecauseCheckNested,
			}
		}

	}

	diags = diags.Append(readDiags)
	if !diags.HasErrors() {
		// Finally, let's make our new state.
		plannedNewState = &states.ResourceInstanceObject{
@@ -1883,20 +1883,26 @@ func (n *NodeAbstractResourceInstance) applyDataSource(ctx EvalContext, planned
	}

	newVal, readDiags := n.readDataSource(ctx, configVal)
	diags = diags.Append(readDiags)

	if check, nested := n.nestedInCheckBlock(); nested {
		// We're just going to jump in here and hide away any erros for nested
		// We're just going to jump in here and hide away any errors for nested
		// data blocks.
		if diags.HasErrors() {
			ctx.Checks().ReportCheckFailure(check.Addr().Absolute(n.Addr.Module), addrs.CheckDataResource, 0, diags.Err().Error())
			return nil, keyData, tfdiags.WithErrorsAsWarnings(diags)
		if readDiags.HasErrors() {
			ctx.Checks().ReportCheckFailure(check.Addr().Absolute(n.Addr.Module), addrs.CheckDataResource, 0, readDiags.Err().Error())
			diags = diags.Append(tfdiags.AsCheckBlockDiagnostics(readDiags))
			return nil, keyData, diags
		}

		// Even though we know there are no errors here, we still want to
		// identify these diags has having been generated from a check block.
		readDiags = tfdiags.AsCheckBlockDiagnostics(readDiags)

		// If no errors, just remember to report this as a success and continue
		// as normal.
		ctx.Checks().ReportCheckResult(check.Addr().Absolute(n.Addr.Module), addrs.CheckDataResource, 0, checks.StatusPass)
	} else if diags.HasErrors() {
	}

	diags = diags.Append(readDiags)
	if readDiags.HasErrors() {
		return nil, keyData, diags
	}
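In both planDataSource and applyDataSource above, a failed read of a data source nested inside a check block is recorded as a check failure and then surfaced only as wrapped warnings, so the surrounding operation keeps going instead of aborting. A minimal sketch of that flow, assuming reportFailure stands in for ctx.Checks().ReportCheckFailure and the returned messages for the warning-wrapped readDiags:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // reportFailure stands in for ctx.Checks().ReportCheckFailure: it only
    // records the failure message against the enclosing check block.
    func reportFailure(msg string) { fmt.Println("check failure recorded:", msg) }

    // readNestedDataSource sketches the branch above: when a data source read
    // inside a check block fails, the failure is reported against the check
    // and the error is returned as a non-fatal warning message rather than an
    // error, so the plan or apply can continue past it.
    func readNestedDataSource(read func() (string, error)) (value string, warnings []string) {
    	value, err := read()
    	if err != nil {
    		reportFailure(err.Error())
    		return "", []string{err.Error()} // downgraded: reported, not fatal
    	}
    	return value, nil
    }

    func main() {
    	_, warns := readNestedDataSource(func() (string, error) {
    		return "", errors.New("data source read failed")
    	})
    	fmt.Println("operation continues; warnings:", warns)
    }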
internal/tfdiags/checks.go (new file, 73 lines)
@@ -0,0 +1,73 @@
package tfdiags

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
)

var _ Diagnostic = CheckBlockDiagnostic{}

// CheckBlockDiagnostic is a diagnostic produced by a Terraform config Check block.
//
// It only ever returns warnings, and will not be consolidated as part of the
// Diagnostics.ConsolidateWarnings function.
type CheckBlockDiagnostic struct {
	diag Diagnostic
}

// AsCheckBlockDiagnostics will wrap every diagnostic in diags in a
// CheckBlockDiagnostic.
func AsCheckBlockDiagnostics(diags Diagnostics) Diagnostics {
	if len(diags) == 0 {
		return nil
	}

	ret := make(Diagnostics, len(diags))
	for i, diag := range diags {
		ret[i] = CheckBlockDiagnostic{diag}
	}
	return ret
}

// AsCheckBlockDiagnostic will wrap a Diagnostic or a hcl.Diagnostic in a
// CheckBlockDiagnostic.
func AsCheckBlockDiagnostic(diag interface{}) Diagnostic {
	switch d := diag.(type) {
	case Diagnostic:
		return CheckBlockDiagnostic{d}
	case *hcl.Diagnostic:
		return CheckBlockDiagnostic{hclDiagnostic{d}}
	default:
		panic(fmt.Errorf("can't construct diagnostic from %T", diag))
	}
}

// IsFromCheckBlock returns true if the specified Diagnostic is a
// CheckBlockDiagnostic.
func IsFromCheckBlock(diag Diagnostic) bool {
	_, ok := diag.(CheckBlockDiagnostic)
	return ok
}

func (c CheckBlockDiagnostic) Severity() Severity {
	// Regardless of the severity of the underlying diagnostic, check blocks
	// only ever report Warning severity.
	return Warning
}

func (c CheckBlockDiagnostic) Description() Description {
	return c.diag.Description()
}

func (c CheckBlockDiagnostic) Source() Source {
	return c.diag.Source()
}

func (c CheckBlockDiagnostic) FromExpr() *FromExpr {
	return c.diag.FromExpr()
}

func (c CheckBlockDiagnostic) ExtraInfo() interface{} {
	return c.diag.ExtraInfo()
}
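The wrapper delegates everything to the underlying diagnostic except its severity, which is always reported as Warning, so a failing check can never fail a plan or apply on its own. A self-contained sketch of that contract using local stand-ins (the real types live under internal/tfdiags and are not importable from outside the module):

    package main

    import "fmt"

    // severity and diagnostic mirror just enough of the tfdiags shapes to
    // show the warning-only contract documented above.
    type severity string

    const (
    	sevError   severity = "Error"
    	sevWarning severity = "Warning"
    )

    type diagnostic interface {
    	Severity() severity
    	Summary() string
    }

    type simpleDiag struct {
    	sev     severity
    	summary string
    }

    func (d simpleDiag) Severity() severity { return d.sev }
    func (d simpleDiag) Summary() string    { return d.summary }

    // checkBlockDiagnostic delegates everything to the wrapped diagnostic
    // except Severity, which is always Warning — the same contract as
    // tfdiags.CheckBlockDiagnostic above.
    type checkBlockDiagnostic struct{ diag diagnostic }

    func (c checkBlockDiagnostic) Severity() severity { return sevWarning }
    func (c checkBlockDiagnostic) Summary() string    { return c.diag.Summary() }

    func main() {
    	failed := simpleDiag{sevError, "Check block assertion failed"}
    	wrapped := checkBlockDiagnostic{failed}

    	// The message is preserved, but the severity is forced down to a
    	// warning so a failing check never aborts the run on its own.
    	fmt.Println(wrapped.Summary(), "->", wrapped.Severity())
    }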
@@ -43,6 +43,13 @@ func (diags Diagnostics) ConsolidateWarnings(threshold int) Diagnostics {
			continue
		}

		if _, ok := diag.(CheckBlockDiagnostic); ok {
			// Check diagnostics are never consolidated, the users have asked
			// to be informed about each of these.
			newDiags = newDiags.Append(diag)
			continue
		}

		desc := diag.Description()
		summary := desc.Summary
		if g, ok := warningGroups[summary]; ok {
@@ -1,41 +0,0 @@
package tfdiags

type diagForceWarningSeverity struct {
	wrapped Diagnostic
}

func WithErrorsAsWarnings(diags Diagnostics) Diagnostics {
	if len(diags) == 0 {
		return nil
	}

	ret := make(Diagnostics, len(diags))
	for i, diag := range diags {
		if diag.Severity() == Error {
			ret[i] = diagForceWarningSeverity{diag}
		} else {
			ret[i] = diag
		}
	}
	return ret
}

func (diag diagForceWarningSeverity) Severity() Severity {
	return Warning
}

func (diag diagForceWarningSeverity) Description() Description {
	return diag.wrapped.Description()
}

func (diag diagForceWarningSeverity) Source() Source {
	return diag.wrapped.Source()
}

func (diag diagForceWarningSeverity) FromExpr() *FromExpr {
	return diag.wrapped.FromExpr()
}

func (diag diagForceWarningSeverity) ExtraInfo() any {
	return diag.wrapped.ExtraInfo()
}