opentofu/terraform/eval_validate.go
Martin Atkins cf4a5e6336 core: Don't DynamicExpand during validate
Previously we would attempt to DynamicExpand during the validate walk and
then validate each expanded instance separately. However, this meant that
we would not be able to validate the contents of a block where count = 0
or where count is not yet known.

Here we instead do a more static validation pass against the resource
configuration itself, setting count.index to cty.UnknownVal(cty.Number) so
we can type-check everything inside the block as being correct regardless
of the final count.

This is another step towards repairing the "validate" command for our
changed assumptions in a world where we have a more sophisticated type
checker.

This doesn't yet address the remaining problem that the expression
evaluator can't, with the current state structures, distinguish between
a completed resource with count = 0 and a resource that doesn't exist
at all (during validate), and so we'll still get errors if an expression
elsewhere in the configuration refers to a dynamic index of a resource with
"count" set. That's a pre-existing condition that's no longer being masked
by _this_ problem, but can't be addressed until we've introduced the new
state types (states.State, etc) and thus we _can_ distinguish these two
situations. That will therefore be addressed in a later commit.
2018-10-16 18:50:29 -07:00
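
As a quick illustration of the approach described above (a minimal standalone sketch, not part of this commit or of eval_validate.go), an unknown count.index still lets expressions that use it type-check, even though its final value isn't decided until plan:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// During validate, count.index is represented as an unknown number.
	countIndex := cty.UnknownVal(cty.Number)

	// Arithmetic on an unknown number yields another unknown number, so the
	// expression type-checks even though no concrete value exists yet.
	offset := countIndex.Add(cty.NumberIntVal(1))

	fmt.Println(offset.IsKnown())                 // false
	fmt.Println(offset.Type().Equals(cty.Number)) // true
}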


package terraform
import (
"fmt"
"log"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config/configschema"
"github.com/hashicorp/terraform/configs"
"github.com/hashicorp/terraform/tfdiags"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/convert"
"github.com/zclconf/go-cty/cty/gocty"
)
// EvalValidateCount is an EvalNode implementation that validates
// the count of a resource.
type EvalValidateCount struct {
Resource *configs.Resource
}
// TODO: test
func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) {
var diags tfdiags.Diagnostics
var count int
var err error
val, valDiags := ctx.EvaluateExpr(n.Resource.Count, cty.Number, nil)
diags = diags.Append(valDiags)
if valDiags.HasErrors() {
goto RETURN
}
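// A null or not-yet-known count can't be checked any further here; the
// remaining checks happen later, once the value has been decided.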
if val.IsNull() || !val.IsKnown() {
goto RETURN
}
err = gocty.FromCtyValue(val, &count)
if err != nil {
// The EvaluateExpr call above already guaranteed us a number value,
// so if we end up here then we have something that is out of range
// for an int, and the error message will include a description of
// the valid range.
rawVal := val.AsBigFloat()
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid count value",
Detail: fmt.Sprintf("The number %s is not a valid count value: %s.", rawVal, err),
Subject: n.Resource.Count.Range().Ptr(),
})
} else if count < 0 {
rawVal := val.AsBigFloat()
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid count value",
Detail: fmt.Sprintf("The number %s is not a valid count value: count must not be negative.", rawVal),
Subject: n.Resource.Count.Range().Ptr(),
})
}
RETURN:
return nil, diags.NonFatalErr()
}
// EvalValidateProvider is an EvalNode implementation that validates
// a provider configuration.
type EvalValidateProvider struct {
Addr addrs.ProviderConfig
Provider *ResourceProvider
Config *configs.Provider
}
func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
var diags tfdiags.Diagnostics
provider := *n.Provider
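// buildProviderConfig merges the written configuration with any values
// gathered separately via provider input, so the combined result is what
// gets validated below.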
configBody := buildProviderConfig(ctx, n.Addr, n.Config)
schema, err := provider.GetSchema(&ProviderSchemaRequest{})
if err != nil {
diags = diags.Append(err)
return nil, diags.NonFatalErr()
}
if schema == nil {
return nil, fmt.Errorf("no schema is available for %s", n.Addr)
}
configSchema := schema.Provider
if configSchema == nil {
// Should never happen in real code, but often comes up in tests where
// mock schemas are being used that tend to be incomplete.
log.Printf("[WARN] EvalValidateProvider: no config schema is available for %s, so using empty schema", n.Addr)
configSchema = &configschema.Block{}
}
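// Provider configurations are never repeated with "count", so we evaluate
// the block with no instance key data at all.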
configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey)
diags = diags.Append(evalDiags)
if evalDiags.HasErrors() {
return nil, diags.NonFatalErr()
}
// The provider API expects our legacy ResourceConfig type, so we'll need
// to shim here.
rc := NewResourceConfigShimmed(configVal, configSchema)
warns, errs := provider.Validate(rc)
if len(warns) == 0 && len(errs) == 0 {
return nil, nil
}
// FIXME: Once provider.Validate itself returns diagnostics, just
// return diags.NonFatalErr() immediately here.
for _, warn := range warns {
diags = diags.Append(tfdiags.SimpleWarning(warn))
}
for _, err := range errs {
diags = diags.Append(err)
}
return nil, diags.NonFatalErr()
}
// EvalValidateProvisioner is an EvalNode implementation that validates
// the configuration of a provisioner belonging to a resource.
type EvalValidateProvisioner struct {
ResourceAddr addrs.Resource
Provisioner *ResourceProvisioner
Schema **configschema.Block
Config *configs.Provisioner
ConnConfig *configs.Connection
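// ResourceHasCount records whether the resource this provisioner belongs
// to uses "count", which changes how count.index and "self" are treated
// during evaluation (see evaluateBlock below).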
ResourceHasCount bool
}
func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
provisioner := *n.Provisioner
config := *n.Config
schema := *n.Schema
var warns []string
var errs []error
var diags tfdiags.Diagnostics
{
// Validate the provisioner's own config first
configVal, _, configDiags := n.evaluateBlock(ctx, config.Config, schema)
diags = diags.Append(configDiags)
if configDiags.HasErrors() {
return nil, diags.Err()
}
if configVal == cty.NilVal {
// Should never happen for a well-behaved EvaluateBlock implementation
return nil, fmt.Errorf("EvaluateBlock returned nil value")
}
// The provisioner API still uses our legacy ResourceConfig type, so
// we need to shim it.
legacyRC := NewResourceConfigShimmed(configVal, schema)
w, e := provisioner.Validate(legacyRC)
warns = append(warns, w...)
errs = append(errs, e...)
// FIXME: Once the provisioner API itself returns diagnostics, just
// return diags.NonFatalErr() here.
for _, warn := range warns {
diags = diags.Append(tfdiags.SimpleWarning(warn))
}
for _, err := range errs {
diags = diags.Append(err)
}
}
{
// Now validate the connection config, which might either be from
// the provisioner block itself or inherited from the resource's
// shared connection info.
connDiags := n.validateConnConfig(ctx, n.ConnConfig, n.ResourceAddr)
diags = diags.Append(connDiags)
}
return nil, diags.NonFatalErr()
}
func (n *EvalValidateProvisioner) validateConnConfig(ctx EvalContext, config *configs.Connection, self addrs.Referenceable) tfdiags.Diagnostics {
// We can't comprehensively validate the connection config since its
// final structure is decided by the communicator and we can't instantiate
// that until we have a complete instance state. However, we *can* catch
// configuration keys that are not valid for *any* communicator, catching
// typos early rather than waiting until we actually try to run one of
// the resource's provisioners.
var diags tfdiags.Diagnostics
if config == nil || config.Config == nil {
// No block to validate
return diags
}
// We evaluate here just by evaluating the block and returning any
// diagnostics we get, since evaluation alone is enough to check for
// extraneous arguments and incorrectly-typed arguments.
_, _, configDiags := n.evaluateBlock(ctx, config.Config, connectionBlockSupersetSchema)
diags = diags.Append(configDiags)
return diags
}
func (n *EvalValidateProvisioner) evaluateBlock(ctx EvalContext, body hcl.Body, schema *configschema.Block) (cty.Value, hcl.Body, tfdiags.Diagnostics) {
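// By default we evaluate as though the resource has no instance key at all.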
keyData := EvalDataForNoInstanceKey
selfAddr := n.ResourceAddr.Instance(addrs.NoKey)
if n.ResourceHasCount {
// For a resource that has count, we allow count.index but don't
// know at this stage what it will return.
keyData = InstanceKeyEvalData{
CountIndex: cty.UnknownVal(cty.Number),
}
// "self" can't point to an unknown key, but we'll force it to be
// key 0 here, which should return an unknown value of the
// expected type since none of these elements are known at this
// point anyway.
selfAddr = n.ResourceAddr.Instance(addrs.IntKey(0))
}
return ctx.EvaluateBlock(body, schema, selfAddr, keyData)
}
// connectionBlockSupersetSchema is a schema representing the superset of all
// possible arguments for "connection" blocks across all supported connection
// types.
//
// This currently lives here because we've not yet updated our communicator
// subsystem to be aware of schema itself. Once that is done, we can remove
// this and use a type-specific schema from the communicator to validate
// exactly what is expected for a given connection type.
var connectionBlockSupersetSchema = &configschema.Block{
Attributes: map[string]*configschema.Attribute{
// NOTE: "type" is also treated specially by the config loader and stored
// away in a separate field, in addition to being listed here.
// Common attributes for both connection types
"type": {
Type: cty.String,
Optional: true,
},
"user": {
Type: cty.String,
Optional: true,
},
"password": {
Type: cty.String,
Optional: true,
},
"host": {
Type: cty.String,
Optional: true,
},
"port": {
Type: cty.String,
Optional: true,
},
"timeout": {
Type: cty.String,
Optional: true,
},
"script_path": {
Type: cty.String,
Optional: true,
},
// For type=ssh only (enforced in ssh communicator)
"private_key": {
Type: cty.String,
Optional: true,
},
"host_key": {
Type: cty.String,
Optional: true,
},
"agent": {
Type: cty.Bool,
Optional: true,
},
"agent_identity": {
Type: cty.String,
Optional: true,
},
"bastion_host": {
Type: cty.String,
Optional: true,
},
"bastion_host_key": {
Type: cty.String,
Optional: true,
},
"bastion_port": {
Type: cty.Number,
Optional: true,
},
"bastion_user": {
Type: cty.String,
Optional: true,
},
"bastion_password": {
Type: cty.String,
Optional: true,
},
"bastion_private_key": {
Type: cty.String,
Optional: true,
},
// For type=winrm only (enforced in winrm communicator)
"https": {
Type: cty.Bool,
Optional: true,
},
"insecure": {
Type: cty.Bool,
Optional: true,
},
"cacert": {
Type: cty.String,
Optional: true,
},
"use_ntlm": {
Type: cty.Bool,
Optional: true,
},
},
}
// EvalValidateResource is an EvalNode implementation that validates
// the configuration of a resource.
type EvalValidateResource struct {
Addr addrs.Resource
Provider *ResourceProvider
ProviderSchema **ProviderSchema
Config *configs.Resource
// IgnoreWarnings means that warnings will not be passed through. This allows
// "just-in-time" passes of validation to continue execution through warnings.
IgnoreWarnings bool
// ConfigVal, if non-nil, will be updated with the value resulting from
// evaluating the given configuration body. Since validation is performed
// very early, this value is likely to contain lots of unknown values,
// but its type will conform to the schema of the resource type associated
// with the resource instance being validated.
ConfigVal *cty.Value
}
func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) {
if n.ProviderSchema == nil || *n.ProviderSchema == nil {
return nil, fmt.Errorf("EvalValidateResource has nil schema for %s", n.Addr)
}
var diags tfdiags.Diagnostics
provider := *n.Provider
cfg := *n.Config
schema := *n.ProviderSchema
mode := cfg.Mode
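// Unless the configuration uses "count", there is no instance key data to
// expose to expressions inside the resource block.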
keyData := EvalDataForNoInstanceKey
if n.Config.Count != nil {
// If the config block has count, we'll evaluate with an unknown
// number as count.index so we can still type check even though
// we won't expand count until the plan phase.
keyData = InstanceKeyEvalData{
CountIndex: cty.UnknownVal(cty.Number),
}
// Basic type-checking of the count argument. More complete validation
// of this will happen when we DynamicExpand during the plan walk.
countDiags := n.validateCount(ctx, n.Config.Count)
diags = diags.Append(countDiags)
}
var warns []string
var errs []error
// Provider entry point varies depending on resource mode, because
// managed resources and data resources are two distinct concepts
// in the provider abstraction.
switch mode {
case addrs.ManagedResourceMode:
schema, exists := schema.ResourceTypes[cfg.Type]
if !exists {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid resource type",
Detail: fmt.Sprintf("The provider %s does not support resource type %q.", cfg.ProviderConfigAddr(), cfg.Type),
Subject: &cfg.TypeRange,
})
return nil, diags.Err()
}
configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData)
diags = diags.Append(valDiags)
if valDiags.HasErrors() {
return nil, diags.Err()
}
// The provider API still expects our legacy types, so we must do some
// shimming here.
legacyCfg := NewResourceConfigShimmed(configVal, schema)
warns, errs = provider.ValidateResource(cfg.Type, legacyCfg)
if n.ConfigVal != nil {
*n.ConfigVal = configVal
}
case addrs.DataResourceMode:
schema, exists := schema.DataSources[cfg.Type]
if !exists {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid data source",
Detail: fmt.Sprintf("The provider %s does not support data source %q.", cfg.ProviderConfigAddr(), cfg.Type),
Subject: &cfg.TypeRange,
})
return nil, diags.Err()
}
configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData)
diags = diags.Append(valDiags)
if valDiags.HasErrors() {
return nil, diags.Err()
}
// The provider API still expects our legacy types, so we must do some
// shimming here.
legacyCfg := NewResourceConfigShimmed(configVal, schema)
warns, errs = provider.ValidateDataSource(cfg.Type, legacyCfg)
if n.ConfigVal != nil {
*n.ConfigVal = configVal
}
}
// FIXME: Update the provider API to actually return diagnostics here,
// and then we can remove all this shimming and use its diagnostics
// directly.
for _, warn := range warns {
diags = diags.Append(tfdiags.SimpleWarning(warn))
}
for _, err := range errs {
diags = diags.Append(err)
}
if n.IgnoreWarnings {
// If we _only_ have warnings then we'll return nil.
if diags.HasErrors() {
return nil, diags.NonFatalErr()
}
return nil, nil
} else {
// We'll return an error if there are any diagnostics at all, even if
// some of them are warnings.
return nil, diags.NonFatalErr()
}
}
func (n *EvalValidateResource) validateCount(ctx EvalContext, expr hcl.Expression) tfdiags.Diagnostics {
if expr == nil {
return nil
}
var diags tfdiags.Diagnostics
countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil)
diags = diags.Append(countDiags)
if diags.HasErrors() {
return diags
}
if countVal.IsNull() {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid count argument",
Detail: `The given "count" argument value is null. An integer is required.`,
Subject: expr.Range().Ptr(),
})
return diags
}
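// EvaluateExpr above was already given a number type constraint, so this
// conversion is normally a no-op; it just ensures we really have a number
// before trying to extract an int from it.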
var err error
countVal, err = convert.Convert(countVal, cty.Number)
if err != nil {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid count argument",
Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err),
Subject: expr.Range().Ptr(),
})
return diags
}
// If the value isn't known then that's the best we can do for now, but
// we'll check more thoroughly during the plan walk.
if !countVal.IsKnown() {
return diags
}
// If we _do_ know the value, then we can do a few more checks here.
var count int
err = gocty.FromCtyValue(countVal, &count)
if err != nil {
// Isn't a whole number, etc.
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid count argument",
Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err),
Subject: expr.Range().Ptr(),
})
return diags
}
if count < 0 {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid count argument",
Detail: `The given "count" argument value is unsuitable: count cannot be negative.`,
Subject: expr.Range().Ptr(),
})
return diags
}
return diags
}