opentofu/internal/configs/module.go
// Copyright (c) The OpenTofu Authors
// SPDX-License-Identifier: MPL-2.0
// Copyright (c) 2023 HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package configs
import (
"fmt"
"github.com/hashicorp/hcl/v2"
"github.com/opentofu/opentofu/internal/addrs"
"github.com/opentofu/opentofu/internal/encryption/config"
"github.com/opentofu/opentofu/internal/experiments"
tfversion "github.com/opentofu/opentofu/version"
)
// Module is a container for a set of configuration constructs that are
// evaluated within a common namespace.
type Module struct {
// SourceDir is the filesystem directory that the module was loaded from.
//
// This is populated automatically only for configurations loaded with
// LoadConfigDir. If the parser is using a virtual filesystem then the
// path here will be in terms of that virtual filesystem.
// Any other caller that constructs a module directly with NewModule may
// assign a suitable value to this attribute before using it for other
// purposes. It should be treated as immutable by all consumers of Module
// values.
SourceDir string
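// CoreVersionConstraints and ActiveExperiments record the required_version
// constraints and the opt-in language experiments declared across the
// module's files.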
CoreVersionConstraints []VersionConstraint
ActiveExperiments experiments.Set
Backend *Backend
CloudConfig *CloudConfig
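// ProviderConfigs is keyed by each configuration's compact local address:
// the provider's local name, optionally suffixed with "." and its alias.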
ProviderConfigs map[string]*Provider
ProviderRequirements *RequiredProviders
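// ProviderLocalNames maps each provider FQN to the local name it is known
// by in this module; it is populated by gatherProviderLocalNames once
// parsing is complete.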
ProviderLocalNames map[addrs.Provider]string
ProviderMetas map[addrs.Provider]*ProviderMeta
Encryption *config.EncryptionConfig
Variables map[string]*Variable
Locals map[string]*Local
Outputs map[string]*Output
ModuleCalls map[string]*ModuleCall
ManagedResources map[string]*Resource
DataResources map[string]*Resource
Moved []*Moved
Import []*Import
Removed []*Removed
Checks map[string]*Check
Tests map[string]*TestFile
// IsOverridden indicates whether the module is being overridden. It's used by
// the testing framework to avoid calling the underlying module.
IsOverridden bool
// StaticEvaluator is used to evaluate static expressions in the scope of the Module.
StaticEvaluator *StaticEvaluator
}
// GetProviderConfig uses name and alias to find the respective Provider configuration.
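//
// The lookup key is the compact local provider address, so for example
// GetProviderConfig("aws", "west") finds a configuration stored under
// "aws.west", while GetProviderConfig("aws", "") finds the default
// (non-aliased) "aws" configuration.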
func (m *Module) GetProviderConfig(name, alias string) (*Provider, bool) {
tp := &Provider{ProviderCommon: ProviderCommon{Name: name}, Alias: alias}
p, ok := m.ProviderConfigs[tp.Addr().StringCompact()]
return p, ok
}
// File describes the contents of a single configuration file.
//
// Individual files are not usually used alone, but rather combined together
// with other files (conventionally, those in the same directory) to produce
// a *Module, using NewModule.
//
// At the level of an individual file we represent directly the structural
// elements present in the file, without any attempt to detect conflicting
// declarations. A File object can therefore be used for some basic static
// analysis of individual elements, but must be built into a Module to detect
// duplicate declarations.
type File struct {
CoreVersionConstraints []VersionConstraint
ActiveExperiments experiments.Set
Backends []*Backend
CloudConfigs []*CloudConfig
ProviderConfigs []*ProviderBlock
ProviderMetas []*ProviderMeta
RequiredProviders []*RequiredProviders
Encryptions []*config.EncryptionConfig
Variables []*Variable
Locals []*Local
Outputs []*Output
ModuleCalls []*ModuleCall
ManagedResources []*Resource
DataResources []*Resource
Moved []*Moved
Import []*Import
Removed []*Removed
Checks []*Check
}
// SelectiveLoader allows the consumer to load and validate only the portions of files needed for the given operations/contexts
type SelectiveLoader int
const (
SelectiveLoadAll SelectiveLoader = 0
SelectiveLoadBackend SelectiveLoader = 1
SelectiveLoadEncryption SelectiveLoader = 2
)
// Apply the selective filter to the input files
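// Variables and locals are always retained regardless of the selected mode,
// because backend, cloud, and encryption blocks may refer to them via the
// module's static evaluation.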
func (s SelectiveLoader) filter(input []*File) []*File {
if s == SelectiveLoadAll {
return input
}
out := make([]*File, len(input))
for i, inFile := range input {
outFile := &File{
Variables: inFile.Variables,
Locals: inFile.Locals,
}
switch s { //nolint:exhaustive // SelectiveLoadAll handled above
case SelectiveLoadBackend:
outFile.Backends = inFile.Backends
outFile.CloudConfigs = inFile.CloudConfigs
case SelectiveLoadEncryption:
outFile.Encryptions = inFile.Encryptions
}
out[i] = outFile
}
return out
}
// NewModuleWithTests matches NewModule except it will also load in the provided
// test files.
func NewModuleWithTests(primaryFiles, overrideFiles []*File, testFiles map[string]*TestFile, call StaticModuleCall, sourceDir string) (*Module, hcl.Diagnostics) {
mod, diags := NewModule(primaryFiles, overrideFiles, call, sourceDir, SelectiveLoadAll)
if mod != nil {
mod.Tests = testFiles
}
return mod, diags
}
// NewModule takes a list of primary files and a list of override files and
// produces a *Module by combining the files together.
//
// If there are any conflicting declarations in the given files -- for example,
// if the same variable name is defined twice -- then the resulting module
// will be incomplete and error diagnostics will be returned. Careful static
// analysis of the returned Module is still possible in this case, but the
// module will probably not be semantically valid.
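//
// A minimal sketch of a typical call, assuming the caller has already parsed
// the files and built a StaticModuleCall:
//
//	mod, diags := NewModule(primaryFiles, overrideFiles, call, dir, SelectiveLoadAll)
//	if diags.HasErrors() {
//		// react to configuration errors before using mod
//	}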
func NewModule(primaryFiles, overrideFiles []*File, call StaticModuleCall, sourceDir string, load SelectiveLoader) (*Module, hcl.Diagnostics) {
var diags hcl.Diagnostics
mod := &Module{
ProviderConfigs: map[string]*Provider{},
ProviderLocalNames: map[addrs.Provider]string{},
Variables: map[string]*Variable{},
Locals: map[string]*Local{},
Outputs: map[string]*Output{},
ModuleCalls: map[string]*ModuleCall{},
ManagedResources: map[string]*Resource{},
DataResources: map[string]*Resource{},
Checks: map[string]*Check{},
ProviderMetas: map[addrs.Provider]*ProviderMeta{},
Tests: map[string]*TestFile{},
SourceDir: sourceDir,
}
// Apply selective load rules
primaryFiles = load.filter(primaryFiles)
overrideFiles = load.filter(overrideFiles)
// Process the required_providers blocks first, to ensure that all
// resources have access to the correct provider FQNs
for _, file := range primaryFiles {
for _, r := range file.RequiredProviders {
if mod.ProviderRequirements != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate required providers configuration",
Detail: fmt.Sprintf("A module may have only one required providers configuration. The required providers were previously configured at %s.", mod.ProviderRequirements.DeclRange),
Subject: &r.DeclRange,
})
continue
}
mod.ProviderRequirements = r
}
}
// If no required_providers block is configured, create a useful empty
// state to reduce nil checks elsewhere
if mod.ProviderRequirements == nil {
mod.ProviderRequirements = &RequiredProviders{
RequiredProviders: make(map[string]*RequiredProvider),
}
}
// Any required_providers blocks in override files replace the entire
// block for each provider
for _, file := range overrideFiles {
for _, override := range file.RequiredProviders {
for name, rp := range override.RequiredProviders {
mod.ProviderRequirements.RequiredProviders[name] = rp
}
}
}
for _, file := range primaryFiles {
fileDiags := mod.appendFile(file)
diags = append(diags, fileDiags...)
}
for _, file := range overrideFiles {
fileDiags := mod.mergeFile(file)
diags = append(diags, fileDiags...)
}
// Static evaluation to build a StaticContext now that the module has all relevant Locals / Variables
mod.StaticEvaluator = NewStaticEvaluator(mod, call)
// If we have a backend, it may have fields that require locals/vars
if mod.Backend != nil {
// We don't know the backend type / loader at this point so we save the context for later use
mod.Backend.Eval = mod.StaticEvaluator
}
if mod.CloudConfig != nil {
mod.CloudConfig.eval = mod.StaticEvaluator
}
// Process all providers with the static context
for _, file := range primaryFiles {
fileDiags := mod.appendFileProviders(file)
diags = append(diags, fileDiags...)
}
for _, file := range overrideFiles {
fileDiags := mod.mergeFileProviders(file)
diags = append(diags, fileDiags...)
}
// Process all module calls now that we have the static context
for _, mc := range mod.ModuleCalls {
mDiags := mc.decodeStaticFields(mod.StaticEvaluator)
diags = append(diags, mDiags...)
}
diags = append(diags, checkModuleExperiments(mod)...)
// Generate the FQN -> LocalProviderName map
mod.gatherProviderLocalNames()
return mod, diags
}
// ResourceByAddr returns the configuration for the resource with the given
// address, or nil if there is no such resource.
func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource {
key := addr.String()
switch addr.Mode {
case addrs.ManagedResourceMode:
return m.ManagedResources[key]
case addrs.DataResourceMode:
return m.DataResources[key]
default:
return nil
}
}
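// appendFile merges the contents of a single primary configuration file into
// the module, returning error diagnostics for any duplicate declarations it
// encounters along the way.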
func (m *Module) appendFile(file *File) hcl.Diagnostics {
var diags hcl.Diagnostics
// If there are any conflicting requirements then we'll catch them
// when we actually check these constraints.
m.CoreVersionConstraints = append(m.CoreVersionConstraints, file.CoreVersionConstraints...)
m.ActiveExperiments = experiments.SetUnion(m.ActiveExperiments, file.ActiveExperiments)
for _, b := range file.Backends {
if m.Backend != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate backend configuration",
Detail: fmt.Sprintf("A module may have only one backend configuration. The backend was previously configured at %s.", m.Backend.DeclRange),
Subject: &b.DeclRange,
})
continue
}
m.Backend = b
}
for _, c := range file.CloudConfigs {
if m.CloudConfig != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate cloud configurations",
Detail: fmt.Sprintf("A module may have only one 'cloud' block configuring a cloud backend. A cloud backend was previously configured at %s.", m.CloudConfig.DeclRange),
Subject: &c.DeclRange,
})
continue
}
m.CloudConfig = c
}
if m.Backend != nil && m.CloudConfig != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Both a backend and a cloud configuration are present",
Detail: fmt.Sprintf("A module may declare either one 'cloud' block configuring a cloud backend OR one 'backend' block configuring a state backend. A cloud backend is configured at %s; a backend is configured at %s. Remove the backend block to configure a cloud backend.", m.CloudConfig.DeclRange, m.Backend.DeclRange),
Subject: &m.Backend.DeclRange,
})
}
for _, pm := range file.ProviderMetas {
provider := m.ProviderForLocalConfig(addrs.LocalProviderConfig{LocalName: pm.Provider})
if existing, exists := m.ProviderMetas[provider]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate provider_meta block",
Detail: fmt.Sprintf("A provider_meta block for provider %q was already declared at %s. Providers may only have one provider_meta block per module.", existing.Provider, existing.DeclRange),
Subject: &pm.DeclRange,
})
}
m.ProviderMetas[provider] = pm
}
for _, e := range file.Encryptions {
if m.Encryption != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate encryption configuration",
Detail: fmt.Sprintf("A module may have only one encryption configuration. Encryption was previously configured at %s.", m.Encryption.DeclRange),
Subject: &e.DeclRange,
})
continue
}
m.Encryption = e
}
for _, v := range file.Variables {
if existing, exists := m.Variables[v.Name]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate variable declaration",
Detail: fmt.Sprintf("A variable named %q was already declared at %s. Variable names must be unique within a module.", existing.Name, existing.DeclRange),
Subject: &v.DeclRange,
})
}
m.Variables[v.Name] = v
}
for _, l := range file.Locals {
if existing, exists := m.Locals[l.Name]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate local value definition",
Detail: fmt.Sprintf("A local value named %q was already defined at %s. Local value names must be unique within a module.", existing.Name, existing.DeclRange),
Subject: &l.DeclRange,
})
}
m.Locals[l.Name] = l
}
for _, o := range file.Outputs {
if existing, exists := m.Outputs[o.Name]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate output definition",
Detail: fmt.Sprintf("An output named %q was already defined at %s. Output names must be unique within a module.", existing.Name, existing.DeclRange),
Subject: &o.DeclRange,
})
}
m.Outputs[o.Name] = o
}
for _, mc := range file.ModuleCalls {
if existing, exists := m.ModuleCalls[mc.Name]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate module call",
Detail: fmt.Sprintf("A module call named %q was already defined at %s. Module calls must have unique names within a module.", existing.Name, existing.DeclRange),
Subject: &mc.DeclRange,
})
}
m.ModuleCalls[mc.Name] = mc
}
for _, r := range file.ManagedResources {
key := r.moduleUniqueKey()
if existing, exists := m.ManagedResources[key]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Duplicate resource %q configuration", existing.Type),
Detail: fmt.Sprintf("A %s resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange),
Subject: &r.DeclRange,
})
continue
}
m.ManagedResources[key] = r
// set the provider FQN for the resource
if r.ProviderConfigRef != nil {
r.Provider = m.ProviderForLocalConfig(r.ProviderConfigAddr())
} else {
// an invalid resource name (e.g. "null resource" instead of
// "null_resource") can cause a panic down the line in addrs:
// https://github.com/hashicorp/terraform/issues/25560
implied, err := addrs.ParseProviderPart(r.Addr().ImpliedProvider())
if err == nil {
r.Provider = m.ImpliedProviderForUnqualifiedType(implied)
}
// We don't return a diagnostic because the invalid resource name
// will already have been caught.
}
}
// Data sources can either be defined at the module root level, or within a
// single check block. We'll merge the data sources from both into the
// single module level DataResources map.
for _, r := range file.DataResources {
key := r.moduleUniqueKey()
if existing, exists := m.DataResources[key]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type),
Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange),
Subject: &r.DeclRange,
})
continue
}
m.DataResources[key] = r
}
for _, c := range file.Checks {
if c.DataResource != nil {
key := c.DataResource.moduleUniqueKey()
if existing, exists := m.DataResources[key]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type),
Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module, including within check blocks.", existing.Type, existing.Name, existing.DeclRange),
Subject: &c.DataResource.DeclRange,
})
continue
}
m.DataResources[key] = c.DataResource
}
if existing, exists := m.Checks[c.Name]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Duplicate check %q configuration", existing.Name),
Detail: fmt.Sprintf("A check block named %q was already declared at %s. Check blocks must be unique within each module.", existing.Name, existing.DeclRange),
Subject: &c.DeclRange,
})
continue
}
m.Checks[c.Name] = c
}
// Handle the provider associations for all data resources together.
for _, r := range m.DataResources {
// set the provider FQN for the resource
if r.ProviderConfigRef != nil {
r.Provider = m.ProviderForLocalConfig(r.ProviderConfigAddr())
} else {
// an invalid data source name (e.g. "null resource" instead of
// "null_resource") can cause a panic down the line in addrs:
// https://github.com/hashicorp/terraform/issues/25560
implied, err := addrs.ParseProviderPart(r.Addr().ImpliedProvider())
if err == nil {
r.Provider = m.ImpliedProviderForUnqualifiedType(implied)
}
// We don't return a diagnostic because the invalid resource name
// will already have been caught.
}
}
// "Moved" blocks just append, because they are all independent of one
// another at this level. (We handle any references between them at
// runtime.)
m.Moved = append(m.Moved, file.Moved...)
for _, i := range file.Import {
for _, mi := range m.Import {
if i.ResolvedTo != nil && mi.ResolvedTo != nil && (*i.ResolvedTo).Equal(*mi.ResolvedTo) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Duplicate import configuration for %q", *i.ResolvedTo),
Detail: fmt.Sprintf("An import block for the resource %q was already declared at %s. A resource can have only one import block.", *i.ResolvedTo, mi.DeclRange),
Subject: &i.DeclRange,
})
continue
}
}
if i.ProviderConfigRef != nil {
i.Provider = m.ProviderForLocalConfig(addrs.LocalProviderConfig{
LocalName: i.ProviderConfigRef.Name,
Alias: i.ProviderConfigRef.Alias,
})
} else {
implied, err := addrs.ParseProviderPart(i.StaticTo.Resource.ImpliedProvider())
if err == nil {
i.Provider = m.ImpliedProviderForUnqualifiedType(implied)
}
// We don't return a diagnostic because the invalid resource name
// will already have been caught.
}
m.Import = append(m.Import, i)
}
m.Removed = append(m.Removed, file.Removed...)
return diags
}
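// mergeFile applies a single override file to the module, replacing or merging
// the declarations it targets and rejecting block types (moved, import,
// removed) that are not allowed in override files.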
func (m *Module) mergeFile(file *File) hcl.Diagnostics {
var diags hcl.Diagnostics
if len(file.CoreVersionConstraints) != 0 {
// This is a bit of a strange case for overriding since we normally
// would union together across multiple files anyway, but we'll
// allow it and have each override file clobber any existing list.
m.CoreVersionConstraints = nil
m.CoreVersionConstraints = append(m.CoreVersionConstraints, file.CoreVersionConstraints...)
}
if len(file.Backends) != 0 {
switch len(file.Backends) {
case 1:
m.CloudConfig = nil // A backend block is mutually exclusive with a cloud one, and overwrites any cloud config
m.Backend = file.Backends[0]
default:
// An override file with multiple backends is still invalid, even
// though it can override backends from _other_ files.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate backend configuration",
Detail: fmt.Sprintf("Each override file may have only one backend configuration. A backend was previously configured at %s.", file.Backends[0].DeclRange),
Subject: &file.Backends[1].DeclRange,
})
}
}
if len(file.CloudConfigs) != 0 {
switch len(file.CloudConfigs) {
case 1:
m.Backend = nil // A cloud block is mutually exclusive with a backend one, and overwrites any backend
m.CloudConfig = file.CloudConfigs[0]
default:
// An override file with multiple cloud blocks is still invalid, even
// though it can override cloud/backend blocks from _other_ files.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate cloud configurations",
Detail: fmt.Sprintf("A module may have only one 'cloud' block configuring a cloud backend. A cloud backend was previously configured at %s.", file.CloudConfigs[0].DeclRange),
Subject: &file.CloudConfigs[1].DeclRange,
})
}
}
if len(file.Encryptions) != 0 {
switch len(file.Encryptions) {
case 1:
m.Encryption = m.Encryption.Merge(file.Encryptions[0])
default:
// An override file with multiple encryptions is still invalid, even
// though it can override encryptions from _other_ files.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate encryption configuration",
Detail: fmt.Sprintf("Each override file may have only one encryption configuration. Encryption was previously configured at %s.", file.Encryptions[0].DeclRange),
Subject: &file.Encryptions[1].DeclRange,
})
}
}
for _, v := range file.Variables {
existing, exists := m.Variables[v.Name]
if !exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing base variable declaration to override",
Detail: fmt.Sprintf("There is no variable named %q. An override file can only override a variable that was already declared in a primary configuration file.", v.Name),
Subject: &v.DeclRange,
})
continue
}
mergeDiags := existing.merge(v)
diags = append(diags, mergeDiags...)
}
for _, l := range file.Locals {
existing, exists := m.Locals[l.Name]
if !exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing base local value definition to override",
Detail: fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name),
Subject: &l.DeclRange,
})
continue
}
mergeDiags := existing.merge(l)
diags = append(diags, mergeDiags...)
}
for _, o := range file.Outputs {
existing, exists := m.Outputs[o.Name]
if !exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing base output definition to override",
Detail: fmt.Sprintf("There is no output named %q. An override file can only override an output that was already defined in a primary configuration file.", o.Name),
Subject: &o.DeclRange,
})
continue
}
mergeDiags := existing.merge(o)
diags = append(diags, mergeDiags...)
}
for _, mc := range file.ModuleCalls {
existing, exists := m.ModuleCalls[mc.Name]
if !exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing module call to override",
Detail: fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name),
Subject: &mc.DeclRange,
})
continue
}
mergeDiags := existing.merge(mc)
diags = append(diags, mergeDiags...)
}
for _, r := range file.ManagedResources {
key := r.moduleUniqueKey()
existing, exists := m.ManagedResources[key]
if !exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing resource to override",
Detail: fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name),
Subject: &r.DeclRange,
})
continue
}
mergeDiags := existing.merge(r, m.ProviderRequirements.RequiredProviders)
diags = append(diags, mergeDiags...)
}
for _, r := range file.DataResources {
key := r.moduleUniqueKey()
existing, exists := m.DataResources[key]
if !exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing data resource to override",
Detail: fmt.Sprintf("There is no %s data resource named %q. An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name),
Subject: &r.DeclRange,
})
continue
}
mergeDiags := existing.merge(r, m.ProviderRequirements.RequiredProviders)
diags = append(diags, mergeDiags...)
}
for _, m := range file.Moved {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Cannot override 'moved' blocks",
Detail: "Records of moved objects can appear only in normal files, not in override files.",
Subject: m.DeclRange.Ptr(),
})
}
for _, m := range file.Import {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Cannot override 'import' blocks",
Detail: "Import blocks can appear only in normal files, not in override files.",
Subject: m.DeclRange.Ptr(),
})
}
for _, m := range file.Removed {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Cannot override 'Removed' blocks",
Detail: "Removed blocks can appear only in normal files, not in override files.",
Subject: m.DeclRange.Ptr(),
})
}
return diags
}
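// appendFileProviders decodes the provider blocks of a primary file using the
// module's StaticEvaluator; a single block may yield several provider
// configurations after decoding. Each resulting configuration is recorded,
// with duplicates reported as errors.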
func (m *Module) appendFileProviders(file *File) hcl.Diagnostics {
var diags hcl.Diagnostics
for _, pci := range file.ProviderConfigs {
pcs, decodeDiags := pci.decodeStaticFields(m.StaticEvaluator)
diags = append(diags, decodeDiags...)
if decodeDiags.HasErrors() {
continue
}
for _, pc := range pcs {
key := pc.Addr().StringCompact()
if existing, exists := m.ProviderConfigs[key]; exists {
if existing.Alias == "" {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate provider configuration",
Detail: fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange),
Subject: &pc.DeclRange,
})
} else {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate provider configuration",
Detail: fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange),
Subject: &pc.DeclRange,
})
}
continue
}
m.ProviderConfigs[key] = pc
}
}
return diags
}
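// mergeFileProviders applies the provider blocks of an override file. A
// default (non-aliased) configuration may be introduced by an override file,
// but an aliased configuration must already exist in a primary file for the
// override to apply.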
func (m *Module) mergeFileProviders(file *File) hcl.Diagnostics {
var diags hcl.Diagnostics
for _, pci := range file.ProviderConfigs {
pcs, decodeDiags := pci.decodeStaticFields(m.StaticEvaluator)
diags = append(diags, decodeDiags...)
if decodeDiags.HasErrors() {
continue
}
for _, pc := range pcs {
key := pc.Addr().StringCompact()
existing, exists := m.ProviderConfigs[key]
if pc.Alias == "" {
// We allow overriding a non-existing _default_ provider configuration
// because the user model is that an absent provider configuration
// implies an empty provider configuration, which is what the user
// is therefore overriding here.
if exists {
mergeDiags := existing.merge(pc)
diags = append(diags, mergeDiags...)
} else {
m.ProviderConfigs[key] = pc
}
} else {
// For aliased providers, there must be a base configuration to
// override. This allows us to detect and report alias typos
// that might otherwise cause the override to not apply.
if !exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing base provider configuration for override",
Detail: fmt.Sprintf("There is no %s provider configuration with the alias %q. An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias),
Subject: &pc.DeclRange,
})
continue
}
mergeDiags := existing.merge(pc)
diags = append(diags, mergeDiags...)
}
}
}
return diags
}
// gatherProviderLocalNames is a helper function that populates a map of
// provider FQNs -> provider local names. This information is useful for
// user-facing output, which should include both the FQN and LocalName. It must
// only be populated after the module has been parsed.
func (m *Module) gatherProviderLocalNames() {
providers := make(map[addrs.Provider]string)
for k, v := range m.ProviderRequirements.RequiredProviders {
providers[v.Type] = k
}
m.ProviderLocalNames = providers
}
// LocalNameForProvider returns the module-specific user-supplied local name for
// a given provider FQN, or the default local name if none was supplied.
func (m *Module) LocalNameForProvider(p addrs.Provider) string {
if existing, exists := m.ProviderLocalNames[p]; exists {
return existing
} else {
// If there isn't a map entry, fall back to the default:
// Type = LocalName
return p.Type
}
}
// ProviderForLocalConfig returns the provider FQN for a given
// LocalProviderConfig, based on its local name.
func (m *Module) ProviderForLocalConfig(pc addrs.LocalProviderConfig) addrs.Provider {
return m.ImpliedProviderForUnqualifiedType(pc.LocalName)
}
// ImpliedProviderForUnqualifiedType returns the provider FQN for a given type,
// first by looking up the type in the provider requirements map, and falling
// back to an implied default provider.
//
// The intended behaviour is that configuring a provider with local name "foo"
// in a required_providers block will result in resources with type "foo" using
// that provider.
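//
// For example, if required_providers maps the local name "foo" to a provider
// with source "acme/foo" (an illustrative address), then pType "foo" resolves
// to that FQN; otherwise a provider in the default registry namespace is
// implied.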
func (m *Module) ImpliedProviderForUnqualifiedType(pType string) addrs.Provider {
if provider, exists := m.ProviderRequirements.RequiredProviders[pType]; exists {
return provider.Type
}
return addrs.ImpliedProviderForUnqualifiedType(pType)
}
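// CheckCoreVersionRequirements verifies that the running version of OpenTofu
// satisfies every required_version constraint declared in the module. It also
// rejects constraints that include prerelease fields, since those are not
// supported. The path and sourceAddr arguments are used only to describe the
// module in the resulting error diagnostics.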
func (m *Module) CheckCoreVersionRequirements(path addrs.Module, sourceAddr addrs.ModuleSource) hcl.Diagnostics {
var diags hcl.Diagnostics
for _, constraint := range m.CoreVersionConstraints {
// Before checking if the constraints are met, check that we are not using any prerelease fields as these
// are not currently supported.
var prereleaseDiags hcl.Diagnostics
for _, required := range constraint.Required {
if required.Prerelease() {
prereleaseDiags = prereleaseDiags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid required_version constraint",
Detail: fmt.Sprintf(
"Prerelease version constraints are not supported: %s. Remove the prerelease information from the constraint. Prerelease versions of OpenTofu will match constraints using their version core only.",
required.String()),
Subject: constraint.DeclRange.Ptr(),
})
}
}
if len(prereleaseDiags) > 0 {
// There were some prerelease fields in the constraints. Don't check the constraints as they will
// fail, and populate the diagnostics for these constraints with the prerelease diagnostics.
diags = diags.Extend(prereleaseDiags)
continue
}
if !constraint.Required.Check(tfversion.SemVer) {
switch {
case len(path) == 0:
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsupported OpenTofu Core version",
Detail: fmt.Sprintf(
"This configuration does not support OpenTofu version %s. To proceed, either choose another supported OpenTofu version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
tfversion.String(),
),
Subject: constraint.DeclRange.Ptr(),
})
default:
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsupported OpenTofu Core version",
Detail: fmt.Sprintf(
"Module %s (from %s) does not support OpenTofu version %s. To proceed, either choose another supported OpenTofu version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
path, sourceAddr, tfversion.String(),
),
Subject: constraint.DeclRange.Ptr(),
})
}
}
}
return diags
}