opentofu/internal/terraform/context_plugins.go
Martin Atkins 8d193ad268 core: Simplify and centralize plugin availability checks
Historically the responsibility for making sure that all of the available
providers are of suitable versions and match the appropriate checksums has
been split rather inexplicably over multiple different layers, with some
of the checks happening as late as creating a terraform.Context.

We're gradually iterating towards making that all be handled in one place,
but in this step we're just cleaning up some old remnants from the
main "terraform" package, which is now no longer responsible for any
version or checksum verification and instead just assumes it's been
provided with suitable factory functions by its caller.

We do still have a pre-check here to make sure that we at least have a
factory function for each plugin the configuration seems to depend on,
because if we don't check that up front then the problem ends up getting
caught deep inside the Terraform runtime instead, often during a
concurrent graph walk, and so it isn't deterministic which codepath will
happen to catch it on a particular run.
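
As a rough sketch of what that pre-check amounts to (the "required" and
"factories" names here are illustrative stand-ins, not identifiers from
this codebase), the caller-side validation is essentially just a
membership test against the factory table before the context is built:

    // Hypothetical pre-check; the caller supplies both collections.
    for _, addr := range required {
        if _, ok := factories[addr]; !ok {
            return fmt.Errorf("configuration requires provider %s but no plugin for it is available", addr)
        }
    }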

As of this commit, this actually does leave some holes in our checks: the
command package is using the dependency lock file to make sure we have
exactly the provider packages we expect (exact versions and checksums),
which is the most crucial part, but we don't yet have any spot where
we make sure that the lock file is consistent with the current
configuration, and we are no longer preserving the provider checksums as
part of a saved plan.

Both of those will come in subsequent commits. While it's unusual to have
a series of commits that briefly subtracts functionality and then adds
back in equivalent functionality later, the lock file checking is the only
part that's crucial for security reasons, with everything else mainly just
being to give better feedback when folks seem to be using Terraform
incorrectly. The other bits are therefore mostly cosmetic and okay to be
absent briefly as we work towards a better design that is clearer about
where that responsibility belongs.
2021-10-01 14:43:58 -07:00


package terraform

import (
	"fmt"
	"log"
	"sync"

	"github.com/hashicorp/terraform/internal/addrs"
	"github.com/hashicorp/terraform/internal/configs/configschema"
	"github.com/hashicorp/terraform/internal/providers"
	"github.com/hashicorp/terraform/internal/provisioners"
)

// contextPlugins represents a library of available plugins (providers and
// provisioners) which we assume will all be used with the same
// terraform.Context, and thus it'll be safe to cache certain information
// about the providers for performance reasons.
type contextPlugins struct {
	providerFactories    map[addrs.Provider]providers.Factory
	provisionerFactories map[string]provisioners.Factory

	// We memoize the schemas we've previously loaded in here, to avoid
	// repeatedly paying the cost of activating the same plugins to access
	// their schemas in various different spots. We use schemas for many
	// purposes in Terraform, so there isn't a single choke point where
	// it makes sense to preload all of them.
	providerSchemas    map[addrs.Provider]*ProviderSchema
	provisionerSchemas map[string]*configschema.Block
	schemasLock        sync.Mutex
}

func newContextPlugins(providerFactories map[addrs.Provider]providers.Factory, provisionerFactories map[string]provisioners.Factory) *contextPlugins {
	ret := &contextPlugins{
		providerFactories:    providerFactories,
		provisionerFactories: provisionerFactories,
	}
	ret.init()
	return ret
}
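
// As an illustrative sketch (not a real call site; the provider name and
// the in-process instance below are assumptions for the example), a caller
// wires its pre-verified factories in and can then use HasProvider as the
// up-front availability check:
//
//	factories := map[addrs.Provider]providers.Factory{
//		addrs.NewDefaultProvider("null"): func() (providers.Interface, error) {
//			return someInProcessProvider, nil // e.g. a test double
//		},
//	}
//	plugins := newContextPlugins(factories, map[string]provisioners.Factory{})
//	if !plugins.HasProvider(addrs.NewDefaultProvider("null")) {
//		// the caller reports a missing plugin before any graph walk begins
//	}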

func (cp *contextPlugins) init() {
	cp.providerSchemas = make(map[addrs.Provider]*ProviderSchema, len(cp.providerFactories))
	cp.provisionerSchemas = make(map[string]*configschema.Block, len(cp.provisionerFactories))
}

func (cp *contextPlugins) HasProvider(addr addrs.Provider) bool {
	_, ok := cp.providerFactories[addr]
	return ok
}

func (cp *contextPlugins) NewProviderInstance(addr addrs.Provider) (providers.Interface, error) {
	f, ok := cp.providerFactories[addr]
	if !ok {
		return nil, fmt.Errorf("unavailable provider %q", addr.String())
	}
	return f()
}

func (cp *contextPlugins) HasProvisioner(typ string) bool {
	_, ok := cp.provisionerFactories[typ]
	return ok
}

func (cp *contextPlugins) NewProvisionerInstance(typ string) (provisioners.Interface, error) {
	f, ok := cp.provisionerFactories[typ]
	if !ok {
		return nil, fmt.Errorf("unavailable provisioner %q", typ)
	}
	return f()
}

// ProviderSchema uses a temporary instance of the provider with the given
// address to obtain the full schema for all aspects of that provider.
//
// ProviderSchema memoizes results by unique provider address, so it's fine
// to repeatedly call this method with the same address if various different
// parts of Terraform all need the same schema information.
func (cp *contextPlugins) ProviderSchema(addr addrs.Provider) (*ProviderSchema, error) {
	cp.schemasLock.Lock()
	defer cp.schemasLock.Unlock()

	if schema, ok := cp.providerSchemas[addr]; ok {
		return schema, nil
	}

	log.Printf("[TRACE] terraform.contextPlugins: Initializing provider %q to read its schema", addr)
	provider, err := cp.NewProviderInstance(addr)
	if err != nil {
		return nil, fmt.Errorf("failed to instantiate provider %q to obtain schema: %s", addr, err)
	}
	defer provider.Close()

	resp := provider.GetProviderSchema()
	if resp.Diagnostics.HasErrors() {
		return nil, fmt.Errorf("failed to retrieve schema from provider %q: %s", addr, resp.Diagnostics.Err())
	}

	s := &ProviderSchema{
		Provider:                   resp.Provider.Block,
		ResourceTypes:              make(map[string]*configschema.Block),
		DataSources:                make(map[string]*configschema.Block),
		ResourceTypeSchemaVersions: make(map[string]uint64),
	}

	if resp.Provider.Version < 0 {
		// We're not using the version numbers here yet, but we'll check
		// for validity anyway in case we start using them in future.
return nil, fmt.Errorf("provider %s has invalid negative schema version for its configuration blocks,which is a bug in the provider ", addr)
	}

	for t, r := range resp.ResourceTypes {
		if err := r.Block.InternalValidate(); err != nil {
			return nil, fmt.Errorf("provider %s has invalid schema for managed resource type %q, which is a bug in the provider: %q", addr, t, err)
		}
		s.ResourceTypes[t] = r.Block
		s.ResourceTypeSchemaVersions[t] = uint64(r.Version)
		if r.Version < 0 {
			return nil, fmt.Errorf("provider %s has invalid negative schema version for managed resource type %q, which is a bug in the provider", addr, t)
		}
	}

	for t, d := range resp.DataSources {
		if err := d.Block.InternalValidate(); err != nil {
			return nil, fmt.Errorf("provider %s has invalid schema for data resource type %q, which is a bug in the provider: %q", addr, t, err)
		}
		s.DataSources[t] = d.Block
		if d.Version < 0 {
			// We're not using the version numbers here yet, but we'll check
			// for validity anyway in case we start using them in future.
			return nil, fmt.Errorf("provider %s has invalid negative schema version for data resource type %q, which is a bug in the provider", addr, t)
		}
	}

	if resp.ProviderMeta.Block != nil {
		s.ProviderMeta = resp.ProviderMeta.Block
	}

	cp.providerSchemas[addr] = s
	return s, nil
}

// ProviderConfigSchema is a helper wrapper around ProviderSchema which first
// reads the full schema of the given provider and then extracts just the
// provider's configuration schema, which defines what's expected in a
// "provider" block in the configuration when configuring this provider.
func (cp *contextPlugins) ProviderConfigSchema(providerAddr addrs.Provider) (*configschema.Block, error) {
	providerSchema, err := cp.ProviderSchema(providerAddr)
	if err != nil {
		return nil, err
	}
	return providerSchema.Provider, nil
}

// ResourceTypeSchema is a helper wrapper around ProviderSchema which first
// reads the schema of the given provider and then tries to find the schema
// for the resource type of the given resource mode in that provider.
//
// ResourceTypeSchema will return an error if the provider schema lookup
// fails, but will return nil if the provider schema lookup succeeds but then
// the provider doesn't have a resource of the requested type.
//
// Managed resource types have versioned schemas, so the second return value
// is the current schema version number for the requested resource. The
// version is irrelevant for other resource modes.
func (cp *contextPlugins) ResourceTypeSchema(providerAddr addrs.Provider, resourceMode addrs.ResourceMode, resourceType string) (*configschema.Block, uint64, error) {
	providerSchema, err := cp.ProviderSchema(providerAddr)
	if err != nil {
		return nil, 0, err
	}
	schema, version := providerSchema.SchemaForResourceType(resourceMode, resourceType)
	return schema, version, nil
}
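
// As an illustrative sketch (the provider and resource type names are
// assumptions for the example, not anything this package defines), a caller
// looking up a managed resource type can distinguish "provider failed" from
// "provider doesn't offer this type":
//
//	schema, version, err := cp.ResourceTypeSchema(
//		addrs.NewDefaultProvider("null"),
//		addrs.ManagedResourceMode,
//		"null_resource",
//	)
//	if err != nil {
//		// the provider itself couldn't be loaded or returned an error
//	} else if schema == nil {
//		// the provider is available but has no such resource type
//	}
//	_ = version // only meaningful for managed resource types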

// ProvisionerSchema uses a temporary instance of the provisioner with the
// given type name to obtain the schema for that provisioner's configuration.
//
// ProvisionerSchema memoizes results by provisioner type name, so it's fine
// to repeatedly call this method with the same name if various different
// parts of Terraform all need the same schema information.
func (cp *contextPlugins) ProvisionerSchema(typ string) (*configschema.Block, error) {
	cp.schemasLock.Lock()
	defer cp.schemasLock.Unlock()

	if schema, ok := cp.provisionerSchemas[typ]; ok {
		return schema, nil
	}

	log.Printf("[TRACE] terraform.contextPlugins: Initializing provisioner %q to read its schema", typ)
	provisioner, err := cp.NewProvisionerInstance(typ)
	if err != nil {
		return nil, fmt.Errorf("failed to instantiate provisioner %q to obtain schema: %s", typ, err)
	}
	defer provisioner.Close()

	resp := provisioner.GetSchema()
	if resp.Diagnostics.HasErrors() {
		return nil, fmt.Errorf("failed to retrieve schema from provisioner %q: %s", typ, resp.Diagnostics.Err())
	}

	cp.provisionerSchemas[typ] = resp.Provisioner
	return resp.Provisioner, nil
}