diff --git a/CHANGELOG.md b/CHANGELOG.md index abd1b283ae..66200204c5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,12 +2,18 @@ BACKWARDS INCOMPATIBILITIES / NOTES: + * Terraform's built-in plugins are now distributed as part of the main Terraform binary, and use the go-plugin framework. Overrides are still available using separate binaries, but will need recompiling against Terraform 0.7. + * The `concat()` interpolation function can no longer be used to join strings. + FEATURES: + * **New command:** `terraform state` to provide access to a variety of state manipulation functions [GH-5811] + * core: Lists and maps can now be used as first class types for variables, and may be passed between modules [GH-6322] + IMPROVEMENTS: -* provider/clc: Fix optional server password [GH-6414] -* provider/clc: Add support for hyperscale and bareMetal server types and package installation + * provider/clc: Fix optional server password [GH-6414] + * provider/clc: Add support for hyperscale and bareMetal server types and package installation BUG FIXES: diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index b1b16df765..7c5802e4b8 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -716,6 +716,10 @@ "ImportPath": "github.com/hashicorp/go-multierror", "Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5" }, + { + "ImportPath": "github.com/hashicorp/go-plugin", + "Rev": "cccb4a1328abbb89898f3ecf4311a05bddc4de6d" + }, { "ImportPath": "github.com/hashicorp/go-retryablehttp", "Rev": "5ec125ef739293cb4d57c3456dd92ba9af29ed6e" @@ -778,11 +782,11 @@ }, { "ImportPath": "github.com/hashicorp/hil", - "Rev": "0640fefa3817883b16b77bf760c4c3a6f2589545" + "Rev": "01dc167cd239b7ccab78a683b866536cd5904719" }, { "ImportPath": "github.com/hashicorp/hil/ast", - "Rev": "0640fefa3817883b16b77bf760c4c3a6f2589545" + "Rev": "01dc167cd239b7ccab78a683b866536cd5904719" }, { "ImportPath": "github.com/hashicorp/logutils", @@ -958,7 +962,7 @@ }, { "ImportPath": "github.com/mitchellh/cli", - "Rev": "cb6853d606ea4a12a15ac83cc43503df99fd28fb" + "Rev": "83f97d41cf100ee5f33944a8815c167d5e4aa272" }, { "ImportPath": "github.com/mitchellh/cloudflare-go", @@ -1229,6 +1233,16 @@ "Comment": "v1.0.0-884-gc54bbac", "Rev": "c54bbac81d19eb4df3ad167764dbb6ff2e7194de" }, + { + "ImportPath": "github.com/ryanuber/columnize", + "Comment": "v2.0.1-8-g983d3a5", + "Rev": "983d3a5fab1bf04d1b412465d2d9f8430e2e917e" + }, + { + "ImportPath": "github.com/ryanuber/columnize", + "Comment": "v2.0.1-8-g983d3a5", + "Rev": "983d3a5fab1bf04d1b412465d2d9f8430e2e917e" + }, { "ImportPath": "github.com/satori/go.uuid", "Rev": "d41af8bb6a7704f00bc3b7cba9355ae6a5a80048" diff --git a/Makefile b/Makefile index eb3157d14e..624d728efb 100644 --- a/Makefile +++ b/Makefile @@ -19,11 +19,11 @@ quickdev: generate # changes will require a rebuild of everything, in which case the dev # target should be used. core-dev: generate - go install github.com/hashicorp/terraform + go install -tags 'core' github.com/hashicorp/terraform # Shorthand for quickly testing the core of Terraform (i.e. "not providers") core-test: generate - @echo "Testing core packages..." && go test $(shell go list ./... | grep -v -E 'builtin|vendor') + @echo "Testing core packages..." && go test -tags 'core' $(shell go list ./... | grep -v -E 'builtin|vendor') # Shorthand for building and installing just one plugin for local testing. # Run as (for example): make plugin-dev PLUGIN=provider-aws @@ -77,6 +77,7 @@ generate: go get -u golang.org/x/tools/cmd/stringer; \ fi go generate $$(go list ./... 
| grep -v /vendor/) + @go fmt command/internal_plugin_list.go > /dev/null fmt: gofmt -w . diff --git a/builtin/providers/terraform/resource_state.go b/builtin/providers/terraform/resource_state.go index fb0e85ee2c..8f5855573b 100644 --- a/builtin/providers/terraform/resource_state.go +++ b/builtin/providers/terraform/resource_state.go @@ -60,7 +60,7 @@ func resourceRemoteStateRead(d *schema.ResourceData, meta interface{}) error { return err } - var outputs map[string]string + var outputs map[string]interface{} if !state.State().Empty() { outputs = state.State().RootModule().Outputs } diff --git a/builtin/providers/tls/resource_cert_request_test.go b/builtin/providers/tls/resource_cert_request_test.go index 5ddad805c2..2c2c4f5d48 100644 --- a/builtin/providers/tls/resource_cert_request_test.go +++ b/builtin/providers/tls/resource_cert_request_test.go @@ -50,7 +50,13 @@ EOT } `, testPrivateKey), Check: func(s *terraform.State) error { - got := s.RootModule().Outputs["key_pem"] + gotUntyped := s.RootModule().Outputs["key_pem"] + + got, ok := gotUntyped.(string) + if !ok { + return fmt.Errorf("output for \"key_pem\" is not a string") + } + if !strings.HasPrefix(got, "-----BEGIN CERTIFICATE REQUEST----") { return fmt.Errorf("key is missing CSR PEM preamble") } diff --git a/builtin/providers/tls/resource_locally_signed_cert_test.go b/builtin/providers/tls/resource_locally_signed_cert_test.go index 7e9688d121..aa705ece8e 100644 --- a/builtin/providers/tls/resource_locally_signed_cert_test.go +++ b/builtin/providers/tls/resource_locally_signed_cert_test.go @@ -47,7 +47,11 @@ EOT } `, testCertRequest, testCACert, testCAPrivateKey), Check: func(s *terraform.State) error { - got := s.RootModule().Outputs["cert_pem"] + gotUntyped := s.RootModule().Outputs["cert_pem"] + got, ok := gotUntyped.(string) + if !ok { + return fmt.Errorf("output for \"cert_pem\" is not a string") + } if !strings.HasPrefix(got, "-----BEGIN CERTIFICATE----") { return fmt.Errorf("key is missing cert PEM preamble") } diff --git a/builtin/providers/tls/resource_private_key_test.go b/builtin/providers/tls/resource_private_key_test.go index 00fc8abbd6..cec3a81984 100644 --- a/builtin/providers/tls/resource_private_key_test.go +++ b/builtin/providers/tls/resource_private_key_test.go @@ -29,7 +29,12 @@ func TestPrivateKeyRSA(t *testing.T) { } `, Check: func(s *terraform.State) error { - gotPrivate := s.RootModule().Outputs["private_key_pem"] + gotPrivateUntyped := s.RootModule().Outputs["private_key_pem"] + gotPrivate, ok := gotPrivateUntyped.(string) + if !ok { + return fmt.Errorf("output for \"private_key_pem\" is not a string") + } + if !strings.HasPrefix(gotPrivate, "-----BEGIN RSA PRIVATE KEY----") { return fmt.Errorf("private key is missing RSA key PEM preamble") } @@ -37,12 +42,20 @@ func TestPrivateKeyRSA(t *testing.T) { return fmt.Errorf("private key PEM looks too long for a 2048-bit key (got %v characters)", len(gotPrivate)) } - gotPublic := s.RootModule().Outputs["public_key_pem"] + gotPublicUntyped := s.RootModule().Outputs["public_key_pem"] + gotPublic, ok := gotPublicUntyped.(string) + if !ok { + return fmt.Errorf("output for \"public_key_pem\" is not a string") + } if !strings.HasPrefix(gotPublic, "-----BEGIN PUBLIC KEY----") { return fmt.Errorf("public key is missing public key PEM preamble") } - gotPublicSSH := s.RootModule().Outputs["public_key_openssh"] + gotPublicSSHUntyped := s.RootModule().Outputs["public_key_openssh"] + gotPublicSSH, ok := gotPublicSSHUntyped.(string) + if !ok { + return fmt.Errorf("output for 
\"public_key_openssh\" is not a string") + } if !strings.HasPrefix(gotPublicSSH, "ssh-rsa ") { return fmt.Errorf("SSH public key is missing ssh-rsa prefix") } @@ -61,7 +74,11 @@ func TestPrivateKeyRSA(t *testing.T) { } `, Check: func(s *terraform.State) error { - got := s.RootModule().Outputs["key_pem"] + gotUntyped := s.RootModule().Outputs["key_pem"] + got, ok := gotUntyped.(string) + if !ok { + return fmt.Errorf("output for \"key_pem\" is not a string") + } if !strings.HasPrefix(got, "-----BEGIN RSA PRIVATE KEY----") { return fmt.Errorf("key is missing RSA key PEM preamble") } @@ -95,12 +112,22 @@ func TestPrivateKeyECDSA(t *testing.T) { } `, Check: func(s *terraform.State) error { - gotPrivate := s.RootModule().Outputs["private_key_pem"] + gotPrivateUntyped := s.RootModule().Outputs["private_key_pem"] + gotPrivate, ok := gotPrivateUntyped.(string) + if !ok { + return fmt.Errorf("output for \"private_key_pem\" is not a string") + } + if !strings.HasPrefix(gotPrivate, "-----BEGIN EC PRIVATE KEY----") { return fmt.Errorf("Private key is missing EC key PEM preamble") } - gotPublic := s.RootModule().Outputs["public_key_pem"] + gotPublicUntyped := s.RootModule().Outputs["public_key_pem"] + gotPublic, ok := gotPublicUntyped.(string) + if !ok { + return fmt.Errorf("output for \"public_key_pem\" is not a string") + } + if !strings.HasPrefix(gotPublic, "-----BEGIN PUBLIC KEY----") { return fmt.Errorf("public key is missing public key PEM preamble") } @@ -130,17 +157,29 @@ func TestPrivateKeyECDSA(t *testing.T) { } `, Check: func(s *terraform.State) error { - gotPrivate := s.RootModule().Outputs["private_key_pem"] + gotPrivateUntyped := s.RootModule().Outputs["private_key_pem"] + gotPrivate, ok := gotPrivateUntyped.(string) + if !ok { + return fmt.Errorf("output for \"private_key_pem\" is not a string") + } if !strings.HasPrefix(gotPrivate, "-----BEGIN EC PRIVATE KEY----") { return fmt.Errorf("Private key is missing EC key PEM preamble") } - gotPublic := s.RootModule().Outputs["public_key_pem"] + gotPublicUntyped := s.RootModule().Outputs["public_key_pem"] + gotPublic, ok := gotPublicUntyped.(string) + if !ok { + return fmt.Errorf("output for \"public_key_pem\" is not a string") + } if !strings.HasPrefix(gotPublic, "-----BEGIN PUBLIC KEY----") { return fmt.Errorf("public key is missing public key PEM preamble") } - gotPublicSSH := s.RootModule().Outputs["public_key_openssh"] + gotPublicSSHUntyped := s.RootModule().Outputs["public_key_openssh"] + gotPublicSSH, ok := gotPublicSSHUntyped.(string) + if !ok { + return fmt.Errorf("output for \"public_key_openssh\" is not a string") + } if !strings.HasPrefix(gotPublicSSH, "ecdsa-sha2-nistp256 ") { return fmt.Errorf("P256 SSH public key is missing ecdsa prefix") } diff --git a/builtin/providers/tls/resource_self_signed_cert_test.go b/builtin/providers/tls/resource_self_signed_cert_test.go index 2ba3b29396..b403956f4f 100644 --- a/builtin/providers/tls/resource_self_signed_cert_test.go +++ b/builtin/providers/tls/resource_self_signed_cert_test.go @@ -60,7 +60,12 @@ EOT } `, testPrivateKey), Check: func(s *terraform.State) error { - got := s.RootModule().Outputs["key_pem"] + gotUntyped := s.RootModule().Outputs["key_pem"] + got, ok := gotUntyped.(string) + if !ok { + return fmt.Errorf("output for \"public_key_openssh\" is not a string") + } + if !strings.HasPrefix(got, "-----BEGIN CERTIFICATE----") { return fmt.Errorf("key is missing cert PEM preamble") } diff --git a/command/apply.go b/command/apply.go index 9d0c3956a9..5598d5c355 100644 --- 
a/command/apply.go +++ b/command/apply.go @@ -251,7 +251,7 @@ func (c *ApplyCommand) Run(args []string) int { } if !c.Destroy { - if outputs := outputsAsString(state, ctx.Module().Config().Outputs); outputs != "" { + if outputs := outputsAsString(state, ctx.Module().Config().Outputs, true); outputs != "" { c.Ui.Output(c.Colorize().Color(outputs)) } } @@ -377,7 +377,7 @@ Options: return strings.TrimSpace(helpText) } -func outputsAsString(state *terraform.State, schema []*config.Output) string { +func outputsAsString(state *terraform.State, schema []*config.Output, includeHeader bool) string { if state == nil { return "" } @@ -386,37 +386,44 @@ func outputsAsString(state *terraform.State, schema []*config.Output) string { outputBuf := new(bytes.Buffer) if len(outputs) > 0 { schemaMap := make(map[string]*config.Output) - for _, s := range schema { - schemaMap[s.Name] = s + if schema != nil { + for _, s := range schema { + schemaMap[s.Name] = s + } } - outputBuf.WriteString("[reset][bold][green]\nOutputs:\n\n") + if includeHeader { + outputBuf.WriteString("[reset][bold][green]\nOutputs:\n\n") + } // Output the outputs in alphabetical order keyLen := 0 - keys := make([]string, 0, len(outputs)) + ks := make([]string, 0, len(outputs)) for key, _ := range outputs { - keys = append(keys, key) + ks = append(ks, key) if len(key) > keyLen { keyLen = len(key) } } - sort.Strings(keys) + sort.Strings(ks) + + for _, k := range ks { + schema, ok := schemaMap[k] + if ok && schema.Sensitive { + outputBuf.WriteString(fmt.Sprintf("%s = \n", k)) + continue + } - for _, k := range keys { v := outputs[k] - - if schemaMap[k].Sensitive { - outputBuf.WriteString(fmt.Sprintf( - " %s%s = \n", - k, - strings.Repeat(" ", keyLen-len(k)))) - } else { - outputBuf.WriteString(fmt.Sprintf( - " %s%s = %s\n", - k, - strings.Repeat(" ", keyLen-len(k)), - v)) + switch typedV := v.(type) { + case string: + outputBuf.WriteString(fmt.Sprintf("%s = %s\n", k, typedV)) + case []interface{}: + outputBuf.WriteString(formatListOutput("", k, typedV)) + outputBuf.WriteString("\n") + case map[string]interface{}: + outputBuf.WriteString(formatMapOutput("", k, typedV)) + outputBuf.WriteString("\n") } } } diff --git a/command/apply_test.go b/command/apply_test.go index dfb4a37e2a..b847a769d1 100644 --- a/command/apply_test.go +++ b/command/apply_test.go @@ -887,8 +887,6 @@ func TestApply_stateNoExist(t *testing.T) { } func TestApply_sensitiveOutput(t *testing.T) { - statePath := testTempFile(t) - p := testProvider() ui := new(cli.MockUi) c := &ApplyCommand{ @@ -898,6 +896,8 @@ func TestApply_sensitiveOutput(t *testing.T) { }, } + statePath := testTempFile(t) + args := []string{ "-state", statePath, testFixturePath("apply-sensitive-output"), @@ -911,11 +911,75 @@ func TestApply_sensitiveOutput(t *testing.T) { if !strings.Contains(output, "notsensitive = Hello world") { t.Fatalf("bad: output should contain 'notsensitive' output\n%s", output) } - if !strings.Contains(output, "sensitive = ") { + if !strings.Contains(output, "sensitive = ") { t.Fatalf("bad: output should contain 'sensitive' output\n%s", output) } } +func TestApply_stateFuture(t *testing.T) { + originalState := testState() + originalState.TFVersion = "99.99.99" + statePath := testStateFile(t, originalState) + + p := testProvider() + ui := new(cli.MockUi) + c := &ApplyCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(p), + Ui: ui, + }, + } + + args := []string{ + "-state", statePath, + testFixturePath("apply"), + } + if code := c.Run(args); code == 0 { + t.Fatal("should fail") + } 
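// The TestApply_stateFuture case beginning just above (and its TestApply_statePast
// counterpart below) exercise Terraform's refusal to touch state written by a newer
// release. The guard itself lives in the core packages; as a rough, standalone sketch
// of the semantic-version comparison involved, assuming the hashicorp/go-version
// library and with the function name made up here, it looks something like this:
package main

import (
    "fmt"

    version "github.com/hashicorp/go-version"
)

// checkStateVersion fails when the state was written by a Terraform release
// newer than the running binary; older state is accepted and upgraded.
func checkStateVersion(running, written string) error {
    run, err := version.NewVersion(running)
    if err != nil {
        return err
    }
    wr, err := version.NewVersion(written)
    if err != nil {
        return err
    }
    if wr.GreaterThan(run) {
        return fmt.Errorf("state was written by Terraform %s, which is newer than %s", wr, run)
    }
    return nil
}

func main() {
    fmt.Println(checkStateVersion("0.7.0", "99.99.99")) // rejected: state is from the future
    fmt.Println(checkStateVersion("0.7.0", "0.1.0"))    // <nil>: older state is fine
}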
+ + f, err := os.Open(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + + newState, err := terraform.ReadState(f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + if !newState.Equal(originalState) { + t.Fatalf("bad: %#v", newState) + } + if newState.TFVersion != originalState.TFVersion { + t.Fatalf("bad: %#v", newState) + } +} + +func TestApply_statePast(t *testing.T) { + originalState := testState() + originalState.TFVersion = "0.1.0" + statePath := testStateFile(t, originalState) + + p := testProvider() + ui := new(cli.MockUi) + c := &ApplyCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(p), + Ui: ui, + }, + } + + args := []string{ + "-state", statePath, + testFixturePath("apply"), + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } +} + func TestApply_vars(t *testing.T) { statePath := testTempFile(t) diff --git a/command/internal_plugin.go b/command/internal_plugin.go new file mode 100644 index 0000000000..01d8c77b93 --- /dev/null +++ b/command/internal_plugin.go @@ -0,0 +1,90 @@ +package command + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/plugin" + "github.com/kardianos/osext" +) + +// InternalPluginCommand is a Command implementation that allows plugins to be +// compiled into the main Terraform binary and executed via a subcommand. +type InternalPluginCommand struct { + Meta +} + +const TFSPACE = "-TFSPACE-" + +// BuildPluginCommandString builds a special string for executing internal +// plugins. It has the following format: +// +// /path/to/terraform-TFSPACE-internal-plugin-TFSPACE-terraform-provider-aws +// +// We split the string on -TFSPACE- to build the command executor. The reason we +// use -TFSPACE- is so we can support spaces in the /path/to/terraform part. +func BuildPluginCommandString(pluginType, pluginName string) (string, error) { + terraformPath, err := osext.Executable() + if err != nil { + return "", err + } + parts := []string{terraformPath, "internal-plugin", pluginType, pluginName} + return strings.Join(parts, TFSPACE), nil +} + +func (c *InternalPluginCommand) Run(args []string) int { + if len(args) != 2 { + log.Printf("Wrong number of args; expected: terraform internal-plugin pluginType pluginName") + return 1 + } + + pluginType := args[0] + pluginName := args[1] + + log.SetPrefix(fmt.Sprintf("%s-%s (internal) ", pluginName, pluginType)) + + switch pluginType { + case "provider": + pluginFunc, found := InternalProviders[pluginName] + if !found { + log.Printf("[ERROR] Could not load provider: %s", pluginName) + return 1 + } + log.Printf("[INFO] Starting provider plugin %s", pluginName) + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: pluginFunc, + }) + case "provisioner": + pluginFunc, found := InternalProvisioners[pluginName] + if !found { + log.Printf("[ERROR] Could not load provisioner: %s", pluginName) + return 1 + } + log.Printf("[INFO] Starting provisioner plugin %s", pluginName) + plugin.Serve(&plugin.ServeOpts{ + ProvisionerFunc: pluginFunc, + }) + default: + log.Printf("[ERROR] Invalid plugin type %s", pluginType) + return 1 + } + + return 0 +} + +func (c *InternalPluginCommand) Help() string { + helpText := ` +Usage: terraform internal-plugin pluginType pluginName + + Runs an internally-compiled version of a plugin from the terraform binary. + + NOTE: this is an internal command and you should not call it yourself. 
+` + + return strings.TrimSpace(helpText) +} + +func (c *InternalPluginCommand) Synopsis() string { + return "internal plugin command" +} diff --git a/command/internal_plugin_core.go b/command/internal_plugin_core.go new file mode 100644 index 0000000000..e0d2c94774 --- /dev/null +++ b/command/internal_plugin_core.go @@ -0,0 +1,13 @@ +// +build core + +// This file is included whenever the 'core' build tag is specified. This is +// used by make core-dev and make core-test to compile a build significantly +// more quickly, but it will not include any provider or provisioner plugins. + +package command + +import "github.com/hashicorp/terraform/plugin" + +var InternalProviders = map[string]plugin.ProviderFunc{} + +var InternalProvisioners = map[string]plugin.ProvisionerFunc{} diff --git a/command/internal_plugin_list.go b/command/internal_plugin_list.go new file mode 100644 index 0000000000..bb565fcf91 --- /dev/null +++ b/command/internal_plugin_list.go @@ -0,0 +1,106 @@ +// +build !core + +// +// This file is automatically generated by scripts/generate-plugins.go -- Do not edit! +// +package command + +import ( + atlasprovider "github.com/hashicorp/terraform/builtin/providers/atlas" + awsprovider "github.com/hashicorp/terraform/builtin/providers/aws" + azureprovider "github.com/hashicorp/terraform/builtin/providers/azure" + azurermprovider "github.com/hashicorp/terraform/builtin/providers/azurerm" + chefprovider "github.com/hashicorp/terraform/builtin/providers/chef" + clcprovider "github.com/hashicorp/terraform/builtin/providers/clc" + cloudflareprovider "github.com/hashicorp/terraform/builtin/providers/cloudflare" + cloudstackprovider "github.com/hashicorp/terraform/builtin/providers/cloudstack" + cobblerprovider "github.com/hashicorp/terraform/builtin/providers/cobbler" + consulprovider "github.com/hashicorp/terraform/builtin/providers/consul" + datadogprovider "github.com/hashicorp/terraform/builtin/providers/datadog" + digitaloceanprovider "github.com/hashicorp/terraform/builtin/providers/digitalocean" + dmeprovider "github.com/hashicorp/terraform/builtin/providers/dme" + dnsimpleprovider "github.com/hashicorp/terraform/builtin/providers/dnsimple" + dockerprovider "github.com/hashicorp/terraform/builtin/providers/docker" + dynprovider "github.com/hashicorp/terraform/builtin/providers/dyn" + fastlyprovider "github.com/hashicorp/terraform/builtin/providers/fastly" + githubprovider "github.com/hashicorp/terraform/builtin/providers/github" + googleprovider "github.com/hashicorp/terraform/builtin/providers/google" + herokuprovider "github.com/hashicorp/terraform/builtin/providers/heroku" + influxdbprovider "github.com/hashicorp/terraform/builtin/providers/influxdb" + libratoprovider "github.com/hashicorp/terraform/builtin/providers/librato" + mailgunprovider "github.com/hashicorp/terraform/builtin/providers/mailgun" + mysqlprovider "github.com/hashicorp/terraform/builtin/providers/mysql" + nullprovider "github.com/hashicorp/terraform/builtin/providers/null" + openstackprovider "github.com/hashicorp/terraform/builtin/providers/openstack" + packetprovider "github.com/hashicorp/terraform/builtin/providers/packet" + postgresqlprovider "github.com/hashicorp/terraform/builtin/providers/postgresql" + powerdnsprovider "github.com/hashicorp/terraform/builtin/providers/powerdns" + rundeckprovider "github.com/hashicorp/terraform/builtin/providers/rundeck" + softlayerprovider "github.com/hashicorp/terraform/builtin/providers/softlayer" + statuscakeprovider 
"github.com/hashicorp/terraform/builtin/providers/statuscake" + templateprovider "github.com/hashicorp/terraform/builtin/providers/template" + terraformprovider "github.com/hashicorp/terraform/builtin/providers/terraform" + testprovider "github.com/hashicorp/terraform/builtin/providers/test" + tlsprovider "github.com/hashicorp/terraform/builtin/providers/tls" + tritonprovider "github.com/hashicorp/terraform/builtin/providers/triton" + ultradnsprovider "github.com/hashicorp/terraform/builtin/providers/ultradns" + vcdprovider "github.com/hashicorp/terraform/builtin/providers/vcd" + vsphereprovider "github.com/hashicorp/terraform/builtin/providers/vsphere" + chefresourceprovisioner "github.com/hashicorp/terraform/builtin/provisioners/chef" + fileresourceprovisioner "github.com/hashicorp/terraform/builtin/provisioners/file" + localexecresourceprovisioner "github.com/hashicorp/terraform/builtin/provisioners/local-exec" + remoteexecresourceprovisioner "github.com/hashicorp/terraform/builtin/provisioners/remote-exec" + + "github.com/hashicorp/terraform/plugin" + "github.com/hashicorp/terraform/terraform" +) + +var InternalProviders = map[string]plugin.ProviderFunc{ + "atlas": atlasprovider.Provider, + "aws": awsprovider.Provider, + "azure": azureprovider.Provider, + "azurerm": azurermprovider.Provider, + "chef": chefprovider.Provider, + "clc": clcprovider.Provider, + "cloudflare": cloudflareprovider.Provider, + "cloudstack": cloudstackprovider.Provider, + "cobbler": cobblerprovider.Provider, + "consul": consulprovider.Provider, + "datadog": datadogprovider.Provider, + "digitalocean": digitaloceanprovider.Provider, + "dme": dmeprovider.Provider, + "dnsimple": dnsimpleprovider.Provider, + "docker": dockerprovider.Provider, + "dyn": dynprovider.Provider, + "fastly": fastlyprovider.Provider, + "github": githubprovider.Provider, + "google": googleprovider.Provider, + "heroku": herokuprovider.Provider, + "influxdb": influxdbprovider.Provider, + "librato": libratoprovider.Provider, + "mailgun": mailgunprovider.Provider, + "mysql": mysqlprovider.Provider, + "null": nullprovider.Provider, + "openstack": openstackprovider.Provider, + "packet": packetprovider.Provider, + "postgresql": postgresqlprovider.Provider, + "powerdns": powerdnsprovider.Provider, + "rundeck": rundeckprovider.Provider, + "softlayer": softlayerprovider.Provider, + "statuscake": statuscakeprovider.Provider, + "template": templateprovider.Provider, + "terraform": terraformprovider.Provider, + "test": testprovider.Provider, + "tls": tlsprovider.Provider, + "triton": tritonprovider.Provider, + "ultradns": ultradnsprovider.Provider, + "vcd": vcdprovider.Provider, + "vsphere": vsphereprovider.Provider, +} + +var InternalProvisioners = map[string]plugin.ProvisionerFunc{ + "chef": func() terraform.ResourceProvisioner { return new(chefresourceprovisioner.ResourceProvisioner) }, + "file": func() terraform.ResourceProvisioner { return new(fileresourceprovisioner.ResourceProvisioner) }, + "local-exec": func() terraform.ResourceProvisioner { return new(localexecresourceprovisioner.ResourceProvisioner) }, + "remote-exec": func() terraform.ResourceProvisioner { return new(remoteexecresourceprovisioner.ResourceProvisioner) }, +} diff --git a/command/internal_plugin_test.go b/command/internal_plugin_test.go new file mode 100644 index 0000000000..6a28f41dc5 --- /dev/null +++ b/command/internal_plugin_test.go @@ -0,0 +1,34 @@ +// +build !core + +package command + +import "testing" + +func TestInternalPlugin_InternalProviders(t *testing.T) { + // Note 
this is a randomish sample and does not check for all plugins + for _, name := range []string{"atlas", "consul", "docker", "template"} { + if _, ok := InternalProviders[name]; !ok { + t.Errorf("Expected to find %s in InternalProviders", name) + } + } +} + +func TestInternalPlugin_InternalProvisioners(t *testing.T) { + for _, name := range []string{"chef", "file", "local-exec", "remote-exec"} { + if _, ok := InternalProvisioners[name]; !ok { + t.Errorf("Expected to find %s in InternalProvisioners", name) + } + } +} + +func TestInternalPlugin_BuildPluginCommandString(t *testing.T) { + actual, err := BuildPluginCommandString("provisioner", "remote-exec") + if err != nil { + t.Fatalf(err.Error()) + } + + expected := "-TFSPACE-internal-plugin-TFSPACE-provisioner-TFSPACE-remote-exec" + if actual[len(actual)-len(expected):] != expected { + t.Errorf("Expected command to end with %s; got:\n%s\n", expected, actual) + } +} diff --git a/command/meta.go b/command/meta.go index bd4855964c..092b37dd5a 100644 --- a/command/meta.go +++ b/command/meta.go @@ -126,7 +126,8 @@ func (m *Meta) Context(copts contextOpts) (*terraform.Context, bool, error) { "variable values, create a new plan file.") } - return plan.Context(opts), true, nil + ctx, err := plan.Context(opts) + return ctx, true, err } } @@ -158,8 +159,8 @@ func (m *Meta) Context(copts contextOpts) (*terraform.Context, bool, error) { opts.Module = mod opts.Parallelism = copts.Parallelism opts.State = state.State() - ctx := terraform.NewContext(opts) - return ctx, false, nil + ctx, err := terraform.NewContext(opts) + return ctx, false, err } // DataDir returns the directory where local data will be stored. @@ -326,6 +327,9 @@ func (m *Meta) flagSet(n string) *flag.FlagSet { }() f.SetOutput(errW) + // Set the default Usage to empty + f.Usage = func() {} + return f } diff --git a/command/output.go b/command/output.go index 7c2324b41b..5808bae77a 100644 --- a/command/output.go +++ b/command/output.go @@ -1,9 +1,11 @@ package command import ( + "bytes" "flag" "fmt" "sort" + "strconv" "strings" ) @@ -27,7 +29,7 @@ func (c *OutputCommand) Run(args []string) int { } args = cmdFlags.Args() - if len(args) > 1 { + if len(args) > 2 { c.Ui.Error( "The output command expects exactly one argument with the name\n" + "of an output variable or no arguments to show all outputs.\n") @@ -40,6 +42,11 @@ func (c *OutputCommand) Run(args []string) int { name = args[0] } + index := "" + if len(args) > 1 { + index = args[1] + } + stateStore, err := c.Meta.State() if err != nil { c.Ui.Error(fmt.Sprintf("Error reading state: %s", err)) @@ -74,17 +81,7 @@ func (c *OutputCommand) Run(args []string) int { } if name == "" { - ks := make([]string, 0, len(mod.Outputs)) - for k, _ := range mod.Outputs { - ks = append(ks, k) - } - sort.Strings(ks) - - for _, k := range ks { - v := mod.Outputs[k] - - c.Ui.Output(fmt.Sprintf("%s = %s", k, v)) - } + c.Ui.Output(outputsAsString(state, nil, false)) return 0 } @@ -98,10 +95,101 @@ func (c *OutputCommand) Run(args []string) int { return 1 } - c.Ui.Output(v) + switch output := v.(type) { + case string: + c.Ui.Output(output) + return 0 + case []interface{}: + if index == "" { + c.Ui.Output(formatListOutput("", "", output)) + break + } + + indexInt, err := strconv.Atoi(index) + if err != nil { + c.Ui.Error(fmt.Sprintf( + "The index %q requested is not valid for the list output\n"+ + "%q - indices must be numeric, and in the range 0-%d", index, name, + len(output)-1)) + break + } + + if indexInt < 0 || indexInt >= len(output) { + 
c.Ui.Error(fmt.Sprintf( + "The index %d requested is not valid for the list output\n"+ + "%q - indices must be in the range 0-%d", indexInt, name, + len(output)-1)) + break + } + + c.Ui.Output(fmt.Sprintf("%s", output[indexInt])) + return 0 + case map[string]interface{}: + if index == "" { + c.Ui.Output(formatMapOutput("", "", output)) + break + } + + if value, ok := output[index]; ok { + c.Ui.Output(fmt.Sprintf("%s", value)) + return 0 + } else { + return 1 + } + default: + panic(fmt.Errorf("Unknown output type: %T", output)) + } + return 0 } +func formatListOutput(indent, outputName string, outputList []interface{}) string { + keyIndent := "" + + outputBuf := new(bytes.Buffer) + if outputName != "" { + outputBuf.WriteString(fmt.Sprintf("%s%s = [", indent, outputName)) + keyIndent = " " + } + + for _, value := range outputList { + outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, keyIndent, value)) + } + + if outputName != "" { + outputBuf.WriteString(fmt.Sprintf("\n%s]", indent)) + } + + return strings.TrimPrefix(outputBuf.String(), "\n") +} + +func formatMapOutput(indent, outputName string, outputMap map[string]interface{}) string { + ks := make([]string, 0, len(outputMap)) + for k, _ := range outputMap { + ks = append(ks, k) + } + sort.Strings(ks) + + keyIndent := "" + + outputBuf := new(bytes.Buffer) + if outputName != "" { + outputBuf.WriteString(fmt.Sprintf("%s%s = {", indent, outputName)) + keyIndent = " " + } + + for _, k := range ks { + v := outputMap[k] + outputBuf.WriteString(fmt.Sprintf("\n%s%s%s = %v", indent, keyIndent, k, v)) + } + + if outputName != "" { + outputBuf.WriteString(fmt.Sprintf("\n%s}", indent)) + } + + return strings.TrimPrefix(outputBuf.String(), "\n") +} + func (c *OutputCommand) Help() string { helpText := ` Usage: terraform output [options] [NAME] diff --git a/command/output_test.go b/command/output_test.go index e8d4690298..9c79f82ca1 100644 --- a/command/output_test.go +++ b/command/output_test.go @@ -16,7 +16,7 @@ func TestOutput(t *testing.T) { Modules: []*terraform.ModuleState{ &terraform.ModuleState{ Path: []string{"root"}, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "foo": "bar", }, }, @@ -52,13 +52,13 @@ func TestModuleOutput(t *testing.T) { Modules: []*terraform.ModuleState{ &terraform.ModuleState{ Path: []string{"root"}, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "foo": "bar", }, }, &terraform.ModuleState{ Path: []string{"root", "my_module"}, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "blah": "tastatur", }, }, @@ -96,7 +96,7 @@ func TestMissingModuleOutput(t *testing.T) { Modules: []*terraform.ModuleState{ &terraform.ModuleState{ Path: []string{"root"}, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "foo": "bar", }, }, @@ -129,7 +129,7 @@ func TestOutput_badVar(t *testing.T) { Modules: []*terraform.ModuleState{ &terraform.ModuleState{ Path: []string{"root"}, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "foo": "bar", }, }, @@ -160,7 +160,7 @@ func TestOutput_blank(t *testing.T) { Modules: []*terraform.ModuleState{ &terraform.ModuleState{ Path: []string{"root"}, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "foo": "bar", "name": "john-doe", }, @@ -253,7 +253,7 @@ func TestOutput_noVars(t *testing.T) { Modules: []*terraform.ModuleState{ &terraform.ModuleState{ Path: []string{"root"}, - Outputs: map[string]string{}, + Outputs: map[string]interface{}{}, }, }, } @@ -282,7 +282,7 @@ func TestOutput_stateDefault(t *testing.T) { 
Modules: []*terraform.ModuleState{ &terraform.ModuleState{ Path: []string{"root"}, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "foo": "bar", }, }, diff --git a/command/plan_test.go b/command/plan_test.go index 9b89018bf4..9357931741 100644 --- a/command/plan_test.go +++ b/command/plan_test.go @@ -345,6 +345,70 @@ func TestPlan_stateDefault(t *testing.T) { } } +func TestPlan_stateFuture(t *testing.T) { + originalState := testState() + originalState.TFVersion = "99.99.99" + statePath := testStateFile(t, originalState) + + p := testProvider() + ui := new(cli.MockUi) + c := &PlanCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(p), + Ui: ui, + }, + } + + args := []string{ + "-state", statePath, + testFixturePath("plan"), + } + if code := c.Run(args); code == 0 { + t.Fatal("should fail") + } + + f, err := os.Open(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + + newState, err := terraform.ReadState(f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + if !newState.Equal(originalState) { + t.Fatalf("bad: %#v", newState) + } + if newState.TFVersion != originalState.TFVersion { + t.Fatalf("bad: %#v", newState) + } +} + +func TestPlan_statePast(t *testing.T) { + originalState := testState() + originalState.TFVersion = "0.1.0" + statePath := testStateFile(t, originalState) + + p := testProvider() + ui := new(cli.MockUi) + c := &PlanCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(p), + Ui: ui, + }, + } + + args := []string{ + "-state", statePath, + testFixturePath("plan"), + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } +} + func TestPlan_vars(t *testing.T) { p := testProvider() ui := new(cli.MockUi) diff --git a/command/refresh.go b/command/refresh.go index 7a6f7f9c38..0c41bcbe43 100644 --- a/command/refresh.go +++ b/command/refresh.go @@ -109,7 +109,7 @@ func (c *RefreshCommand) Run(args []string) int { return 1 } - if outputs := outputsAsString(newState, ctx.Module().Config().Outputs); outputs != "" { + if outputs := outputsAsString(newState, ctx.Module().Config().Outputs, true); outputs != "" { c.Ui.Output(c.Colorize().Color(outputs)) } diff --git a/command/refresh_test.go b/command/refresh_test.go index b7cf3b7d16..91ef22b173 100644 --- a/command/refresh_test.go +++ b/command/refresh_test.go @@ -221,6 +221,109 @@ func TestRefresh_defaultState(t *testing.T) { } } +func TestRefresh_futureState(t *testing.T) { + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("err: %s", err) + } + if err := os.Chdir(testFixturePath("refresh")); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Chdir(cwd) + + state := testState() + state.TFVersion = "99.99.99" + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + c := &RefreshCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(p), + Ui: ui, + }, + } + + args := []string{ + "-state", statePath, + } + if code := c.Run(args); code == 0 { + t.Fatal("should fail") + } + + if p.RefreshCalled { + t.Fatal("refresh should not be called") + } + + f, err := os.Open(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + + newState, err := terraform.ReadState(f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(newState.String()) + expected := strings.TrimSpace(state.String()) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } +} + +func TestRefresh_pastState(t *testing.T) { + state := testState() + state.TFVersion = "0.1.0" + statePath := testStateFile(t, state) + 
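// The plan/apply/refresh version tests in this area all follow the same round trip:
// write a state file, run the command, read the file back with terraform.ReadState,
// and compare. A compact sketch of that round trip in isolation, assuming
// terraform.WriteState and terraform.ReadState serialize and parse state the way
// the testStateFile helper and the assertions above rely on:
package main

import (
    "bytes"
    "fmt"

    "github.com/hashicorp/terraform/terraform"
)

func main() {
    orig := &terraform.State{TFVersion: "0.1.0"}

    // Serialize, as testStateFile does into a temporary file.
    var buf bytes.Buffer
    if err := terraform.WriteState(orig, &buf); err != nil {
        panic(err)
    }

    // Parse it back, as the tests do after the command has run.
    readBack, err := terraform.ReadState(&buf)
    if err != nil {
        panic(err)
    }
    fmt.Println(readBack.TFVersion) // 0.1.0
}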
+ p := testProvider() + ui := new(cli.MockUi) + c := &RefreshCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(p), + Ui: ui, + }, + } + + p.RefreshFn = nil + p.RefreshReturn = &terraform.InstanceState{ID: "yes"} + + args := []string{ + "-state", statePath, + testFixturePath("refresh"), + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + if !p.RefreshCalled { + t.Fatal("refresh should be called") + } + + f, err := os.Open(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + + newState, err := terraform.ReadState(f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(newState.String()) + expected := strings.TrimSpace(testRefreshStr) + if actual != expected { + t.Fatalf("bad:\n\n%s", actual) + } + + if newState.TFVersion != terraform.Version { + t.Fatalf("bad:\n\n%s", newState.TFVersion) + } +} + func TestRefresh_outPath(t *testing.T) { state := testState() statePath := testStateFile(t, state) diff --git a/command/state_command.go b/command/state_command.go new file mode 100644 index 0000000000..ce4e0a2ec8 --- /dev/null +++ b/command/state_command.go @@ -0,0 +1,40 @@ +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +// StateCommand is a Command implementation that just shows help for +// the subcommands nested below it. +type StateCommand struct { + Meta +} + +func (c *StateCommand) Run(args []string) int { + return cli.RunResultHelp +} + +func (c *StateCommand) Help() string { + helpText := ` +Usage: terraform state [options] [args] + + This command has subcommands for advanced state management. + + These subcommands can be used to slice and dice the Terraform state. + This is sometimes necessary in advanced cases. For your safety, all + state management commands that modify the state create a timestamped + backup of the state prior to making modifications. + + The structure and output of the commands is specifically tailored to work + well with the common Unix utilities such as grep, awk, etc. We recommend + using those tools to perform more advanced state tasks. + +` + return strings.TrimSpace(helpText) +} + +func (c *StateCommand) Synopsis() string { + return "Advanced state management" +} diff --git a/command/state_list.go b/command/state_list.go new file mode 100644 index 0000000000..daa96b684c --- /dev/null +++ b/command/state_list.go @@ -0,0 +1,101 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/cli" +) + +// StateListCommand is a Command implementation that lists the resources +// within a state file. +type StateListCommand struct { + Meta +} + +func (c *StateListCommand) Run(args []string) int { + args = c.Meta.process(args, true) + + cmdFlags := c.Meta.flagSet("state list") + cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") + if err := cmdFlags.Parse(args); err != nil { + return cli.RunResultHelp + } + args = cmdFlags.Args() + + state, err := c.State() + if err != nil { + c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) + return cli.RunResultHelp + } + + stateReal := state.State() + if stateReal == nil { + c.Ui.Error(fmt.Sprintf(errStateNotFound)) + return 1 + } + + filter := &terraform.StateFilter{State: stateReal} + results, err := filter.Filter(args...) 
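// The state list implementation around this hunk drives terraform.StateFilter and
// prints the address of every instance it matches. The same pattern works against an
// in-memory state; a small sketch, using a fixture shaped like the ones in the tests
// later in this diff:
package main

import (
    "fmt"

    "github.com/hashicorp/terraform/terraform"
)

func main() {
    s := &terraform.State{
        Modules: []*terraform.ModuleState{
            {
                Path: []string{"root"},
                Resources: map[string]*terraform.ResourceState{
                    "test_instance.foo": {
                        Type:    "test_instance",
                        Primary: &terraform.InstanceState{ID: "bar"},
                    },
                },
            },
        },
    }

    // Filter with no patterns to match everything, then keep only instances.
    filter := &terraform.StateFilter{State: s}
    results, err := filter.Filter()
    if err != nil {
        panic(err)
    }
    for _, r := range results {
        if _, ok := r.Value.(*terraform.InstanceState); ok {
            fmt.Println(r.Address) // test_instance.foo
        }
    }
}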
+ if err != nil { + c.Ui.Error(fmt.Sprintf(errStateFilter, err)) + return cli.RunResultHelp + } + + for _, result := range results { + if _, ok := result.Value.(*terraform.InstanceState); ok { + c.Ui.Output(result.Address) + } + } + + return 0 +} + +func (c *StateListCommand) Help() string { + helpText := ` +Usage: terraform state list [options] [pattern...] + + List resources in the Terraform state. + + This command lists resources in the Terraform state. The pattern argument + can be used to filter the resources by resource or module. If no pattern + is given, all resources are listed. + + The pattern argument is meant to provide very simple filtering. For + advanced filtering, please use tools such as "grep". The output of this + command is designed to be friendly for this usage. + + The pattern argument accepts any resource targeting syntax. Please + refer to the documentation on resource targeting syntax for more + information. + +Options: + + -state=statefile Path to a Terraform state file to use to look + up Terraform-managed resources. By default it will + use the state "terraform.tfstate" if it exists. + +` + return strings.TrimSpace(helpText) +} + +func (c *StateListCommand) Synopsis() string { + return "List resources in the state" +} + +const errStateFilter = `Error filtering state: %[1]s + +Please ensure that all your addresses are formatted properly.` + +const errStateLoadingState = `Error loading the state: %[1]s + +Please ensure that your Terraform state exists and that you've +configured it properly. You can use the "-state" flag to point +Terraform at another state file.` + +const errStateNotFound = `No state file was found! + +State management commands require a state file. Run this command +in a directory where Terraform has been run or use the -state flag +to point the command to a specific state location.` diff --git a/command/state_list_test.go b/command/state_list_test.go new file mode 100644 index 0000000000..0edb97d693 --- /dev/null +++ b/command/state_list_test.go @@ -0,0 +1,59 @@ +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func TestStateList(t *testing.T) { + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + c := &StateListCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(p), + Ui: ui, + }, + } + + args := []string{ + "-state", statePath, + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test that outputs were displayed + expected := strings.TrimSpace(testStateListOutput) + "\n" + actual := ui.OutputWriter.String() + if actual != expected { + t.Fatalf("Expected:\n%q\n\nTo equal: %q", actual, expected) + } +} + +func TestStateList_noState(t *testing.T) { + tmp, cwd := testCwd(t) + defer testFixCwd(t, tmp, cwd) + + p := testProvider() + ui := new(cli.MockUi) + c := &StateListCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(p), + Ui: ui, + }, + } + + args := []string{} + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } +} + +const testStateListOutput = ` +test_instance.foo +` diff --git a/command/state_meta.go b/command/state_meta.go new file mode 100644 index 0000000000..f576004e34 --- /dev/null +++ b/command/state_meta.go @@ -0,0 +1,34 @@ +package command + +import ( + "errors" + + "github.com/hashicorp/terraform/terraform" +) + +// StateMeta is the meta struct that should be embedded in state subcommands. 
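// The "terraform state show" command a little further below lays out attribute
// names and values with github.com/ryanuber/columnize, the dependency added in
// Godeps above. A minimal standalone example of that formatting (the attribute
// data here is made up):
package main

import (
    "fmt"

    "github.com/ryanuber/columnize"
)

func main() {
    rows := []string{
        "id | i-abc123",
        "ami | ami-12345678",
        "instance_type | t2.micro",
    }

    config := columnize.DefaultConfig()
    config.Glue = " = " // join the aligned columns with " = " instead of spaces
    fmt.Println(columnize.Format(rows, config))
    // id            = i-abc123
    // ami           = ami-12345678
    // instance_type = t2.micro
}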
+type StateMeta struct{} + +// filterInstance filters a single instance out of filter results. +func (c *StateMeta) filterInstance(rs []*terraform.StateFilterResult) (*terraform.StateFilterResult, error) { + var result *terraform.StateFilterResult + for _, r := range rs { + if _, ok := r.Value.(*terraform.InstanceState); !ok { + continue + } + + if result != nil { + return nil, errors.New(errStateMultiple) + } + + result = r + } + + return result, nil +} + +const errStateMultiple = `Multiple instances found for the given pattern! + +This command requires that the pattern match exactly one instance +of a resource. To view the matched instances, use "terraform state list". +Please modify the pattern to match only a single instance.` diff --git a/command/state_show.go b/command/state_show.go new file mode 100644 index 0000000000..55bc309104 --- /dev/null +++ b/command/state_show.go @@ -0,0 +1,100 @@ +package command + +import ( + "fmt" + "sort" + "strings" + + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/cli" + "github.com/ryanuber/columnize" +) + +// StateShowCommand is a Command implementation that shows a single resource. +type StateShowCommand struct { + Meta + StateMeta +} + +func (c *StateShowCommand) Run(args []string) int { + args = c.Meta.process(args, true) + + cmdFlags := c.Meta.flagSet("state show") + cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") + if err := cmdFlags.Parse(args); err != nil { + return cli.RunResultHelp + } + args = cmdFlags.Args() + + state, err := c.State() + if err != nil { + c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) + return cli.RunResultHelp + } + + stateReal := state.State() + if stateReal == nil { + c.Ui.Error(fmt.Sprintf(errStateNotFound)) + return 1 + } + + filter := &terraform.StateFilter{State: stateReal} + results, err := filter.Filter(args...) + if err != nil { + c.Ui.Error(fmt.Sprintf(errStateFilter, err)) + return 1 + } + + instance, err := c.filterInstance(results) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + is := instance.Value.(*terraform.InstanceState) + + // Sort the keys + keys := make([]string, 0, len(is.Attributes)) + for k, _ := range is.Attributes { + keys = append(keys, k) + } + sort.Strings(keys) + + // Build the output + output := make([]string, 0, len(is.Attributes)+1) + output = append(output, fmt.Sprintf("id | %s", is.ID)) + for _, k := range keys { + if k != "id" { + output = append(output, fmt.Sprintf("%s | %s", k, is.Attributes[k])) + } + } + + // Output + config := columnize.DefaultConfig() + config.Glue = " = " + c.Ui.Output(columnize.Format(output, config)) + return 0 +} + +func (c *StateShowCommand) Help() string { + helpText := ` +Usage: terraform state show [options] ADDRESS + + Shows the attributes of a resource in the Terraform state. + + This command shows the attributes of a single resource in the Terraform + state. The address argument must be used to specify a single resource. + You can view the list of available resources with "terraform state list". + +Options: + + -state=statefile Path to a Terraform state file to use to look + up Terraform-managed resources. By default it will + use the state "terraform.tfstate" if it exists. 
+ +` + return strings.TrimSpace(helpText) +} + +func (c *StateShowCommand) Synopsis() string { + return "Show a resource in the state" +} diff --git a/command/state_show_test.go b/command/state_show_test.go new file mode 100644 index 0000000000..1e2dba08c2 --- /dev/null +++ b/command/state_show_test.go @@ -0,0 +1,133 @@ +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/cli" +) + +func TestStateShow(t *testing.T) { + state := &terraform.State{ + Modules: []*terraform.ModuleState{ + &terraform.ModuleState{ + Path: []string{"root"}, + Resources: map[string]*terraform.ResourceState{ + "test_instance.foo": &terraform.ResourceState{ + Type: "test_instance", + Primary: &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "value", + "bar": "value", + }, + }, + }, + }, + }, + }, + } + + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + c := &StateShowCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(p), + Ui: ui, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Test that outputs were displayed + expected := strings.TrimSpace(testStateShowOutput) + "\n" + actual := ui.OutputWriter.String() + if actual != expected { + t.Fatalf("Expected:\n%q\n\nTo equal: %q", actual, expected) + } +} + +func TestStateShow_multi(t *testing.T) { + state := &terraform.State{ + Modules: []*terraform.ModuleState{ + &terraform.ModuleState{ + Path: []string{"root"}, + Resources: map[string]*terraform.ResourceState{ + "test_instance.foo.0": &terraform.ResourceState{ + Type: "test_instance", + Primary: &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "value", + "bar": "value", + }, + }, + }, + "test_instance.foo.1": &terraform.ResourceState{ + Type: "test_instance", + Primary: &terraform.InstanceState{ + ID: "bar", + Attributes: map[string]string{ + "foo": "value", + "bar": "value", + }, + }, + }, + }, + }, + }, + } + + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + c := &StateShowCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(p), + Ui: ui, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } +} + +func TestStateShow_noState(t *testing.T) { + tmp, cwd := testCwd(t) + defer testFixCwd(t, tmp, cwd) + + p := testProvider() + ui := new(cli.MockUi) + c := &StateShowCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(p), + Ui: ui, + }, + } + + args := []string{} + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } +} + +const testStateShowOutput = ` +id = bar +bar = value +foo = value +` diff --git a/commands.go b/commands.go index 903f965de1..290673c358 100644 --- a/commands.go +++ b/commands.go @@ -10,6 +10,7 @@ import ( // Commands is the mapping of all the available Terraform commands. var Commands map[string]cli.CommandFactory +var PlumbingCommands map[string]struct{} // Ui is the cli.Ui used for communicating to the outside world. 
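// commands.go below registers "state", "state list", and "state show" as separate
// factories; mitchellh/cli (bumped in Godeps above) treats the space-separated keys
// as nested subcommands, and a parent command that returns cli.RunResultHelp just
// prints its help text. A trimmed sketch of that pattern with made-up command names:
package main

import (
    "fmt"
    "os"

    "github.com/mitchellh/cli"
)

type stateCommand struct{}

func (c *stateCommand) Run(args []string) int { return cli.RunResultHelp }
func (c *stateCommand) Help() string          { return "Usage: demo state <subcommand>" }
func (c *stateCommand) Synopsis() string      { return "Advanced state management" }

type stateListCommand struct{}

func (c *stateListCommand) Run(args []string) int { fmt.Println("test_instance.foo"); return 0 }
func (c *stateListCommand) Help() string          { return "Usage: demo state list" }
func (c *stateListCommand) Synopsis() string      { return "List resources in the state" }

func main() {
    c := cli.NewCLI("demo", "0.0.1")
    c.Args = os.Args[1:]
    c.Commands = map[string]cli.CommandFactory{
        "state":      func() (cli.Command, error) { return &stateCommand{}, nil },
        "state list": func() (cli.Command, error) { return &stateListCommand{}, nil },
    }

    status, err := c.Run()
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
    os.Exit(status)
}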
var Ui cli.Ui @@ -34,6 +35,10 @@ func init() { Ui: Ui, } + PlumbingCommands = map[string]struct{}{ + "state": struct{}{}, // includes all subcommands + } + Commands = map[string]cli.CommandFactory{ "apply": func() (cli.Command, error) { return &command.ApplyCommand{ @@ -74,6 +79,12 @@ func init() { }, nil }, + "internal-plugin": func() (cli.Command, error) { + return &command.InternalPluginCommand{ + Meta: meta, + }, nil + }, + "output": func() (cli.Command, error) { return &command.OutputCommand{ Meta: meta, @@ -137,6 +148,28 @@ func init() { Meta: meta, }, nil }, + + //----------------------------------------------------------- + // Plumbing + //----------------------------------------------------------- + + "state": func() (cli.Command, error) { + return &command.StateCommand{ + Meta: meta, + }, nil + }, + + "state list": func() (cli.Command, error) { + return &command.StateListCommand{ + Meta: meta, + }, nil + }, + + "state show": func() (cli.Command, error) { + return &command.StateShowCommand{ + Meta: meta, + }, nil + }, } } diff --git a/config.go b/config.go index c9b2a7f754..5a3e929bde 100644 --- a/config.go +++ b/config.go @@ -1,3 +1,4 @@ +//go:generate go run ./scripts/generate-plugins.go package main import ( @@ -9,10 +10,13 @@ import ( "path/filepath" "strings" + "github.com/hashicorp/go-plugin" "github.com/hashicorp/hcl" - "github.com/hashicorp/terraform/plugin" + "github.com/hashicorp/terraform/command" + tfplugin "github.com/hashicorp/terraform/plugin" "github.com/hashicorp/terraform/terraform" "github.com/kardianos/osext" + "github.com/mitchellh/cli" ) // Config is the structure of the configuration for the Terraform CLI. @@ -73,18 +77,22 @@ func LoadConfig(path string) (*Config, error) { return &result, nil } -// Discover discovers plugins. +// Discover plugins located on disk, and fall back on plugins baked into the +// Terraform binary. // -// This looks in the directory of the executable and the CWD, in that -// order for priority. -func (c *Config) Discover() error { - // Look in the cwd. - if err := c.discover("."); err != nil { - return err - } - - // Look in the plugins directory. This will override any found - // in the current directory. +// We look in the following places for plugins: +// +// 1. Terraform configuration path +// 2. Path where Terraform is installed +// 3. Path where Terraform is invoked +// +// Whichever file is discoverd LAST wins. +// +// Finally, we look at the list of plugins compiled into Terraform. If any of +// them has not been found on disk we use the internal version. This allows +// users to add / replace plugins without recompiling the main binary. +func (c *Config) Discover(ui cli.Ui) error { + // Look in ~/.terraform.d/plugins/ dir, err := ConfigDir() if err != nil { log.Printf("[ERR] Error loading config directory: %s", err) @@ -94,8 +102,8 @@ func (c *Config) Discover() error { } } - // Next, look in the same directory as the executable. Any conflicts - // will overwrite those found in our current directory. + // Next, look in the same directory as the Terraform executable, usually + // /usr/local/bin. If found, this replaces what we found in the config path. exePath, err := osext.Executable() if err != nil { log.Printf("[ERR] Error loading exe directory: %s", err) @@ -105,6 +113,42 @@ func (c *Config) Discover() error { } } + // Finally look in the cwd (where we are invoke Terraform). If found, this + // replaces anything we found in the config / install paths. 
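// The internal-plugin fallback below stores a single string per plugin, built by
// BuildPluginCommandString earlier in this diff: the Terraform binary path,
// "internal-plugin", and the plugin type and name joined with the -TFSPACE- marker.
// A small sketch of turning such a string back into an executable command, mirroring
// the pluginCmd change further down (the example path is made up):
package main

import (
    "fmt"
    "os/exec"
    "strings"
)

const tfspace = "-TFSPACE-"

// commandFromPluginString rebuilds the command line encoded by
// BuildPluginCommandString; spaces in the binary path survive because the
// parts are joined with -TFSPACE- rather than whitespace.
func commandFromPluginString(path string) *exec.Cmd {
    parts := strings.Split(path, tfspace)
    return exec.Command(parts[0], parts[1:]...)
}

func main() {
    cmd := commandFromPluginString("/usr/local/bin/terraform-TFSPACE-internal-plugin-TFSPACE-provider-TFSPACE-aws")
    fmt.Println(cmd.Args) // [/usr/local/bin/terraform internal-plugin provider aws]
}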
+ if err := c.discover("."); err != nil { + return err + } + + // Finally, if we have a plugin compiled into Terraform and we didn't find + // a replacement on disk, we'll just use the internal version. + for name, _ := range command.InternalProviders { + if path, found := c.Providers[name]; found { + ui.Warn(fmt.Sprintf("[WARN] %s overrides an internal plugin for %s-provider.\n"+ + " If you did not expect to see this message you will need to remove the old plugin.\n"+ + " See https://www.terraform.io/docs/internals/internal-plugins.html", path, name)) + } else { + + cmd, err := command.BuildPluginCommandString("provider", name) + if err != nil { + return err + } + c.Providers[name] = cmd + } + } + for name, _ := range command.InternalProvisioners { + if path, found := c.Provisioners[name]; found { + ui.Warn(fmt.Sprintf("[WARN] %s overrides an internal plugin for %s-provisioner.\n"+ + " If you did not expect to see this message you will need to remove the old plugin.\n"+ + " See https://www.terraform.io/docs/internals/internal-plugins.html", path, name)) + } else { + cmd, err := command.BuildPluginCommandString("provisioner", name) + if err != nil { + return err + } + c.Provisioners[name] = cmd + } + } + return nil } @@ -202,7 +246,9 @@ func (c *Config) providerFactory(path string) terraform.ResourceProviderFactory // Build the plugin client configuration and init the plugin var config plugin.ClientConfig config.Cmd = pluginCmd(path) + config.HandshakeConfig = tfplugin.Handshake config.Managed = true + config.Plugins = tfplugin.PluginMap client := plugin.NewClient(&config) return func() (terraform.ResourceProvider, error) { @@ -213,7 +259,12 @@ func (c *Config) providerFactory(path string) terraform.ResourceProviderFactory return nil, err } - return rpcClient.ResourceProvider() + raw, err := rpcClient.Dispense(tfplugin.ProviderPluginName) + if err != nil { + return nil, err + } + + return raw.(terraform.ResourceProvider), nil } } @@ -232,8 +283,10 @@ func (c *Config) ProvisionerFactories() map[string]terraform.ResourceProvisioner func (c *Config) provisionerFactory(path string) terraform.ResourceProvisionerFactory { // Build the plugin client configuration and init the plugin var config plugin.ClientConfig + config.HandshakeConfig = tfplugin.Handshake config.Cmd = pluginCmd(path) config.Managed = true + config.Plugins = tfplugin.PluginMap client := plugin.NewClient(&config) return func() (terraform.ResourceProvisioner, error) { @@ -242,7 +295,12 @@ func (c *Config) provisionerFactory(path string) terraform.ResourceProvisionerFa return nil, err } - return rpcClient.ResourceProvisioner() + raw, err := rpcClient.Dispense(tfplugin.ProvisionerPluginName) + if err != nil { + return nil, err + } + + return raw.(terraform.ResourceProvisioner), nil } } @@ -270,6 +328,12 @@ func pluginCmd(path string) *exec.Cmd { } } + // No plugin binary found, so try to use an internal plugin. + if strings.Contains(path, command.TFSPACE) { + parts := strings.Split(path, command.TFSPACE) + return exec.Command(parts[0], parts[1:]...) + } + // If we still don't have a path, then just set it to the original // given path. 
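// The provider and provisioner factories above now go through hashicorp/go-plugin:
// the client is configured with Terraform's handshake and plugin map, and the
// concrete implementation is fetched with Dispense. A condensed, library-style
// sketch of that client side, assuming the same exported names the diff uses
// (tfplugin.Handshake, tfplugin.PluginMap, tfplugin.ProviderPluginName):
package pluginclient

import (
    "os/exec"

    plugin "github.com/hashicorp/go-plugin"
    tfplugin "github.com/hashicorp/terraform/plugin"
    "github.com/hashicorp/terraform/terraform"
)

// providerFromCmd launches the given plugin binary (or internal-plugin command)
// and returns the ResourceProvider implementation it serves.
func providerFromCmd(cmd *exec.Cmd) (terraform.ResourceProvider, error) {
    client := plugin.NewClient(&plugin.ClientConfig{
        Cmd:             cmd,
        HandshakeConfig: tfplugin.Handshake,
        Plugins:         tfplugin.PluginMap,
        Managed:         true,
    })

    rpcClient, err := client.Client()
    if err != nil {
        return nil, err
    }

    raw, err := rpcClient.Dispense(tfplugin.ProviderPluginName)
    if err != nil {
        return nil, err
    }
    return raw.(terraform.ResourceProvider), nil
}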
if cmdPath == "" { diff --git a/config/config.go b/config/config.go index b3a48be1d1..d007940222 100644 --- a/config/config.go +++ b/config/config.go @@ -11,7 +11,6 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/hil" "github.com/hashicorp/hil/ast" - "github.com/hashicorp/terraform/flatmap" "github.com/mitchellh/mapstructure" "github.com/mitchellh/reflectwalk" ) @@ -162,6 +161,7 @@ type VariableType byte const ( VariableTypeUnknown VariableType = iota VariableTypeString + VariableTypeList VariableTypeMap ) @@ -171,6 +171,8 @@ func (v VariableType) Printable() string { return "string" case VariableTypeMap: return "map" + case VariableTypeList: + return "list" default: return "unknown" } @@ -239,7 +241,7 @@ func (c *Config) Validate() error { } interp := false - fn := func(ast.Node) (string, error) { + fn := func(ast.Node) (interface{}, error) { interp = true return "", nil } @@ -352,16 +354,30 @@ func (c *Config) Validate() error { m.Id())) } - // Check that the configuration can all be strings + // Check that the configuration can all be strings, lists or maps raw := make(map[string]interface{}) for k, v := range m.RawConfig.Raw { var strVal string - if err := mapstructure.WeakDecode(v, &strVal); err != nil { - errs = append(errs, fmt.Errorf( - "%s: variable %s must be a string value", - m.Id(), k)) + if err := mapstructure.WeakDecode(v, &strVal); err == nil { + raw[k] = strVal + continue } - raw[k] = strVal + + var mapVal map[string]interface{} + if err := mapstructure.WeakDecode(v, &mapVal); err == nil { + raw[k] = mapVal + continue + } + + var sliceVal []interface{} + if err := mapstructure.WeakDecode(v, &sliceVal); err == nil { + raw[k] = sliceVal + continue + } + + errs = append(errs, fmt.Errorf( + "%s: variable %s must be a string, list or map value", + m.Id(), k)) } // Check for invalid count variables @@ -450,7 +466,7 @@ func (c *Config) Validate() error { } // Interpolate with a fixed number to verify that its a number. - r.RawCount.interpolate(func(root ast.Node) (string, error) { + r.RawCount.interpolate(func(root ast.Node) (interface{}, error) { // Execute the node but transform the AST so that it returns // a fixed value of "5" for all interpolations. result, err := hil.Eval( @@ -461,7 +477,7 @@ func (c *Config) Validate() error { return "", err } - return result.Value.(string), nil + return result.Value, nil }) _, err := strconv.ParseInt(r.RawCount.Value().(string), 0, 0) if err != nil { @@ -722,7 +738,8 @@ func (c *Config) validateVarContextFn( if rv.Multi && rv.Index == -1 { *errs = append(*errs, fmt.Errorf( - "%s: multi-variable must be in a slice", source)) + "%s: use of the splat ('*') operator must be wrapped in a list declaration", + source)) } } } @@ -809,28 +826,6 @@ func (r *Resource) mergerMerge(m merger) merger { return &result } -// DefaultsMap returns a map of default values for this variable. -func (v *Variable) DefaultsMap() map[string]string { - if v.Default == nil { - return nil - } - - n := fmt.Sprintf("var.%s", v.Name) - switch v.Type() { - case VariableTypeString: - return map[string]string{n: v.Default.(string)} - case VariableTypeMap: - result := flatmap.Flatten(map[string]interface{}{ - n: v.Default.(map[string]string), - }) - result[n] = v.Name - - return result - default: - return nil - } -} - // Merge merges two variables to create a new third variable. 
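// Both the module-variable validation above and inferTypeFromDefault below probe a
// raw value with mapstructure.WeakDecode, trying string, then map, then list. A
// compact standalone illustration of that probing order (the inferType helper is
// made up for this example):
package main

import (
    "fmt"

    "github.com/mitchellh/mapstructure"
)

// inferType reports which variable type a raw default value would be treated as,
// using the same order of preference the config package applies.
func inferType(v interface{}) string {
    var s string
    if err := mapstructure.WeakDecode(v, &s); err == nil {
        return "string"
    }
    var m map[string]interface{}
    if err := mapstructure.WeakDecode(v, &m); err == nil {
        return "map"
    }
    var l []string
    if err := mapstructure.WeakDecode(v, &l); err == nil {
        return "list"
    }
    return "unknown"
}

func main() {
    fmt.Println(inferType("us-east-1"))                           // string
    fmt.Println(inferType(map[string]interface{}{"a": "b"}))      // map
    fmt.Println(inferType([]interface{}{"10.0.0.1", "10.0.0.2"})) // list
}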
func (v *Variable) Merge(v2 *Variable) *Variable { // Shallow copy the variable @@ -852,6 +847,7 @@ func (v *Variable) Merge(v2 *Variable) *Variable { var typeStringMap = map[string]VariableType{ "string": VariableTypeString, "map": VariableTypeMap, + "list": VariableTypeList, } // Type returns the type of variable this is. @@ -911,9 +907,9 @@ func (v *Variable) inferTypeFromDefault() VariableType { return VariableTypeString } - var strVal string - if err := mapstructure.WeakDecode(v.Default, &strVal); err == nil { - v.Default = strVal + var s string + if err := mapstructure.WeakDecode(v.Default, &s); err == nil { + v.Default = s return VariableTypeString } @@ -923,5 +919,11 @@ func (v *Variable) inferTypeFromDefault() VariableType { return VariableTypeMap } + var l []string + if err := mapstructure.WeakDecode(v.Default, &l); err == nil { + v.Default = l + return VariableTypeList + } + return VariableTypeUnknown } diff --git a/config/config_test.go b/config/config_test.go index b6303fb13f..5186a81de6 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -2,7 +2,6 @@ package config import ( "path/filepath" - "reflect" "strings" "testing" ) @@ -216,8 +215,15 @@ func TestConfigValidate_moduleVarInt(t *testing.T) { func TestConfigValidate_moduleVarMap(t *testing.T) { c := testConfig(t, "validate-module-var-map") - if err := c.Validate(); err == nil { - t.Fatal("should be invalid") + if err := c.Validate(); err != nil { + t.Fatalf("should be valid: %s", err) + } +} + +func TestConfigValidate_moduleVarList(t *testing.T) { + c := testConfig(t, "validate-module-var-list") + if err := c.Validate(); err != nil { + t.Fatalf("should be valid: %s", err) } } @@ -368,10 +374,10 @@ func TestConfigValidate_varDefault(t *testing.T) { } } -func TestConfigValidate_varDefaultBadType(t *testing.T) { - c := testConfig(t, "validate-var-default-bad-type") - if err := c.Validate(); err == nil { - t.Fatal("should not be valid") +func TestConfigValidate_varDefaultListType(t *testing.T) { + c := testConfig(t, "validate-var-default-list-type") + if err := c.Validate(); err != nil { + t.Fatalf("should be valid: %s", err) } } @@ -458,43 +464,6 @@ func TestProviderConfigName(t *testing.T) { } } -func TestVariableDefaultsMap(t *testing.T) { - cases := []struct { - Default interface{} - Output map[string]string - }{ - { - nil, - nil, - }, - - { - "foo", - map[string]string{"var.foo": "foo"}, - }, - - { - map[interface{}]interface{}{ - "foo": "bar", - "bar": "baz", - }, - map[string]string{ - "var.foo": "foo", - "var.foo.foo": "bar", - "var.foo.bar": "baz", - }, - }, - } - - for i, tc := range cases { - v := &Variable{Name: "foo", Default: tc.Default} - actual := v.DefaultsMap() - if !reflect.DeepEqual(actual, tc.Output) { - t.Fatalf("%d: bad: %#v", i, actual) - } - } -} - func testConfig(t *testing.T, name string) *Config { c, err := LoadFile(filepath.Join(fixtureDir, name, "main.tf")) if err != nil { diff --git a/config/interpolate.go b/config/interpolate.go index bfdd114c62..14e70bfcc1 100644 --- a/config/interpolate.go +++ b/config/interpolate.go @@ -284,18 +284,35 @@ func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) { return n } - vn, ok := n.(*ast.VariableAccess) - if !ok { + switch vn := n.(type) { + case *ast.VariableAccess: + v, err := NewInterpolatedVariable(vn.Name) + if err != nil { + resultErr = err + return n + } + result = append(result, v) + case *ast.Index: + if va, ok := vn.Target.(*ast.VariableAccess); ok { + v, err := NewInterpolatedVariable(va.Name) + if err != nil { + 
resultErr = err + return n + } + result = append(result, v) + } + if va, ok := vn.Key.(*ast.VariableAccess); ok { + v, err := NewInterpolatedVariable(va.Name) + if err != nil { + resultErr = err + return n + } + result = append(result, v) + } + default: return n } - v, err := NewInterpolatedVariable(vn.Name) - if err != nil { - resultErr = err - return n - } - - result = append(result, v) return n } diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go index 9f929e106d..e13c754867 100644 --- a/config/interpolate_funcs.go +++ b/config/interpolate_funcs.go @@ -1,7 +1,6 @@ package config import ( - "bytes" "crypto/md5" "crypto/sha1" "crypto/sha256" @@ -19,10 +18,36 @@ import ( "github.com/apparentlymart/go-cidr/cidr" "github.com/hashicorp/go-uuid" + "github.com/hashicorp/hil" "github.com/hashicorp/hil/ast" "github.com/mitchellh/go-homedir" ) +// stringSliceToVariableValue converts a string slice into the value +// required to be returned from interpolation functions which return +// TypeList. +func stringSliceToVariableValue(values []string) []ast.Variable { + output := make([]ast.Variable, len(values)) + for index, value := range values { + output[index] = ast.Variable{ + Type: ast.TypeString, + Value: value, + } + } + return output +} + +func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) { + output := make([]string, len(values)) + for index, value := range values { + if value.Type != ast.TypeString { + return []string{}, fmt.Errorf("list has non-string element (%T)", value.Type.String()) + } + output[index] = value.Value.(string) + } + return output, nil +} + // Funcs is the mapping of built-in functions for configuration. func Funcs() map[string]ast.Function { return map[string]ast.Function{ @@ -60,14 +85,23 @@ func Funcs() map[string]ast.Function { // (e.g. as returned by "split") of any empty strings. func interpolationFuncCompact() ast.Function { return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeString, + ArgTypes: []ast.Type{ast.TypeList}, + ReturnType: ast.TypeList, Variadic: false, Callback: func(args []interface{}) (interface{}, error) { - if !IsStringList(args[0].(string)) { - return args[0].(string), nil + inputList := args[0].([]ast.Variable) + + var outputList []string + for _, val := range inputList { + if strVal, ok := val.Value.(string); ok { + if strVal == "" { + continue + } + + outputList = append(outputList, strVal) + } } - return StringList(args[0].(string)).Compact().String(), nil + return stringSliceToVariableValue(outputList), nil }, } } @@ -188,39 +222,32 @@ func interpolationFuncCoalesce() ast.Function { // compat we do this. func interpolationFuncConcat() ast.Function { return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeString, + ArgTypes: []ast.Type{ast.TypeAny}, + ReturnType: ast.TypeList, Variadic: true, - VariadicType: ast.TypeString, + VariadicType: ast.TypeAny, Callback: func(args []interface{}) (interface{}, error) { - var b bytes.Buffer - var finalList []string - - var isDeprecated = true + var finalListElements []string for _, arg := range args { - argument := arg.(string) - - if len(argument) == 0 { + // Append strings for backward compatibility + if argument, ok := arg.(string); ok { + finalListElements = append(finalListElements, argument) continue } - if IsStringList(argument) { - isDeprecated = false - finalList = append(finalList, StringList(argument).Slice()...) 
- } else { - finalList = append(finalList, argument) + // Otherwise variables + if argument, ok := arg.([]ast.Variable); ok { + for _, element := range argument { + finalListElements = append(finalListElements, element.Value.(string)) + } + continue } - // Deprecated concat behaviour - b.WriteString(argument) + return nil, fmt.Errorf("arguments to concat() must be a string or list") } - if isDeprecated { - return b.String(), nil - } - - return NewStringList(finalList).String(), nil + return stringSliceToVariableValue(finalListElements), nil }, } } @@ -265,10 +292,10 @@ func interpolationFuncFormat() ast.Function { // string formatting on lists. func interpolationFuncFormatList() ast.Function { return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, + ArgTypes: []ast.Type{ast.TypeAny}, Variadic: true, VariadicType: ast.TypeAny, - ReturnType: ast.TypeString, + ReturnType: ast.TypeList, Callback: func(args []interface{}) (interface{}, error) { // Make a copy of the variadic part of args // to avoid modifying the original. @@ -279,15 +306,15 @@ func interpolationFuncFormatList() ast.Function { // Confirm along the way that all lists have the same length (n). var n int for i := 1; i < len(args); i++ { - s, ok := args[i].(string) + s, ok := args[i].([]ast.Variable) if !ok { continue } - if !IsStringList(s) { - continue - } - parts := StringList(s).Slice() + parts, err := listVariableValueToStringSlice(s) + if err != nil { + return nil, err + } // otherwise the list is sent down to be indexed varargs[i-1] = parts @@ -324,7 +351,7 @@ func interpolationFuncFormatList() ast.Function { } list[i] = fmt.Sprintf(format, fmtargs...) } - return NewStringList(list).String(), nil + return stringSliceToVariableValue(list), nil }, } } @@ -333,13 +360,13 @@ func interpolationFuncFormatList() ast.Function { // find the index of a specific element in a list func interpolationFuncIndex() ast.Function { return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString, ast.TypeString}, + ArgTypes: []ast.Type{ast.TypeList, ast.TypeString}, ReturnType: ast.TypeInt, Callback: func(args []interface{}) (interface{}, error) { - haystack := StringList(args[0].(string)).Slice() + haystack := args[0].([]ast.Variable) needle := args[1].(string) for index, element := range haystack { - if needle == element { + if needle == element.Value { return index, nil } } @@ -352,13 +379,28 @@ func interpolationFuncIndex() ast.Function { // multi-variable values to be joined by some character. func interpolationFuncJoin() ast.Function { return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString, ast.TypeString}, - ReturnType: ast.TypeString, + ArgTypes: []ast.Type{ast.TypeString}, + Variadic: true, + VariadicType: ast.TypeList, + ReturnType: ast.TypeString, Callback: func(args []interface{}) (interface{}, error) { var list []string + + if len(args) < 2 { + return nil, fmt.Errorf("not enough arguments to join()") + } + for _, arg := range args[1:] { - parts := StringList(arg.(string)).Slice() - list = append(list, parts...) 
+ if parts, ok := arg.(ast.Variable); ok { + for _, part := range parts.Value.([]ast.Variable) { + list = append(list, part.Value.(string)) + } + } + if parts, ok := arg.([]ast.Variable); ok { + for _, part := range parts { + list = append(list, part.Value.(string)) + } + } } return strings.Join(list, args[0].(string)), nil @@ -412,19 +454,20 @@ func interpolationFuncReplace() ast.Function { func interpolationFuncLength() ast.Function { return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, + ArgTypes: []ast.Type{ast.TypeAny}, ReturnType: ast.TypeInt, Variadic: false, Callback: func(args []interface{}) (interface{}, error) { - if !IsStringList(args[0].(string)) { - return len(args[0].(string)), nil + subject := args[0] + + switch typedSubject := subject.(type) { + case string: + return len(typedSubject), nil + case []ast.Variable: + return len(typedSubject), nil } - length := 0 - for _, arg := range args { - length += StringList(arg.(string)).Length() - } - return length, nil + return 0, fmt.Errorf("arguments to length() must be a string or list") }, } } @@ -453,11 +496,12 @@ func interpolationFuncSignum() ast.Function { func interpolationFuncSplit() ast.Function { return ast.Function{ ArgTypes: []ast.Type{ast.TypeString, ast.TypeString}, - ReturnType: ast.TypeString, + ReturnType: ast.TypeList, Callback: func(args []interface{}) (interface{}, error) { sep := args[0].(string) s := args[1].(string) - return NewStringList(strings.Split(s, sep)).String(), nil + elements := strings.Split(s, sep) + return stringSliceToVariableValue(elements), nil }, } } @@ -466,20 +510,22 @@ func interpolationFuncSplit() ast.Function { // dynamic lookups of map types within a Terraform configuration. func interpolationFuncLookup(vs map[string]ast.Variable) ast.Function { return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString, ast.TypeString}, + ArgTypes: []ast.Type{ast.TypeMap, ast.TypeString}, ReturnType: ast.TypeString, Callback: func(args []interface{}) (interface{}, error) { - k := fmt.Sprintf("var.%s.%s", args[0].(string), args[1].(string)) - v, ok := vs[k] + index := args[1].(string) + mapVar := args[0].(map[string]ast.Variable) + + v, ok := mapVar[index] if !ok { return "", fmt.Errorf( - "lookup in '%s' failed to find '%s'", - args[0].(string), args[1].(string)) + "lookup failed to find '%s'", + args[1].(string)) } if v.Type != ast.TypeString { return "", fmt.Errorf( - "lookup in '%s' for '%s' has bad type %s", - args[0].(string), args[1].(string), v.Type) + "lookup for '%s' has bad type %s", + args[1].(string), v.Type) } return v.Value.(string), nil @@ -492,10 +538,10 @@ func interpolationFuncLookup(vs map[string]ast.Variable) ast.Function { // wrap if the index is larger than the number of elements in the multi-variable value. func interpolationFuncElement() ast.Function { return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString, ast.TypeString}, + ArgTypes: []ast.Type{ast.TypeList, ast.TypeString}, ReturnType: ast.TypeString, Callback: func(args []interface{}) (interface{}, error) { - list := StringList(args[0].(string)) + list := args[0].([]ast.Variable) index, err := strconv.Atoi(args[1].(string)) if err != nil || index < 0 { @@ -503,7 +549,9 @@ func interpolationFuncElement() ast.Function { "invalid number for index, got %s", args[1]) } - v := list.Element(index) + resolvedIndex := index % len(list) + + v := list[resolvedIndex].Value return v, nil }, } @@ -513,28 +561,20 @@ func interpolationFuncElement() ast.Function { // keys of map types within a Terraform configuration. 
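element() above now wraps out-of-range indices with a plain modulo over the real list length instead of going through StringList.Element. A small worked example of that arithmetic, matching the "invalid index should wrap" test case further down:

package main

import "fmt"

func main() {
	list := []string{"foo", "baz"}

	// element(list, "2"): 2 % 2 == 0, so the lookup wraps back to "foo"
	// instead of failing with an out-of-bounds error.
	index := 2
	resolvedIndex := index % len(list)
	fmt.Println(list[resolvedIndex]) // foo
}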
func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function { return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeString, + ArgTypes: []ast.Type{ast.TypeMap}, + ReturnType: ast.TypeList, Callback: func(args []interface{}) (interface{}, error) { - // Prefix must include ending dot to be a map - prefix := fmt.Sprintf("var.%s.", args[0].(string)) - keys := make([]string, 0, len(vs)) - for k, _ := range vs { - if !strings.HasPrefix(k, prefix) { - continue - } - keys = append(keys, k[len(prefix):]) - } + mapVar := args[0].(map[string]ast.Variable) + keys := make([]string, 0) - if len(keys) <= 0 { - return "", fmt.Errorf( - "failed to find map '%s'", - args[0].(string)) + for k, _ := range mapVar { + keys = append(keys, k) } sort.Strings(keys) - return NewStringList(keys).String(), nil + //Keys are guaranteed to be strings + return stringSliceToVariableValue(keys), nil }, } } @@ -543,38 +583,34 @@ func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function { // keys of map types within a Terraform configuration. func interpolationFuncValues(vs map[string]ast.Variable) ast.Function { return ast.Function{ - ArgTypes: []ast.Type{ast.TypeString}, - ReturnType: ast.TypeString, + ArgTypes: []ast.Type{ast.TypeMap}, + ReturnType: ast.TypeList, Callback: func(args []interface{}) (interface{}, error) { - // Prefix must include ending dot to be a map - prefix := fmt.Sprintf("var.%s.", args[0].(string)) - keys := make([]string, 0, len(vs)) - for k, _ := range vs { - if !strings.HasPrefix(k, prefix) { - continue - } - keys = append(keys, k) - } + mapVar := args[0].(map[string]ast.Variable) + keys := make([]string, 0) - if len(keys) <= 0 { - return "", fmt.Errorf( - "failed to find map '%s'", - args[0].(string)) + for k, _ := range mapVar { + keys = append(keys, k) } sort.Strings(keys) - vals := make([]string, 0, len(keys)) - - for _, k := range keys { - v := vs[k] - if v.Type != ast.TypeString { - return "", fmt.Errorf("values(): %q has bad type %s", k, v.Type) + values := make([]string, len(keys)) + for index, key := range keys { + if value, ok := mapVar[key].Value.(string); ok { + values[index] = value + } else { + return "", fmt.Errorf("values(): %q has element with bad type %s", + key, mapVar[key].Type) } - vals = append(vals, vs[k].Value.(string)) } - return NewStringList(vals).String(), nil + variable, err := hil.InterfaceToVariable(values) + if err != nil { + return nil, err + } + + return variable.Value, nil }, } } diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go index 774a7bf458..d5f44d498d 100644 --- a/config/interpolate_funcs_test.go +++ b/config/interpolate_funcs_test.go @@ -17,21 +17,21 @@ func TestInterpolateFuncCompact(t *testing.T) { // empty string within array { `${compact(split(",", "a,,b"))}`, - NewStringList([]string{"a", "b"}).String(), + []interface{}{"a", "b"}, false, }, // empty string at the end of array { `${compact(split(",", "a,b,"))}`, - NewStringList([]string{"a", "b"}).String(), + []interface{}{"a", "b"}, false, }, // single empty string { `${compact(split(",", ""))}`, - NewStringList([]string{}).String(), + []interface{}{}, false, }, }, @@ -174,76 +174,52 @@ func TestInterpolateFuncCoalesce(t *testing.T) { }) } -func TestInterpolateFuncDeprecatedConcat(t *testing.T) { - testFunction(t, testFunctionConfig{ - Cases: []testFunctionCase{ - { - `${concat("foo", "bar")}`, - "foobar", - false, - }, - - { - `${concat("foo")}`, - "foo", - false, - }, - - { - `${concat()}`, - nil, - true, - }, - }, - }) -} - 
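With maps now first class, keys() and values() above receive a real map[string]ast.Variable instead of scanning flattened "var.foo.*" entries, and they return a TypeList rather than a delimiter-encoded string. A compact sketch of the same extraction outside the interpolation engine (the map contents mirror the keys() test below):

package main

import (
	"fmt"
	"sort"

	"github.com/hashicorp/hil/ast"
)

func main() {
	// A map variable as HIL represents it after this change.
	mapVar := map[string]ast.Variable{
		"bar": {Type: ast.TypeString, Value: "baz"},
		"qux": {Type: ast.TypeString, Value: "quack"},
	}

	// keys(): sort the map keys, then wrap them back up as a TypeList of
	// TypeString variables (what stringSliceToVariableValue does above).
	keys := make([]string, 0, len(mapVar))
	for k := range mapVar {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	list := make([]ast.Variable, len(keys))
	for i, k := range keys {
		list[i] = ast.Variable{Type: ast.TypeString, Value: k}
	}

	fmt.Println(keys, len(list)) // [bar qux] 2
}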
func TestInterpolateFuncConcat(t *testing.T) { testFunction(t, testFunctionConfig{ Cases: []testFunctionCase{ // String + list { `${concat("a", split(",", "b,c"))}`, - NewStringList([]string{"a", "b", "c"}).String(), + []interface{}{"a", "b", "c"}, false, }, // List + string { `${concat(split(",", "a,b"), "c")}`, - NewStringList([]string{"a", "b", "c"}).String(), + []interface{}{"a", "b", "c"}, false, }, // Single list { `${concat(split(",", ",foo,"))}`, - NewStringList([]string{"", "foo", ""}).String(), + []interface{}{"", "foo", ""}, false, }, { `${concat(split(",", "a,b,c"))}`, - NewStringList([]string{"a", "b", "c"}).String(), + []interface{}{"a", "b", "c"}, false, }, // Two lists { `${concat(split(",", "a,b,c"), split(",", "d,e"))}`, - NewStringList([]string{"a", "b", "c", "d", "e"}).String(), + []interface{}{"a", "b", "c", "d", "e"}, false, }, // Two lists with different separators { `${concat(split(",", "a,b,c"), split(" ", "d e"))}`, - NewStringList([]string{"a", "b", "c", "d", "e"}).String(), + []interface{}{"a", "b", "c", "d", "e"}, false, }, // More lists { `${concat(split(",", "a,b"), split(",", "c,d"), split(",", "e,f"), split(",", "0,1"))}`, - NewStringList([]string{"a", "b", "c", "d", "e", "f", "0", "1"}).String(), + []interface{}{"a", "b", "c", "d", "e", "f", "0", "1"}, false, }, }, @@ -338,7 +314,7 @@ func TestInterpolateFuncFormatList(t *testing.T) { // formatlist applies to each list element in turn { `${formatlist("<%s>", split(",", "A,B"))}`, - NewStringList([]string{"", ""}).String(), + []interface{}{"", ""}, false, }, // formatlist repeats scalar elements @@ -362,7 +338,7 @@ func TestInterpolateFuncFormatList(t *testing.T) { // Works with lists of length 1 [GH-2240] { `${formatlist("%s.id", split(",", "demo-rest-elb"))}`, - NewStringList([]string{"demo-rest-elb.id"}).String(), + []interface{}{"demo-rest-elb.id"}, false, }, }, @@ -371,6 +347,11 @@ func TestInterpolateFuncFormatList(t *testing.T) { func TestInterpolateFuncIndex(t *testing.T) { testFunction(t, testFunctionConfig{ + Vars: map[string]ast.Variable{ + "var.list1": interfaceToVariableSwallowError([]string{"notfoo", "stillnotfoo", "bar"}), + "var.list2": interfaceToVariableSwallowError([]string{"foo"}), + "var.list3": interfaceToVariableSwallowError([]string{"foo", "spam", "bar", "eggs"}), + }, Cases: []testFunctionCase{ { `${index("test", "")}`, @@ -379,22 +360,19 @@ func TestInterpolateFuncIndex(t *testing.T) { }, { - fmt.Sprintf(`${index("%s", "foo")}`, - NewStringList([]string{"notfoo", "stillnotfoo", "bar"}).String()), + `${index(var.list1, "foo")}`, nil, true, }, { - fmt.Sprintf(`${index("%s", "foo")}`, - NewStringList([]string{"foo"}).String()), + `${index(var.list2, "foo")}`, "0", false, }, { - fmt.Sprintf(`${index("%s", "bar")}`, - NewStringList([]string{"foo", "spam", "bar", "eggs"}).String()), + `${index(var.list3, "bar")}`, "2", false, }, @@ -404,6 +382,10 @@ func TestInterpolateFuncIndex(t *testing.T) { func TestInterpolateFuncJoin(t *testing.T) { testFunction(t, testFunctionConfig{ + Vars: map[string]ast.Variable{ + "var.a_list": interfaceToVariableSwallowError([]string{"foo"}), + "var.a_longer_list": interfaceToVariableSwallowError([]string{"foo", "bar", "baz"}), + }, Cases: []testFunctionCase{ { `${join(",")}`, @@ -412,24 +394,13 @@ func TestInterpolateFuncJoin(t *testing.T) { }, { - fmt.Sprintf(`${join(",", "%s")}`, - NewStringList([]string{"foo"}).String()), + `${join(",", var.a_list)}`, "foo", false, }, - /* - TODO - { - `${join(",", "foo", "bar")}`, - "foo,bar", - false, - }, - */ - { - 
fmt.Sprintf(`${join(".", "%s")}`, - NewStringList([]string{"foo", "bar", "baz"}).String()), + `${join(".", var.a_longer_list)}`, "foo.bar.baz", false, }, @@ -632,37 +603,37 @@ func TestInterpolateFuncSplit(t *testing.T) { { `${split(",", "")}`, - NewStringList([]string{""}).String(), + []interface{}{""}, false, }, { `${split(",", "foo")}`, - NewStringList([]string{"foo"}).String(), + []interface{}{"foo"}, false, }, { `${split(",", ",,,")}`, - NewStringList([]string{"", "", "", ""}).String(), + []interface{}{"", "", "", ""}, false, }, { `${split(",", "foo,")}`, - NewStringList([]string{"foo", ""}).String(), + []interface{}{"foo", ""}, false, }, { `${split(",", ",foo,")}`, - NewStringList([]string{"", "foo", ""}).String(), + []interface{}{"", "foo", ""}, false, }, { `${split(".", "foo.bar.baz")}`, - NewStringList([]string{"foo", "bar", "baz"}).String(), + []interface{}{"foo", "bar", "baz"}, false, }, }, @@ -672,28 +643,33 @@ func TestInterpolateFuncSplit(t *testing.T) { func TestInterpolateFuncLookup(t *testing.T) { testFunction(t, testFunctionConfig{ Vars: map[string]ast.Variable{ - "var.foo.bar": ast.Variable{ - Value: "baz", - Type: ast.TypeString, + "var.foo": ast.Variable{ + Type: ast.TypeMap, + Value: map[string]ast.Variable{ + "bar": ast.Variable{ + Type: ast.TypeString, + Value: "baz", + }, + }, }, }, Cases: []testFunctionCase{ { - `${lookup("foo", "bar")}`, + `${lookup(var.foo, "bar")}`, "baz", false, }, // Invalid key { - `${lookup("foo", "baz")}`, + `${lookup(var.foo, "baz")}`, nil, true, }, // Too many args { - `${lookup("foo", "bar", "baz")}`, + `${lookup(var.foo, "bar", "baz")}`, nil, true, }, @@ -704,13 +680,18 @@ func TestInterpolateFuncLookup(t *testing.T) { func TestInterpolateFuncKeys(t *testing.T) { testFunction(t, testFunctionConfig{ Vars: map[string]ast.Variable{ - "var.foo.bar": ast.Variable{ - Value: "baz", - Type: ast.TypeString, - }, - "var.foo.qux": ast.Variable{ - Value: "quack", - Type: ast.TypeString, + "var.foo": ast.Variable{ + Type: ast.TypeMap, + Value: map[string]ast.Variable{ + "bar": ast.Variable{ + Value: "baz", + Type: ast.TypeString, + }, + "qux": ast.Variable{ + Value: "quack", + Type: ast.TypeString, + }, + }, }, "var.str": ast.Variable{ Value: "astring", @@ -719,28 +700,28 @@ func TestInterpolateFuncKeys(t *testing.T) { }, Cases: []testFunctionCase{ { - `${keys("foo")}`, - NewStringList([]string{"bar", "qux"}).String(), + `${keys(var.foo)}`, + []interface{}{"bar", "qux"}, false, }, // Invalid key { - `${keys("not")}`, + `${keys(var.not)}`, nil, true, }, // Too many args { - `${keys("foo", "bar")}`, + `${keys(var.foo, "bar")}`, nil, true, }, // Not a map { - `${keys("str")}`, + `${keys(var.str)}`, nil, true, }, @@ -751,13 +732,18 @@ func TestInterpolateFuncKeys(t *testing.T) { func TestInterpolateFuncValues(t *testing.T) { testFunction(t, testFunctionConfig{ Vars: map[string]ast.Variable{ - "var.foo.bar": ast.Variable{ - Value: "quack", - Type: ast.TypeString, - }, - "var.foo.qux": ast.Variable{ - Value: "baz", - Type: ast.TypeString, + "var.foo": ast.Variable{ + Type: ast.TypeMap, + Value: map[string]ast.Variable{ + "bar": ast.Variable{ + Value: "quack", + Type: ast.TypeString, + }, + "qux": ast.Variable{ + Value: "baz", + Type: ast.TypeString, + }, + }, }, "var.str": ast.Variable{ Value: "astring", @@ -766,28 +752,28 @@ func TestInterpolateFuncValues(t *testing.T) { }, Cases: []testFunctionCase{ { - `${values("foo")}`, - NewStringList([]string{"quack", "baz"}).String(), + `${values(var.foo)}`, + []interface{}{"quack", "baz"}, false, }, // Invalid 
key { - `${values("not")}`, + `${values(var.not)}`, nil, true, }, // Too many args { - `${values("foo", "bar")}`, + `${values(var.foo, "bar")}`, nil, true, }, // Not a map { - `${values("str")}`, + `${values(var.str)}`, nil, true, }, @@ -795,43 +781,47 @@ func TestInterpolateFuncValues(t *testing.T) { }) } +func interfaceToVariableSwallowError(input interface{}) ast.Variable { + variable, _ := hil.InterfaceToVariable(input) + return variable +} + func TestInterpolateFuncElement(t *testing.T) { testFunction(t, testFunctionConfig{ + Vars: map[string]ast.Variable{ + "var.a_list": interfaceToVariableSwallowError([]string{"foo", "baz"}), + "var.a_short_list": interfaceToVariableSwallowError([]string{"foo"}), + }, Cases: []testFunctionCase{ { - fmt.Sprintf(`${element("%s", "1")}`, - NewStringList([]string{"foo", "baz"}).String()), + `${element(var.a_list, "1")}`, "baz", false, }, { - fmt.Sprintf(`${element("%s", "0")}`, - NewStringList([]string{"foo"}).String()), + `${element(var.a_short_list, "0")}`, "foo", false, }, // Invalid index should wrap vs. out-of-bounds { - fmt.Sprintf(`${element("%s", "2")}`, - NewStringList([]string{"foo", "baz"}).String()), + `${element(var.a_list, "2")}`, "foo", false, }, // Negative number should fail { - fmt.Sprintf(`${element("%s", "-1")}`, - NewStringList([]string{"foo"}).String()), + `${element(var.a_short_list, "-1")}`, nil, true, }, // Too many args { - fmt.Sprintf(`${element("%s", "0", "2")}`, - NewStringList([]string{"foo", "baz"}).String()), + `${element(var.a_list, "0", "2")}`, nil, true, }, diff --git a/config/interpolate_walk.go b/config/interpolate_walk.go index 333cf33ed9..143b96131a 100644 --- a/config/interpolate_walk.go +++ b/config/interpolate_walk.go @@ -42,7 +42,7 @@ type interpolationWalker struct { // // If Replace is set to false in interpolationWalker, then the replace // value can be anything as it will have no effect. -type interpolationWalkerFunc func(ast.Node) (string, error) +type interpolationWalkerFunc func(ast.Node) (interface{}, error) // interpolationWalkerContextFunc is called by interpolationWalk if // ContextF is set. This receives both the interpolation and the location @@ -150,12 +150,15 @@ func (w *interpolationWalker) Primitive(v reflect.Value) error { // set if it is computed. This behavior is different if we're // splitting (in a SliceElem) or not. remove := false - if w.loc == reflectwalk.SliceElem && IsStringList(replaceVal) { - parts := StringList(replaceVal).Slice() - for _, p := range parts { - if p == UnknownVariableValue { + if w.loc == reflectwalk.SliceElem { + switch typedReplaceVal := replaceVal.(type) { + case string: + if typedReplaceVal == UnknownVariableValue { + remove = true + } + case []interface{}: + if hasUnknownValue(typedReplaceVal) { remove = true - break } } } else if replaceVal == UnknownVariableValue { @@ -226,63 +229,63 @@ func (w *interpolationWalker) replaceCurrent(v reflect.Value) { } } +func hasUnknownValue(variable []interface{}) bool { + for _, value := range variable { + if strVal, ok := value.(string); ok { + if strVal == UnknownVariableValue { + return true + } + } + } + return false +} + func (w *interpolationWalker) splitSlice() { - // Get the []interface{} slice so we can do some operations on - // it without dealing with reflection. We'll document each step - // here to be clear. 
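splitSlice, rewritten just below, flattens list-valued interpolation results back into the enclosing config slice, which is how something like ports = [1, "${var.foo}"] with a list-valued var.foo ends up as one flat list (see the schema tests later in this diff). A sketch of that flattening on plain values:

package main

import (
	"fmt"

	"github.com/hashicorp/hil/ast"
)

func main() {
	// One literal element plus one interpolation that evaluated to a list.
	s := []interface{}{
		"1",
		ast.Variable{Type: ast.TypeList, Value: []ast.Variable{
			{Type: ast.TypeString, Value: "2"},
			{Type: ast.TypeString, Value: "5"},
		}},
	}

	// List-typed variables contribute each element's Value; everything else
	// passes through unchanged.
	result := make([]interface{}, 0, len(s))
	for _, v := range s {
		if varVal, ok := v.(ast.Variable); ok && varVal.Type == ast.TypeList {
			for _, element := range varVal.Value.([]ast.Variable) {
				result = append(result, element.Value)
			}
			continue
		}
		result = append(result, v)
	}

	fmt.Println(result) // [1 2 5]
}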
- var s []interface{} raw := w.cs[len(w.cs)-1] + + var s []interface{} switch v := raw.Interface().(type) { case []interface{}: s = v case []map[string]interface{}: return - default: - panic("Unknown kind: " + raw.Kind().String()) } - // Check if we have any elements that we need to split. If not, then - // just return since we're done. split := false - for _, v := range s { - sv, ok := v.(string) - if !ok { - continue - } - if IsStringList(sv) { + for _, val := range s { + if varVal, ok := val.(ast.Variable); ok && varVal.Type == ast.TypeList { + split = true + } + if _, ok := val.([]interface{}); ok { split = true - break } } + if !split { return } - // Make a new result slice that is twice the capacity to fit our growth. - result := make([]interface{}, 0, len(s)*2) - - // Go over each element of the original slice and start building up - // the resulting slice by splitting where we have to. + result := make([]interface{}, 0) for _, v := range s { - sv, ok := v.(string) - if !ok { - // Not a string, so just set it - result = append(result, v) - continue - } - - if IsStringList(sv) { - for _, p := range StringList(sv).Slice() { - result = append(result, p) + switch val := v.(type) { + case ast.Variable: + switch val.Type { + case ast.TypeList: + elements := val.Value.([]ast.Variable) + for _, element := range elements { + result = append(result, element.Value) + } + default: + result = append(result, val.Value) } - continue + case []interface{}: + for _, element := range val { + result = append(result, element) + } + default: + result = append(result, v) } - - // Not a string list, so just set it - result = append(result, sv) } - // Our slice is now done, we have to replace the slice now - // with this new one that we have. w.replaceCurrent(reflect.ValueOf(result)) } diff --git a/config/interpolate_walk_test.go b/config/interpolate_walk_test.go index b7c308cd4c..70067a99cb 100644 --- a/config/interpolate_walk_test.go +++ b/config/interpolate_walk_test.go @@ -89,7 +89,7 @@ func TestInterpolationWalker_detect(t *testing.T) { for i, tc := range cases { var actual []string - detectFn := func(root ast.Node) (string, error) { + detectFn := func(root ast.Node) (interface{}, error) { actual = append(actual, fmt.Sprintf("%s", root)) return "", nil } @@ -109,7 +109,7 @@ func TestInterpolationWalker_replace(t *testing.T) { cases := []struct { Input interface{} Output interface{} - Value string + Value interface{} }{ { Input: map[string]interface{}{ @@ -159,7 +159,7 @@ func TestInterpolationWalker_replace(t *testing.T) { "bing", }, }, - Value: NewStringList([]string{"bar", "baz"}).String(), + Value: []interface{}{"bar", "baz"}, }, { @@ -170,12 +170,12 @@ func TestInterpolationWalker_replace(t *testing.T) { }, }, Output: map[string]interface{}{}, - Value: NewStringList([]string{UnknownVariableValue, "baz"}).String(), + Value: []interface{}{UnknownVariableValue, "baz"}, }, } for i, tc := range cases { - fn := func(ast.Node) (string, error) { + fn := func(ast.Node) (interface{}, error) { return tc.Value, nil } diff --git a/config/raw_config.go b/config/raw_config.go index 6fc15ebd5e..18b9dcaf2d 100644 --- a/config/raw_config.go +++ b/config/raw_config.go @@ -108,7 +108,7 @@ func (r *RawConfig) Interpolate(vs map[string]ast.Variable) error { defer r.lock.Unlock() config := langEvalConfig(vs) - return r.interpolate(func(root ast.Node) (string, error) { + return r.interpolate(func(root ast.Node) (interface{}, error) { // We detect the variables again and check if the value of any // of the variables is the 
computed value. If it is, then we // treat this entire value as computed. @@ -137,7 +137,7 @@ func (r *RawConfig) Interpolate(vs map[string]ast.Variable) error { return "", err } - return result.Value.(string), nil + return result.Value, nil }) } @@ -194,7 +194,7 @@ func (r *RawConfig) init() error { r.Interpolations = nil r.Variables = nil - fn := func(node ast.Node) (string, error) { + fn := func(node ast.Node) (interface{}, error) { r.Interpolations = append(r.Interpolations, node) vars, err := DetectVariables(node) if err != nil { diff --git a/config/string_list.go b/config/string_list.go deleted file mode 100644 index e3caea70bc..0000000000 --- a/config/string_list.go +++ /dev/null @@ -1,89 +0,0 @@ -package config - -import ( - "fmt" - "strings" -) - -// StringList represents the "poor man's list" that terraform uses -// internally -type StringList string - -// This is the delimiter used to recognize and split StringLists -// -// It plays two semantic roles: -// * It introduces a list -// * It terminates each element -// -// Example representations: -// [] => SLD -// [""] => SLDSLD -// [" "] => SLD SLD -// ["foo"] => SLDfooSLD -// ["foo", "bar"] => SLDfooSLDbarSLD -// ["", ""] => SLDSLDSLD -const stringListDelim = `B780FFEC-B661-4EB8-9236-A01737AD98B6` - -// Takes a Stringlist and returns one without empty strings in it -func (sl StringList) Compact() StringList { - parts := sl.Slice() - - newlist := []string{} - // drop the empty strings - for i := range parts { - if parts[i] != "" { - newlist = append(newlist, parts[i]) - } - } - return NewStringList(newlist) -} - -// Build a StringList from a slice -func NewStringList(parts []string) StringList { - // We have to special case the empty list representation - if len(parts) == 0 { - return StringList(stringListDelim) - } - return StringList(fmt.Sprintf("%s%s%s", - stringListDelim, - strings.Join(parts, stringListDelim), - stringListDelim, - )) -} - -// Returns an element at the index, wrapping around the length of the string -// when index > list length -func (sl StringList) Element(index int) string { - if sl.Length() == 0 { - return "" - } - return sl.Slice()[index%sl.Length()] -} - -// Returns the length of the StringList -func (sl StringList) Length() int { - return len(sl.Slice()) -} - -// Returns a slice of strings as represented by this StringList -func (sl StringList) Slice() []string { - parts := strings.Split(string(sl), stringListDelim) - - // split on an empty StringList will have a length of 2, since there is - // always at least one deliminator - if len(parts) <= 2 { - return []string{} - } - - // strip empty elements generated by leading and trailing delimiters - return parts[1 : len(parts)-1] -} - -func (sl StringList) String() string { - return string(sl) -} - -// Determines if a given string represents a StringList -func IsStringList(s string) bool { - return strings.Contains(s, stringListDelim) -} diff --git a/config/string_list_test.go b/config/string_list_test.go deleted file mode 100644 index 3fe57dfe28..0000000000 --- a/config/string_list_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package config - -import ( - "reflect" - "testing" -) - -func TestStringList_slice(t *testing.T) { - expected := []string{"apple", "banana", "pear"} - l := NewStringList(expected) - actual := l.Slice() - - if !reflect.DeepEqual(expected, actual) { - t.Fatalf("Expected %q, got %q", expected, actual) - } -} - -func TestStringList_element(t *testing.T) { - list := []string{"apple", "banana", "pear"} - l := NewStringList(list) - actual := 
l.Element(1) - - expected := "banana" - - if actual != expected { - t.Fatalf("Expected 2nd element from %q to be %q, got %q", - list, expected, actual) - } -} - -func TestStringList_empty_slice(t *testing.T) { - expected := []string{} - l := NewStringList(expected) - actual := l.Slice() - - if !reflect.DeepEqual(expected, actual) { - t.Fatalf("Expected %q, got %q", expected, actual) - } -} - -func TestStringList_empty_slice_length(t *testing.T) { - list := []string{} - l := NewStringList([]string{}) - actual := l.Length() - - expected := 0 - - if actual != expected { - t.Fatalf("Expected length of %q to be %d, got %d", - list, expected, actual) - } -} diff --git a/config/test-fixtures/validate-module-var-list/main.tf b/config/test-fixtures/validate-module-var-list/main.tf new file mode 100644 index 0000000000..e3a97b7de2 --- /dev/null +++ b/config/test-fixtures/validate-module-var-list/main.tf @@ -0,0 +1,4 @@ +module "foo" { + source = "./foo" + nodes = [1,2,3] +} diff --git a/config/test-fixtures/validate-module-var-map/main.tf b/config/test-fixtures/validate-module-var-map/main.tf index e3a97b7de2..b42ff010d2 100644 --- a/config/test-fixtures/validate-module-var-map/main.tf +++ b/config/test-fixtures/validate-module-var-map/main.tf @@ -1,4 +1,7 @@ module "foo" { source = "./foo" - nodes = [1,2,3] + nodes = { + key1 = "value1" + key2 = "value2" + } } diff --git a/config/test-fixtures/validate-var-default-bad-type/main.tf b/config/test-fixtures/validate-var-default-list-type/main.tf similarity index 100% rename from config/test-fixtures/validate-var-default-bad-type/main.tf rename to config/test-fixtures/validate-var-default-list-type/main.tf diff --git a/help.go b/help.go new file mode 100644 index 0000000000..fcc6dc95d9 --- /dev/null +++ b/help.go @@ -0,0 +1,84 @@ +package main + +import ( + "bytes" + "fmt" + "log" + "sort" + "strings" + + "github.com/mitchellh/cli" +) + +// helpFunc is a cli.HelpFunc that can is used to output the help for Terraform. +func helpFunc(commands map[string]cli.CommandFactory) string { + // Determine the maximum key length, and classify based on type + porcelain := make(map[string]cli.CommandFactory) + plumbing := make(map[string]cli.CommandFactory) + maxKeyLen := 0 + for key, f := range commands { + if len(key) > maxKeyLen { + maxKeyLen = len(key) + } + + if _, ok := PlumbingCommands[key]; ok { + plumbing[key] = f + } else { + porcelain[key] = f + } + } + + var buf bytes.Buffer + buf.WriteString("usage: terraform [--version] [--help] [args]\n\n") + buf.WriteString( + "The available commands for execution are listed below.\n" + + "The most common, useful commands are shown first, followed by\n" + + "less common or more advanced commands. If you're just getting\n" + + "started with Terraform, stick with the common commands. For the\n" + + "other commands, please read the help and docs before usage.\n\n") + buf.WriteString("Common commands:\n") + buf.WriteString(listCommands(porcelain, maxKeyLen)) + buf.WriteString("\nAll other commands:\n") + buf.WriteString(listCommands(plumbing, maxKeyLen)) + return buf.String() +} + +// listCommands just lists the commands in the map with the +// given maximum key length. +func listCommands(commands map[string]cli.CommandFactory, maxKeyLen int) string { + var buf bytes.Buffer + + // Get the list of keys so we can sort them, and also get the maximum + // key length so they can be aligned properly. 
+ keys := make([]string, 0, len(commands)) + for key, _ := range commands { + // This is an internal command that users should never call directly so + // we will hide it from the command listing. + if key == "internal-plugin" { + continue + } + keys = append(keys, key) + } + sort.Strings(keys) + + for _, key := range keys { + commandFunc, ok := commands[key] + if !ok { + // This should never happen since we JUST built the list of + // keys. + panic("command not found: " + key) + } + + command, err := commandFunc() + if err != nil { + log.Printf("[ERR] cli: Command '%s' failed to load: %s", + key, err) + continue + } + + key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key))) + buf.WriteString(fmt.Sprintf(" %s %s\n", key, command.Synopsis())) + } + + return buf.String() +} diff --git a/helper/resource/testing.go b/helper/resource/testing.go index 94e03b5311..07eec7ae86 100644 --- a/helper/resource/testing.go +++ b/helper/resource/testing.go @@ -284,7 +284,10 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r // Initialize the context opts.Module = mod opts.State = state - ctx := terraform.NewContext(&opts) + ctx, err := terraform.NewContext(&opts) + if err != nil { + return err + } if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 { if len(es) > 0 { estrs := make([]string, len(es)) @@ -362,7 +365,10 @@ func testStep( opts.Module = mod opts.State = state opts.Destroy = step.Destroy - ctx := terraform.NewContext(&opts) + ctx, err := terraform.NewContext(&opts) + if err != nil { + return state, fmt.Errorf("Error initializing context: %s", err) + } if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 { if len(es) > 0 { estrs := make([]string, len(es)) diff --git a/helper/schema/field_reader_config.go b/helper/schema/field_reader_config.go index 3cf4f5fc30..0d4c2a97c9 100644 --- a/helper/schema/field_reader_config.go +++ b/helper/schema/field_reader_config.go @@ -100,7 +100,8 @@ func (r *ConfigFieldReader) readField( func (r *ConfigFieldReader) readMap(k string) (FieldReadResult, error) { // We want both the raw value and the interpolated. We use the interpolated // to store actual values and we use the raw one to check for - // computed keys. + // computed keys. Actual values are obtained in the switch, depending on + // the type of the raw value. mraw, ok := r.Config.GetRaw(k) if !ok { return FieldReadResult{}, nil @@ -109,6 +110,25 @@ func (r *ConfigFieldReader) readMap(k string) (FieldReadResult, error) { result := make(map[string]interface{}) computed := false switch m := mraw.(type) { + case string: + // This is a map which has come out of an interpolated variable, so we + // can just get the value directly from config. Values cannot be computed + // currently. + v, _ := r.Config.Get(k) + + // If this isn't a map[string]interface, it must be computed. + mapV, ok := v.(map[string]interface{}) + if !ok { + return FieldReadResult{ + Exists: true, + Computed: true, + }, nil + } + + // Otherwise we can proceed as usual. 
+ for i, iv := range mapV { + result[i] = iv + } case []interface{}: for i, innerRaw := range m { for ik := range innerRaw.(map[string]interface{}) { diff --git a/helper/schema/field_reader_config_test.go b/helper/schema/field_reader_config_test.go index 2defb0e02e..9daeab3a01 100644 --- a/helper/schema/field_reader_config_test.go +++ b/helper/schema/field_reader_config_test.go @@ -183,6 +183,36 @@ func TestConfigFieldReader_ComputedMap(t *testing.T) { }), false, }, + + "native map": { + []string{"map"}, + FieldReadResult{ + Value: map[string]interface{}{ + "bar": "baz", + "baz": "bar", + }, + Exists: true, + Computed: false, + }, + testConfigInterpolate(t, map[string]interface{}{ + "map": "${var.foo}", + }, map[string]ast.Variable{ + "var.foo": ast.Variable{ + Type: ast.TypeMap, + Value: map[string]ast.Variable{ + "bar": ast.Variable{ + Type: ast.TypeString, + Value: "baz", + }, + "baz": ast.Variable{ + Type: ast.TypeString, + Value: "bar", + }, + }, + }, + }), + false, + }, } for name, tc := range cases { @@ -305,6 +335,7 @@ func testConfigInterpolate( t *testing.T, raw map[string]interface{}, vs map[string]ast.Variable) *terraform.ResourceConfig { + rc, err := config.NewRawConfig(raw) if err != nil { t.Fatalf("err: %s", err) diff --git a/helper/schema/schema.go b/helper/schema/schema.go index 43b5cc21ac..acffd249e6 100644 --- a/helper/schema/schema.go +++ b/helper/schema/schema.go @@ -19,8 +19,10 @@ import ( "strconv" "strings" + "github.com/davecgh/go-spew/spew" "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/mapstructure" + "log" ) // Schema is used to describe the structure of a value. @@ -1120,11 +1122,21 @@ func (m schemaMap) validateMap( // case to []interface{} unless the slice is exactly that type. rawV := reflect.ValueOf(raw) switch rawV.Kind() { + case reflect.String: + // If raw and reified are equal, this is a string and should + // be rejected. + reified, reifiedOk := c.Get(k) + log.Printf("[jen20] reified: %s", spew.Sdump(reified)) + log.Printf("[jen20] raw: %s", spew.Sdump(raw)) + if reifiedOk && raw == reified && !c.IsComputed(k) { + return nil, []error{fmt.Errorf("%s: should be a map", k)} + } + // Otherwise it's likely raw is an interpolation. 
+ return nil, nil case reflect.Map: case reflect.Slice: default: - return nil, []error{fmt.Errorf( - "%s: should be a map", k)} + return nil, []error{fmt.Errorf("%s: should be a map", k)} } // If it is not a slice, it is valid diff --git a/helper/schema/schema_test.go b/helper/schema/schema_test.go index dba10ee34e..2ec1aa78ae 100644 --- a/helper/schema/schema_test.go +++ b/helper/schema/schema_test.go @@ -8,6 +8,7 @@ import ( "strconv" "testing" + "github.com/hashicorp/hil" "github.com/hashicorp/hil/ast" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/helper/hashcode" @@ -123,12 +124,17 @@ func TestValueType_Zero(t *testing.T) { } } +func interfaceToVariableSwallowError(input interface{}) ast.Variable { + variable, _ := hil.InterfaceToVariable(input) + return variable +} + func TestSchemaMap_Diff(t *testing.T) { cases := map[string]struct { Schema map[string]*Schema State *terraform.InstanceState Config map[string]interface{} - ConfigVariables map[string]string + ConfigVariables map[string]ast.Variable Diff *terraform.InstanceDiff Err bool }{ @@ -396,8 +402,8 @@ func TestSchemaMap_Diff(t *testing.T) { "availability_zone": "${var.foo}", }, - ConfigVariables: map[string]string{ - "var.foo": "bar", + ConfigVariables: map[string]ast.Variable{ + "var.foo": interfaceToVariableSwallowError("bar"), }, Diff: &terraform.InstanceDiff{ @@ -426,8 +432,8 @@ func TestSchemaMap_Diff(t *testing.T) { "availability_zone": "${var.foo}", }, - ConfigVariables: map[string]string{ - "var.foo": config.UnknownVariableValue, + ConfigVariables: map[string]ast.Variable{ + "var.foo": interfaceToVariableSwallowError(config.UnknownVariableValue), }, Diff: &terraform.InstanceDiff{ @@ -576,8 +582,8 @@ func TestSchemaMap_Diff(t *testing.T) { "ports": []interface{}{1, "${var.foo}"}, }, - ConfigVariables: map[string]string{ - "var.foo": config.NewStringList([]string{"2", "5"}).String(), + ConfigVariables: map[string]ast.Variable{ + "var.foo": interfaceToVariableSwallowError([]interface{}{"2", "5"}), }, Diff: &terraform.InstanceDiff{ @@ -619,9 +625,9 @@ func TestSchemaMap_Diff(t *testing.T) { "ports": []interface{}{1, "${var.foo}"}, }, - ConfigVariables: map[string]string{ - "var.foo": config.NewStringList([]string{ - config.UnknownVariableValue, "5"}).String(), + ConfigVariables: map[string]ast.Variable{ + "var.foo": interfaceToVariableSwallowError([]interface{}{ + config.UnknownVariableValue, "5"}), }, Diff: &terraform.InstanceDiff{ @@ -886,8 +892,8 @@ func TestSchemaMap_Diff(t *testing.T) { "ports": []interface{}{"${var.foo}", 1}, }, - ConfigVariables: map[string]string{ - "var.foo": config.NewStringList([]string{"2", "5"}).String(), + ConfigVariables: map[string]ast.Variable{ + "var.foo": interfaceToVariableSwallowError([]interface{}{"2", "5"}), }, Diff: &terraform.InstanceDiff{ @@ -932,9 +938,9 @@ func TestSchemaMap_Diff(t *testing.T) { "ports": []interface{}{1, "${var.foo}"}, }, - ConfigVariables: map[string]string{ - "var.foo": config.NewStringList([]string{ - config.UnknownVariableValue, "5"}).String(), + ConfigVariables: map[string]ast.Variable{ + "var.foo": interfaceToVariableSwallowError([]interface{}{ + config.UnknownVariableValue, "5"}), }, Diff: &terraform.InstanceDiff{ @@ -1603,8 +1609,8 @@ func TestSchemaMap_Diff(t *testing.T) { "instances": []interface{}{"${var.foo}"}, }, - ConfigVariables: map[string]string{ - "var.foo": config.UnknownVariableValue, + ConfigVariables: map[string]ast.Variable{ + "var.foo": interfaceToVariableSwallowError(config.UnknownVariableValue), }, Diff: 
&terraform.InstanceDiff{ @@ -1654,8 +1660,8 @@ func TestSchemaMap_Diff(t *testing.T) { }, }, - ConfigVariables: map[string]string{ - "var.foo": config.UnknownVariableValue, + ConfigVariables: map[string]ast.Variable{ + "var.foo": interfaceToVariableSwallowError(config.UnknownVariableValue), }, Diff: &terraform.InstanceDiff{ @@ -1720,8 +1726,8 @@ func TestSchemaMap_Diff(t *testing.T) { }, }, - ConfigVariables: map[string]string{ - "var.foo": config.UnknownVariableValue, + ConfigVariables: map[string]ast.Variable{ + "var.foo": interfaceToVariableSwallowError(config.UnknownVariableValue), }, Diff: &terraform.InstanceDiff{ @@ -1787,8 +1793,8 @@ func TestSchemaMap_Diff(t *testing.T) { }, }, - ConfigVariables: map[string]string{ - "var.foo": config.UnknownVariableValue, + ConfigVariables: map[string]ast.Variable{ + "var.foo": interfaceToVariableSwallowError(config.UnknownVariableValue), }, Diff: &terraform.InstanceDiff{ @@ -2134,8 +2140,8 @@ func TestSchemaMap_Diff(t *testing.T) { "ports": []interface{}{1, "${var.foo}32"}, }, - ConfigVariables: map[string]string{ - "var.foo": config.UnknownVariableValue, + ConfigVariables: map[string]ast.Variable{ + "var.foo": interfaceToVariableSwallowError(config.UnknownVariableValue), }, Diff: &terraform.InstanceDiff{ @@ -2403,12 +2409,7 @@ func TestSchemaMap_Diff(t *testing.T) { } if len(tc.ConfigVariables) > 0 { - vars := make(map[string]ast.Variable) - for k, v := range tc.ConfigVariables { - vars[k] = ast.Variable{Value: v, Type: ast.TypeString} - } - - if err := c.Interpolate(vars); err != nil { + if err := c.Interpolate(tc.ConfigVariables); err != nil { t.Fatalf("#%q err: %s", tn, err) } } @@ -2420,7 +2421,7 @@ func TestSchemaMap_Diff(t *testing.T) { } if !reflect.DeepEqual(tc.Diff, d) { - t.Fatalf("#%q:\n\nexpected: %#v\n\ngot:\n\n%#v", tn, tc.Diff, d) + t.Fatalf("#%q:\n\nexpected:\n%#v\n\ngot:\n%#v", tn, tc.Diff, d) } } } diff --git a/main.go b/main.go index 806efe7b60..db0237383f 100644 --- a/main.go +++ b/main.go @@ -9,8 +9,8 @@ import ( "runtime" "sync" + "github.com/hashicorp/go-plugin" "github.com/hashicorp/terraform/helper/logging" - "github.com/hashicorp/terraform/plugin" "github.com/mattn/go-colorable" "github.com/mitchellh/cli" "github.com/mitchellh/panicwrap" @@ -18,6 +18,8 @@ import ( ) func main() { + // Override global prefix set by go-dynect during init() + log.SetPrefix("") os.Exit(realMain()) } @@ -86,7 +88,7 @@ func wrappedMain() int { // Load the configuration config := BuiltinConfig - if err := config.Discover(); err != nil { + if err := config.Discover(Ui); err != nil { Ui.Error(fmt.Sprintf("Error discovering plugins: %s", err)) return 1 } @@ -113,7 +115,7 @@ func wrappedMain() int { cli := &cli.CLI{ Args: args, Commands: Commands, - HelpFunc: cli.BasicHelpFunc("terraform"), + HelpFunc: helpFunc, HelpWriter: os.Stdout, } diff --git a/plugin/client.go b/plugin/client.go deleted file mode 100644 index 8a3b03fc0a..0000000000 --- a/plugin/client.go +++ /dev/null @@ -1,339 +0,0 @@ -package plugin - -import ( - "bufio" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "os" - "os/exec" - "path/filepath" - "strings" - "sync" - "time" - "unicode" - - tfrpc "github.com/hashicorp/terraform/rpc" -) - -// If this is true, then the "unexpected EOF" panic will not be -// raised throughout the clients. 
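This hand-rolled client is what go-plugin replaces: the magic cookie environment variable, the TF_PLUGIN_MIN_PORT/MAX_PORT range, and the "version|network|address" startup line parsed below are all handled by the shared library now. A loose sketch of what the serve side of a standalone plugin binary looks like under go-plugin; the handshake values and the empty plugin map here are placeholders, not Terraform's real ones:

package main

import (
	"github.com/hashicorp/go-plugin"
)

// handshake is an assumed stand-in for the HandshakeConfig that core and
// plugins agree on; the real values live in terraform's plugin package.
var handshake = plugin.HandshakeConfig{
	ProtocolVersion:  1,
	MagicCookieKey:   "TF_PLUGIN_MAGIC_COOKIE",
	MagicCookieValue: "example-not-the-real-value",
}

func main() {
	// A plugin binary serves its implementations under well-known names;
	// the core side dispenses them by the same names (see providerFactory
	// near the top of this diff).
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: handshake,
		Plugins: map[string]plugin.Plugin{
			// "provider": &ResourceProviderPlugin{...} would go here.
		},
	})
}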
-var Killed = false - -// This is a slice of the "managed" clients which are cleaned up when -// calling Cleanup -var managedClients = make([]*Client, 0, 5) - -// Client handles the lifecycle of a plugin application, determining its -// RPC address, and returning various types of Terraform interface implementations -// across the multi-process communication layer. -type Client struct { - config *ClientConfig - exited bool - doneLogging chan struct{} - l sync.Mutex - address net.Addr - client *tfrpc.Client -} - -// ClientConfig is the configuration used to initialize a new -// plugin client. After being used to initialize a plugin client, -// that configuration must not be modified again. -type ClientConfig struct { - // The unstarted subprocess for starting the plugin. - Cmd *exec.Cmd - - // Managed represents if the client should be managed by the - // plugin package or not. If true, then by calling CleanupClients, - // it will automatically be cleaned up. Otherwise, the client - // user is fully responsible for making sure to Kill all plugin - // clients. By default the client is _not_ managed. - Managed bool - - // The minimum and maximum port to use for communicating with - // the subprocess. If not set, this defaults to 10,000 and 25,000 - // respectively. - MinPort, MaxPort uint - - // StartTimeout is the timeout to wait for the plugin to say it - // has started successfully. - StartTimeout time.Duration - - // If non-nil, then the stderr of the client will be written to here - // (as well as the log). - Stderr io.Writer -} - -// This makes sure all the managed subprocesses are killed and properly -// logged. This should be called before the parent process running the -// plugins exits. -// -// This must only be called _once_. -func CleanupClients() { - // Set the killed to true so that we don't get unexpected panics - Killed = true - - // Kill all the managed clients in parallel and use a WaitGroup - // to wait for them all to finish up. - var wg sync.WaitGroup - for _, client := range managedClients { - wg.Add(1) - - go func(client *Client) { - client.Kill() - wg.Done() - }(client) - } - - log.Println("[DEBUG] waiting for all plugin processes to complete...") - wg.Wait() -} - -// Creates a new plugin client which manages the lifecycle of an external -// plugin and gets the address for the RPC connection. -// -// The client must be cleaned up at some point by calling Kill(). If -// the client is a managed client (created with NewManagedClient) you -// can just call CleanupClients at the end of your program and they will -// be properly cleaned. -func NewClient(config *ClientConfig) (c *Client) { - if config.MinPort == 0 && config.MaxPort == 0 { - config.MinPort = 10000 - config.MaxPort = 25000 - } - - if config.StartTimeout == 0 { - config.StartTimeout = 1 * time.Minute - } - - if config.Stderr == nil { - config.Stderr = ioutil.Discard - } - - c = &Client{config: config} - if config.Managed { - managedClients = append(managedClients, c) - } - - return -} - -// Client returns an RPC client for the plugin. -// -// Subsequent calls to this will return the same RPC client. -func (c *Client) Client() (*tfrpc.Client, error) { - addr, err := c.Start() - if err != nil { - return nil, err - } - - c.l.Lock() - defer c.l.Unlock() - - if c.client != nil { - return c.client, nil - } - - c.client, err = tfrpc.Dial(addr.Network(), addr.String()) - if err != nil { - return nil, err - } - - return c.client, nil -} - -// Tells whether or not the underlying process has exited. 
-func (c *Client) Exited() bool { - c.l.Lock() - defer c.l.Unlock() - return c.exited -} - -// End the executing subprocess (if it is running) and perform any cleanup -// tasks necessary such as capturing any remaining logs and so on. -// -// This method blocks until the process successfully exits. -// -// This method can safely be called multiple times. -func (c *Client) Kill() { - cmd := c.config.Cmd - - if cmd.Process == nil { - return - } - - cmd.Process.Kill() - - // Wait for the client to finish logging so we have a complete log - <-c.doneLogging -} - -// Starts the underlying subprocess, communicating with it to negotiate -// a port for RPC connections, and returning the address to connect via RPC. -// -// This method is safe to call multiple times. Subsequent calls have no effect. -// Once a client has been started once, it cannot be started again, even if -// it was killed. -func (c *Client) Start() (addr net.Addr, err error) { - c.l.Lock() - defer c.l.Unlock() - - if c.address != nil { - return c.address, nil - } - - c.doneLogging = make(chan struct{}) - - env := []string{ - fmt.Sprintf("%s=%s", MagicCookieKey, MagicCookieValue), - fmt.Sprintf("TF_PLUGIN_MIN_PORT=%d", c.config.MinPort), - fmt.Sprintf("TF_PLUGIN_MAX_PORT=%d", c.config.MaxPort), - } - - stdout_r, stdout_w := io.Pipe() - stderr_r, stderr_w := io.Pipe() - - cmd := c.config.Cmd - cmd.Env = append(cmd.Env, os.Environ()...) - cmd.Env = append(cmd.Env, env...) - cmd.Stdin = os.Stdin - cmd.Stderr = stderr_w - cmd.Stdout = stdout_w - - log.Printf("[DEBUG] Starting plugin: %s %#v", cmd.Path, cmd.Args) - err = cmd.Start() - if err != nil { - return - } - - // Make sure the command is properly cleaned up if there is an error - defer func() { - r := recover() - - if err != nil || r != nil { - cmd.Process.Kill() - } - - if r != nil { - panic(r) - } - }() - - // Start goroutine to wait for process to exit - exitCh := make(chan struct{}) - go func() { - // Make sure we close the write end of our stderr/stdout so - // that the readers send EOF properly. - defer stderr_w.Close() - defer stdout_w.Close() - - // Wait for the command to end. - cmd.Wait() - - // Log and make sure to flush the logs write away - log.Printf("[DEBUG] %s: plugin process exited\n", cmd.Path) - os.Stderr.Sync() - - // Mark that we exited - close(exitCh) - - // Set that we exited, which takes a lock - c.l.Lock() - defer c.l.Unlock() - c.exited = true - }() - - // Start goroutine that logs the stderr - go c.logStderr(stderr_r) - - // Start a goroutine that is going to be reading the lines - // out of stdout - linesCh := make(chan []byte) - go func() { - defer close(linesCh) - - buf := bufio.NewReader(stdout_r) - for { - line, err := buf.ReadBytes('\n') - if line != nil { - linesCh <- line - } - - if err == io.EOF { - return - } - } - }() - - // Make sure after we exit we read the lines from stdout forever - // so they don't block since it is an io.Pipe - defer func() { - go func() { - for _ = range linesCh { - } - }() - }() - - // Some channels for the next step - timeout := time.After(c.config.StartTimeout) - - // Start looking for the address - log.Printf("[DEBUG] Waiting for RPC address for: %s", cmd.Path) - select { - case <-timeout: - err = errors.New("timeout while waiting for plugin to start") - case <-exitCh: - err = errors.New("plugin exited before we could connect") - case lineBytes := <-linesCh: - // Trim the line and split by "|" in order to get the parts of - // the output. 
- line := strings.TrimSpace(string(lineBytes)) - parts := strings.SplitN(line, "|", 3) - if len(parts) < 3 { - err = fmt.Errorf("Unrecognized remote plugin message: %s", line) - return - } - - // Test the API version - if parts[0] != APIVersion { - err = fmt.Errorf("Incompatible API version with plugin. "+ - "Plugin version: %s, Ours: %s", parts[0], APIVersion) - return - } - - switch parts[1] { - case "tcp": - addr, err = net.ResolveTCPAddr("tcp", parts[2]) - case "unix": - addr, err = net.ResolveUnixAddr("unix", parts[2]) - default: - err = fmt.Errorf("Unknown address type: %s", parts[1]) - } - } - - c.address = addr - return -} - -func (c *Client) logStderr(r io.Reader) { - bufR := bufio.NewReader(r) - for { - line, err := bufR.ReadString('\n') - if line != "" { - c.config.Stderr.Write([]byte(line)) - - line = strings.TrimRightFunc(line, unicode.IsSpace) - log.Printf("[DEBUG] %s: %s", filepath.Base(c.config.Cmd.Path), line) - } - - if err == io.EOF { - break - } - } - - // Flag that we've completed logging for others - close(c.doneLogging) -} diff --git a/plugin/client_test.go b/plugin/client_test.go deleted file mode 100644 index 68b995c139..0000000000 --- a/plugin/client_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package plugin - -import ( - "bytes" - "io/ioutil" - "os" - "strings" - "testing" - "time" -) - -func TestClient(t *testing.T) { - process := helperProcess("mock") - c := NewClient(&ClientConfig{Cmd: process}) - defer c.Kill() - - // Test that it parses the proper address - addr, err := c.Start() - if err != nil { - t.Fatalf("err should be nil, got %s", err) - } - - if addr.Network() != "tcp" { - t.Fatalf("bad: %#v", addr) - } - - if addr.String() != ":1234" { - t.Fatalf("bad: %#v", addr) - } - - // Test that it exits properly if killed - c.Kill() - - if process.ProcessState == nil { - t.Fatal("should have process state") - } - - // Test that it knows it is exited - if !c.Exited() { - t.Fatal("should say client has exited") - } -} - -func TestClientStart_badVersion(t *testing.T) { - config := &ClientConfig{ - Cmd: helperProcess("bad-version"), - StartTimeout: 50 * time.Millisecond, - } - - c := NewClient(config) - defer c.Kill() - - _, err := c.Start() - if err == nil { - t.Fatal("err should not be nil") - } -} - -func TestClient_Start_Timeout(t *testing.T) { - config := &ClientConfig{ - Cmd: helperProcess("start-timeout"), - StartTimeout: 50 * time.Millisecond, - } - - c := NewClient(config) - defer c.Kill() - - _, err := c.Start() - if err == nil { - t.Fatal("err should not be nil") - } -} - -func TestClient_Stderr(t *testing.T) { - stderr := new(bytes.Buffer) - process := helperProcess("stderr") - c := NewClient(&ClientConfig{ - Cmd: process, - Stderr: stderr, - }) - defer c.Kill() - - if _, err := c.Start(); err != nil { - t.Fatalf("err: %s", err) - } - - for !c.Exited() { - time.Sleep(10 * time.Millisecond) - } - - if !strings.Contains(stderr.String(), "HELLO\n") { - t.Fatalf("bad log data: '%s'", stderr.String()) - } - - if !strings.Contains(stderr.String(), "WORLD\n") { - t.Fatalf("bad log data: '%s'", stderr.String()) - } -} - -func TestClient_Stdin(t *testing.T) { - // Overwrite stdin for this test with a temporary file - tf, err := ioutil.TempFile("", "terraform") - if err != nil { - t.Fatalf("err: %s", err) - } - defer os.Remove(tf.Name()) - defer tf.Close() - - if _, err = tf.WriteString("hello"); err != nil { - t.Fatalf("error: %s", err) - } - - if err = tf.Sync(); err != nil { - t.Fatalf("error: %s", err) - } - - if _, err = tf.Seek(0, 0); err != nil { - 
t.Fatalf("error: %s", err) - } - - oldStdin := os.Stdin - defer func() { os.Stdin = oldStdin }() - os.Stdin = tf - - process := helperProcess("stdin") - c := NewClient(&ClientConfig{Cmd: process}) - defer c.Kill() - - _, err = c.Start() - if err != nil { - t.Fatalf("error: %s", err) - } - - for { - if c.Exited() { - break - } - - time.Sleep(50 * time.Millisecond) - } - - if !process.ProcessState.Success() { - t.Fatal("process didn't exit cleanly") - } -} diff --git a/plugin/plugin.go b/plugin/plugin.go index 8589467004..00fa7b2967 100644 --- a/plugin/plugin.go +++ b/plugin/plugin.go @@ -1,10 +1,13 @@ -// The plugin package exposes functions and helpers for communicating to -// Terraform plugins which are implemented as standalone binary applications. -// -// plugin.Client fully manages the lifecycle of executing the application, -// connecting to it, and returning the RPC client and service names for -// connecting to it using the terraform/rpc package. -// -// plugin.Serve fully manages listeners to expose an RPC server from a binary -// that plugin.Client can connect to. package plugin + +import ( + "github.com/hashicorp/go-plugin" +) + +// See serve.go for serving plugins + +// PluginMap should be used by clients for the map of plugins. +var PluginMap = map[string]plugin.Plugin{ + "provider": &ResourceProviderPlugin{}, + "provisioner": &ResourceProvisionerPlugin{}, +} diff --git a/plugin/plugin_test.go b/plugin/plugin_test.go index d395837c3b..ddef40ab21 100644 --- a/plugin/plugin_test.go +++ b/plugin/plugin_test.go @@ -1,107 +1,16 @@ package plugin import ( - "fmt" - "log" - "os" - "os/exec" - "testing" - "time" - - tfrpc "github.com/hashicorp/terraform/rpc" "github.com/hashicorp/terraform/terraform" ) -func helperProcess(s ...string) *exec.Cmd { - cs := []string{"-test.run=TestHelperProcess", "--"} - cs = append(cs, s...) - env := []string{ - "GO_WANT_HELPER_PROCESS=1", - "TF_PLUGIN_MIN_PORT=10000", - "TF_PLUGIN_MAX_PORT=25000", - } - - cmd := exec.Command(os.Args[0], cs...) - cmd.Env = append(env, os.Environ()...) - return cmd -} - -// This is not a real test. This is just a helper process kicked off by -// tests. 
-func TestHelperProcess(*testing.T) { - if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { - return - } - - defer os.Exit(0) - - args := os.Args - for len(args) > 0 { - if args[0] == "--" { - args = args[1:] - break - } - - args = args[1:] - } - - if len(args) == 0 { - fmt.Fprintf(os.Stderr, "No command\n") - os.Exit(2) - } - - cmd, args := args[0], args[1:] - switch cmd { - case "bad-version": - fmt.Printf("%s1|tcp|:1234\n", APIVersion) - <-make(chan int) - case "resource-provider": - Serve(&ServeOpts{ - ProviderFunc: testProviderFixed(new(terraform.MockResourceProvider)), - }) - case "resource-provisioner": - Serve(&ServeOpts{ - ProvisionerFunc: testProvisionerFixed( - new(terraform.MockResourceProvisioner)), - }) - case "invalid-rpc-address": - fmt.Println("lolinvalid") - case "mock": - fmt.Printf("%s|tcp|:1234\n", APIVersion) - <-make(chan int) - case "start-timeout": - time.Sleep(1 * time.Minute) - os.Exit(1) - case "stderr": - fmt.Printf("%s|tcp|:1234\n", APIVersion) - log.Println("HELLO") - log.Println("WORLD") - case "stdin": - fmt.Printf("%s|tcp|:1234\n", APIVersion) - data := make([]byte, 5) - if _, err := os.Stdin.Read(data); err != nil { - log.Printf("stdin read error: %s", err) - os.Exit(100) - } - - if string(data) == "hello" { - os.Exit(0) - } - - os.Exit(1) - default: - fmt.Fprintf(os.Stderr, "Unknown command: %q\n", cmd) - os.Exit(2) - } -} - -func testProviderFixed(p terraform.ResourceProvider) tfrpc.ProviderFunc { +func testProviderFixed(p terraform.ResourceProvider) ProviderFunc { return func() terraform.ResourceProvider { return p } } -func testProvisionerFixed(p terraform.ResourceProvisioner) tfrpc.ProvisionerFunc { +func testProvisionerFixed(p terraform.ResourceProvisioner) ProvisionerFunc { return func() terraform.ResourceProvisioner { return p } diff --git a/rpc/resource_provider.go b/plugin/resource_provider.go similarity index 80% rename from rpc/resource_provider.go rename to plugin/resource_provider.go index 3fe6927de8..712e79c863 100644 --- a/rpc/resource_provider.go +++ b/plugin/resource_provider.go @@ -1,24 +1,38 @@ -package rpc +package plugin import ( "net/rpc" + "github.com/hashicorp/go-plugin" "github.com/hashicorp/terraform/terraform" ) +// ResourceProviderPlugin is the plugin.Plugin implementation. +type ResourceProviderPlugin struct { + F func() terraform.ResourceProvider +} + +func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { + return &ResourceProviderServer{Broker: b, Provider: p.F()}, nil +} + +func (p *ResourceProviderPlugin) Client( + b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { + return &ResourceProvider{Broker: b, Client: c}, nil +} + // ResourceProvider is an implementation of terraform.ResourceProvider // that communicates over RPC. 
type ResourceProvider struct { - Broker *muxBroker + Broker *plugin.MuxBroker Client *rpc.Client - Name string } func (p *ResourceProvider) Input( input terraform.UIInput, c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { id := p.Broker.NextId() - go acceptAndServe(p.Broker, id, "UIInput", &UIInputServer{ + go p.Broker.AcceptAndServe(id, &UIInputServer{ UIInput: input, }) @@ -28,7 +42,7 @@ func (p *ResourceProvider) Input( Config: c, } - err := p.Client.Call(p.Name+".Input", &args, &resp) + err := p.Client.Call("Plugin.Input", &args, &resp) if err != nil { return nil, err } @@ -46,7 +60,7 @@ func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []er Config: c, } - err := p.Client.Call(p.Name+".Validate", &args, &resp) + err := p.Client.Call("Plugin.Validate", &args, &resp) if err != nil { return nil, []error{err} } @@ -70,7 +84,7 @@ func (p *ResourceProvider) ValidateResource( Type: t, } - err := p.Client.Call(p.Name+".ValidateResource", &args, &resp) + err := p.Client.Call("Plugin.ValidateResource", &args, &resp) if err != nil { return nil, []error{err} } @@ -88,7 +102,7 @@ func (p *ResourceProvider) ValidateResource( func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error { var resp ResourceProviderConfigureResponse - err := p.Client.Call(p.Name+".Configure", c, &resp) + err := p.Client.Call("Plugin.Configure", c, &resp) if err != nil { return err } @@ -110,7 +124,7 @@ func (p *ResourceProvider) Apply( Diff: d, } - err := p.Client.Call(p.Name+".Apply", args, &resp) + err := p.Client.Call("Plugin.Apply", args, &resp) if err != nil { return nil, err } @@ -131,7 +145,7 @@ func (p *ResourceProvider) Diff( State: s, Config: c, } - err := p.Client.Call(p.Name+".Diff", args, &resp) + err := p.Client.Call("Plugin.Diff", args, &resp) if err != nil { return nil, err } @@ -151,7 +165,7 @@ func (p *ResourceProvider) Refresh( State: s, } - err := p.Client.Call(p.Name+".Refresh", args, &resp) + err := p.Client.Call("Plugin.Refresh", args, &resp) if err != nil { return nil, err } @@ -165,7 +179,7 @@ func (p *ResourceProvider) Refresh( func (p *ResourceProvider) Resources() []terraform.ResourceType { var result []terraform.ResourceType - err := p.Client.Call(p.Name+".Resources", new(interface{}), &result) + err := p.Client.Call("Plugin.Resources", new(interface{}), &result) if err != nil { // TODO: panic, log, what? return nil @@ -181,12 +195,12 @@ func (p *ResourceProvider) Close() error { // ResourceProviderServer is a net/rpc compatible structure for serving // a ResourceProvider. This should not be used directly. 
type ResourceProviderServer struct { - Broker *muxBroker + Broker *plugin.MuxBroker Provider terraform.ResourceProvider } type ResourceProviderConfigureResponse struct { - Error *BasicError + Error *plugin.BasicError } type ResourceProviderInputArgs struct { @@ -196,7 +210,7 @@ type ResourceProviderInputArgs struct { type ResourceProviderInputResponse struct { Config *terraform.ResourceConfig - Error *BasicError + Error *plugin.BasicError } type ResourceProviderApplyArgs struct { @@ -207,7 +221,7 @@ type ResourceProviderApplyArgs struct { type ResourceProviderApplyResponse struct { State *terraform.InstanceState - Error *BasicError + Error *plugin.BasicError } type ResourceProviderDiffArgs struct { @@ -218,7 +232,7 @@ type ResourceProviderDiffArgs struct { type ResourceProviderDiffResponse struct { Diff *terraform.InstanceDiff - Error *BasicError + Error *plugin.BasicError } type ResourceProviderRefreshArgs struct { @@ -228,7 +242,7 @@ type ResourceProviderRefreshArgs struct { type ResourceProviderRefreshResponse struct { State *terraform.InstanceState - Error *BasicError + Error *plugin.BasicError } type ResourceProviderValidateArgs struct { @@ -237,7 +251,7 @@ type ResourceProviderValidateArgs struct { type ResourceProviderValidateResponse struct { Warnings []string - Errors []*BasicError + Errors []*plugin.BasicError } type ResourceProviderValidateResourceArgs struct { @@ -247,7 +261,7 @@ type ResourceProviderValidateResourceArgs struct { type ResourceProviderValidateResourceResponse struct { Warnings []string - Errors []*BasicError + Errors []*plugin.BasicError } func (s *ResourceProviderServer) Input( @@ -256,22 +270,19 @@ func (s *ResourceProviderServer) Input( conn, err := s.Broker.Dial(args.InputId) if err != nil { *reply = ResourceProviderInputResponse{ - Error: NewBasicError(err), + Error: plugin.NewBasicError(err), } return nil } client := rpc.NewClient(conn) defer client.Close() - input := &UIInput{ - Client: client, - Name: "UIInput", - } + input := &UIInput{Client: client} config, err := s.Provider.Input(input, args.Config) *reply = ResourceProviderInputResponse{ Config: config, - Error: NewBasicError(err), + Error: plugin.NewBasicError(err), } return nil @@ -281,9 +292,9 @@ func (s *ResourceProviderServer) Validate( args *ResourceProviderValidateArgs, reply *ResourceProviderValidateResponse) error { warns, errs := s.Provider.Validate(args.Config) - berrs := make([]*BasicError, len(errs)) + berrs := make([]*plugin.BasicError, len(errs)) for i, err := range errs { - berrs[i] = NewBasicError(err) + berrs[i] = plugin.NewBasicError(err) } *reply = ResourceProviderValidateResponse{ Warnings: warns, @@ -296,9 +307,9 @@ func (s *ResourceProviderServer) ValidateResource( args *ResourceProviderValidateResourceArgs, reply *ResourceProviderValidateResourceResponse) error { warns, errs := s.Provider.ValidateResource(args.Type, args.Config) - berrs := make([]*BasicError, len(errs)) + berrs := make([]*plugin.BasicError, len(errs)) for i, err := range errs { - berrs[i] = NewBasicError(err) + berrs[i] = plugin.NewBasicError(err) } *reply = ResourceProviderValidateResourceResponse{ Warnings: warns, @@ -312,7 +323,7 @@ func (s *ResourceProviderServer) Configure( reply *ResourceProviderConfigureResponse) error { err := s.Provider.Configure(config) *reply = ResourceProviderConfigureResponse{ - Error: NewBasicError(err), + Error: plugin.NewBasicError(err), } return nil } @@ -323,7 +334,7 @@ func (s *ResourceProviderServer) Apply( state, err := s.Provider.Apply(args.Info, args.State, args.Diff) 
*result = ResourceProviderApplyResponse{ State: state, - Error: NewBasicError(err), + Error: plugin.NewBasicError(err), } return nil } @@ -334,7 +345,7 @@ func (s *ResourceProviderServer) Diff( diff, err := s.Provider.Diff(args.Info, args.State, args.Config) *result = ResourceProviderDiffResponse{ Diff: diff, - Error: NewBasicError(err), + Error: plugin.NewBasicError(err), } return nil } @@ -345,7 +356,7 @@ func (s *ResourceProviderServer) Refresh( newState, err := s.Provider.Refresh(args.Info, args.State) *result = ResourceProviderRefreshResponse{ State: newState, - Error: NewBasicError(err), + Error: plugin.NewBasicError(err), } return nil } diff --git a/plugin/resource_provider_test.go b/plugin/resource_provider_test.go index 41cbb81912..944041d3e5 100644 --- a/plugin/resource_provider_test.go +++ b/plugin/resource_provider_test.go @@ -1,15 +1,624 @@ package plugin import ( + "errors" + "reflect" "testing" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/terraform" ) -func TestResourceProvider(t *testing.T) { - c := NewClient(&ClientConfig{Cmd: helperProcess("resource-provider")}) - defer c.Kill() +func TestResourceProvider_impl(t *testing.T) { + var _ plugin.Plugin = new(ResourceProviderPlugin) + var _ terraform.ResourceProvider = new(ResourceProvider) +} - _, err := c.Client() +func TestResourceProvider_input(t *testing.T) { + // Create a mock provider + p := new(terraform.MockResourceProvider) + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProviderPluginName) if err != nil { - t.Fatalf("should not have error: %s", err) + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + input := new(terraform.MockUIInput) + + expected := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"bar": "baz"}, + } + p.InputReturnConfig = expected + + // Input + config := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"foo": "bar"}, + } + actual, err := provider.Input(input, config) + if !p.InputCalled { + t.Fatal("input should be called") + } + if !reflect.DeepEqual(p.InputConfig, config) { + t.Fatalf("bad: %#v", p.InputConfig) + } + if err != nil { + t.Fatalf("bad: %#v", err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceProvider_configure(t *testing.T) { + // Create a mock provider + p := new(terraform.MockResourceProvider) + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProviderPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + // Configure + config := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"foo": "bar"}, + } + e := provider.Configure(config) + if !p.ConfigureCalled { + t.Fatal("configure should be called") + } + if !reflect.DeepEqual(p.ConfigureConfig, config) { + t.Fatalf("bad: %#v", p.ConfigureConfig) + } + if e != nil { + t.Fatalf("bad: %#v", e) + } +} + +func TestResourceProvider_configure_errors(t *testing.T) { + p := new(terraform.MockResourceProvider) + p.ConfigureReturnError = errors.New("foo") + + // Create a mock provider + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := 
client.Dispense(ProviderPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + // Configure + config := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"foo": "bar"}, + } + e := provider.Configure(config) + if !p.ConfigureCalled { + t.Fatal("configure should be called") + } + if !reflect.DeepEqual(p.ConfigureConfig, config) { + t.Fatalf("bad: %#v", p.ConfigureConfig) + } + if e == nil { + t.Fatal("should have error") + } + if e.Error() != "foo" { + t.Fatalf("bad: %s", e) + } +} + +func TestResourceProvider_configure_warnings(t *testing.T) { + p := new(terraform.MockResourceProvider) + + // Create a mock provider + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProviderPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + // Configure + config := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"foo": "bar"}, + } + e := provider.Configure(config) + if !p.ConfigureCalled { + t.Fatal("configure should be called") + } + if !reflect.DeepEqual(p.ConfigureConfig, config) { + t.Fatalf("bad: %#v", p.ConfigureConfig) + } + if e != nil { + t.Fatalf("bad: %#v", e) + } +} + +func TestResourceProvider_apply(t *testing.T) { + p := new(terraform.MockResourceProvider) + + // Create a mock provider + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProviderPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + p.ApplyReturn = &terraform.InstanceState{ + ID: "bob", + } + + // Apply + info := &terraform.InstanceInfo{} + state := &terraform.InstanceState{} + diff := &terraform.InstanceDiff{} + newState, err := provider.Apply(info, state, diff) + if !p.ApplyCalled { + t.Fatal("apply should be called") + } + if !reflect.DeepEqual(p.ApplyDiff, diff) { + t.Fatalf("bad: %#v", p.ApplyDiff) + } + if err != nil { + t.Fatalf("bad: %#v", err) + } + if !reflect.DeepEqual(p.ApplyReturn, newState) { + t.Fatalf("bad: %#v", newState) + } +} + +func TestResourceProvider_diff(t *testing.T) { + p := new(terraform.MockResourceProvider) + + // Create a mock provider + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProviderPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + p.DiffReturn = &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{ + "foo": &terraform.ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + } + + // Diff + info := &terraform.InstanceInfo{} + state := &terraform.InstanceState{} + config := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"foo": "bar"}, + } + diff, err := provider.Diff(info, state, config) + if !p.DiffCalled { + t.Fatal("diff should be called") + } + if !reflect.DeepEqual(p.DiffDesired, config) { + t.Fatalf("bad: %#v", p.DiffDesired) + } + if err != nil { + t.Fatalf("bad: %#v", err) + } + if !reflect.DeepEqual(p.DiffReturn, diff) { + t.Fatalf("bad: %#v", diff) + } +} + +func TestResourceProvider_diff_error(t *testing.T) { + p := new(terraform.MockResourceProvider) + + // Create a mock provider + 
client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProviderPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + p.DiffReturnError = errors.New("foo") + + // Diff + info := &terraform.InstanceInfo{} + state := &terraform.InstanceState{} + config := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"foo": "bar"}, + } + diff, err := provider.Diff(info, state, config) + if !p.DiffCalled { + t.Fatal("diff should be called") + } + if !reflect.DeepEqual(p.DiffDesired, config) { + t.Fatalf("bad: %#v", p.DiffDesired) + } + if err == nil { + t.Fatal("should have error") + } + if diff != nil { + t.Fatal("should not have diff") + } +} + +func TestResourceProvider_refresh(t *testing.T) { + p := new(terraform.MockResourceProvider) + + // Create a mock provider + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProviderPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + p.RefreshReturn = &terraform.InstanceState{ + ID: "bob", + } + + // Refresh + info := &terraform.InstanceInfo{} + state := &terraform.InstanceState{} + newState, err := provider.Refresh(info, state) + if !p.RefreshCalled { + t.Fatal("refresh should be called") + } + if !reflect.DeepEqual(p.RefreshState, state) { + t.Fatalf("bad: %#v", p.RefreshState) + } + if err != nil { + t.Fatalf("bad: %#v", err) + } + if !reflect.DeepEqual(p.RefreshReturn, newState) { + t.Fatalf("bad: %#v", newState) + } +} + +func TestResourceProvider_resources(t *testing.T) { + p := new(terraform.MockResourceProvider) + + // Create a mock provider + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProviderPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + expected := []terraform.ResourceType{ + {"foo"}, + {"bar"}, + } + + p.ResourcesReturn = expected + + // Resources + result := provider.Resources() + if !p.ResourcesCalled { + t.Fatal("resources should be called") + } + if !reflect.DeepEqual(result, expected) { + t.Fatalf("bad: %#v", result) + } +} + +func TestResourceProvider_validate(t *testing.T) { + p := new(terraform.MockResourceProvider) + + // Create a mock provider + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProviderPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + // Configure + config := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"foo": "bar"}, + } + w, e := provider.Validate(config) + if !p.ValidateCalled { + t.Fatal("configure should be called") + } + if !reflect.DeepEqual(p.ValidateConfig, config) { + t.Fatalf("bad: %#v", p.ValidateConfig) + } + if w != nil { + t.Fatalf("bad: %#v", w) + } + if e != nil { + t.Fatalf("bad: %#v", e) + } +} + +func TestResourceProvider_validate_errors(t *testing.T) { + p := new(terraform.MockResourceProvider) + + // Create a mock provider + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + 
ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProviderPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + p.ValidateReturnErrors = []error{errors.New("foo")} + + // Configure + config := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"foo": "bar"}, + } + w, e := provider.Validate(config) + if !p.ValidateCalled { + t.Fatal("configure should be called") + } + if !reflect.DeepEqual(p.ValidateConfig, config) { + t.Fatalf("bad: %#v", p.ValidateConfig) + } + if w != nil { + t.Fatalf("bad: %#v", w) + } + + if len(e) != 1 { + t.Fatalf("bad: %#v", e) + } + if e[0].Error() != "foo" { + t.Fatalf("bad: %#v", e) + } +} + +func TestResourceProvider_validate_warns(t *testing.T) { + p := new(terraform.MockResourceProvider) + + // Create a mock provider + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProviderPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + p.ValidateReturnWarns = []string{"foo"} + + // Configure + config := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"foo": "bar"}, + } + w, e := provider.Validate(config) + if !p.ValidateCalled { + t.Fatal("configure should be called") + } + if !reflect.DeepEqual(p.ValidateConfig, config) { + t.Fatalf("bad: %#v", p.ValidateConfig) + } + if e != nil { + t.Fatalf("bad: %#v", e) + } + + expected := []string{"foo"} + if !reflect.DeepEqual(w, expected) { + t.Fatalf("bad: %#v", w) + } +} + +func TestResourceProvider_validateResource(t *testing.T) { + p := new(terraform.MockResourceProvider) + + // Create a mock provider + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProviderPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + // Configure + config := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"foo": "bar"}, + } + w, e := provider.ValidateResource("foo", config) + if !p.ValidateResourceCalled { + t.Fatal("configure should be called") + } + if p.ValidateResourceType != "foo" { + t.Fatalf("bad: %#v", p.ValidateResourceType) + } + if !reflect.DeepEqual(p.ValidateResourceConfig, config) { + t.Fatalf("bad: %#v", p.ValidateResourceConfig) + } + if w != nil { + t.Fatalf("bad: %#v", w) + } + if e != nil { + t.Fatalf("bad: %#v", e) + } +} + +func TestResourceProvider_validateResource_errors(t *testing.T) { + p := new(terraform.MockResourceProvider) + + // Create a mock provider + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProviderPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + p.ValidateResourceReturnErrors = []error{errors.New("foo")} + + // Configure + config := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"foo": "bar"}, + } + w, e := provider.ValidateResource("foo", config) + if !p.ValidateResourceCalled { + t.Fatal("configure should be called") + } + if p.ValidateResourceType != "foo" { + t.Fatalf("bad: %#v", p.ValidateResourceType) + } + if 
!reflect.DeepEqual(p.ValidateResourceConfig, config) { + t.Fatalf("bad: %#v", p.ValidateResourceConfig) + } + if w != nil { + t.Fatalf("bad: %#v", w) + } + + if len(e) != 1 { + t.Fatalf("bad: %#v", e) + } + if e[0].Error() != "foo" { + t.Fatalf("bad: %#v", e) + } +} + +func TestResourceProvider_validateResource_warns(t *testing.T) { + p := new(terraform.MockResourceProvider) + + // Create a mock provider + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProviderPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + p.ValidateResourceReturnWarns = []string{"foo"} + + // Configure + config := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"foo": "bar"}, + } + w, e := provider.ValidateResource("foo", config) + if !p.ValidateResourceCalled { + t.Fatal("configure should be called") + } + if p.ValidateResourceType != "foo" { + t.Fatalf("bad: %#v", p.ValidateResourceType) + } + if !reflect.DeepEqual(p.ValidateResourceConfig, config) { + t.Fatalf("bad: %#v", p.ValidateResourceConfig) + } + if e != nil { + t.Fatalf("bad: %#v", e) + } + + expected := []string{"foo"} + if !reflect.DeepEqual(w, expected) { + t.Fatalf("bad: %#v", w) + } +} + +func TestResourceProvider_close(t *testing.T) { + p := new(terraform.MockResourceProvider) + + // Create a mock provider + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProviderFunc: testProviderFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProviderPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provider := raw.(terraform.ResourceProvider) + + var iface interface{} = provider + pCloser, ok := iface.(terraform.ResourceProviderCloser) + if !ok { + t.Fatal("should be a ResourceProviderCloser") + } + + if err := pCloser.Close(); err != nil { + t.Fatalf("failed to close provider: %s", err) + } + + // The connection should be closed now, so if we to make a + // new call we should get an error. + err = provider.Configure(&terraform.ResourceConfig{}) + if err == nil { + t.Fatal("should have error") } } diff --git a/rpc/resource_provisioner.go b/plugin/resource_provisioner.go similarity index 71% rename from rpc/resource_provisioner.go rename to plugin/resource_provisioner.go index 715704d024..9823095803 100644 --- a/rpc/resource_provisioner.go +++ b/plugin/resource_provisioner.go @@ -1,17 +1,31 @@ -package rpc +package plugin import ( "net/rpc" + "github.com/hashicorp/go-plugin" "github.com/hashicorp/terraform/terraform" ) +// ResourceProvisionerPlugin is the plugin.Plugin implementation. +type ResourceProvisionerPlugin struct { + F func() terraform.ResourceProvisioner +} + +func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { + return &ResourceProvisionerServer{Broker: b, Provisioner: p.F()}, nil +} + +func (p *ResourceProvisionerPlugin) Client( + b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { + return &ResourceProvisioner{Broker: b, Client: c}, nil +} + // ResourceProvisioner is an implementation of terraform.ResourceProvisioner // that communicates over RPC. 
type ResourceProvisioner struct { - Broker *muxBroker + Broker *plugin.MuxBroker Client *rpc.Client - Name string } func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) { @@ -20,7 +34,7 @@ func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, [ Config: c, } - err := p.Client.Call(p.Name+".Validate", &args, &resp) + err := p.Client.Call("Plugin.Validate", &args, &resp) if err != nil { return nil, []error{err} } @@ -41,7 +55,7 @@ func (p *ResourceProvisioner) Apply( s *terraform.InstanceState, c *terraform.ResourceConfig) error { id := p.Broker.NextId() - go acceptAndServe(p.Broker, id, "UIOutput", &UIOutputServer{ + go p.Broker.AcceptAndServe(id, &UIOutputServer{ UIOutput: output, }) @@ -52,7 +66,7 @@ func (p *ResourceProvisioner) Apply( Config: c, } - err := p.Client.Call(p.Name+".Apply", args, &resp) + err := p.Client.Call("Plugin.Apply", args, &resp) if err != nil { return err } @@ -73,7 +87,7 @@ type ResourceProvisionerValidateArgs struct { type ResourceProvisionerValidateResponse struct { Warnings []string - Errors []*BasicError + Errors []*plugin.BasicError } type ResourceProvisionerApplyArgs struct { @@ -83,13 +97,13 @@ type ResourceProvisionerApplyArgs struct { } type ResourceProvisionerApplyResponse struct { - Error *BasicError + Error *plugin.BasicError } // ResourceProvisionerServer is a net/rpc compatible structure for serving // a ResourceProvisioner. This should not be used directly. type ResourceProvisionerServer struct { - Broker *muxBroker + Broker *plugin.MuxBroker Provisioner terraform.ResourceProvisioner } @@ -99,21 +113,18 @@ func (s *ResourceProvisionerServer) Apply( conn, err := s.Broker.Dial(args.OutputId) if err != nil { *result = ResourceProvisionerApplyResponse{ - Error: NewBasicError(err), + Error: plugin.NewBasicError(err), } return nil } client := rpc.NewClient(conn) defer client.Close() - output := &UIOutput{ - Client: client, - Name: "UIOutput", - } + output := &UIOutput{Client: client} err = s.Provisioner.Apply(output, args.State, args.Config) *result = ResourceProvisionerApplyResponse{ - Error: NewBasicError(err), + Error: plugin.NewBasicError(err), } return nil } @@ -122,9 +133,9 @@ func (s *ResourceProvisionerServer) Validate( args *ResourceProvisionerValidateArgs, reply *ResourceProvisionerValidateResponse) error { warns, errs := s.Provisioner.Validate(args.Config) - berrs := make([]*BasicError, len(errs)) + berrs := make([]*plugin.BasicError, len(errs)) for i, err := range errs { - berrs[i] = NewBasicError(err) + berrs[i] = plugin.NewBasicError(err) } *reply = ResourceProvisionerValidateResponse{ Warnings: warns, diff --git a/plugin/resource_provisioner_test.go b/plugin/resource_provisioner_test.go index e0920b4afb..073c8d2b7e 100644 --- a/plugin/resource_provisioner_test.go +++ b/plugin/resource_provisioner_test.go @@ -1,15 +1,193 @@ package plugin import ( + "errors" + "reflect" "testing" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/terraform" ) -func TestResourceProvisioner(t *testing.T) { - c := NewClient(&ClientConfig{Cmd: helperProcess("resource-provisioner")}) - defer c.Kill() +func TestResourceProvisioner_impl(t *testing.T) { + var _ plugin.Plugin = new(ResourceProvisionerPlugin) + var _ terraform.ResourceProvisioner = new(ResourceProvisioner) +} - _, err := c.Client() +func TestResourceProvisioner_apply(t *testing.T) { + // Create a mock provider + p := new(terraform.MockResourceProvisioner) + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + 
ProvisionerFunc: testProvisionerFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProvisionerPluginName) if err != nil { - t.Fatalf("should not have error: %s", err) + t.Fatalf("err: %s", err) + } + provisioner := raw.(terraform.ResourceProvisioner) + + // Apply + output := &terraform.MockUIOutput{} + state := &terraform.InstanceState{} + conf := &terraform.ResourceConfig{} + err = provisioner.Apply(output, state, conf) + if !p.ApplyCalled { + t.Fatal("apply should be called") + } + if !reflect.DeepEqual(p.ApplyConfig, conf) { + t.Fatalf("bad: %#v", p.ApplyConfig) + } + if err != nil { + t.Fatalf("bad: %#v", err) + } +} + +func TestResourceProvisioner_validate(t *testing.T) { + // Create a mock provider + p := new(terraform.MockResourceProvisioner) + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProvisionerFunc: testProvisionerFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProvisionerPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provisioner := raw.(terraform.ResourceProvisioner) + + // Configure + config := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"foo": "bar"}, + } + w, e := provisioner.Validate(config) + if !p.ValidateCalled { + t.Fatal("configure should be called") + } + if !reflect.DeepEqual(p.ValidateConfig, config) { + t.Fatalf("bad: %#v", p.ValidateConfig) + } + if w != nil { + t.Fatalf("bad: %#v", w) + } + if e != nil { + t.Fatalf("bad: %#v", e) + } +} + +func TestResourceProvisioner_validate_errors(t *testing.T) { + // Create a mock provider + p := new(terraform.MockResourceProvisioner) + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProvisionerFunc: testProvisionerFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProvisionerPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provisioner := raw.(terraform.ResourceProvisioner) + + p.ValidateReturnErrors = []error{errors.New("foo")} + + // Configure + config := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"foo": "bar"}, + } + w, e := provisioner.Validate(config) + if !p.ValidateCalled { + t.Fatal("configure should be called") + } + if !reflect.DeepEqual(p.ValidateConfig, config) { + t.Fatalf("bad: %#v", p.ValidateConfig) + } + if w != nil { + t.Fatalf("bad: %#v", w) + } + + if len(e) != 1 { + t.Fatalf("bad: %#v", e) + } + if e[0].Error() != "foo" { + t.Fatalf("bad: %#v", e) + } +} + +func TestResourceProvisioner_validate_warns(t *testing.T) { + // Create a mock provider + p := new(terraform.MockResourceProvisioner) + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProvisionerFunc: testProvisionerFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProvisionerPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provisioner := raw.(terraform.ResourceProvisioner) + + p.ValidateReturnWarns = []string{"foo"} + + // Configure + config := &terraform.ResourceConfig{ + Raw: map[string]interface{}{"foo": "bar"}, + } + w, e := provisioner.Validate(config) + if !p.ValidateCalled { + t.Fatal("configure should be called") + } + if !reflect.DeepEqual(p.ValidateConfig, config) { + t.Fatalf("bad: %#v", p.ValidateConfig) + } + if e != nil { + t.Fatalf("bad: %#v", e) + } + + expected := []string{"foo"} + if !reflect.DeepEqual(w, expected) { + t.Fatalf("bad: %#v", w) + } +} + +func TestResourceProvisioner_close(t *testing.T) { + // Create a mock 
provider + p := new(terraform.MockResourceProvisioner) + client, _ := plugin.TestPluginRPCConn(t, pluginMap(&ServeOpts{ + ProvisionerFunc: testProvisionerFixed(p), + })) + defer client.Close() + + // Request the provider + raw, err := client.Dispense(ProvisionerPluginName) + if err != nil { + t.Fatalf("err: %s", err) + } + provisioner := raw.(terraform.ResourceProvisioner) + + pCloser, ok := raw.(terraform.ResourceProvisionerCloser) + if !ok { + t.Fatal("should be a ResourceProvisionerCloser") + } + + if err := pCloser.Close(); err != nil { + t.Fatalf("failed to close provisioner: %s", err) + } + + // The connection should be closed now, so if we to make a + // new call we should get an error. + o := &terraform.MockUIOutput{} + s := &terraform.InstanceState{} + c := &terraform.ResourceConfig{} + err = provisioner.Apply(o, s, c) + if err == nil { + t.Fatal("should have error") } } diff --git a/plugin/serve.go b/plugin/serve.go new file mode 100644 index 0000000000..ba20e37345 --- /dev/null +++ b/plugin/serve.go @@ -0,0 +1,47 @@ +package plugin + +import ( + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/terraform" +) + +// The constants below are the names of the plugins that can be dispensed +// from the plugin server. +const ( + ProviderPluginName = "provider" + ProvisionerPluginName = "provisioner" +) + +// Handshake is the HandshakeConfig used to configure clients and servers. +var Handshake = plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", +} + +type ProviderFunc func() terraform.ResourceProvider +type ProvisionerFunc func() terraform.ResourceProvisioner + +// ServeOpts are the configurations to serve a plugin. +type ServeOpts struct { + ProviderFunc ProviderFunc + ProvisionerFunc ProvisionerFunc +} + +// Serve serves a plugin. This function never returns and should be the final +// function called in the main function of the plugin. +func Serve(opts *ServeOpts) { + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: Handshake, + Plugins: pluginMap(opts), + }) +} + +// pluginMap returns the map[string]plugin.Plugin to use for configuring a plugin +// server or client. +func pluginMap(opts *ServeOpts) map[string]plugin.Plugin { + return map[string]plugin.Plugin{ + "provider": &ResourceProviderPlugin{F: opts.ProviderFunc}, + "provisioner": &ResourceProvisionerPlugin{F: opts.ProvisionerFunc}, + } +} diff --git a/plugin/server.go b/plugin/server.go deleted file mode 100644 index 3daa8a3dee..0000000000 --- a/plugin/server.go +++ /dev/null @@ -1,138 +0,0 @@ -package plugin - -import ( - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "os" - "os/signal" - "runtime" - "strconv" - "sync/atomic" - - tfrpc "github.com/hashicorp/terraform/rpc" -) - -// The APIVersion is outputted along with the RPC address. The plugin -// client validates this API version and will show an error if it doesn't -// know how to speak it. -const APIVersion = "2" - -// The "magic cookie" is used to verify that the user intended to -// actually run this binary. If this cookie isn't present as an -// environmental variable, then we bail out early with an error. -const MagicCookieKey = "TF_PLUGIN_MAGIC_COOKIE" -const MagicCookieValue = "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2" - -// ServeOpts configures what sorts of plugins are served. 
-type ServeOpts struct { - ProviderFunc tfrpc.ProviderFunc - ProvisionerFunc tfrpc.ProvisionerFunc -} - -// Serve serves the plugins given by ServeOpts. -// -// Serve doesn't return until the plugin is done being executed. Any -// errors will be outputted to the log. -func Serve(opts *ServeOpts) { - // First check the cookie - if os.Getenv(MagicCookieKey) != MagicCookieValue { - fmt.Fprintf(os.Stderr, - "This binary is a Terraform plugin. These are not meant to be\n"+ - "executed directly. Please execute `terraform`, which will load\n"+ - "any plugins automatically.\n") - os.Exit(1) - } - - // Register a listener so we can accept a connection - listener, err := serverListener() - if err != nil { - log.Printf("[ERR] plugin init: %s", err) - return - } - defer listener.Close() - - // Create the RPC server to dispense - server := &tfrpc.Server{ - ProviderFunc: opts.ProviderFunc, - ProvisionerFunc: opts.ProvisionerFunc, - } - - // Output the address and service name to stdout so that Terraform - // core can bring it up. - log.Printf("Plugin address: %s %s\n", - listener.Addr().Network(), listener.Addr().String()) - fmt.Printf("%s|%s|%s\n", - APIVersion, - listener.Addr().Network(), - listener.Addr().String()) - os.Stdout.Sync() - - // Eat the interrupts - ch := make(chan os.Signal, 1) - signal.Notify(ch, os.Interrupt) - go func() { - var count int32 = 0 - for { - <-ch - newCount := atomic.AddInt32(&count, 1) - log.Printf( - "Received interrupt signal (count: %d). Ignoring.", - newCount) - } - }() - - // Serve - server.Accept(listener) -} - -func serverListener() (net.Listener, error) { - if runtime.GOOS == "windows" { - return serverListener_tcp() - } - - return serverListener_unix() -} - -func serverListener_tcp() (net.Listener, error) { - minPort, err := strconv.ParseInt(os.Getenv("TF_PLUGIN_MIN_PORT"), 10, 32) - if err != nil { - return nil, err - } - - maxPort, err := strconv.ParseInt(os.Getenv("TF_PLUGIN_MAX_PORT"), 10, 32) - if err != nil { - return nil, err - } - - for port := minPort; port <= maxPort; port++ { - address := fmt.Sprintf("127.0.0.1:%d", port) - listener, err := net.Listen("tcp", address) - if err == nil { - return listener, nil - } - } - - return nil, errors.New("Couldn't bind plugin TCP listener") -} - -func serverListener_unix() (net.Listener, error) { - tf, err := ioutil.TempFile("", "tf-plugin") - if err != nil { - return nil, err - } - path := tf.Name() - - // Close the file and remove it because it has to not exist for - // the domain socket. - if err := tf.Close(); err != nil { - return nil, err - } - if err := os.Remove(path); err != nil { - return nil, err - } - - return net.Listen("unix", path) -} diff --git a/rpc/ui_input.go b/plugin/ui_input.go similarity index 84% rename from rpc/ui_input.go rename to plugin/ui_input.go index 6c95806c56..493efc0a91 100644 --- a/rpc/ui_input.go +++ b/plugin/ui_input.go @@ -1,8 +1,9 @@ -package rpc +package plugin import ( "net/rpc" + "github.com/hashicorp/go-plugin" "github.com/hashicorp/terraform/terraform" ) @@ -10,12 +11,11 @@ import ( // over RPC. 
type UIInput struct { Client *rpc.Client - Name string } func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) { var resp UIInputInputResponse - err := i.Client.Call(i.Name+".Input", opts, &resp) + err := i.Client.Call("Plugin.Input", opts, &resp) if err != nil { return "", err } @@ -29,7 +29,7 @@ func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) { type UIInputInputResponse struct { Value string - Error *BasicError + Error *plugin.BasicError } // UIInputServer is a net/rpc compatible structure for serving @@ -44,7 +44,7 @@ func (s *UIInputServer) Input( value, err := s.UIInput.Input(opts) *reply = UIInputInputResponse{ Value: value, - Error: NewBasicError(err), + Error: plugin.NewBasicError(err), } return nil diff --git a/rpc/ui_input_test.go b/plugin/ui_input_test.go similarity index 78% rename from rpc/ui_input_test.go rename to plugin/ui_input_test.go index 6de494831d..a13dc0ee18 100644 --- a/rpc/ui_input_test.go +++ b/plugin/ui_input_test.go @@ -1,9 +1,10 @@ -package rpc +package plugin import ( "reflect" "testing" + "github.com/hashicorp/go-plugin" "github.com/hashicorp/terraform/terraform" ) @@ -12,20 +13,20 @@ func TestUIInput_impl(t *testing.T) { } func TestUIInput_input(t *testing.T) { - client, server := testClientServer(t) + client, server := plugin.TestRPCConn(t) defer client.Close() i := new(terraform.MockUIInput) i.InputReturnString = "foo" - err := server.RegisterName("UIInput", &UIInputServer{ + err := server.RegisterName("Plugin", &UIInputServer{ UIInput: i, }) if err != nil { t.Fatalf("err: %s", err) } - input := &UIInput{Client: client, Name: "UIInput"} + input := &UIInput{Client: client} opts := &terraform.InputOpts{ Id: "foo", diff --git a/rpc/ui_output.go b/plugin/ui_output.go similarity index 85% rename from rpc/ui_output.go rename to plugin/ui_output.go index a997b943b2..c222b00cde 100644 --- a/rpc/ui_output.go +++ b/plugin/ui_output.go @@ -1,4 +1,4 @@ -package rpc +package plugin import ( "net/rpc" @@ -10,11 +10,10 @@ import ( // over RPC. type UIOutput struct { Client *rpc.Client - Name string } func (o *UIOutput) Output(v string) { - o.Client.Call(o.Name+".Output", v, new(interface{})) + o.Client.Call("Plugin.Output", v, new(interface{})) } // UIOutputServer is the RPC server for serving UIOutput. 
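For reference, here is a minimal sketch of how a standalone provider binary is served under the new go-plugin based plugin package shown above. The exampleProvider function is a placeholder standing in for a real provider implementation; only plugin.Serve, ServeOpts, and ProviderFunc come from plugin/serve.go in this diff.

package main

import (
	"github.com/hashicorp/terraform/plugin"
	"github.com/hashicorp/terraform/terraform"
)

// exampleProvider is a placeholder; any terraform.ResourceProvider
// implementation could be returned here.
func exampleProvider() terraform.ResourceProvider {
	return new(terraform.MockResourceProvider)
}

func main() {
	// Serve never returns; it performs the go-plugin handshake and
	// exposes the provider over RPC to Terraform core.
	plugin.Serve(&plugin.ServeOpts{
		ProviderFunc: exampleProvider,
	})
}

Terraform core connects to such a binary using the matching Handshake config and dispenses the implementation by its map key ("provider"), which is what PluginMap in plugin/plugin.go and the client.Dispense(ProviderPluginName) calls in the tests above rely on.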
diff --git a/rpc/ui_output_test.go b/plugin/ui_output_test.go similarity index 72% rename from rpc/ui_output_test.go rename to plugin/ui_output_test.go index 0113a09038..50eadaa022 100644 --- a/rpc/ui_output_test.go +++ b/plugin/ui_output_test.go @@ -1,8 +1,9 @@ -package rpc +package plugin import ( "testing" + "github.com/hashicorp/go-plugin" "github.com/hashicorp/terraform/terraform" ) @@ -11,19 +12,19 @@ func TestUIOutput_impl(t *testing.T) { } func TestUIOutput_input(t *testing.T) { - client, server := testClientServer(t) + client, server := plugin.TestRPCConn(t) defer client.Close() o := new(terraform.MockUIOutput) - err := server.RegisterName("UIOutput", &UIOutputServer{ + err := server.RegisterName("Plugin", &UIOutputServer{ UIOutput: o, }) if err != nil { t.Fatalf("err: %s", err) } - output := &UIOutput{Client: client, Name: "UIOutput"} + output := &UIOutput{Client: client} output.Output("foo") if !o.OutputCalled { t.Fatal("output should be called") diff --git a/rpc/client.go b/rpc/client.go deleted file mode 100644 index 0c80385eef..0000000000 --- a/rpc/client.go +++ /dev/null @@ -1,108 +0,0 @@ -package rpc - -import ( - "io" - "net" - "net/rpc" - - "github.com/hashicorp/terraform/terraform" - "github.com/hashicorp/yamux" -) - -// Client connects to a Server in order to request plugin implementations -// for Terraform. -type Client struct { - broker *muxBroker - control *rpc.Client -} - -// Dial opens a connection to a Terraform RPC server and returns a client. -func Dial(network, address string) (*Client, error) { - conn, err := net.Dial(network, address) - if err != nil { - return nil, err - } - - if tcpConn, ok := conn.(*net.TCPConn); ok { - // Make sure to set keep alive so that the connection doesn't die - tcpConn.SetKeepAlive(true) - } - - return NewClient(conn) -} - -// NewClient creates a client from an already-open connection-like value. -// Dial is typically used instead. -func NewClient(conn io.ReadWriteCloser) (*Client, error) { - // Create the yamux client so we can multiplex - mux, err := yamux.Client(conn, nil) - if err != nil { - conn.Close() - return nil, err - } - - // Connect to the control stream. - control, err := mux.Open() - if err != nil { - mux.Close() - return nil, err - } - - // Create the broker and start it up - broker := newMuxBroker(mux) - go broker.Run() - - // Build the client using our broker and control channel. - return &Client{ - broker: broker, - control: rpc.NewClient(control), - }, nil -} - -// Close closes the connection. The client is no longer usable after this -// is called. 
-func (c *Client) Close() error { - if err := c.control.Close(); err != nil { - return err - } - - return c.broker.Close() -} - -func (c *Client) ResourceProvider() (terraform.ResourceProvider, error) { - var id uint32 - if err := c.control.Call( - "Dispenser.ResourceProvider", new(interface{}), &id); err != nil { - return nil, err - } - - conn, err := c.broker.Dial(id) - if err != nil { - return nil, err - } - - return &ResourceProvider{ - Broker: c.broker, - Client: rpc.NewClient(conn), - Name: "ResourceProvider", - }, nil -} - -func (c *Client) ResourceProvisioner() (terraform.ResourceProvisioner, error) { - var id uint32 - if err := c.control.Call( - "Dispenser.ResourceProvisioner", new(interface{}), &id); err != nil { - return nil, err - } - - conn, err := c.broker.Dial(id) - if err != nil { - return nil, err - } - - return &ResourceProvisioner{ - Broker: c.broker, - Client: rpc.NewClient(conn), - Name: "ResourceProvisioner", - }, nil -} diff --git a/rpc/client_test.go b/rpc/client_test.go deleted file mode 100644 index f8c286fe8e..0000000000 --- a/rpc/client_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package rpc - -import ( - "reflect" - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestClient_ResourceProvider(t *testing.T) { - clientConn, serverConn := testConn(t) - - p := new(terraform.MockResourceProvider) - server := &Server{ProviderFunc: testProviderFixed(p)} - go server.ServeConn(serverConn) - - client, err := NewClient(clientConn) - if err != nil { - t.Fatalf("err: %s", err) - } - defer client.Close() - - provider, err := client.ResourceProvider() - if err != nil { - t.Fatalf("err: %s", err) - } - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - e := provider.Configure(config) - if !p.ConfigureCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ConfigureConfig, config) { - t.Fatalf("bad: %#v", p.ConfigureConfig) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } -} - -func TestClient_ResourceProvisioner(t *testing.T) { - clientConn, serverConn := testConn(t) - - p := new(terraform.MockResourceProvisioner) - server := &Server{ProvisionerFunc: testProvisionerFixed(p)} - go server.ServeConn(serverConn) - - client, err := NewClient(clientConn) - if err != nil { - t.Fatalf("err: %s", err) - } - defer client.Close() - - provisioner, err := client.ResourceProvisioner() - if err != nil { - t.Fatalf("err: %s", err) - } - - // Apply - output := &terraform.MockUIOutput{} - state := &terraform.InstanceState{} - conf := &terraform.ResourceConfig{} - err = provisioner.Apply(output, state, conf) - if !p.ApplyCalled { - t.Fatal("apply should be called") - } - if !reflect.DeepEqual(p.ApplyConfig, conf) { - t.Fatalf("bad: %#v", p.ApplyConfig) - } - if err != nil { - t.Fatalf("bad: %#v", err) - } -} diff --git a/rpc/error_test.go b/rpc/error_test.go deleted file mode 100644 index 8ca8b60ebf..0000000000 --- a/rpc/error_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package rpc - -import ( - "errors" - "testing" -) - -func TestBasicError_ImplementsError(t *testing.T) { - var _ error = new(BasicError) -} - -func TestBasicError_MatchesMessage(t *testing.T) { - err := errors.New("foo") - wrapped := NewBasicError(err) - - if wrapped.Error() != err.Error() { - t.Fatalf("bad: %#v", wrapped.Error()) - } -} - -func TestNewBasicError_nil(t *testing.T) { - r := NewBasicError(nil) - if r != nil { - t.Fatalf("bad: %#v", r) - } -} diff --git a/rpc/resource_provider_test.go b/rpc/resource_provider_test.go deleted file 
mode 100644 index 3efdbce25f..0000000000 --- a/rpc/resource_provider_test.go +++ /dev/null @@ -1,518 +0,0 @@ -package rpc - -import ( - "errors" - "reflect" - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceProvider_impl(t *testing.T) { - var _ terraform.ResourceProvider = new(ResourceProvider) -} - -func TestResourceProvider_input(t *testing.T) { - client, server := testNewClientServer(t) - defer client.Close() - - p := server.ProviderFunc().(*terraform.MockResourceProvider) - - provider, err := client.ResourceProvider() - if err != nil { - t.Fatalf("err: %s", err) - } - - input := new(terraform.MockUIInput) - - expected := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"bar": "baz"}, - } - p.InputReturnConfig = expected - - // Input - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - actual, err := provider.Input(input, config) - if !p.InputCalled { - t.Fatal("input should be called") - } - if !reflect.DeepEqual(p.InputConfig, config) { - t.Fatalf("bad: %#v", p.InputConfig) - } - if err != nil { - t.Fatalf("bad: %#v", err) - } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceProvider_configure(t *testing.T) { - client, server := testNewClientServer(t) - defer client.Close() - - p := server.ProviderFunc().(*terraform.MockResourceProvider) - - provider, err := client.ResourceProvider() - if err != nil { - t.Fatalf("err: %s", err) - } - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - e := provider.Configure(config) - if !p.ConfigureCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ConfigureConfig, config) { - t.Fatalf("bad: %#v", p.ConfigureConfig) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvider_configure_errors(t *testing.T) { - p := new(terraform.MockResourceProvider) - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := &ResourceProvider{Client: client, Name: name} - - p.ConfigureReturnError = errors.New("foo") - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - e := provider.Configure(config) - if !p.ConfigureCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ConfigureConfig, config) { - t.Fatalf("bad: %#v", p.ConfigureConfig) - } - if e == nil { - t.Fatal("should have error") - } - if e.Error() != "foo" { - t.Fatalf("bad: %s", e) - } -} - -func TestResourceProvider_configure_warnings(t *testing.T) { - p := new(terraform.MockResourceProvider) - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := &ResourceProvider{Client: client, Name: name} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - e := provider.Configure(config) - if !p.ConfigureCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ConfigureConfig, config) { - t.Fatalf("bad: %#v", p.ConfigureConfig) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvider_apply(t *testing.T) { - p := new(terraform.MockResourceProvider) - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := &ResourceProvider{Client: client, Name: name} - - p.ApplyReturn = 
&terraform.InstanceState{ - ID: "bob", - } - - // Apply - info := &terraform.InstanceInfo{} - state := &terraform.InstanceState{} - diff := &terraform.InstanceDiff{} - newState, err := provider.Apply(info, state, diff) - if !p.ApplyCalled { - t.Fatal("apply should be called") - } - if !reflect.DeepEqual(p.ApplyDiff, diff) { - t.Fatalf("bad: %#v", p.ApplyDiff) - } - if err != nil { - t.Fatalf("bad: %#v", err) - } - if !reflect.DeepEqual(p.ApplyReturn, newState) { - t.Fatalf("bad: %#v", newState) - } -} - -func TestResourceProvider_diff(t *testing.T) { - p := new(terraform.MockResourceProvider) - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := &ResourceProvider{Client: client, Name: name} - - p.DiffReturn = &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{ - Old: "", - New: "bar", - }, - }, - } - - // Diff - info := &terraform.InstanceInfo{} - state := &terraform.InstanceState{} - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - diff, err := provider.Diff(info, state, config) - if !p.DiffCalled { - t.Fatal("diff should be called") - } - if !reflect.DeepEqual(p.DiffDesired, config) { - t.Fatalf("bad: %#v", p.DiffDesired) - } - if err != nil { - t.Fatalf("bad: %#v", err) - } - if !reflect.DeepEqual(p.DiffReturn, diff) { - t.Fatalf("bad: %#v", diff) - } -} - -func TestResourceProvider_diff_error(t *testing.T) { - p := new(terraform.MockResourceProvider) - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := &ResourceProvider{Client: client, Name: name} - - p.DiffReturnError = errors.New("foo") - - // Diff - info := &terraform.InstanceInfo{} - state := &terraform.InstanceState{} - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - diff, err := provider.Diff(info, state, config) - if !p.DiffCalled { - t.Fatal("diff should be called") - } - if !reflect.DeepEqual(p.DiffDesired, config) { - t.Fatalf("bad: %#v", p.DiffDesired) - } - if err == nil { - t.Fatal("should have error") - } - if diff != nil { - t.Fatal("should not have diff") - } -} - -func TestResourceProvider_refresh(t *testing.T) { - p := new(terraform.MockResourceProvider) - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := &ResourceProvider{Client: client, Name: name} - - p.RefreshReturn = &terraform.InstanceState{ - ID: "bob", - } - - // Refresh - info := &terraform.InstanceInfo{} - state := &terraform.InstanceState{} - newState, err := provider.Refresh(info, state) - if !p.RefreshCalled { - t.Fatal("refresh should be called") - } - if !reflect.DeepEqual(p.RefreshState, state) { - t.Fatalf("bad: %#v", p.RefreshState) - } - if err != nil { - t.Fatalf("bad: %#v", err) - } - if !reflect.DeepEqual(p.RefreshReturn, newState) { - t.Fatalf("bad: %#v", newState) - } -} - -func TestResourceProvider_resources(t *testing.T) { - p := new(terraform.MockResourceProvider) - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := &ResourceProvider{Client: client, Name: name} - - expected := []terraform.ResourceType{ - {"foo"}, - {"bar"}, - } - - p.ResourcesReturn = expected - - // Resources - result := provider.Resources() - if !p.ResourcesCalled { - t.Fatal("resources should be 
called") - } - if !reflect.DeepEqual(result, expected) { - t.Fatalf("bad: %#v", result) - } -} - -func TestResourceProvider_validate(t *testing.T) { - p := new(terraform.MockResourceProvider) - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := &ResourceProvider{Client: client, Name: name} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provider.Validate(config) - if !p.ValidateCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ValidateConfig, config) { - t.Fatalf("bad: %#v", p.ValidateConfig) - } - if w != nil { - t.Fatalf("bad: %#v", w) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvider_validate_errors(t *testing.T) { - p := new(terraform.MockResourceProvider) - p.ValidateReturnErrors = []error{errors.New("foo")} - - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := &ResourceProvider{Client: client, Name: name} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provider.Validate(config) - if !p.ValidateCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ValidateConfig, config) { - t.Fatalf("bad: %#v", p.ValidateConfig) - } - if w != nil { - t.Fatalf("bad: %#v", w) - } - - if len(e) != 1 { - t.Fatalf("bad: %#v", e) - } - if e[0].Error() != "foo" { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvider_validate_warns(t *testing.T) { - p := new(terraform.MockResourceProvider) - p.ValidateReturnWarns = []string{"foo"} - - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := &ResourceProvider{Client: client, Name: name} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provider.Validate(config) - if !p.ValidateCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ValidateConfig, config) { - t.Fatalf("bad: %#v", p.ValidateConfig) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } - - expected := []string{"foo"} - if !reflect.DeepEqual(w, expected) { - t.Fatalf("bad: %#v", w) - } -} - -func TestResourceProvider_validateResource(t *testing.T) { - p := new(terraform.MockResourceProvider) - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := &ResourceProvider{Client: client, Name: name} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provider.ValidateResource("foo", config) - if !p.ValidateResourceCalled { - t.Fatal("configure should be called") - } - if p.ValidateResourceType != "foo" { - t.Fatalf("bad: %#v", p.ValidateResourceType) - } - if !reflect.DeepEqual(p.ValidateResourceConfig, config) { - t.Fatalf("bad: %#v", p.ValidateResourceConfig) - } - if w != nil { - t.Fatalf("bad: %#v", w) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvider_validateResource_errors(t *testing.T) { - p := new(terraform.MockResourceProvider) - p.ValidateResourceReturnErrors = []error{errors.New("foo")} - - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := &ResourceProvider{Client: client, Name: 
name} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provider.ValidateResource("foo", config) - if !p.ValidateResourceCalled { - t.Fatal("configure should be called") - } - if p.ValidateResourceType != "foo" { - t.Fatalf("bad: %#v", p.ValidateResourceType) - } - if !reflect.DeepEqual(p.ValidateResourceConfig, config) { - t.Fatalf("bad: %#v", p.ValidateResourceConfig) - } - if w != nil { - t.Fatalf("bad: %#v", w) - } - - if len(e) != 1 { - t.Fatalf("bad: %#v", e) - } - if e[0].Error() != "foo" { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvider_validateResource_warns(t *testing.T) { - p := new(terraform.MockResourceProvider) - p.ValidateResourceReturnWarns = []string{"foo"} - - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := &ResourceProvider{Client: client, Name: name} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provider.ValidateResource("foo", config) - if !p.ValidateResourceCalled { - t.Fatal("configure should be called") - } - if p.ValidateResourceType != "foo" { - t.Fatalf("bad: %#v", p.ValidateResourceType) - } - if !reflect.DeepEqual(p.ValidateResourceConfig, config) { - t.Fatalf("bad: %#v", p.ValidateResourceConfig) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } - - expected := []string{"foo"} - if !reflect.DeepEqual(w, expected) { - t.Fatalf("bad: %#v", w) - } -} - -func TestResourceProvider_close(t *testing.T) { - client, _ := testNewClientServer(t) - defer client.Close() - - provider, err := client.ResourceProvider() - if err != nil { - t.Fatalf("err: %s", err) - } - - var p interface{} - p = provider - pCloser, ok := p.(terraform.ResourceProviderCloser) - if !ok { - t.Fatal("should be a ResourceProviderCloser") - } - - if err := pCloser.Close(); err != nil { - t.Fatalf("failed to close provider: %s", err) - } - - // The connection should be closed now, so if we to make a - // new call we should get an error. 
- err = provider.Configure(&terraform.ResourceConfig{}) - if err == nil { - t.Fatal("should have error") - } -} diff --git a/rpc/resource_provisioner_test.go b/rpc/resource_provisioner_test.go deleted file mode 100644 index 6fabdb6d41..0000000000 --- a/rpc/resource_provisioner_test.go +++ /dev/null @@ -1,165 +0,0 @@ -package rpc - -import ( - "errors" - "reflect" - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceProvisioner_impl(t *testing.T) { - var _ terraform.ResourceProvisioner = new(ResourceProvisioner) -} - -func TestResourceProvisioner_apply(t *testing.T) { - client, server := testNewClientServer(t) - defer client.Close() - - p := server.ProvisionerFunc().(*terraform.MockResourceProvisioner) - - provisioner, err := client.ResourceProvisioner() - if err != nil { - t.Fatalf("err: %s", err) - } - - // Apply - output := &terraform.MockUIOutput{} - state := &terraform.InstanceState{} - conf := &terraform.ResourceConfig{} - err = provisioner.Apply(output, state, conf) - if !p.ApplyCalled { - t.Fatal("apply should be called") - } - if !reflect.DeepEqual(p.ApplyConfig, conf) { - t.Fatalf("bad: %#v", p.ApplyConfig) - } - if err != nil { - t.Fatalf("bad: %#v", err) - } -} - -func TestResourceProvisioner_validate(t *testing.T) { - p := new(terraform.MockResourceProvisioner) - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provisioner := &ResourceProvisioner{Client: client, Name: name} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provisioner.Validate(config) - if !p.ValidateCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ValidateConfig, config) { - t.Fatalf("bad: %#v", p.ValidateConfig) - } - if w != nil { - t.Fatalf("bad: %#v", w) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvisioner_validate_errors(t *testing.T) { - p := new(terraform.MockResourceProvisioner) - p.ValidateReturnErrors = []error{errors.New("foo")} - - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provisioner := &ResourceProvisioner{Client: client, Name: name} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provisioner.Validate(config) - if !p.ValidateCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ValidateConfig, config) { - t.Fatalf("bad: %#v", p.ValidateConfig) - } - if w != nil { - t.Fatalf("bad: %#v", w) - } - - if len(e) != 1 { - t.Fatalf("bad: %#v", e) - } - if e[0].Error() != "foo" { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvisioner_validate_warns(t *testing.T) { - p := new(terraform.MockResourceProvisioner) - p.ValidateReturnWarns = []string{"foo"} - - client, server := testClientServer(t) - name, err := Register(server, p) - if err != nil { - t.Fatalf("err: %s", err) - } - provisioner := &ResourceProvisioner{Client: client, Name: name} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provisioner.Validate(config) - if !p.ValidateCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ValidateConfig, config) { - t.Fatalf("bad: %#v", p.ValidateConfig) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } - - expected := []string{"foo"} - if !reflect.DeepEqual(w, expected) { - t.Fatalf("bad: %#v", w) - } -} - -func 
TestResourceProvisioner_close(t *testing.T) { - client, _ := testNewClientServer(t) - defer client.Close() - - provisioner, err := client.ResourceProvisioner() - if err != nil { - t.Fatalf("err: %s", err) - } - - var p interface{} - p = provisioner - pCloser, ok := p.(terraform.ResourceProvisionerCloser) - if !ok { - t.Fatal("should be a ResourceProvisionerCloser") - } - - if err := pCloser.Close(); err != nil { - t.Fatalf("failed to close provisioner: %s", err) - } - - // The connection should be closed now, so if we to make a - // new call we should get an error. - o := &terraform.MockUIOutput{} - s := &terraform.InstanceState{} - c := &terraform.ResourceConfig{} - err = provisioner.Apply(o, s, c) - if err == nil { - t.Fatal("should have error") - } -} diff --git a/rpc/rpc.go b/rpc/rpc.go deleted file mode 100644 index f11a482f34..0000000000 --- a/rpc/rpc.go +++ /dev/null @@ -1,35 +0,0 @@ -package rpc - -import ( - "errors" - "fmt" - "net/rpc" - "sync" - - "github.com/hashicorp/terraform/terraform" -) - -// nextId is the next ID to use for names registered. -var nextId uint32 = 0 -var nextLock sync.Mutex - -// Register registers a Terraform thing with the RPC server and returns -// the name it is registered under. -func Register(server *rpc.Server, thing interface{}) (name string, err error) { - nextLock.Lock() - defer nextLock.Unlock() - - switch t := thing.(type) { - case terraform.ResourceProvider: - name = fmt.Sprintf("Terraform%d", nextId) - err = server.RegisterName(name, &ResourceProviderServer{Provider: t}) - case terraform.ResourceProvisioner: - name = fmt.Sprintf("Terraform%d", nextId) - err = server.RegisterName(name, &ResourceProvisionerServer{Provisioner: t}) - default: - return "", errors.New("Unknown type to register for RPC server.") - } - - nextId += 1 - return -} diff --git a/rpc/rpc_test.go b/rpc/rpc_test.go deleted file mode 100644 index f23d9332af..0000000000 --- a/rpc/rpc_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package rpc - -import ( - "net" - "net/rpc" - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func testConn(t *testing.T) (net.Conn, net.Conn) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("err: %s", err) - } - - var serverConn net.Conn - doneCh := make(chan struct{}) - go func() { - defer close(doneCh) - defer l.Close() - var err error - serverConn, err = l.Accept() - if err != nil { - t.Fatalf("err: %s", err) - } - }() - - clientConn, err := net.Dial("tcp", l.Addr().String()) - if err != nil { - t.Fatalf("err: %s", err) - } - <-doneCh - - return clientConn, serverConn -} - -func testClientServer(t *testing.T) (*rpc.Client, *rpc.Server) { - clientConn, serverConn := testConn(t) - - server := rpc.NewServer() - go server.ServeConn(serverConn) - - client := rpc.NewClient(clientConn) - - return client, server -} - -func testNewClientServer(t *testing.T) (*Client, *Server) { - clientConn, serverConn := testConn(t) - - server := &Server{ - ProviderFunc: testProviderFixed(new(terraform.MockResourceProvider)), - ProvisionerFunc: testProvisionerFixed( - new(terraform.MockResourceProvisioner)), - } - go server.ServeConn(serverConn) - - client, err := NewClient(clientConn) - if err != nil { - t.Fatalf("err: %s", err) - } - - return client, server -} - -func testProviderFixed(p terraform.ResourceProvider) ProviderFunc { - return func() terraform.ResourceProvider { - return p - } -} - -func testProvisionerFixed(p terraform.ResourceProvisioner) ProvisionerFunc { - return func() terraform.ResourceProvisioner { - return p - } -} 
diff --git a/rpc/server.go b/rpc/server.go deleted file mode 100644 index dd1e9b7b08..0000000000 --- a/rpc/server.go +++ /dev/null @@ -1,147 +0,0 @@ -package rpc - -import ( - "io" - "log" - "net" - "net/rpc" - - "github.com/hashicorp/terraform/terraform" - "github.com/hashicorp/yamux" -) - -// Server listens for network connections and then dispenses interface -// implementations for Terraform over net/rpc. -type Server struct { - ProviderFunc ProviderFunc - ProvisionerFunc ProvisionerFunc -} - -// ProviderFunc creates terraform.ResourceProviders when they're requested -// from the server. -type ProviderFunc func() terraform.ResourceProvider - -// ProvisionerFunc creates terraform.ResourceProvisioners when they're requested -// from the server. -type ProvisionerFunc func() terraform.ResourceProvisioner - -// Accept accepts connections on a listener and serves requests for -// each incoming connection. Accept blocks; the caller typically invokes -// it in a go statement. -func (s *Server) Accept(lis net.Listener) { - for { - conn, err := lis.Accept() - if err != nil { - log.Printf("[ERR] plugin server: %s", err) - return - } - - go s.ServeConn(conn) - } -} - -// ServeConn runs a single connection. -// -// ServeConn blocks, serving the connection until the client hangs up. -func (s *Server) ServeConn(conn io.ReadWriteCloser) { - // First create the yamux server to wrap this connection - mux, err := yamux.Server(conn, nil) - if err != nil { - conn.Close() - log.Printf("[ERR] plugin: %s", err) - return - } - - // Accept the control connection - control, err := mux.Accept() - if err != nil { - mux.Close() - log.Printf("[ERR] plugin: %s", err) - return - } - - // Create the broker and start it up - broker := newMuxBroker(mux) - go broker.Run() - - // Use the control connection to build the dispenser and serve the - // connection. - server := rpc.NewServer() - server.RegisterName("Dispenser", &dispenseServer{ - ProviderFunc: s.ProviderFunc, - ProvisionerFunc: s.ProvisionerFunc, - - broker: broker, - }) - server.ServeConn(control) -} - -// dispenseServer dispenses variousinterface implementations for Terraform. 
-type dispenseServer struct { - ProviderFunc ProviderFunc - ProvisionerFunc ProvisionerFunc - - broker *muxBroker -} - -func (d *dispenseServer) ResourceProvider( - args interface{}, response *uint32) error { - id := d.broker.NextId() - *response = id - - go func() { - conn, err := d.broker.Accept(id) - if err != nil { - log.Printf("[ERR] Plugin dispense: %s", err) - return - } - - serve(conn, "ResourceProvider", &ResourceProviderServer{ - Broker: d.broker, - Provider: d.ProviderFunc(), - }) - }() - - return nil -} - -func (d *dispenseServer) ResourceProvisioner( - args interface{}, response *uint32) error { - id := d.broker.NextId() - *response = id - - go func() { - conn, err := d.broker.Accept(id) - if err != nil { - log.Printf("[ERR] Plugin dispense: %s", err) - return - } - - serve(conn, "ResourceProvisioner", &ResourceProvisionerServer{ - Broker: d.broker, - Provisioner: d.ProvisionerFunc(), - }) - }() - - return nil -} - -func acceptAndServe(mux *muxBroker, id uint32, n string, v interface{}) { - conn, err := mux.Accept(id) - if err != nil { - log.Printf("[ERR] Plugin acceptAndServe: %s", err) - return - } - - serve(conn, n, v) -} - -func serve(conn io.ReadWriteCloser, name string, v interface{}) { - server := rpc.NewServer() - if err := server.RegisterName(name, v); err != nil { - log.Printf("[ERR] Plugin dispense: %s", err) - return - } - - server.ServeConn(conn) -} diff --git a/scripts/build.sh b/scripts/build.sh index 76ff6dad61..b7d6856f19 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -47,16 +47,8 @@ gox \ -os="${XC_OS}" \ -arch="${XC_ARCH}" \ -ldflags "${LD_FLAGS}" \ - -output "pkg/{{.OS}}_{{.Arch}}/terraform-{{.Dir}}" \ - $(go list ./... | grep -v /vendor/) - -# Make sure "terraform-terraform" is renamed properly -for PLATFORM in $(find ./pkg -mindepth 1 -maxdepth 1 -type d); do - set +e - mv ${PLATFORM}/terraform-terraform.exe ${PLATFORM}/terraform.exe 2>/dev/null - mv ${PLATFORM}/terraform-terraform ${PLATFORM}/terraform 2>/dev/null - set -e -done + -output "pkg/{{.OS}}_{{.Arch}}/terraform" \ + . # Move all the compiled things to the $GOPATH/bin GOPATH=${GOPATH:-$(go env GOPATH)} diff --git a/scripts/generate-plugins.go b/scripts/generate-plugins.go new file mode 100644 index 0000000000..0867f97559 --- /dev/null +++ b/scripts/generate-plugins.go @@ -0,0 +1,283 @@ +// Generate Plugins is a small program that updates the lists of plugins in +// command/internal_plugin_list.go so they will be compiled into the main +// terraform binary. 
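For illustration only (this sketch is not part of the diff): given the source template and the map formats defined further down in this file, the generated command/internal_plugin_list.go should come out looking roughly as follows. The aws provider and file provisioner entries, and their deriveImport aliases, are assumed examples; the real generated file lists every built-in plugin.

// +build !core

//
// This file is automatically generated by scripts/generate-plugins.go -- Do not edit!
//
package command

import (
	awsprovider "github.com/hashicorp/terraform/builtin/providers/aws"
	fileresourceprovisioner "github.com/hashicorp/terraform/builtin/provisioners/file"
	"github.com/hashicorp/terraform/plugin"
	"github.com/hashicorp/terraform/terraform"
)

var InternalProviders = map[string]plugin.ProviderFunc{
	"aws": awsprovider.Provider,
}

var InternalProvisioners = map[string]plugin.ProvisionerFunc{
	"file": func() terraform.ResourceProvisioner { return new(fileresourceprovisioner.ResourceProvisioner) },
}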
+package main + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strings" +) + +const target = "command/internal_plugin_list.go" + +func main() { + wd, _ := os.Getwd() + if filepath.Base(wd) != "terraform" { + log.Fatalf("This program must be invoked in the terraform project root; in %s", wd) + } + + // Collect all of the data we need about plugins we have in the project + providers, err := discoverProviders() + if err != nil { + log.Fatalf("Failed to discover providers: %s", err) + } + + provisioners, err := discoverProvisioners() + if err != nil { + log.Fatalf("Failed to discover provisioners: %s", err) + } + + // Do some simple code generation and templating + output := source + output = strings.Replace(output, "IMPORTS", makeImports(providers, provisioners), 1) + output = strings.Replace(output, "PROVIDERS", makeProviderMap(providers), 1) + output = strings.Replace(output, "PROVISIONERS", makeProvisionerMap(provisioners), 1) + + // TODO sort the lists of plugins so we are not subjected to random OS ordering of the plugin lists + + // Write our generated code to the command/plugin.go file + file, err := os.Create(target) + defer file.Close() + if err != nil { + log.Fatalf("Failed to open %s for writing: %s", target, err) + } + + _, err = file.WriteString(output) + if err != nil { + log.Fatalf("Failed writing to %s: %s", target, err) + } + + log.Printf("Generated %s", target) +} + +type plugin struct { + Package string // Package name from ast remoteexec + PluginName string // Path via deriveName() remote-exec + TypeName string // Type of plugin provisioner + Path string // Relative import path builtin/provisioners/remote-exec + ImportName string // See deriveImport() remoteexecprovisioner +} + +// makeProviderMap creates a map of providers like this: +// +// var InternalProviders = map[string]plugin.ProviderFunc{ +// "aws": aws.Provider, +// "azurerm": azurerm.Provider, +// "cloudflare": cloudflare.Provider, +func makeProviderMap(items []plugin) string { + output := "" + for _, item := range items { + output += fmt.Sprintf("\t\"%s\": %s.%s,\n", item.PluginName, item.ImportName, item.TypeName) + } + return output +} + +// makeProvisionerMap creates a map of provisioners like this: +// +// "file": func() terraform.ResourceProvisioner { return new(file.ResourceProvisioner) }, +// "local-exec": func() terraform.ResourceProvisioner { return new(localexec.ResourceProvisioner) }, +// "remote-exec": func() terraform.ResourceProvisioner { return new(remoteexec.ResourceProvisioner) }, +// +// This is more verbose than the Provider case because there is no corresponding +// Provisioner function. 
+func makeProvisionerMap(items []plugin) string { + output := "" + for _, item := range items { + output += fmt.Sprintf("\t\"%s\": func() terraform.ResourceProvisioner { return new(%s.%s) },\n", item.PluginName, item.ImportName, item.TypeName) + } + return output +} + +func makeImports(providers, provisioners []plugin) string { + plugins := []string{} + + for _, provider := range providers { + plugins = append(plugins, fmt.Sprintf("\t%s \"github.com/hashicorp/terraform/%s\"\n", provider.ImportName, filepath.ToSlash(provider.Path))) + } + + for _, provisioner := range provisioners { + plugins = append(plugins, fmt.Sprintf("\t%s \"github.com/hashicorp/terraform/%s\"\n", provisioner.ImportName, filepath.ToSlash(provisioner.Path))) + } + + // Make things pretty + sort.Strings(plugins) + + return strings.Join(plugins, "") +} + +// listDirectories recursively lists directories under the specified path +func listDirectories(path string) ([]string, error) { + names := []string{} + items, err := ioutil.ReadDir(path) + if err != nil { + return names, err + } + + for _, item := range items { + // We only want directories + if item.IsDir() { + if item.Name() == "test-fixtures" { + continue + } + currentDir := filepath.Join(path, item.Name()) + names = append(names, currentDir) + + // Do some recursion + subNames, err := listDirectories(currentDir) + if err == nil { + names = append(names, subNames...) + } + } + } + + return names, nil +} + +// deriveName determines the name of the plugin relative to the specified root +// path. +func deriveName(root, full string) string { + short, _ := filepath.Rel(root, full) + bits := strings.Split(short, string(os.PathSeparator)) + return strings.Join(bits, "-") +} + +// deriveImport will build a unique import identifier based on packageName and +// the result of deriveName(). This is important for disambiguating between +// providers and provisioners that have the same name. This will be something +// like: +// +// remote-exec -> remoteexecprovisioner +// +// which is long, but is deterministic and unique. +func deriveImport(typeName, derivedName string) string { + return strings.Replace(derivedName, "-", "", -1) + strings.ToLower(typeName) +} + +// discoverTypesInPath searches for types of typeID in path using go's ast and +// returns a list of plugins it finds. +func discoverTypesInPath(path, typeID, typeName string) ([]plugin, error) { + pluginTypes := []plugin{} + + dirs, err := listDirectories(path) + if err != nil { + return pluginTypes, err + } + + for _, dir := range dirs { + fset := token.NewFileSet() + goPackages, err := parser.ParseDir(fset, dir, nil, parser.AllErrors) + if err != nil { + return pluginTypes, fmt.Errorf("Failed parsing directory %s: %s", dir, err) + } + + for _, goPackage := range goPackages { + ast.PackageExports(goPackage) + ast.Inspect(goPackage, func(n ast.Node) bool { + switch x := n.(type) { + case *ast.FuncDecl: + // If we get a function then we will check the function name + // against typeName and the function return type (Results) + // against typeID. + // + // There may be more than one return type but in the target + // case there should only be one. Also the return type is an + // ast.SelectorExpr which means we have multiple nodes. + // We'll read all of them as ast.Ident (identifier), join + // them via . 
to get a string like terraform.ResourceProvider + // and see if it matches our expected typeID + // + // This is somewhat verbose but prevents us from identifying + // the wrong types if the function name is ambiguous or if + // there are other subfolders added later. + if x.Name.Name == typeName && len(x.Type.Results.List) == 1 { + node := x.Type.Results.List[0].Type + typeIdentifiers := []string{} + ast.Inspect(node, func(m ast.Node) bool { + switch y := m.(type) { + case *ast.Ident: + typeIdentifiers = append(typeIdentifiers, y.Name) + } + // We need all of the identifiers to join so we + // can't break early here. + return true + }) + if strings.Join(typeIdentifiers, ".") == typeID { + derivedName := deriveName(path, dir) + pluginTypes = append(pluginTypes, plugin{ + Package: goPackage.Name, + PluginName: derivedName, + ImportName: deriveImport(x.Name.Name, derivedName), + TypeName: x.Name.Name, + Path: dir, + }) + } + } + case *ast.TypeSpec: + // In the simpler case we will simply check whether the type + // declaration has the name we were looking for. + if x.Name.Name == typeID { + derivedName := deriveName(path, dir) + pluginTypes = append(pluginTypes, plugin{ + Package: goPackage.Name, + PluginName: derivedName, + ImportName: deriveImport(x.Name.Name, derivedName), + TypeName: x.Name.Name, + Path: dir, + }) + // The AST stops parsing when we return false. Once we + // find the symbol we want we can stop parsing. + return false + } + } + return true + }) + } + } + + return pluginTypes, nil +} + +func discoverProviders() ([]plugin, error) { + path := "./builtin/providers" + typeID := "terraform.ResourceProvider" + typeName := "Provider" + return discoverTypesInPath(path, typeID, typeName) +} + +func discoverProvisioners() ([]plugin, error) { + path := "./builtin/provisioners" + typeID := "ResourceProvisioner" + typeName := "" + return discoverTypesInPath(path, typeID, typeName) +} + +const source = `// +build !core + +// +// This file is automatically generated by scripts/generate-plugins.go -- Do not edit! 
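The AST walk in discoverTypesInPath can be hard to picture from the comments alone. The standalone sketch below is an assumed example, not code from this change: it parses a hypothetical provider package and joins the identifiers of the Provider function's result type with "." to recover terraform.ResourceProvider, the same way the generator matches typeID.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"strings"
)

const src = `package aws

import "github.com/hashicorp/terraform/terraform"

func Provider() terraform.ResourceProvider { return nil }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "provider.go", src, 0)
	if err != nil {
		panic(err)
	}

	ast.Inspect(f, func(n ast.Node) bool {
		fn, ok := n.(*ast.FuncDecl)
		if !ok || fn.Name.Name != "Provider" ||
			fn.Type.Results == nil || len(fn.Type.Results.List) != 1 {
			return true
		}

		// The result type terraform.ResourceProvider is an *ast.SelectorExpr;
		// walking it yields the idents "terraform" and "ResourceProvider" in order.
		ids := []string{}
		ast.Inspect(fn.Type.Results.List[0].Type, func(m ast.Node) bool {
			if id, ok := m.(*ast.Ident); ok {
				ids = append(ids, id.Name)
			}
			return true
		})

		fmt.Println(strings.Join(ids, ".")) // terraform.ResourceProvider
		return false
	})
}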
+// +package command + +import ( +IMPORTS + "github.com/hashicorp/terraform/plugin" + "github.com/hashicorp/terraform/terraform" +) + +var InternalProviders = map[string]plugin.ProviderFunc{ +PROVIDERS +} + +var InternalProvisioners = map[string]plugin.ProvisionerFunc{ +PROVISIONERS +} + +` diff --git a/scripts/generate-plugins_test.go b/scripts/generate-plugins_test.go new file mode 100644 index 0000000000..bbb3fce180 --- /dev/null +++ b/scripts/generate-plugins_test.go @@ -0,0 +1,102 @@ +package main + +import "testing" + +func TestMakeProvisionerMap(t *testing.T) { + p := makeProvisionerMap([]plugin{ + { + Package: "file", + PluginName: "file", + TypeName: "ResourceProvisioner", + Path: "builtin/provisioners/file", + ImportName: "fileresourceprovisioner", + }, + { + Package: "localexec", + PluginName: "local-exec", + TypeName: "ResourceProvisioner", + Path: "builtin/provisioners/local-exec", + ImportName: "localexecresourceprovisioner", + }, + { + Package: "remoteexec", + PluginName: "remote-exec", + TypeName: "ResourceProvisioner", + Path: "builtin/provisioners/remote-exec", + ImportName: "remoteexecresourceprovisioner", + }, + }) + + expected := ` "file": func() terraform.ResourceProvisioner { return new(fileresourceprovisioner.ResourceProvisioner) }, + "local-exec": func() terraform.ResourceProvisioner { return new(localexecresourceprovisioner.ResourceProvisioner) }, + "remote-exec": func() terraform.ResourceProvisioner { return new(remoteexecresourceprovisioner.ResourceProvisioner) }, +` + + if p != expected { + t.Errorf("Provisioner output does not match expected format.\n -- Expected -- \n%s\n -- Found --\n%s\n", expected, p) + } +} + +func TestDeriveName(t *testing.T) { + actual := deriveName("builtin/provisioners", "builtin/provisioners/magic/remote-exec") + expected := "magic-remote-exec" + if actual != expected { + t.Errorf("Expected %s; found %s", expected, actual) + } +} + +func TestDeriveImport(t *testing.T) { + actual := deriveImport("provider", "magic-aws") + expected := "magicawsprovider" + if actual != expected { + t.Errorf("Expected %s; found %s", expected, actual) + } +} + +func contains(plugins []plugin, name string) bool { + for _, plugin := range plugins { + if plugin.PluginName == name { + return true + } + } + return false +} + +func TestDiscoverTypesProviders(t *testing.T) { + plugins, err := discoverTypesInPath("../builtin/providers", "terraform.ResourceProvider", "Provider") + if err != nil { + t.Fatalf(err.Error()) + } + // We're just going to spot-check, not do this exhaustively + if !contains(plugins, "aws") { + t.Errorf("Expected to find aws provider") + } + if !contains(plugins, "docker") { + t.Errorf("Expected to find docker provider") + } + if !contains(plugins, "dnsimple") { + t.Errorf("Expected to find dnsimple provider") + } + if !contains(plugins, "triton") { + t.Errorf("Expected to find triton provider") + } + if contains(plugins, "file") { + t.Errorf("Found unexpected provider file") + } +} + +func TestDiscoverTypesProvisioners(t *testing.T) { + plugins, err := discoverTypesInPath("../builtin/provisioners", "ResourceProvisioner", "") + if err != nil { + t.Fatalf(err.Error()) + } + if !contains(plugins, "chef") { + t.Errorf("Expected to find chef provisioner") + } + if !contains(plugins, "remote-exec") { + t.Errorf("Expected to find remote-exec provisioner") + } + if contains(plugins, "aws") { + t.Errorf("Found unexpected provisioner aws") + } +} diff --git a/state/remote/atlas_test.go b/state/remote/atlas_test.go index 847fb39cbb..5a58f9c79b 
100644 --- a/state/remote/atlas_test.go +++ b/state/remote/atlas_test.go @@ -159,7 +159,7 @@ func TestAtlasClient_UnresolvableConflict(t *testing.T) { select { case <-doneCh: // OK - case <-time.After(50 * time.Millisecond): + case <-time.After(500 * time.Millisecond): t.Fatalf("Timed out after 50ms, probably because retrying infinitely.") } } @@ -245,7 +245,7 @@ func (f *fakeAtlas) handler(resp http.ResponseWriter, req *http.Request) { // loads the state. var testStateModuleOrderChange = []byte( `{ - "version": 1, + "version": 2, "serial": 1, "modules": [ { @@ -276,7 +276,7 @@ var testStateModuleOrderChange = []byte( var testStateSimple = []byte( `{ - "version": 1, + "version": 2, "serial": 1, "modules": [ { diff --git a/state/testing.go b/state/testing.go index 6a4a88ad0c..c5305ecefa 100644 --- a/state/testing.go +++ b/state/testing.go @@ -36,7 +36,7 @@ func TestState(t *testing.T, s interface{}) { if ws, ok := s.(StateWriter); ok { current.Modules = append(current.Modules, &terraform.ModuleState{ Path: []string{"root"}, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "bar": "baz", }, }) @@ -94,7 +94,7 @@ func TestState(t *testing.T, s interface{}) { current.Modules = []*terraform.ModuleState{ &terraform.ModuleState{ Path: []string{"root", "somewhere"}, - Outputs: map[string]string{"serialCheck": "true"}, + Outputs: map[string]interface{}{"serialCheck": "true"}, }, } if err := writer.WriteState(current); err != nil { @@ -123,7 +123,7 @@ func TestStateInitial() *terraform.State { Modules: []*terraform.ModuleState{ &terraform.ModuleState{ Path: []string{"root", "child"}, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "foo": "bar", }, }, diff --git a/terraform/context.go b/terraform/context.go index a645f29f78..8ddad3331e 100644 --- a/terraform/context.go +++ b/terraform/context.go @@ -35,16 +35,17 @@ const ( // ContextOpts are the user-configurable options to create a context with // NewContext. type ContextOpts struct { - Destroy bool - Diff *Diff - Hooks []Hook - Module *module.Tree - Parallelism int - State *State - Providers map[string]ResourceProviderFactory - Provisioners map[string]ResourceProvisionerFactory - Targets []string - Variables map[string]string + Destroy bool + Diff *Diff + Hooks []Hook + Module *module.Tree + Parallelism int + State *State + StateFutureAllowed bool + Providers map[string]ResourceProviderFactory + Provisioners map[string]ResourceProvisionerFactory + Targets []string + Variables map[string]string UIInput UIInput } @@ -78,7 +79,7 @@ type Context struct { // Once a Context is creator, the pointer values within ContextOpts // should not be mutated in any way, since the pointers are copied, not // the values themselves. -func NewContext(opts *ContextOpts) *Context { +func NewContext(opts *ContextOpts) (*Context, error) { // Copy all the hooks and add our stop hook. We don't append directly // to the Config so that we're not modifying that in-place. sh := new(stopHook) @@ -92,6 +93,22 @@ func NewContext(opts *ContextOpts) *Context { state.init() } + // If our state is from the future, then error. Callers can avoid + // this error by explicitly setting `StateFutureAllowed`. + if !opts.StateFutureAllowed && state.FromFutureTerraform() { + return nil, fmt.Errorf( + "Terraform doesn't allow running any operations against a state\n"+ + "that was written by a future Terraform version. 
The state is\n"+ + "reporting it is written by Terraform '%s'.\n\n"+ + "Please run at least that version of Terraform to continue.", + state.TFVersion) + } + + // Explicitly reset our state version to our current version so that + // any operations we do will write out that our latest version + // has run. + state.TFVersion = Version + // Determine parallelism, default to 10. We do this both to limit // CPU pressure but also to have an extra guard against rate throttling // from providers. @@ -135,7 +152,7 @@ func NewContext(opts *ContextOpts) *Context { parallelSem: NewSemaphore(par), providerInputConfig: make(map[string]map[string]interface{}), sh: sh, - } + }, nil } type ContextGraphOpts struct { @@ -208,6 +225,8 @@ func (c *Context) Input(mode InputMode) error { continue case config.VariableTypeMap: continue + case config.VariableTypeList: + continue case config.VariableTypeString: // Good! default: diff --git a/terraform/context_apply_test.go b/terraform/context_apply_test.go index c58ee803f7..fe030b2c6c 100644 --- a/terraform/context_apply_test.go +++ b/terraform/context_apply_test.go @@ -48,6 +48,45 @@ func TestContext2Apply(t *testing.T) { } } +func TestContext2Apply_mapVarBetweenModules(t *testing.T) { + m := testModule(t, "apply-map-var-through-module") + p := testProvider("null") + p.ApplyFn = testApplyFn + p.DiffFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "null": testProviderFuncFixed(p), + }, + }) + + if _, err := ctx.Plan(); err != nil { + t.Fatalf("err: %s", err) + } + + state, err := ctx.Apply() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(` +Outputs: + +amis_from_module = {eu-west-1:ami-789012 eu-west-2:ami-989484 us-west-1:ami-123456 us-west-2:ami-456789 } + +module.test: + null_resource.noop: + ID = foo + + Outputs: + + amis_out = {eu-west-1:ami-789012 eu-west-2:ami-989484 us-west-1:ami-123456 us-west-2:ami-456789 }`) + if actual != expected { + t.Fatalf("expected: \n%s\n\ngot: \n%s\n", expected, actual) + } +} + func TestContext2Apply_providerAlias(t *testing.T) { m := testModule(t, "apply-provider-alias") p := testProvider("aws") @@ -969,7 +1008,7 @@ func TestContext2Apply_moduleDestroyOrder(t *testing.T) { }, }, }, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "a_output": "a", }, }, @@ -1438,7 +1477,7 @@ func TestContext2Apply_outputOrphan(t *testing.T) { Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "foo": "bar", "bar": "baz", }, @@ -2559,11 +2598,14 @@ func TestContext2Apply_destroyModuleWithAttrsReferencingResource(t *testing.T) { t.Fatalf("err: %s", err) } - ctx = planFromFile.Context(&ContextOpts{ + ctx, err = planFromFile.Context(&ContextOpts{ Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) + if err != nil { + t.Fatalf("err: %s", err) + } state, err = ctx.Apply() if err != nil { @@ -3066,7 +3108,7 @@ func TestContext2Apply_outputInvalid(t *testing.T) { if err == nil { t.Fatalf("err: %s", err) } - if !strings.Contains(err.Error(), "is not a string") { + if !strings.Contains(err.Error(), "is not a valid type") { t.Fatalf("err: %s", err) } } @@ -3144,7 +3186,7 @@ func TestContext2Apply_outputList(t *testing.T) { actual := strings.TrimSpace(state.String()) expected := strings.TrimSpace(testTerraformApplyOutputListStr) if actual != expected { - t.Fatalf("bad: \n%s", actual) + 
t.Fatalf("expected: \n%s\n\nbad: \n%s", expected, actual) } } @@ -3850,7 +3892,7 @@ func TestContext2Apply_vars(t *testing.T) { actual := strings.TrimSpace(state.String()) expected := strings.TrimSpace(testTerraformApplyVarsStr) if actual != expected { - t.Fatalf("bad: \n%s", actual) + t.Fatalf("expected: %s\n got:\n%s", expected, actual) } } @@ -4115,11 +4157,14 @@ func TestContext2Apply_issue5254(t *testing.T) { t.Fatalf("err: %s", err) } - ctx = planFromFile.Context(&ContextOpts{ + ctx, err = planFromFile.Context(&ContextOpts{ Providers: map[string]ResourceProviderFactory{ "template": testProviderFuncFixed(p), }, }) + if err != nil { + t.Fatalf("err: %s", err) + } state, err = ctx.Apply() if err != nil { @@ -4189,12 +4234,15 @@ func TestContext2Apply_targetedWithTaintedInState(t *testing.T) { t.Fatalf("err: %s", err) } - ctx = planFromFile.Context(&ContextOpts{ + ctx, err = planFromFile.Context(&ContextOpts{ Module: testModule(t, "apply-tainted-targets"), Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) + if err != nil { + t.Fatalf("err: %s", err) + } state, err := ctx.Apply() if err != nil { diff --git a/terraform/context_input_test.go b/terraform/context_input_test.go index 404ef0ffc4..dae45e0d0b 100644 --- a/terraform/context_input_test.go +++ b/terraform/context_input_test.go @@ -45,7 +45,7 @@ func TestContext2Input(t *testing.T) { actual := strings.TrimSpace(state.String()) expected := strings.TrimSpace(testTerraformInputVarsStr) if actual != expected { - t.Fatalf("bad: \n%s", actual) + t.Fatalf("expected:\n%s\ngot:\n%s", expected, actual) } } diff --git a/terraform/context_refresh_test.go b/terraform/context_refresh_test.go index dbab702550..3d46c27c25 100644 --- a/terraform/context_refresh_test.go +++ b/terraform/context_refresh_test.go @@ -452,7 +452,7 @@ func TestContext2Refresh_output(t *testing.T) { }, }, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "foo": "foo", }, }, @@ -738,7 +738,7 @@ func TestContext2Refresh_orphanModule(t *testing.T) { }, }, }, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "id": "i-bcd234", "grandchild_id": "i-cde345", }, @@ -752,7 +752,7 @@ func TestContext2Refresh_orphanModule(t *testing.T) { }, }, }, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "id": "i-cde345", }, }, diff --git a/terraform/context_test.go b/terraform/context_test.go index 015ae1921e..eee648cd2e 100644 --- a/terraform/context_test.go +++ b/terraform/context_test.go @@ -7,8 +7,71 @@ import ( "time" ) +func TestNewContextState(t *testing.T) { + cases := map[string]struct { + Input *ContextOpts + Err bool + }{ + "empty TFVersion": { + &ContextOpts{ + State: &State{}, + }, + false, + }, + + "past TFVersion": { + &ContextOpts{ + State: &State{TFVersion: "0.1.2"}, + }, + false, + }, + + "equal TFVersion": { + &ContextOpts{ + State: &State{TFVersion: Version}, + }, + false, + }, + + "future TFVersion": { + &ContextOpts{ + State: &State{TFVersion: "99.99.99"}, + }, + true, + }, + + "future TFVersion, allowed": { + &ContextOpts{ + State: &State{TFVersion: "99.99.99"}, + StateFutureAllowed: true, + }, + false, + }, + } + + for k, tc := range cases { + ctx, err := NewContext(tc.Input) + if (err != nil) != tc.Err { + t.Fatalf("%s: err: %s", k, err) + } + if err != nil { + continue + } + + // Version should always be set to our current + if ctx.state.TFVersion != Version { + t.Fatalf("%s: state not set to current version", k) + } + } +} + func testContext2(t *testing.T, opts *ContextOpts) *Context { - 
return NewContext(opts) + ctx, err := NewContext(opts) + if err != nil { + t.Fatalf("err: %s", err) + } + + return ctx } func testApplyFn( diff --git a/terraform/eval_context.go b/terraform/eval_context.go index f4427939ae..f2867511d7 100644 --- a/terraform/eval_context.go +++ b/terraform/eval_context.go @@ -68,7 +68,7 @@ type EvalContext interface { // SetVariables sets the variables for the module within // this context with the name n. This function call is additive: // the second parameter is merged with any previous call. - SetVariables(string, map[string]string) + SetVariables(string, map[string]interface{}) // Diff returns the global diff as well as the lock that should // be used to modify that diff. diff --git a/terraform/eval_context_builtin.go b/terraform/eval_context_builtin.go index a25c1c6a15..4dff93a4cb 100644 --- a/terraform/eval_context_builtin.go +++ b/terraform/eval_context_builtin.go @@ -23,7 +23,7 @@ type BuiltinEvalContext struct { // as the Interpolater itself, it is protected by InterpolaterVarLock // which must be locked during any access to the map. Interpolater *Interpolater - InterpolaterVars map[string]map[string]string + InterpolaterVars map[string]map[string]interface{} InterpolaterVarLock *sync.Mutex Hooks []Hook @@ -311,7 +311,7 @@ func (ctx *BuiltinEvalContext) Path() []string { return ctx.PathValue } -func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]string) { +func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) { ctx.InterpolaterVarLock.Lock() defer ctx.InterpolaterVarLock.Unlock() @@ -322,7 +322,7 @@ func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]string) { vars := ctx.InterpolaterVars[key] if vars == nil { - vars = make(map[string]string) + vars = make(map[string]interface{}) ctx.InterpolaterVars[key] = vars } diff --git a/terraform/eval_context_mock.go b/terraform/eval_context_mock.go index 60d83c7240..4f5c23bc49 100644 --- a/terraform/eval_context_mock.go +++ b/terraform/eval_context_mock.go @@ -74,7 +74,7 @@ type MockEvalContext struct { SetVariablesCalled bool SetVariablesModule string - SetVariablesVariables map[string]string + SetVariablesVariables map[string]interface{} DiffCalled bool DiffDiff *Diff @@ -183,7 +183,7 @@ func (c *MockEvalContext) Path() []string { return c.PathPath } -func (c *MockEvalContext) SetVariables(n string, vs map[string]string) { +func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) { c.SetVariablesCalled = true c.SetVariablesModule = n c.SetVariablesVariables = vs diff --git a/terraform/eval_output.go b/terraform/eval_output.go index acdc268c37..b584bdeccd 100644 --- a/terraform/eval_output.go +++ b/terraform/eval_output.go @@ -2,6 +2,7 @@ package terraform import ( "fmt" + "log" "github.com/hashicorp/terraform/config" ) @@ -45,7 +46,8 @@ type EvalWriteOutput struct { func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { cfg, err := ctx.Interpolate(n.Value, nil) if err != nil { - // Ignore it + // Log error but continue anyway + log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err) } state, lock := ctx.State() @@ -76,16 +78,16 @@ func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { } } - // If it is a list of values, get the first one - if list, ok := valueRaw.([]interface{}); ok { - valueRaw = list[0] + switch valueTyped := valueRaw.(type) { + case string: + mod.Outputs[n.Name] = valueTyped + case []interface{}: + mod.Outputs[n.Name] = valueTyped + case map[string]interface{}: + 
mod.Outputs[n.Name] = valueTyped + default: + return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped) } - if _, ok := valueRaw.(string); !ok { - return nil, fmt.Errorf("output %s is not a string", n.Name) - } - - // Write the output - mod.Outputs[n.Name] = valueRaw.(string) return nil, nil } diff --git a/terraform/eval_variable.go b/terraform/eval_variable.go index 216efe5b8a..114f8eaa00 100644 --- a/terraform/eval_variable.go +++ b/terraform/eval_variable.go @@ -4,7 +4,6 @@ import ( "fmt" "strings" - "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" "github.com/mitchellh/mapstructure" @@ -26,7 +25,7 @@ import ( // use of the values since it is only valid to pass string values. The // structure is in place for extension of the type system, however. type EvalTypeCheckVariable struct { - Variables map[string]string + Variables map[string]interface{} ModulePath []string ModuleTree *module.Tree } @@ -43,29 +42,56 @@ func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) { prototypes[variable.Name] = variable.Type() } + // Only display a module in an error message if we are not in the root module + modulePathDescription := fmt.Sprintf(" in module %s", strings.Join(n.ModulePath[1:], ".")) + if len(n.ModulePath) == 1 { + modulePathDescription = "" + } + for name, declaredType := range prototypes { // This is only necessary when we _actually_ check. It is left as a reminder // that at the current time we are dealing with a type system consisting only // of strings and maps - where the only valid inter-module variable type is // string. - _, ok := n.Variables[name] + proposedValue, ok := n.Variables[name] if !ok { // This means the default value should be used as no overriding value // has been set. Therefore we should continue as no check is necessary. continue } + if proposedValue == config.UnknownVariableValue { + continue + } + switch declaredType { case config.VariableTypeString: // This will need actual verification once we aren't dealing with // a map[string]string but this is sufficient for now. - continue - default: - // Only display a module if we are not in the root module - modulePathDescription := fmt.Sprintf(" in module %s", strings.Join(n.ModulePath[1:], ".")) - if len(n.ModulePath) == 1 { - modulePathDescription = "" + switch proposedValue.(type) { + case string: + continue + default: + return nil, fmt.Errorf("variable %s%s should be type %s, got %T", + name, modulePathDescription, declaredType.Printable(), proposedValue) } + case config.VariableTypeMap: + switch proposedValue.(type) { + case map[string]interface{}: + continue + default: + return nil, fmt.Errorf("variable %s%s should be type %s, got %T", + name, modulePathDescription, declaredType.Printable(), proposedValue) + } + case config.VariableTypeList: + switch proposedValue.(type) { + case []interface{}: + continue + default: + return nil, fmt.Errorf("variable %s%s should be type %s, got %T", + name, modulePathDescription, declaredType.Printable(), proposedValue) + } + default: // This will need the actual type substituting when we have more than // just strings and maps. return nil, fmt.Errorf("variable %s%s should be type %s, got type string", @@ -80,7 +106,7 @@ func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) { // explicitly for interpolation later. 
type EvalSetVariables struct { Module *string - Variables map[string]string + Variables map[string]interface{} } // TODO: test @@ -93,31 +119,43 @@ func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) { // given configuration, and uses the final values as a way to set the // mapping. type EvalVariableBlock struct { - Config **ResourceConfig - Variables map[string]string + Config **ResourceConfig + VariableValues map[string]interface{} } // TODO: test func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) { // Clear out the existing mapping - for k, _ := range n.Variables { - delete(n.Variables, k) + for k, _ := range n.VariableValues { + delete(n.VariableValues, k) } // Get our configuration rc := *n.Config for k, v := range rc.Config { - var vStr string - if err := mapstructure.WeakDecode(v, &vStr); err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf( - "%s: error reading value: {{err}}", k), err) + var vString string + if err := mapstructure.WeakDecode(v, &vString); err == nil { + n.VariableValues[k] = vString + continue } - n.Variables[k] = vStr + var vMap map[string]interface{} + if err := mapstructure.WeakDecode(v, &vMap); err == nil { + n.VariableValues[k] = vMap + continue + } + + var vSlice []interface{} + if err := mapstructure.WeakDecode(v, &vSlice); err == nil { + n.VariableValues[k] = vSlice + continue + } + + return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k) } for k, _ := range rc.Raw { - if _, ok := n.Variables[k]; !ok { - n.Variables[k] = config.UnknownVariableValue + if _, ok := n.VariableValues[k]; !ok { + n.VariableValues[k] = config.UnknownVariableValue } } diff --git a/terraform/graph_config_node_module.go b/terraform/graph_config_node_module.go index ba377e94d6..3e36e1ea59 100644 --- a/terraform/graph_config_node_module.go +++ b/terraform/graph_config_node_module.go @@ -69,7 +69,7 @@ func (n *GraphNodeConfigModule) Expand(b GraphBuilder) (GraphNodeSubgraph, error return &graphNodeModuleExpanded{ Original: n, Graph: graph, - Variables: make(map[string]string), + Variables: make(map[string]interface{}), }, nil } @@ -107,7 +107,7 @@ type graphNodeModuleExpanded struct { // Variables is a map of the input variables. This reference should // be shared with ModuleInputTransformer in order to create a connection // where the variables are set properly. - Variables map[string]string + Variables map[string]interface{} } func (n *graphNodeModuleExpanded) Name() string { @@ -147,8 +147,8 @@ func (n *graphNodeModuleExpanded) EvalTree() EvalNode { }, &EvalVariableBlock{ - Config: &resourceConfig, - Variables: n.Variables, + Config: &resourceConfig, + VariableValues: n.Variables, }, }, } diff --git a/terraform/graph_config_node_variable.go b/terraform/graph_config_node_variable.go index e462070d02..389d7babfa 100644 --- a/terraform/graph_config_node_variable.go +++ b/terraform/graph_config_node_variable.go @@ -114,7 +114,7 @@ func (n *GraphNodeConfigVariable) EvalTree() EvalNode { // Otherwise, interpolate the value of this variable and set it // within the variables mapping. 
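EvalVariableBlock now classifies each configured value by attempting a weak decode to string, then map, then list. The standalone sketch below shows that fallback order with github.com/mitchellh/mapstructure; the classify helper and sample values are made up for the example.

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// classify mirrors the decode-with-fallback approach: try string first,
// then map, then list, and reject anything else.
func classify(v interface{}) (interface{}, error) {
	var s string
	if err := mapstructure.WeakDecode(v, &s); err == nil {
		return s, nil
	}

	var m map[string]interface{}
	if err := mapstructure.WeakDecode(v, &m); err == nil {
		return m, nil
	}

	var l []interface{}
	if err := mapstructure.WeakDecode(v, &l); err == nil {
		return l, nil
	}

	return nil, fmt.Errorf("value %#v is not a string, list or map type", v)
}

func main() {
	v, _ := classify(42)
	fmt.Printf("%T %v\n", v, v) // string 42 (weakly decoded)

	v, _ = classify(map[string]interface{}{"ami": "ami-123456"})
	fmt.Printf("%T %v\n", v, v) // map[string]interface {} map[ami:ami-123456]

	v, _ = classify([]interface{}{"a", "b"})
	fmt.Printf("%T %v\n", v, v) // []interface {} [a b]
}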
var config *ResourceConfig - variables := make(map[string]string) + variables := make(map[string]interface{}) return &EvalSequence{ Nodes: []EvalNode{ &EvalInterpolate{ @@ -123,8 +123,8 @@ func (n *GraphNodeConfigVariable) EvalTree() EvalNode { }, &EvalVariableBlock{ - Config: &config, - Variables: variables, + Config: &config, + VariableValues: variables, }, &EvalTypeCheckVariable{ diff --git a/terraform/graph_walk_context.go b/terraform/graph_walk_context.go index ac6310d086..7424fdbbd3 100644 --- a/terraform/graph_walk_context.go +++ b/terraform/graph_walk_context.go @@ -27,7 +27,7 @@ type ContextGraphWalker struct { once sync.Once contexts map[string]*BuiltinEvalContext contextLock sync.Mutex - interpolaterVars map[string]map[string]string + interpolaterVars map[string]map[string]interface{} interpolaterVarLock sync.Mutex providerCache map[string]ResourceProvider providerConfigCache map[string]*ResourceConfig @@ -49,7 +49,7 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { } // Setup the variables for this interpolater - variables := make(map[string]string) + variables := make(map[string]interface{}) if len(path) <= 1 { for k, v := range w.Context.variables { variables[k] = v @@ -81,12 +81,12 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { StateValue: w.Context.state, StateLock: &w.Context.stateLock, Interpolater: &Interpolater{ - Operation: w.Operation, - Module: w.Context.module, - State: w.Context.state, - StateLock: &w.Context.stateLock, - Variables: variables, - VariablesLock: &w.interpolaterVarLock, + Operation: w.Operation, + Module: w.Context.module, + State: w.Context.state, + StateLock: &w.Context.stateLock, + VariableValues: variables, + VariableValuesLock: &w.interpolaterVarLock, }, InterpolaterVars: w.interpolaterVars, InterpolaterVarLock: &w.interpolaterVarLock, @@ -150,5 +150,5 @@ func (w *ContextGraphWalker) init() { w.providerCache = make(map[string]ResourceProvider, 5) w.providerConfigCache = make(map[string]*ResourceConfig, 5) w.provisionerCache = make(map[string]ResourceProvisioner, 5) - w.interpolaterVars = make(map[string]map[string]string, 5) + w.interpolaterVars = make(map[string]map[string]interface{}, 5) } diff --git a/terraform/interpolate.go b/terraform/interpolate.go index 7ec549c93b..11eb193b61 100644 --- a/terraform/interpolate.go +++ b/terraform/interpolate.go @@ -9,6 +9,7 @@ import ( "strings" "sync" + "github.com/hashicorp/hil" "github.com/hashicorp/hil/ast" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" @@ -23,12 +24,12 @@ const ( // Interpolater is the structure responsible for determining the values // for interpolations such as `aws_instance.foo.bar`. type Interpolater struct { - Operation walkOperation - Module *module.Tree - State *State - StateLock *sync.RWMutex - Variables map[string]string - VariablesLock *sync.Mutex + Operation walkOperation + Module *module.Tree + State *State + StateLock *sync.RWMutex + VariableValues map[string]interface{} + VariableValuesLock *sync.Mutex } // InterpolationScope is the current scope of execution. 
This is required @@ -52,12 +53,18 @@ func (i *Interpolater) Values( mod = i.Module.Child(scope.Path[1:]) } for _, v := range mod.Config().Variables { - for k, val := range v.DefaultsMap() { - result[k] = ast.Variable{ - Value: val, - Type: ast.TypeString, - } + // Set default variables + if v.Default == nil { + continue } + + n := fmt.Sprintf("var.%s", v.Name) + variable, err := hil.InterfaceToVariable(v.Default) + if err != nil { + return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default) + } + + result[n] = variable } } @@ -110,6 +117,13 @@ func (i *Interpolater) valueCountVar( } } +func unknownVariable() ast.Variable { + return ast.Variable{ + Type: ast.TypeString, + Value: config.UnknownVariableValue, + } +} + func (i *Interpolater) valueModuleVar( scope *InterpolationScope, n string, @@ -136,7 +150,6 @@ func (i *Interpolater) valueModuleVar( defer i.StateLock.RUnlock() // Get the module where we're looking for the value - var value string mod := i.State.ModuleByPath(path) if mod == nil { // If the module doesn't exist, then we can return an empty string. @@ -145,21 +158,22 @@ func (i *Interpolater) valueModuleVar( // modules reference other modules, and graph ordering should // ensure that the module is in the state, so if we reach this // point otherwise it really is a panic. - value = config.UnknownVariableValue + result[n] = unknownVariable() } else { // Get the value from the outputs - var ok bool - value, ok = mod.Outputs[v.Field] - if !ok { + if value, ok := mod.Outputs[v.Field]; ok { + output, err := hil.InterfaceToVariable(value) + if err != nil { + return err + } + result[n] = output + } else { // Same reasons as the comment above. - value = config.UnknownVariableValue + result[n] = unknownVariable() + } } - result[n] = ast.Variable{ - Value: value, - Type: ast.TypeString, - } return nil } @@ -216,21 +230,26 @@ func (i *Interpolater) valueResourceVar( return nil } - var attr string - var err error if v.Multi && v.Index == -1 { - attr, err = i.computeResourceMultiVariable(scope, v) + variable, err := i.computeResourceMultiVariable(scope, v) + if err != nil { + return err + } + if variable == nil { + return fmt.Errorf("no error reported by variable %q is nil", v.Name) + } + result[n] = *variable } else { - attr, err = i.computeResourceVariable(scope, v) - } - if err != nil { - return err + variable, err := i.computeResourceVariable(scope, v) + if err != nil { + return err + } + if variable == nil { + return fmt.Errorf("no error reported by variable %q is nil", v.Name) + } + result[n] = *variable } - result[n] = ast.Variable{ - Value: attr, - Type: ast.TypeString, - } return nil } @@ -274,33 +293,44 @@ func (i *Interpolater) valueUserVar( n string, v *config.UserVariable, result map[string]ast.Variable) error { - i.VariablesLock.Lock() - defer i.VariablesLock.Unlock() - val, ok := i.Variables[v.Name] + i.VariableValuesLock.Lock() + defer i.VariableValuesLock.Unlock() + val, ok := i.VariableValues[v.Name] if ok { - result[n] = ast.Variable{ - Value: val, - Type: ast.TypeString, + varValue, err := hil.InterfaceToVariable(val) + if err != nil { + return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s", + v.Name, val, err) } + result[n] = varValue return nil } if _, ok := result[n]; !ok && i.Operation == walkValidate { - result[n] = ast.Variable{ - Value: config.UnknownVariableValue, - Type: ast.TypeString, - } + result[n] = unknownVariable() return nil } // Look up if we have any variables with this prefix because // those are 
map overrides. Include those. - for k, val := range i.Variables { + for k, val := range i.VariableValues { if strings.HasPrefix(k, v.Name+".") { - result["var."+k] = ast.Variable{ - Value: val, - Type: ast.TypeString, + keyComponents := strings.Split(k, ".") + overrideKey := keyComponents[len(keyComponents)-1] + + mapInterface, ok := result["var."+v.Name] + if !ok { + return fmt.Errorf("override for non-existent variable: %s", v.Name) } + + mapVariable := mapInterface.Value.(map[string]ast.Variable) + + varValue, err := hil.InterfaceToVariable(val) + if err != nil { + return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s", + v.Name, val, err) + } + mapVariable[overrideKey] = varValue } } @@ -309,7 +339,7 @@ func (i *Interpolater) valueUserVar( func (i *Interpolater) computeResourceVariable( scope *InterpolationScope, - v *config.ResourceVariable) (string, error) { + v *config.ResourceVariable) (*ast.Variable, error) { id := v.ResourceId() if v.Multi { id = fmt.Sprintf("%s.%d", id, v.Index) @@ -318,16 +348,18 @@ func (i *Interpolater) computeResourceVariable( i.StateLock.RLock() defer i.StateLock.RUnlock() + unknownVariable := unknownVariable() + // Get the information about this resource variable, and verify // that it exists and such. module, _, err := i.resourceVariableInfo(scope, v) if err != nil { - return "", err + return nil, err } // If we have no module in the state yet or count, return empty if module == nil || len(module.Resources) == 0 { - return "", nil + return nil, nil } // Get the resource out from the state. We know the state exists @@ -349,12 +381,13 @@ func (i *Interpolater) computeResourceVariable( } if attr, ok := r.Primary.Attributes[v.Field]; ok { - return attr, nil + return &ast.Variable{Type: ast.TypeString, Value: attr}, nil } - // computed list attribute + // computed list or map attribute if _, ok := r.Primary.Attributes[v.Field+".#"]; ok { - return i.interpolateListAttribute(v.Field, r.Primary.Attributes) + variable, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes) + return &variable, err } // At apply time, we can't do the "maybe has it" check below @@ -377,13 +410,13 @@ func (i *Interpolater) computeResourceVariable( // Lists and sets make this key := fmt.Sprintf("%s.#", strings.Join(parts[:i], ".")) if attr, ok := r.Primary.Attributes[key]; ok { - return attr, nil + return &ast.Variable{Type: ast.TypeString, Value: attr}, nil } // Maps make this key = fmt.Sprintf("%s", strings.Join(parts[:i], ".")) if attr, ok := r.Primary.Attributes[key]; ok { - return attr, nil + return &ast.Variable{Type: ast.TypeString, Value: attr}, nil } } } @@ -393,7 +426,7 @@ MISSING: // semantic level. If we reached this point and don't have variables, // just return the computed value. if scope == nil && scope.Resource == nil { - return config.UnknownVariableValue, nil + return &unknownVariable, nil } // If the operation is refresh, it isn't an error for a value to @@ -407,10 +440,10 @@ MISSING: // For an input walk, computed values are okay to return because we're only // looking for missing variables to prompt the user for. 
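Throughout these interpolater hunks, native Go values (variable defaults, module outputs, user-supplied values, and map overrides) are wrapped into ast.Variable via hil.InterfaceToVariable instead of being forced through strings. The sketch below is only an illustration of what the new code expects from that conversion, inferred from how the results are consumed in this diff (list values as []ast.Variable, map values as map[string]ast.Variable):

    package main

    import (
        "fmt"

        "github.com/hashicorp/hil"
        "github.com/hashicorp/hil/ast"
    )

    func main() {
        // A plain string stays a string variable.
        s, err := hil.InterfaceToVariable("ami-123456")
        fmt.Println(s.Type == ast.TypeString, err)

        // A slice becomes a list variable whose Value is []ast.Variable.
        l, _ := hil.InterfaceToVariable([]interface{}{"ns-0.awsdns-00.com.", "ns-512.awsdns-00.net."})
        fmt.Println(l.Type == ast.TypeList, len(l.Value.([]ast.Variable)))

        // A map becomes a map variable whose Value is map[string]ast.Variable,
        // which is what the map-override merge in valueUserVar relies on.
        m, _ := hil.InterfaceToVariable(map[string]interface{}{"us-east-1": "ami-123456"})
        fmt.Println(m.Type == ast.TypeMap)
    }

The same conversion is what lets an override such as amis.us-east-1 be merged into the already-converted map variable rather than stored as a separate flat string key.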
if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput { - return config.UnknownVariableValue, nil + return &unknownVariable, nil } - return "", fmt.Errorf( + return nil, fmt.Errorf( "Resource '%s' does not have attribute '%s' "+ "for variable '%s'", id, @@ -420,21 +453,23 @@ MISSING: func (i *Interpolater) computeResourceMultiVariable( scope *InterpolationScope, - v *config.ResourceVariable) (string, error) { + v *config.ResourceVariable) (*ast.Variable, error) { i.StateLock.RLock() defer i.StateLock.RUnlock() + unknownVariable := unknownVariable() + // Get the information about this resource variable, and verify // that it exists and such. module, cr, err := i.resourceVariableInfo(scope, v) if err != nil { - return "", err + return nil, err } // Get the count so we know how many to iterate over count, err := cr.Count() if err != nil { - return "", fmt.Errorf( + return nil, fmt.Errorf( "Error reading %s count: %s", v.ResourceId(), err) @@ -442,7 +477,7 @@ func (i *Interpolater) computeResourceMultiVariable( // If we have no module in the state yet or count, return empty if module == nil || len(module.Resources) == 0 || count == 0 { - return "", nil + return &ast.Variable{Type: ast.TypeString, Value: ""}, nil } var values []string @@ -464,32 +499,37 @@ func (i *Interpolater) computeResourceMultiVariable( continue } - attr, ok := r.Primary.Attributes[v.Field] - if !ok { - // computed list attribute - _, ok := r.Primary.Attributes[v.Field+".#"] - if !ok { - continue + if singleAttr, ok := r.Primary.Attributes[v.Field]; ok { + if singleAttr == config.UnknownVariableValue { + return &unknownVariable, nil } - attr, err = i.interpolateListAttribute(v.Field, r.Primary.Attributes) - if err != nil { - return "", err - } - } - if config.IsStringList(attr) { - for _, s := range config.StringList(attr).Slice() { - values = append(values, s) - } + values = append(values, singleAttr) continue } - // If any value is unknown, the whole thing is unknown - if attr == config.UnknownVariableValue { - return config.UnknownVariableValue, nil + // computed list attribute + _, ok = r.Primary.Attributes[v.Field+".#"] + if !ok { + continue + } + multiAttr, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes) + if err != nil { + return nil, err } - values = append(values, attr) + if multiAttr == unknownVariable { + return &ast.Variable{Type: ast.TypeString, Value: ""}, nil + } + + for _, element := range multiAttr.Value.([]ast.Variable) { + strVal := element.Value.(string) + if strVal == config.UnknownVariableValue { + return &unknownVariable, nil + } + + values = append(values, strVal) + } } if len(values) == 0 { @@ -504,10 +544,10 @@ func (i *Interpolater) computeResourceMultiVariable( // For an input walk, computed values are okay to return because we're only // looking for missing variables to prompt the user for. 
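The rule implemented in this hunk (and completed in the next one) is that a single unknown element poisons the whole multi-count reference: if any instance attribute is still config.UnknownVariableValue, the caller gets the unknown sentinel back rather than a partially-known list. A standalone sketch of just that check, with the package and import added for completeness:

    package terraformsketch

    import "github.com/hashicorp/terraform/config"

    // anyUnknown reports whether any collected attribute value is Terraform's
    // "not yet computed" sentinel; computeResourceMultiVariable returns an
    // unknown variable in that case instead of a partially-known list.
    func anyUnknown(values []string) bool {
        for _, v := range values {
            if v == config.UnknownVariableValue {
                return true
            }
        }
        return false
    }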
if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput { - return config.UnknownVariableValue, nil + return &unknownVariable, nil } - return "", fmt.Errorf( + return nil, fmt.Errorf( "Resource '%s' does not have attribute '%s' "+ "for variable '%s'", v.ResourceId(), @@ -515,15 +555,16 @@ func (i *Interpolater) computeResourceMultiVariable( v.FullKey()) } - return config.NewStringList(values).String(), nil + variable, err := hil.InterfaceToVariable(values) + return &variable, err } -func (i *Interpolater) interpolateListAttribute( +func (i *Interpolater) interpolateComplexTypeAttribute( resourceID string, - attributes map[string]string) (string, error) { + attributes map[string]string) (ast.Variable, error) { attr := attributes[resourceID+".#"] - log.Printf("[DEBUG] Interpolating computed list attribute %s (%s)", + log.Printf("[DEBUG] Interpolating computed complex type attribute %s (%s)", resourceID, attr) // In Terraform's internal dotted representation of list-like attributes, the @@ -531,21 +572,40 @@ func (i *Interpolater) interpolateListAttribute( // unknown". We must honor that meaning here so computed references can be // treated properly during the plan phase. if attr == config.UnknownVariableValue { - return attr, nil + return unknownVariable(), nil } - // Otherwise we gather the values from the list-like attribute and return - // them. - var members []string - numberedListMember := regexp.MustCompile("^" + resourceID + "\\.[0-9]+$") - for id, value := range attributes { - if numberedListMember.MatchString(id) { - members = append(members, value) + // At this stage we don't know whether the item is a list or a map, so we + // examine the keys to see whether they are all numeric. 
+ var numericKeys []string + var allKeys []string + numberedListKey := regexp.MustCompile("^" + resourceID + "\\.[0-9]+$") + otherListKey := regexp.MustCompile("^" + resourceID + "\\.([^#]+)$") + for id, _ := range attributes { + if numberedListKey.MatchString(id) { + numericKeys = append(numericKeys, id) + } + if submatches := otherListKey.FindAllStringSubmatch(id, -1); len(submatches) > 0 { + allKeys = append(allKeys, submatches[0][1]) } } - sort.Strings(members) - return config.NewStringList(members).String(), nil + if len(numericKeys) == len(allKeys) { + // This is a list + var members []string + for _, key := range numericKeys { + members = append(members, attributes[key]) + } + sort.Strings(members) + return hil.InterfaceToVariable(members) + } else { + // This is a map + members := make(map[string]interface{}) + for _, key := range allKeys { + members[key] = attributes[resourceID+"."+key] + } + return hil.InterfaceToVariable(members) + } } func (i *Interpolater) resourceVariableInfo( diff --git a/terraform/interpolate_test.go b/terraform/interpolate_test.go index 31d066ba9f..e3777ae4a3 100644 --- a/terraform/interpolate_test.go +++ b/terraform/interpolate_test.go @@ -7,6 +7,7 @@ import ( "sync" "testing" + "github.com/hashicorp/hil" "github.com/hashicorp/hil/ast" "github.com/hashicorp/terraform/config" ) @@ -67,7 +68,7 @@ func TestInterpolater_moduleVariable(t *testing.T) { }, &ModuleState{ Path: []string{RootModuleName, "child"}, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "foo": "bar", }, }, @@ -210,6 +211,11 @@ func TestInterpolater_resourceVariableMulti(t *testing.T) { }) } +func interfaceToVariableSwallowError(input interface{}) ast.Variable { + variable, _ := hil.InterfaceToVariable(input) + return variable +} + func TestInterpolator_resourceMultiAttributes(t *testing.T) { lock := new(sync.RWMutex) state := &State{ @@ -251,31 +257,24 @@ func TestInterpolator_resourceMultiAttributes(t *testing.T) { Path: rootModulePath, } - name_servers := []string{ + name_servers := []interface{}{ "ns-1334.awsdns-38.org", "ns-1680.awsdns-18.co.uk", "ns-498.awsdns-62.com", "ns-601.awsdns-11.net", } - expectedNameServers := config.NewStringList(name_servers).String() // More than 1 element - testInterpolate(t, i, scope, "aws_route53_zone.yada.name_servers", ast.Variable{ - Value: expectedNameServers, - Type: ast.TypeString, - }) + testInterpolate(t, i, scope, "aws_route53_zone.yada.name_servers", + interfaceToVariableSwallowError(name_servers)) // Exactly 1 element - testInterpolate(t, i, scope, "aws_route53_zone.yada.listeners", ast.Variable{ - Value: config.NewStringList([]string{"red"}).String(), - Type: ast.TypeString, - }) + testInterpolate(t, i, scope, "aws_route53_zone.yada.listeners", + interfaceToVariableSwallowError([]interface{}{"red"})) // Zero elements - testInterpolate(t, i, scope, "aws_route53_zone.yada.nothing", ast.Variable{ - Value: config.NewStringList([]string{}).String(), - Type: ast.TypeString, - }) + testInterpolate(t, i, scope, "aws_route53_zone.yada.nothing", + interfaceToVariableSwallowError([]interface{}{})) // Maps still need to work testInterpolate(t, i, scope, "aws_route53_zone.yada.tags.Name", ast.Variable{ @@ -290,7 +289,7 @@ func TestInterpolator_resourceMultiAttributesWithResourceCount(t *testing.T) { Path: rootModulePath, } - name_servers := []string{ + name_servers := []interface{}{ "ns-1334.awsdns-38.org", "ns-1680.awsdns-18.co.uk", "ns-498.awsdns-62.com", @@ -302,50 +301,38 @@ func 
TestInterpolator_resourceMultiAttributesWithResourceCount(t *testing.T) { } // More than 1 element - expectedNameServers := config.NewStringList(name_servers[0:4]).String() - testInterpolate(t, i, scope, "aws_route53_zone.terra.0.name_servers", ast.Variable{ - Value: expectedNameServers, - Type: ast.TypeString, - }) + testInterpolate(t, i, scope, "aws_route53_zone.terra.0.name_servers", + interfaceToVariableSwallowError(name_servers[0:4])) + // More than 1 element in both - expectedNameServers = config.NewStringList(name_servers).String() - testInterpolate(t, i, scope, "aws_route53_zone.terra.*.name_servers", ast.Variable{ - Value: expectedNameServers, - Type: ast.TypeString, - }) + testInterpolate(t, i, scope, "aws_route53_zone.terra.*.name_servers", + interfaceToVariableSwallowError(name_servers)) // Exactly 1 element - testInterpolate(t, i, scope, "aws_route53_zone.terra.0.listeners", ast.Variable{ - Value: config.NewStringList([]string{"red"}).String(), - Type: ast.TypeString, - }) + testInterpolate(t, i, scope, "aws_route53_zone.terra.0.listeners", + interfaceToVariableSwallowError([]interface{}{"red"})) + // Exactly 1 element in both - testInterpolate(t, i, scope, "aws_route53_zone.terra.*.listeners", ast.Variable{ - Value: config.NewStringList([]string{"red", "blue"}).String(), - Type: ast.TypeString, - }) + testInterpolate(t, i, scope, "aws_route53_zone.terra.*.listeners", + interfaceToVariableSwallowError([]interface{}{"red", "blue"})) // Zero elements - testInterpolate(t, i, scope, "aws_route53_zone.terra.0.nothing", ast.Variable{ - Value: config.NewStringList([]string{}).String(), - Type: ast.TypeString, - }) + testInterpolate(t, i, scope, "aws_route53_zone.terra.0.nothing", + interfaceToVariableSwallowError([]interface{}{})) + // Zero + 1 element - testInterpolate(t, i, scope, "aws_route53_zone.terra.*.special", ast.Variable{ - Value: config.NewStringList([]string{"extra"}).String(), - Type: ast.TypeString, - }) + testInterpolate(t, i, scope, "aws_route53_zone.terra.*.special", + interfaceToVariableSwallowError([]interface{}{"extra"})) // Maps still need to work testInterpolate(t, i, scope, "aws_route53_zone.terra.0.tags.Name", ast.Variable{ Value: "reindeer", Type: ast.TypeString, }) + // Maps still need to work in both - testInterpolate(t, i, scope, "aws_route53_zone.terra.*.tags.Name", ast.Variable{ - Value: config.NewStringList([]string{"reindeer", "white-hart"}).String(), - Type: ast.TypeString, - }) + testInterpolate(t, i, scope, "aws_route53_zone.terra.*.tags.Name", + interfaceToVariableSwallowError([]interface{}{"reindeer", "white-hart"})) } func TestInterpolator_resourceMultiAttributesComputed(t *testing.T) { diff --git a/terraform/plan.go b/terraform/plan.go index b15ea5c594..b2ff008ee5 100644 --- a/terraform/plan.go +++ b/terraform/plan.go @@ -34,7 +34,7 @@ type Plan struct { // // The following fields in opts are overridden by the plan: Config, // Diff, State, Variables. 
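The plan.go hunk below changes Plan.Context to return an error alongside the context, so callers can no longer use the result directly. The reason it can now fail is not shown in this hunk; the call-site shape simply becomes the following (contextFromPlan is an invented name for illustration, while Plan, ContextOpts and Context are real types):

    package example

    import (
        "fmt"

        "github.com/hashicorp/terraform/terraform"
    )

    // contextFromPlan is a hypothetical caller illustrating the new
    // error-returning signature of Plan.Context.
    func contextFromPlan(p *terraform.Plan, opts *terraform.ContextOpts) (*terraform.Context, error) {
        ctx, err := p.Context(opts)
        if err != nil {
            return nil, fmt.Errorf("error creating context from plan: %s", err)
        }
        return ctx, nil
    }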
-func (p *Plan) Context(opts *ContextOpts) *Context { +func (p *Plan) Context(opts *ContextOpts) (*Context, error) { opts.Diff = p.Diff opts.Module = p.Module opts.State = p.State diff --git a/terraform/resource_address.go b/terraform/resource_address.go index d87f645ae6..d6bb0522e9 100644 --- a/terraform/resource_address.go +++ b/terraform/resource_address.go @@ -18,9 +18,10 @@ type ResourceAddress struct { // Addresses a specific resource that occurs in a list Index int - InstanceType InstanceType - Name string - Type string + InstanceType InstanceType + InstanceTypeSet bool + Name string + Type string } // Copy returns a copy of this ResourceAddress @@ -38,6 +39,35 @@ func (r *ResourceAddress) Copy() *ResourceAddress { return n } +// String outputs the address that parses into this address. +func (r *ResourceAddress) String() string { + var result []string + for _, p := range r.Path { + result = append(result, "module", p) + } + + if r.Type != "" { + result = append(result, r.Type) + } + + if r.Name != "" { + name := r.Name + switch r.InstanceType { + case TypeDeposed: + name += ".deposed" + case TypeTainted: + name += ".tainted" + } + + if r.Index >= 0 { + name += fmt.Sprintf("[%d]", r.Index) + } + result = append(result, name) + } + + return strings.Join(result, ".") +} + func ParseResourceAddress(s string) (*ResourceAddress, error) { matches, err := tokenizeResourceAddress(s) if err != nil { @@ -54,11 +84,12 @@ func ParseResourceAddress(s string) (*ResourceAddress, error) { path := ParseResourcePath(matches["path"]) return &ResourceAddress{ - Path: path, - Index: resourceIndex, - InstanceType: instanceType, - Name: matches["name"], - Type: matches["type"], + Path: path, + Index: resourceIndex, + InstanceType: instanceType, + InstanceTypeSet: matches["instance_type"] != "", + Name: matches["name"], + Type: matches["type"], }, nil } diff --git a/terraform/resource_address_test.go b/terraform/resource_address_test.go index 03e762a742..193c56c447 100644 --- a/terraform/resource_address_test.go +++ b/terraform/resource_address_test.go @@ -9,109 +9,124 @@ func TestParseResourceAddress(t *testing.T) { cases := map[string]struct { Input string Expected *ResourceAddress + Output string }{ "implicit primary, no specific index": { - Input: "aws_instance.foo", - Expected: &ResourceAddress{ + "aws_instance.foo", + &ResourceAddress{ Type: "aws_instance", Name: "foo", InstanceType: TypePrimary, Index: -1, }, + "", }, "implicit primary, explicit index": { - Input: "aws_instance.foo[2]", - Expected: &ResourceAddress{ + "aws_instance.foo[2]", + &ResourceAddress{ Type: "aws_instance", Name: "foo", InstanceType: TypePrimary, Index: 2, }, + "", }, "implicit primary, explicit index over ten": { - Input: "aws_instance.foo[12]", - Expected: &ResourceAddress{ + "aws_instance.foo[12]", + &ResourceAddress{ Type: "aws_instance", Name: "foo", InstanceType: TypePrimary, Index: 12, }, + "", }, "explicit primary, explicit index": { - Input: "aws_instance.foo.primary[2]", - Expected: &ResourceAddress{ - Type: "aws_instance", - Name: "foo", - InstanceType: TypePrimary, - Index: 2, + "aws_instance.foo.primary[2]", + &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + InstanceTypeSet: true, + Index: 2, }, + "aws_instance.foo[2]", }, "tainted": { - Input: "aws_instance.foo.tainted", - Expected: &ResourceAddress{ - Type: "aws_instance", - Name: "foo", - InstanceType: TypeTainted, - Index: -1, + "aws_instance.foo.tainted", + &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + 
InstanceType: TypeTainted, + InstanceTypeSet: true, + Index: -1, }, + "", }, "deposed": { - Input: "aws_instance.foo.deposed", - Expected: &ResourceAddress{ - Type: "aws_instance", - Name: "foo", - InstanceType: TypeDeposed, - Index: -1, + "aws_instance.foo.deposed", + &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypeDeposed, + InstanceTypeSet: true, + Index: -1, }, + "", }, "with a hyphen": { - Input: "aws_instance.foo-bar", - Expected: &ResourceAddress{ + "aws_instance.foo-bar", + &ResourceAddress{ Type: "aws_instance", Name: "foo-bar", InstanceType: TypePrimary, Index: -1, }, + "", }, "in a module": { - Input: "module.child.aws_instance.foo", - Expected: &ResourceAddress{ + "module.child.aws_instance.foo", + &ResourceAddress{ Path: []string{"child"}, Type: "aws_instance", Name: "foo", InstanceType: TypePrimary, Index: -1, }, + "", }, "nested modules": { - Input: "module.a.module.b.module.forever.aws_instance.foo", - Expected: &ResourceAddress{ + "module.a.module.b.module.forever.aws_instance.foo", + &ResourceAddress{ Path: []string{"a", "b", "forever"}, Type: "aws_instance", Name: "foo", InstanceType: TypePrimary, Index: -1, }, + "", }, "just a module": { - Input: "module.a", - Expected: &ResourceAddress{ + "module.a", + &ResourceAddress{ Path: []string{"a"}, Type: "", Name: "", InstanceType: TypePrimary, Index: -1, }, + "", }, "just a nested module": { - Input: "module.a.module.b", - Expected: &ResourceAddress{ + "module.a.module.b", + &ResourceAddress{ Path: []string{"a", "b"}, Type: "", Name: "", InstanceType: TypePrimary, Index: -1, }, + "", }, } @@ -124,6 +139,14 @@ func TestParseResourceAddress(t *testing.T) { if !reflect.DeepEqual(out, tc.Expected) { t.Fatalf("bad: %q\n\nexpected:\n%#v\n\ngot:\n%#v", tn, tc.Expected, out) } + + expected := tc.Input + if tc.Output != "" { + expected = tc.Output + } + if out.String() != expected { + t.Fatalf("bad: %q\n\nexpected: %s\n\ngot: %s", tn, expected, out) + } } } diff --git a/terraform/state.go b/terraform/state.go index d63313278e..fd853adf13 100644 --- a/terraform/state.go +++ b/terraform/state.go @@ -12,12 +12,13 @@ import ( "strconv" "strings" + "github.com/hashicorp/go-version" "github.com/hashicorp/terraform/config" ) const ( // StateVersion is the current version for our state file - StateVersion = 1 + StateVersion = 2 ) // rootModulePath is the path of the root module @@ -30,6 +31,9 @@ type State struct { // Version is the protocol version. Currently only "1". Version int `json:"version"` + // TFVersion is the version of Terraform that wrote this state. + TFVersion string `json:"terraform_version,omitempty"` + // Serial is incremented on any operation that modifies // the State file. It is used to detect potentially conflicting // updates. @@ -198,6 +202,122 @@ func (s *State) IsRemote() bool { return true } +// Remove removes the item in the state at the given address, returning +// any errors that may have occurred. +// +// If the address references a module state or resource, it will delete +// all children as well. To check what will be deleted, use a StateFilter +// first. +func (s *State) Remove(addr ...string) error { + // Filter out what we need to delete + filter := &StateFilter{State: s} + results, err := filter.Filter(addr...) + if err != nil { + return err + } + + // If we have no results, just exit early, we're not going to do anything. 
+ // While what happens below is fairly fast, this is an important early + // exit since the prune below might modify the state more and we don't + // want to modify the state if we don't have to. + if len(results) == 0 { + return nil + } + + // Go through each result and grab what we need + removed := make(map[interface{}]struct{}) + for _, r := range results { + // Convert the path to our own type + path := append([]string{"root"}, r.Path...) + + // If we removed this already, then ignore + if _, ok := removed[r.Value]; ok { + continue + } + + // If we removed the parent already, then ignore + if r.Parent != nil { + if _, ok := removed[r.Parent.Value]; ok { + continue + } + } + + // Add this to the removed list + removed[r.Value] = struct{}{} + + switch v := r.Value.(type) { + case *ModuleState: + s.removeModule(path, v) + case *ResourceState: + s.removeResource(path, v) + case *InstanceState: + s.removeInstance(path, r.Parent.Value.(*ResourceState), v) + default: + return fmt.Errorf("unknown type to delete: %T", r.Value) + } + } + + // Prune since the removal functions often do the bare minimum to + // remove a thing and may leave around dangling empty modules, resources, + // etc. Prune will clean that all up. + s.prune() + + return nil +} + +func (s *State) removeModule(path []string, v *ModuleState) { + for i, m := range s.Modules { + if m == v { + s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil + return + } + } +} + +func (s *State) removeResource(path []string, v *ResourceState) { + // Get the module this resource lives in. If it doesn't exist, we're done. + mod := s.ModuleByPath(path) + if mod == nil { + return + } + + // Find this resource. This is a O(N) lookup when if we had the key + // it could be O(1) but even with thousands of resources this shouldn't + // matter right now. We can easily up performance here when the time comes. + for k, r := range mod.Resources { + if r == v { + // Found it + delete(mod.Resources, k) + return + } + } +} + +func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) { + // Go through the resource and find the instance that matches this + // (if any) and remove it. + + // Check primary + if r.Primary == v { + r.Primary = nil + return + } + + // Check lists + lists := [][]*InstanceState{r.Tainted, r.Deposed} + for _, is := range lists { + for i, instance := range is { + if instance == v { + // Found it, remove it + is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil + + // Done + return + } + } + } +} + // RootModule returns the ModuleState for the root module func (s *State) RootModule() *ModuleState { root := s.ModuleByPath(rootModulePath) @@ -246,9 +366,10 @@ func (s *State) DeepCopy() *State { return nil } n := &State{ - Version: s.Version, - Serial: s.Serial, - Modules: make([]*ModuleState, 0, len(s.Modules)), + Version: s.Version, + TFVersion: s.TFVersion, + Serial: s.Serial, + Modules: make([]*ModuleState, 0, len(s.Modules)), } for _, mod := range s.Modules { n.Modules = append(n.Modules, mod.deepcopy()) @@ -271,7 +392,7 @@ func (s *State) IncrementSerialMaybe(other *State) { if s.Serial > other.Serial { return } - if !s.Equal(other) { + if other.TFVersion != s.TFVersion || !s.Equal(other) { if other.Serial > s.Serial { s.Serial = other.Serial } @@ -280,6 +401,18 @@ func (s *State) IncrementSerialMaybe(other *State) { } } +// FromFutureTerraform checks if this state was written by a Terraform +// version from the future. 
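The comparison behind FromFutureTerraform is delegated to hashicorp/go-version, with the package-level SemVersion standing in for the running Terraform's parsed version. In isolation the check behaves like the sketch below; both version strings here are example values, not anything taken from this diff:

    package example

    import (
        "fmt"

        "github.com/hashicorp/go-version"
    )

    func futureStateExample() {
        running := version.Must(version.NewVersion("0.7.0"))    // example: the running Terraform
        written := version.Must(version.NewVersion("999.15.1")) // example: TFVersion recorded in the state
        fmt.Println(running.LessThan(written))                  // true, so the state is "from the future"
    }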
+func (s *State) FromFutureTerraform() bool { + // No TF version means it is certainly from the past + if s.TFVersion == "" { + return false + } + + v := version.Must(version.NewVersion(s.TFVersion)) + return SemVersion.LessThan(v) +} + func (s *State) init() { if s.Version == 0 { s.Version = StateVersion @@ -407,7 +540,7 @@ type ModuleState struct { // Outputs declared by the module and maintained for each module // even though only the root module technically needs to be kept. // This allows operators to inspect values at the boundaries. - Outputs map[string]string `json:"outputs"` + Outputs map[string]interface{} `json:"outputs"` // Resources is a mapping of the logically named resource to // the state of the resource. Each resource may actually have @@ -442,7 +575,7 @@ func (m *ModuleState) Equal(other *ModuleState) bool { return false } for k, v := range m.Outputs { - if other.Outputs[k] != v { + if !reflect.DeepEqual(other.Outputs[k], v) { return false } } @@ -532,7 +665,7 @@ func (m *ModuleState) View(id string) *ModuleState { func (m *ModuleState) init() { if m.Outputs == nil { - m.Outputs = make(map[string]string) + m.Outputs = make(map[string]interface{}) } if m.Resources == nil { m.Resources = make(map[string]*ResourceState) @@ -545,7 +678,7 @@ func (m *ModuleState) deepcopy() *ModuleState { } n := &ModuleState{ Path: make([]string, len(m.Path)), - Outputs: make(map[string]string, len(m.Outputs)), + Outputs: make(map[string]interface{}, len(m.Outputs)), Resources: make(map[string]*ResourceState, len(m.Resources)), } copy(n.Path, m.Path) @@ -670,7 +803,27 @@ func (m *ModuleState) String() string { for _, k := range ks { v := m.Outputs[k] - buf.WriteString(fmt.Sprintf("%s = %s\n", k, v)) + switch vTyped := v.(type) { + case string: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case []interface{}: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case map[string]interface{}: + var mapKeys []string + for key, _ := range vTyped { + mapKeys = append(mapKeys, key) + } + sort.Strings(mapKeys) + + var mapBuf bytes.Buffer + mapBuf.WriteString("{") + for _, key := range mapKeys { + mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) + } + mapBuf.WriteString("}") + + buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) + } } } @@ -1191,21 +1344,22 @@ func (e *EphemeralState) deepcopy() *EphemeralState { func ReadState(src io.Reader) (*State, error) { buf := bufio.NewReader(src) - // Check if this is a V1 format + // Check if this is a V0 format start, err := buf.Peek(len(stateFormatMagic)) if err != nil { return nil, fmt.Errorf("Failed to check for magic bytes: %v", err) } if string(start) == stateFormatMagic { // Read the old state - old, err := ReadStateV1(buf) + old, err := ReadStateV0(buf) if err != nil { return nil, err } - return upgradeV1State(old) + return upgradeV0State(old) } - // Otherwise, must be V2 + // Otherwise, must be V2 or V3 - V2 reads as V3 however so we need take + // no special action here - new state will be written as V3. dec := json.NewDecoder(buf) state := &State{} if err := dec.Decode(state); err != nil { @@ -1219,6 +1373,19 @@ func ReadState(src io.Reader) (*State, error) { state.Version) } + // Make sure the version is semantic + if state.TFVersion != "" { + if _, err := version.NewVersion(state.TFVersion); err != nil { + return nil, fmt.Errorf( + "State contains invalid version: %s\n\n"+ + "Terraform validates the version format prior to writing it. 
This\n"+ + "means that this is invalid of the state becoming corrupted through\n"+ + "some external means. Please manually modify the Terraform version\n"+ + "field to be a proper semantic version.", + state.TFVersion) + } + } + // Sort it state.sort() @@ -1233,6 +1400,19 @@ func WriteState(d *State, dst io.Writer) error { // Ensure the version is set d.Version = StateVersion + // If the TFVersion is set, verify it. We used to just set the version + // here, but this isn't safe since it changes the MD5 sum on some remote + // state storage backends such as Atlas. We now leave it be if needed. + if d.TFVersion != "" { + if _, err := version.NewVersion(d.TFVersion); err != nil { + return fmt.Errorf( + "Error writing state, invalid version: %s\n\n"+ + "The Terraform version when writing the state must be a semantic\n"+ + "version.", + d.TFVersion) + } + } + // Encode the data in a human-friendly way data, err := json.MarshalIndent(d, "", " ") if err != nil { @@ -1250,9 +1430,9 @@ func WriteState(d *State, dst io.Writer) error { return nil } -// upgradeV1State is used to upgrade a V1 state representation +// upgradeV0State is used to upgrade a V0 state representation // into a proper State representation. -func upgradeV1State(old *StateV1) (*State, error) { +func upgradeV0State(old *StateV0) (*State, error) { s := &State{} s.init() @@ -1260,8 +1440,12 @@ func upgradeV1State(old *StateV1) (*State, error) { // directly into the root module. root := s.RootModule() - // Copy the outputs - root.Outputs = old.Outputs + // Copy the outputs, first converting them to map[string]interface{} + oldOutputs := make(map[string]interface{}, len(old.Outputs)) + for key, value := range old.Outputs { + oldOutputs[key] = value + } + root.Outputs = oldOutputs // Upgrade the resources for id, rs := range old.Resources { diff --git a/terraform/state_filter.go b/terraform/state_filter.go new file mode 100644 index 0000000000..43cca6bc96 --- /dev/null +++ b/terraform/state_filter.go @@ -0,0 +1,261 @@ +package terraform + +import ( + "fmt" + "sort" +) + +// StateFilter is responsible for filtering and searching a state. +// +// This is a separate struct from State rather than a method on State +// because StateFilter might create sidecar data structures to optimize +// filtering on the state. +// +// If you change the State, the filter created is invalid and either +// Reset should be called or a new one should be allocated. StateFilter +// will not watch State for changes and do this for you. If you filter after +// changing the State without calling Reset, the behavior is not defined. +type StateFilter struct { + State *State +} + +// Filter takes the addresses specified by fs and finds all the matches. +// The values of fs are resource addressing syntax that can be parsed by +// ParseResourceAddress. +func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { + // Parse all the addresses + as := make([]*ResourceAddress, len(fs)) + for i, v := range fs { + a, err := ParseResourceAddress(v) + if err != nil { + return nil, fmt.Errorf("Error parsing address '%s': %s", v, err) + } + + as[i] = a + } + + // If we werent given any filters, then we list all + if len(fs) == 0 { + as = append(as, &ResourceAddress{Index: -1}) + } + + // Filter each of the address. We keep track of this in a map to + // strip duplicates. 
+ resultSet := make(map[string]*StateFilterResult) + for _, a := range as { + for _, r := range f.filterSingle(a) { + resultSet[r.String()] = r + } + } + + // Make the result list + results := make([]*StateFilterResult, 0, len(resultSet)) + for _, v := range resultSet { + results = append(results, v) + } + + // Sort them and return + sort.Sort(StateFilterResultSlice(results)) + return results, nil +} + +func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { + // The slice to keep track of results + var results []*StateFilterResult + + // Go through modules first. + modules := make([]*ModuleState, 0, len(f.State.Modules)) + for _, m := range f.State.Modules { + if f.relevant(a, m) { + modules = append(modules, m) + + // Only add the module to the results if we haven't specified a type. + // We also ignore the root module. + if a.Type == "" && len(m.Path) > 1 { + results = append(results, &StateFilterResult{ + Path: m.Path[1:], + Address: (&ResourceAddress{Path: m.Path[1:]}).String(), + Value: m, + }) + } + } + } + + // With the modules set, go through all the resources within + // the modules to find relevant resources. + for _, m := range modules { + for n, r := range m.Resources { + if f.relevant(a, r) { + // The name in the state contains valuable information. Parse. + key, err := ParseResourceStateKey(n) + if err != nil { + // If we get an error parsing, then just ignore it + // out of the state. + continue + } + + if a.Index >= 0 && key.Index != a.Index { + // Index doesn't match + continue + } + + if a.Name != "" && a.Name != key.Name { + continue + } + + // Build the address for this resource + addr := &ResourceAddress{ + Path: m.Path[1:], + Name: key.Name, + Type: key.Type, + Index: key.Index, + } + + // Add the resource level result + resourceResult := &StateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Value: r, + } + if !a.InstanceTypeSet { + results = append(results, resourceResult) + } + + // Add the instances + if r.Primary != nil { + addr.InstanceType = TypePrimary + addr.InstanceTypeSet = true + results = append(results, &StateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Parent: resourceResult, + Value: r.Primary, + }) + } + + for _, instance := range r.Tainted { + if f.relevant(a, instance) { + addr.InstanceType = TypeTainted + addr.InstanceTypeSet = true + results = append(results, &StateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Parent: resourceResult, + Value: instance, + }) + } + } + + for _, instance := range r.Deposed { + if f.relevant(a, instance) { + addr.InstanceType = TypeDeposed + addr.InstanceTypeSet = true + results = append(results, &StateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Parent: resourceResult, + Value: instance, + }) + } + } + } + } + } + + return results +} + +// relevant checks for relevance of this address against the given value. +func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool { + switch v := raw.(type) { + case *ModuleState: + path := v.Path[1:] + + if len(addr.Path) > len(path) { + // Longer path in address means there is no way we match. + return false + } + + // Check for a prefix match + for i, p := range addr.Path { + if path[i] != p { + // Any mismatches don't match. + return false + } + } + + return true + case *ResourceState: + if addr.Type == "" { + // If we have no resource type, then we're interested in all! 
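Driving the filter matches the new tests further down: build a StateFilter over a loaded *State and pass it one or more resource-address strings (the same syntax ParseResourceAddress accepts, e.g. module.consul.aws_instance.consul-green[0]). State.Remove, added earlier in this diff, sits on top of the same mechanism. A usage sketch, assuming the state has already been read elsewhere; pruneExample and the chosen addresses are illustrative only:

    package example

    import (
        "fmt"

        "github.com/hashicorp/terraform/terraform"
    )

    // pruneExample assumes state was loaded elsewhere, e.g. with terraform.ReadState.
    func pruneExample(state *terraform.State) error {
        filter := &terraform.StateFilter{State: state}
        results, err := filter.Filter("module.bootstrap", "aws_key_pair.onprem.primary")
        if err != nil {
            return err
        }
        for _, r := range results {
            // r.Value is a *ModuleState, *ResourceState or *InstanceState;
            // r.Address is the canonical address string for the match.
            fmt.Printf("%s -> %T\n", r.Address, r.Value)
        }

        // Removing by address drives the same filter, then prunes any modules
        // or resources left empty.
        return state.Remove("module.foo")
    }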
+ return true + } + + // If the type doesn't match we fail immediately + if v.Type != addr.Type { + return false + } + + return true + default: + // If we don't know about it, let's just say no + return false + } +} + +// StateFilterResult is a single result from a filter operation. Filter +// can match multiple things within a state (module, resource, instance, etc.) +// and this unifies that. +type StateFilterResult struct { + // Module path of the result + Path []string + + // Address is the address that can be used to reference this exact result. + Address string + + // Parent, if non-nil, is a parent of this result. For instances, the + // parent would be a resource. For resources, the parent would be + // a module. For modules, this is currently nil. + Parent *StateFilterResult + + // Value is the actual value. This must be type switched on. It can be + // any data structures that `State` can hold: `ModuleState`, + // `ResourceState`, `InstanceState`. + Value interface{} +} + +func (r *StateFilterResult) String() string { + return fmt.Sprintf("%T: %s", r.Value, r.Address) +} + +func (r *StateFilterResult) sortedType() int { + switch r.Value.(type) { + case *ModuleState: + return 0 + case *ResourceState: + return 1 + case *InstanceState: + return 2 + default: + return 50 + } +} + +// StateFilterResultSlice is a slice of results that implements +// sort.Interface. The sorting goal is what is most appealing to +// human output. +type StateFilterResultSlice []*StateFilterResult + +func (s StateFilterResultSlice) Len() int { return len(s) } +func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s StateFilterResultSlice) Less(i, j int) bool { + a, b := s[i], s[j] + + // If the addresses are different it is just lexographic sorting + if a.Address != b.Address { + return a.Address < b.Address + } + + // Addresses are the same, which means it matters on the type + return a.sortedType() < b.sortedType() +} diff --git a/terraform/state_filter_test.go b/terraform/state_filter_test.go new file mode 100644 index 0000000000..f9187b4e9f --- /dev/null +++ b/terraform/state_filter_test.go @@ -0,0 +1,119 @@ +package terraform + +import ( + "os" + "path/filepath" + "reflect" + "testing" +) + +func TestStateFilterFilter(t *testing.T) { + cases := map[string]struct { + State string + Filters []string + Expected []string + }{ + "all": { + "small.tfstate", + []string{}, + []string{ + "*terraform.ResourceState: aws_key_pair.onprem", + "*terraform.InstanceState: aws_key_pair.onprem", + "*terraform.ModuleState: module.bootstrap", + "*terraform.ResourceState: module.bootstrap.aws_route53_record.oasis-consul-bootstrap-a", + "*terraform.InstanceState: module.bootstrap.aws_route53_record.oasis-consul-bootstrap-a", + "*terraform.ResourceState: module.bootstrap.aws_route53_record.oasis-consul-bootstrap-ns", + "*terraform.InstanceState: module.bootstrap.aws_route53_record.oasis-consul-bootstrap-ns", + "*terraform.ResourceState: module.bootstrap.aws_route53_zone.oasis-consul-bootstrap", + "*terraform.InstanceState: module.bootstrap.aws_route53_zone.oasis-consul-bootstrap", + }, + }, + + "single resource": { + "small.tfstate", + []string{"aws_key_pair.onprem"}, + []string{ + "*terraform.ResourceState: aws_key_pair.onprem", + "*terraform.InstanceState: aws_key_pair.onprem", + }, + }, + + "single instance": { + "small.tfstate", + []string{"aws_key_pair.onprem.primary"}, + []string{ + "*terraform.InstanceState: aws_key_pair.onprem", + }, + }, + + "module filter": { + "complete.tfstate", + 
[]string{"module.bootstrap"}, + []string{ + "*terraform.ModuleState: module.bootstrap", + "*terraform.ResourceState: module.bootstrap.aws_route53_record.oasis-consul-bootstrap-a", + "*terraform.InstanceState: module.bootstrap.aws_route53_record.oasis-consul-bootstrap-a", + "*terraform.ResourceState: module.bootstrap.aws_route53_record.oasis-consul-bootstrap-ns", + "*terraform.InstanceState: module.bootstrap.aws_route53_record.oasis-consul-bootstrap-ns", + "*terraform.ResourceState: module.bootstrap.aws_route53_zone.oasis-consul-bootstrap", + "*terraform.InstanceState: module.bootstrap.aws_route53_zone.oasis-consul-bootstrap", + }, + }, + + "resource in module": { + "complete.tfstate", + []string{"module.bootstrap.aws_route53_zone.oasis-consul-bootstrap"}, + []string{ + "*terraform.ResourceState: module.bootstrap.aws_route53_zone.oasis-consul-bootstrap", + "*terraform.InstanceState: module.bootstrap.aws_route53_zone.oasis-consul-bootstrap", + }, + }, + + "resource in module 2": { + "resource-in-module-2.tfstate", + []string{"module.foo.aws_instance.foo"}, + []string{}, + }, + + "single count index": { + "complete.tfstate", + []string{"module.consul.aws_instance.consul-green[0]"}, + []string{ + "*terraform.ResourceState: module.consul.aws_instance.consul-green[0]", + "*terraform.InstanceState: module.consul.aws_instance.consul-green[0]", + }, + }, + } + + for n, tc := range cases { + // Load our state + f, err := os.Open(filepath.Join("./test-fixtures", "state-filter", tc.State)) + if err != nil { + t.Fatalf("%q: err: %s", n, err) + } + + state, err := ReadState(f) + f.Close() + if err != nil { + t.Fatalf("%q: err: %s", n, err) + } + + // Create the filter + filter := &StateFilter{State: state} + + // Filter! + results, err := filter.Filter(tc.Filters...) 
+ if err != nil { + t.Fatalf("%q: err: %s", n, err) + } + + actual := make([]string, len(results)) + for i, result := range results { + actual[i] = result.String() + } + + if !reflect.DeepEqual(actual, tc.Expected) { + t.Fatalf("%q: expected, then actual\n\n%#v\n\n%#v", n, tc.Expected, actual) + } + } +} diff --git a/terraform/state_test.go b/terraform/state_test.go index f145652a43..a51b670b7d 100644 --- a/terraform/state_test.go +++ b/terraform/state_test.go @@ -76,6 +76,38 @@ func TestStateAddModule(t *testing.T) { } } +func TestStateOutputTypeRoundTrip(t *testing.T) { + state := &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: RootModulePath, + Outputs: map[string]interface{}{ + "string_output": "String Value", + "list_output": []interface{}{"List", "Value"}, + "map_output": map[string]interface{}{ + "key1": "Map", + "key2": "Value", + }, + }, + }, + }, + } + + buf := new(bytes.Buffer) + if err := WriteState(state, buf); err != nil { + t.Fatalf("err: %s", err) + } + + roundTripped, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(state, roundTripped) { + t.Fatalf("bad: %#v", roundTripped) + } +} + func TestStateModuleOrphans(t *testing.T) { state := &State{ Modules: []*ModuleState{ @@ -175,6 +207,35 @@ func TestStateModuleOrphans_deepNestedNilConfig(t *testing.T) { } } +func TestStateDeepCopy(t *testing.T) { + cases := []struct { + One, Two *State + F func(*State) interface{} + }{ + // Version + { + &State{Version: 5}, + &State{Version: 5}, + func(s *State) interface{} { return s.Version }, + }, + + // TFVersion + { + &State{TFVersion: "5"}, + &State{TFVersion: "5"}, + func(s *State) interface{} { return s.TFVersion }, + }, + } + + for i, tc := range cases { + actual := tc.F(tc.One.DeepCopy()) + expected := tc.F(tc.Two) + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, actual, expected) + } + } +} + func TestStateEqual(t *testing.T) { cases := []struct { Result bool @@ -348,6 +409,11 @@ func TestStateIncrementSerialMaybe(t *testing.T) { }, 5, }, + "S2 has a different TFVersion": { + &State{TFVersion: "0.1"}, + &State{TFVersion: "0.2"}, + 1, + }, } for name, tc := range cases { @@ -358,6 +424,277 @@ func TestStateIncrementSerialMaybe(t *testing.T) { } } +func TestStateRemove(t *testing.T) { + cases := map[string]struct { + Address string + One, Two *State + }{ + "simple resource": { + "test_instance.foo", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{}, + }, + }, + }, + }, + + "single instance": { + "test_instance.foo.primary", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{}, + }, + }, + }, + }, + + "single instance in multi-count": { + "test_instance.foo[0]", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo.0": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + 
ID: "foo", + }, + }, + + "test_instance.foo.1": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo.1": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + + "single resource, multi-count": { + "test_instance.foo", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo.0": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.foo.1": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{}, + }, + }, + }, + }, + + "full module": { + "module.foo", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + + &ModuleState{ + Path: []string{"root", "foo"}, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + + "module and children": { + "module.foo", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + + &ModuleState{ + Path: []string{"root", "foo"}, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + + &ModuleState{ + Path: []string{"root", "foo", "bar"}, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + } + + for k, tc := range cases { + if err := tc.One.Remove(tc.Address); err != nil { + t.Fatalf("bad: %s\n\n%s", k, err) + } + + if !tc.One.Equal(tc.Two) { + t.Fatalf("Bad: %s\n\n%s\n\n%s", k, tc.One.String(), tc.Two.String()) + } + } +} + func TestResourceStateEqual(t *testing.T) { cases := []struct { Result bool @@ -716,6 +1053,34 @@ func TestStateEmpty(t *testing.T) { } } +func TestStateFromFutureTerraform(t *testing.T) { + cases := []struct { + In string + 
Result bool + }{ + { + "", + false, + }, + { + "0.1", + false, + }, + { + "999.15.1", + true, + }, + } + + for _, tc := range cases { + state := &State{TFVersion: tc.In} + actual := state.FromFutureTerraform() + if actual != tc.Result { + t.Fatalf("%s: bad: %v", tc.In, actual) + } + } +} + func TestStateIsRemote(t *testing.T) { cases := []struct { In *State @@ -829,16 +1194,43 @@ func TestInstanceState_MergeDiff_nilDiff(t *testing.T) { } } +func TestReadUpgradeStateV1toV2(t *testing.T) { + // ReadState should transparently detect the old version but will upgrade + // it on Write. + actual, err := ReadState(strings.NewReader(testV1State)) + if err != nil { + t.Fatalf("err: %s", err) + } + + buf := new(bytes.Buffer) + if err := WriteState(actual, buf); err != nil { + t.Fatalf("err: %s", err) + } + + if actual.Version != 2 { + t.Fatalf("bad: State version not incremented; is %d", actual.Version) + } + + roundTripped, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(actual, roundTripped) { + t.Fatalf("bad: %#v", actual) + } +} + func TestReadUpgradeState(t *testing.T) { - state := &StateV1{ - Resources: map[string]*ResourceStateV1{ - "foo": &ResourceStateV1{ + state := &StateV0{ + Resources: map[string]*ResourceStateV0{ + "foo": &ResourceStateV0{ ID: "bar", }, }, } buf := new(bytes.Buffer) - if err := testWriteStateV1(state, buf); err != nil { + if err := testWriteStateV0(state, buf); err != nil { t.Fatalf("err: %s", err) } @@ -849,7 +1241,7 @@ func TestReadUpgradeState(t *testing.T) { t.Fatalf("err: %s", err) } - upgraded, err := upgradeV1State(state) + upgraded, err := upgradeV0State(state) if err != nil { t.Fatalf("err: %s", err) } @@ -935,20 +1327,111 @@ func TestReadStateNewVersion(t *testing.T) { } } -func TestUpgradeV1State(t *testing.T) { - old := &StateV1{ +func TestReadStateTFVersion(t *testing.T) { + type tfVersion struct { + TFVersion string `json:"terraform_version"` + } + + cases := []struct { + Written string + Read string + Err bool + }{ + { + "0.0.0", + "0.0.0", + false, + }, + { + "", + "", + false, + }, + { + "bad", + "", + true, + }, + } + + for _, tc := range cases { + buf, err := json.Marshal(&tfVersion{tc.Written}) + if err != nil { + t.Fatalf("err: %v", err) + } + + s, err := ReadState(bytes.NewReader(buf)) + if (err != nil) != tc.Err { + t.Fatalf("%s: err: %s", tc.Written, err) + } + if err != nil { + continue + } + + if s.TFVersion != tc.Read { + t.Fatalf("%s: bad: %s", tc.Written, s.TFVersion) + } + } +} + +func TestWriteStateTFVersion(t *testing.T) { + cases := []struct { + Write string + Read string + Err bool + }{ + { + "0.0.0", + "0.0.0", + false, + }, + { + "", + "", + false, + }, + { + "bad", + "", + true, + }, + } + + for _, tc := range cases { + var buf bytes.Buffer + err := WriteState(&State{TFVersion: tc.Write}, &buf) + if (err != nil) != tc.Err { + t.Fatalf("%s: err: %s", tc.Write, err) + } + if err != nil { + continue + } + + s, err := ReadState(&buf) + if err != nil { + t.Fatalf("%s: err: %s", tc.Write, err) + } + + if s.TFVersion != tc.Read { + t.Fatalf("%s: bad: %s", tc.Write, s.TFVersion) + } + } +} + +func TestUpgradeV0State(t *testing.T) { + old := &StateV0{ Outputs: map[string]string{ "ip": "127.0.0.1", }, - Resources: map[string]*ResourceStateV1{ - "foo": &ResourceStateV1{ + Resources: map[string]*ResourceStateV0{ + "foo": &ResourceStateV0{ Type: "test_resource", ID: "bar", Attributes: map[string]string{ "key": "val", }, }, - "bar": &ResourceStateV1{ + "bar": &ResourceStateV0{ Type: "test_resource", 
ID: "1234", Attributes: map[string]string{ @@ -960,7 +1443,7 @@ func TestUpgradeV1State(t *testing.T) { "bar": struct{}{}, }, } - state, err := upgradeV1State(old) + state, err := upgradeV0State(old) if err != nil { t.Fatalf("err: %v", err) } @@ -1062,3 +1545,34 @@ func TestParseResourceStateKey(t *testing.T) { } } } + +const testV1State = `{ + "version": 1, + "serial": 9, + "remote": { + "type": "http", + "config": { + "url": "http://my-cool-server.com/" + } + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": null, + "resources": { + "foo": { + "type": "", + "primary": { + "id": "bar" + } + } + }, + "depends_on": [ + "aws_instance.bar" + ] + } + ] +} +` diff --git a/terraform/state_v1.go b/terraform/state_v0.go similarity index 91% rename from terraform/state_v1.go rename to terraform/state_v0.go index 85ba939a7f..44f1664b8f 100644 --- a/terraform/state_v1.go +++ b/terraform/state_v0.go @@ -21,21 +21,21 @@ const ( stateFormatVersion byte = 1 ) -// StateV1 is used to represent the state of Terraform files before +// StateV0 is used to represent the state of Terraform files before // 0.3. It is automatically upgraded to a modern State representation // on start. -type StateV1 struct { +type StateV0 struct { Outputs map[string]string - Resources map[string]*ResourceStateV1 + Resources map[string]*ResourceStateV0 Tainted map[string]struct{} once sync.Once } -func (s *StateV1) init() { +func (s *StateV0) init() { s.once.Do(func() { if s.Resources == nil { - s.Resources = make(map[string]*ResourceStateV1) + s.Resources = make(map[string]*ResourceStateV0) } if s.Tainted == nil { @@ -44,8 +44,8 @@ func (s *StateV1) init() { }) } -func (s *StateV1) deepcopy() *StateV1 { - result := new(StateV1) +func (s *StateV0) deepcopy() *StateV0 { + result := new(StateV0) result.init() if s != nil { for k, v := range s.Resources { @@ -61,7 +61,7 @@ func (s *StateV1) deepcopy() *StateV1 { // prune is a helper that removes any empty IDs from the state // and cleans it up in general. -func (s *StateV1) prune() { +func (s *StateV0) prune() { for k, v := range s.Resources { if v.ID == "" { delete(s.Resources, k) @@ -72,7 +72,7 @@ func (s *StateV1) prune() { // Orphans returns a list of keys of resources that are in the State // but aren't present in the configuration itself. Hence, these keys // represent the state of resources that are orphans. -func (s *StateV1) Orphans(c *config.Config) []string { +func (s *StateV0) Orphans(c *config.Config) []string { keys := make(map[string]struct{}) for k, _ := range s.Resources { keys[k] = struct{}{} @@ -96,7 +96,7 @@ func (s *StateV1) Orphans(c *config.Config) []string { return result } -func (s *StateV1) String() string { +func (s *StateV0) String() string { if len(s.Resources) == 0 { return "" } @@ -175,7 +175,7 @@ func (s *StateV1) String() string { // // Extra is just extra data that a provider can return that we store // for later, but is not exposed in any way to the user. -type ResourceStateV1 struct { +type ResourceStateV0 struct { // This is filled in and managed by Terraform, and is the resource // type itself such as "mycloud_instance". If a resource provider sets // this value, it won't be persisted. @@ -228,8 +228,8 @@ type ResourceStateV1 struct { // If the diff attribute requires computing the value, and hence // won't be available until apply, the value is replaced with the // computeID. 
-func (s *ResourceStateV1) MergeDiff(d *InstanceDiff) *ResourceStateV1 { - var result ResourceStateV1 +func (s *ResourceStateV0) MergeDiff(d *InstanceDiff) *ResourceStateV0 { + var result ResourceStateV0 if s != nil { result = *s } @@ -258,7 +258,7 @@ func (s *ResourceStateV1) MergeDiff(d *InstanceDiff) *ResourceStateV1 { return &result } -func (s *ResourceStateV1) GoString() string { +func (s *ResourceStateV0) GoString() string { return fmt.Sprintf("*%#v", *s) } @@ -270,10 +270,10 @@ type ResourceDependency struct { ID string } -// ReadStateV1 reads a state structure out of a reader in the format that +// ReadStateV0 reads a state structure out of a reader in the format that // was written by WriteState. -func ReadStateV1(src io.Reader) (*StateV1, error) { - var result *StateV1 +func ReadStateV0(src io.Reader) (*StateV0, error) { + var result *StateV0 var err error n := 0 diff --git a/terraform/state_v1_test.go b/terraform/state_v0_test.go similarity index 86% rename from terraform/state_v1_test.go rename to terraform/state_v0_test.go index c959e6efe4..04f84545c3 100644 --- a/terraform/state_v1_test.go +++ b/terraform/state_v0_test.go @@ -12,10 +12,10 @@ import ( "github.com/mitchellh/hashstructure" ) -func TestReadWriteStateV1(t *testing.T) { - state := &StateV1{ - Resources: map[string]*ResourceStateV1{ - "foo": &ResourceStateV1{ +func TestReadWriteStateV0(t *testing.T) { + state := &StateV0{ + Resources: map[string]*ResourceStateV0{ + "foo": &ResourceStateV0{ ID: "bar", ConnInfo: map[string]string{ "type": "ssh", @@ -33,7 +33,7 @@ func TestReadWriteStateV1(t *testing.T) { } buf := new(bytes.Buffer) - if err := testWriteStateV1(state, buf); err != nil { + if err := testWriteStateV0(state, buf); err != nil { t.Fatalf("err: %s", err) } @@ -47,7 +47,7 @@ func TestReadWriteStateV1(t *testing.T) { t.Fatalf("structure changed during serialization!") } - actual, err := ReadStateV1(buf) + actual, err := ReadStateV0(buf) if err != nil { t.Fatalf("err: %s", err) } @@ -75,9 +75,9 @@ func (s *sensitiveState) init() { }) } -// testWriteStateV1 writes a state somewhere in a binary format. +// testWriteStateV0 writes a state somewhere in a binary format. 
// Only for testing now -func testWriteStateV1(d *StateV1, dst io.Writer) error { +func testWriteStateV0(d *StateV0, dst io.Writer) error { // Write the magic bytes so we can determine the file format later n, err := dst.Write([]byte(stateFormatMagic)) if err != nil { diff --git a/terraform/terraform_test.go b/terraform/terraform_test.go index 09fb2370c9..ad19d7b1ba 100644 --- a/terraform/terraform_test.go +++ b/terraform/terraform_test.go @@ -592,7 +592,7 @@ aws_instance.foo: Outputs: -foo_num = bar,bar,bar +foo_num = [bar,bar,bar] ` const testTerraformApplyOutputMultiStr = ` diff --git a/terraform/test-fixtures/apply-map-var-through-module/amodule/main.tf b/terraform/test-fixtures/apply-map-var-through-module/amodule/main.tf new file mode 100644 index 0000000000..133ac62fcf --- /dev/null +++ b/terraform/test-fixtures/apply-map-var-through-module/amodule/main.tf @@ -0,0 +1,9 @@ +variable "amis" { + type = "map" +} + +resource "null_resource" "noop" {} + +output "amis_out" { + value = "${var.amis}" +} diff --git a/terraform/test-fixtures/apply-map-var-through-module/main.tf b/terraform/test-fixtures/apply-map-var-through-module/main.tf new file mode 100644 index 0000000000..991a0ecf67 --- /dev/null +++ b/terraform/test-fixtures/apply-map-var-through-module/main.tf @@ -0,0 +1,19 @@ +variable "amis_in" { + type = "map" + default = { + "us-west-1" = "ami-123456" + "us-west-2" = "ami-456789" + "eu-west-1" = "ami-789012" + "eu-west-2" = "ami-989484" + } +} + +module "test" { + source = "./amodule" + + amis = "${var.amis_in}" +} + +output "amis_from_module" { + value = "${module.test.amis_out}" +} diff --git a/terraform/test-fixtures/apply-vars/main.tf b/terraform/test-fixtures/apply-vars/main.tf index 01ffb6a91d..7cd4b5316c 100644 --- a/terraform/test-fixtures/apply-vars/main.tf +++ b/terraform/test-fixtures/apply-vars/main.tf @@ -19,5 +19,5 @@ resource "aws_instance" "foo" { resource "aws_instance" "bar" { foo = "${var.foo}" bar = "${lookup(var.amis, var.foo)}" - baz = "${var.amis.us-east-1}" + baz = "${var.amis["us-east-1"]}" } diff --git a/terraform/test-fixtures/state-filter/complete.tfstate b/terraform/test-fixtures/state-filter/complete.tfstate new file mode 100644 index 0000000000..587243002b --- /dev/null +++ b/terraform/test-fixtures/state-filter/complete.tfstate @@ -0,0 +1,1311 @@ +{ + "version": 1, + "serial": 12, + "modules": [ + { + "path": [ + "root" + ], + "outputs": { + "public_az1_subnet_id": "subnet-d658bba0", + "region": "us-west-2", + "vpc_cidr": "10.201.0.0/16", + "vpc_id": "vpc-65814701" + }, + "resources": { + "aws_key_pair.onprem": { + "type": "aws_key_pair", + "primary": { + "id": "onprem", + "attributes": { + "id": "onprem", + "key_name": "onprem", + "public_key": "foo" + }, + "meta": { + "schema_version": "1" + } + } + } + } + }, + { + "path": [ + "root", + "bootstrap" + ], + "outputs": { + "consul_bootstrap_dns": "consul.bootstrap" + }, + "resources": { + "aws_route53_record.oasis-consul-bootstrap-a": { + "type": "aws_route53_record", + "depends_on": [ + "aws_route53_zone.oasis-consul-bootstrap" + ], + "primary": { + "id": "Z68734P5178QN_consul.bootstrap_A", + "attributes": { + "failover": "", + "fqdn": "consul.bootstrap", + "health_check_id": "", + "id": "Z68734P5178QN_consul.bootstrap_A", + "name": "consul.bootstrap", + "records.#": "6", + "records.1148461392": "10.201.3.8", + "records.1169574759": "10.201.2.8", + "records.1206973758": "10.201.1.8", + "records.1275070284": "10.201.2.4", + "records.1304587643": "10.201.3.4", + "records.1313257749": "10.201.1.4", + 
"set_identifier": "", + "ttl": "300", + "type": "A", + "weight": "-1", + "zone_id": "Z68734P5178QN" + } + } + }, + "aws_route53_record.oasis-consul-bootstrap-ns": { + "type": "aws_route53_record", + "depends_on": [ + "aws_route53_zone.oasis-consul-bootstrap", + "aws_route53_zone.oasis-consul-bootstrap", + "aws_route53_zone.oasis-consul-bootstrap", + "aws_route53_zone.oasis-consul-bootstrap", + "aws_route53_zone.oasis-consul-bootstrap" + ], + "primary": { + "id": "Z68734P5178QN_consul.bootstrap_NS", + "attributes": { + "failover": "", + "fqdn": "consul.bootstrap", + "health_check_id": "", + "id": "Z68734P5178QN_consul.bootstrap_NS", + "name": "consul.bootstrap", + "records.#": "4", + "records.1796532126": "ns-512.awsdns-00.net.", + "records.2728059479": "ns-1536.awsdns-00.co.uk.", + "records.4092160370": "ns-1024.awsdns-00.org.", + "records.456007465": "ns-0.awsdns-00.com.", + "set_identifier": "", + "ttl": "30", + "type": "NS", + "weight": "-1", + "zone_id": "Z68734P5178QN" + } + } + }, + "aws_route53_zone.oasis-consul-bootstrap": { + "type": "aws_route53_zone", + "primary": { + "id": "Z68734P5178QN", + "attributes": { + "comment": "Used to bootstrap consul dns", + "id": "Z68734P5178QN", + "name": "consul.bootstrap", + "name_servers.#": "4", + "name_servers.0": "ns-0.awsdns-00.com.", + "name_servers.1": "ns-1024.awsdns-00.org.", + "name_servers.2": "ns-1536.awsdns-00.co.uk.", + "name_servers.3": "ns-512.awsdns-00.net.", + "tags.#": "0", + "vpc_id": "vpc-65814701", + "vpc_region": "us-west-2", + "zone_id": "Z68734P5178QN" + } + } + } + } + }, + { + "path": [ + "root", + "consul" + ], + "outputs": { + "consul_ips": "10.201.1.8,10.201.2.8,10.201.3.8,", + "security_group_id": "sg-6c4d2f0b" + }, + "resources": { + "aws_instance.consul-green.0": { + "type": "aws_instance", + "depends_on": [ + "aws_security_group.consul" + ], + "primary": { + "id": "i-6dc2acb5", + "attributes": { + "ami": "ami-abcd1234", + "availability_zone": "us-west-2a", + "ebs_block_device.#": "0", + "ebs_optimized": "false", + "ephemeral_block_device.#": "0", + "iam_instance_profile": "", + "id": "i-6dc2acb5", + "instance_state": "running", + "instance_type": "t2.small", + "key_name": "onprem", + "monitoring": "false", + "private_dns": "ip-10-201-1-8.us-west-2.compute.internal", + "private_ip": "10.201.1.8", + "public_dns": "", + "public_ip": "", + "root_block_device.#": "1", + "root_block_device.0.delete_on_termination": "true", + "root_block_device.0.iops": "24", + "root_block_device.0.volume_size": "8", + "root_block_device.0.volume_type": "gp2", + "security_groups.#": "0", + "source_dest_check": "true", + "subnet_id": "subnet-d558bba3", + "tags.#": "1", + "tags.Name": "onprem-consul", + "tenancy": "default", + "user_data": "daea808a0010d9ab14d862878905052ee9e3fe55", + "vpc_security_group_ids.#": "1", + "vpc_security_group_ids.753260136": "sg-6c4d2f0b" + }, + "meta": { + "schema_version": "1" + } + } + }, + "aws_instance.consul-green.1": { + "type": "aws_instance", + "depends_on": [ + "aws_security_group.consul" + ], + "primary": { + "id": "i-59bde69e", + "attributes": { + "ami": "ami-abcd1234", + "availability_zone": "us-west-2b", + "ebs_block_device.#": "0", + "ebs_optimized": "false", + "ephemeral_block_device.#": "0", + "iam_instance_profile": "", + "id": "i-59bde69e", + "instance_state": "running", + "instance_type": "t2.small", + "key_name": "onprem", + "monitoring": "false", + "private_dns": "ip-10-201-2-8.us-west-2.compute.internal", + "private_ip": "10.201.2.8", + "public_dns": "", + "public_ip": "", + 
"root_block_device.#": "1", + "root_block_device.0.delete_on_termination": "true", + "root_block_device.0.iops": "24", + "root_block_device.0.volume_size": "8", + "root_block_device.0.volume_type": "gp2", + "security_groups.#": "0", + "source_dest_check": "true", + "subnet_id": "subnet-984f81fc", + "tags.#": "1", + "tags.Name": "onprem-consul", + "tenancy": "default", + "user_data": "daea808a0010d9ab14d862878905052ee9e3fe55", + "vpc_security_group_ids.#": "1", + "vpc_security_group_ids.753260136": "sg-6c4d2f0b" + }, + "meta": { + "schema_version": "1" + } + } + }, + "aws_instance.consul-green.2": { + "type": "aws_instance", + "depends_on": [ + "aws_security_group.consul" + ], + "primary": { + "id": "i-24d5e9fe", + "attributes": { + "ami": "ami-abcd1234", + "availability_zone": "us-west-2c", + "ebs_block_device.#": "0", + "ebs_optimized": "false", + "ephemeral_block_device.#": "0", + "iam_instance_profile": "", + "id": "i-24d5e9fe", + "instance_state": "running", + "instance_type": "t2.small", + "key_name": "onprem", + "monitoring": "false", + "private_dns": "ip-10-201-3-8.us-west-2.compute.internal", + "private_ip": "10.201.3.8", + "public_dns": "", + "public_ip": "", + "root_block_device.#": "1", + "root_block_device.0.delete_on_termination": "true", + "root_block_device.0.iops": "24", + "root_block_device.0.volume_size": "8", + "root_block_device.0.volume_type": "gp2", + "security_groups.#": "0", + "source_dest_check": "true", + "subnet_id": "subnet-776d532e", + "tags.#": "1", + "tags.Name": "onprem-consul", + "tenancy": "default", + "user_data": "daea808a0010d9ab14d862878905052ee9e3fe55", + "vpc_security_group_ids.#": "1", + "vpc_security_group_ids.753260136": "sg-6c4d2f0b" + }, + "meta": { + "schema_version": "1" + } + } + }, + "aws_security_group.consul": { + "type": "aws_security_group", + "primary": { + "id": "sg-6c4d2f0b", + "attributes": { + "description": "Managed by Terraform", + "egress.#": "1", + "egress.482069346.cidr_blocks.#": "1", + "egress.482069346.cidr_blocks.0": "0.0.0.0/0", + "egress.482069346.from_port": "0", + "egress.482069346.protocol": "-1", + "egress.482069346.security_groups.#": "0", + "egress.482069346.self": "false", + "egress.482069346.to_port": "0", + "id": "sg-6c4d2f0b", + "ingress.#": "1", + "ingress.3832255922.cidr_blocks.#": "2", + "ingress.3832255922.cidr_blocks.0": "10.201.0.0/16", + "ingress.3832255922.cidr_blocks.1": "127.0.0.1/32", + "ingress.3832255922.from_port": "0", + "ingress.3832255922.protocol": "-1", + "ingress.3832255922.security_groups.#": "0", + "ingress.3832255922.self": "false", + "ingress.3832255922.to_port": "0", + "name": "onprem-consul", + "owner_id": "209146746714", + "tags.#": "0", + "vpc_id": "vpc-65814701" + } + } + } + } + }, + { + "path": [ + "root", + "network-core" + ], + "outputs": { + "private_az1_subnet_id": "subnet-d558bba3", + "private_az2_subnet_id": "subnet-984f81fc", + "private_az3_subnet_id": "subnet-776d532e", + "public_az1_subnet_id": "subnet-d658bba0", + "public_az2_subnet_id": "subnet-9f4f81fb", + "public_az3_subnet_id": "subnet-756d532c", + "vpc_cidr": "10.201.0.0/16", + "vpc_id": "vpc-65814701" + }, + "resources": {} + }, + { + "path": [ + "root", + "vault" + ], + "outputs": { + "dns_name": "internal-onprem-vault-2015291251.us-west-2.elb.amazonaws.com", + "private_ips": "10.201.1.145,10.201.2.191,10.201.3.230" + }, + "resources": { + "aws_elb.vault": { + "type": "aws_elb", + "depends_on": [ + "aws_instance.vault", + "aws_security_group.elb" + ], + "primary": { + "id": "onprem-vault", + "attributes": { + 
"access_logs.#": "0", + "availability_zones.#": "3", + "availability_zones.2050015877": "us-west-2c", + "availability_zones.221770259": "us-west-2b", + "availability_zones.2487133097": "us-west-2a", + "connection_draining": "true", + "connection_draining_timeout": "400", + "cross_zone_load_balancing": "true", + "dns_name": "internal-onprem-vault-2015291251.us-west-2.elb.amazonaws.com", + "health_check.#": "1", + "health_check.4162994118.healthy_threshold": "2", + "health_check.4162994118.interval": "15", + "health_check.4162994118.target": "HTTPS:8200/v1/sys/health", + "health_check.4162994118.timeout": "5", + "health_check.4162994118.unhealthy_threshold": "3", + "id": "onprem-vault", + "idle_timeout": "60", + "instances.#": "3", + "instances.1694111637": "i-b6d5e96c", + "instances.237539873": "i-11bee5d6", + "instances.3767473091": "i-f3c2ac2b", + "internal": "true", + "listener.#": "2", + "listener.2355508663.instance_port": "8200", + "listener.2355508663.instance_protocol": "tcp", + "listener.2355508663.lb_port": "443", + "listener.2355508663.lb_protocol": "tcp", + "listener.2355508663.ssl_certificate_id": "", + "listener.3383204430.instance_port": "8200", + "listener.3383204430.instance_protocol": "tcp", + "listener.3383204430.lb_port": "80", + "listener.3383204430.lb_protocol": "tcp", + "listener.3383204430.ssl_certificate_id": "", + "name": "onprem-vault", + "security_groups.#": "1", + "security_groups.4254461258": "sg-6b4d2f0c", + "source_security_group": "onprem-vault-elb", + "source_security_group_id": "sg-6b4d2f0c", + "subnets.#": "3", + "subnets.1994053001": "subnet-d658bba0", + "subnets.3216774672": "subnet-756d532c", + "subnets.3611140374": "subnet-9f4f81fb", + "tags.#": "0", + "zone_id": "Z33MTJ483KN6FU" + } + } + }, + "aws_instance.vault.0": { + "type": "aws_instance", + "depends_on": [ + "aws_security_group.vault", + "template_cloudinit_config.config" + ], + "primary": { + "id": "i-f3c2ac2b", + "attributes": { + "ami": "ami-abcd1234", + "availability_zone": "us-west-2a", + "ebs_block_device.#": "0", + "ebs_optimized": "false", + "ephemeral_block_device.#": "0", + "iam_instance_profile": "", + "id": "i-f3c2ac2b", + "instance_state": "running", + "instance_type": "t2.small", + "key_name": "onprem", + "monitoring": "false", + "private_dns": "ip-10-201-1-145.us-west-2.compute.internal", + "private_ip": "10.201.1.145", + "public_dns": "", + "public_ip": "", + "root_block_device.#": "1", + "root_block_device.0.delete_on_termination": "true", + "root_block_device.0.iops": "24", + "root_block_device.0.volume_size": "8", + "root_block_device.0.volume_type": "gp2", + "security_groups.#": "0", + "source_dest_check": "true", + "subnet_id": "subnet-d558bba3", + "tags.#": "1", + "tags.Name": "onprem-vault - 0", + "tenancy": "default", + "user_data": "423b5c91392a6b2ac287a118fcdad0aadaeffd48", + "vpc_security_group_ids.#": "1", + "vpc_security_group_ids.1377395316": "sg-6a4d2f0d" + }, + "meta": { + "schema_version": "1" + } + } + }, + "aws_instance.vault.1": { + "type": "aws_instance", + "depends_on": [ + "aws_security_group.vault", + "template_cloudinit_config.config" + ], + "primary": { + "id": "i-11bee5d6", + "attributes": { + "ami": "ami-abcd1234", + "availability_zone": "us-west-2b", + "ebs_block_device.#": "0", + "ebs_optimized": "false", + "ephemeral_block_device.#": "0", + "iam_instance_profile": "", + "id": "i-11bee5d6", + "instance_state": "running", + "instance_type": "t2.small", + "key_name": "onprem", + "monitoring": "false", + "private_dns": 
"ip-10-201-2-191.us-west-2.compute.internal", + "private_ip": "10.201.2.191", + "public_dns": "", + "public_ip": "", + "root_block_device.#": "1", + "root_block_device.0.delete_on_termination": "true", + "root_block_device.0.iops": "24", + "root_block_device.0.volume_size": "8", + "root_block_device.0.volume_type": "gp2", + "security_groups.#": "0", + "source_dest_check": "true", + "subnet_id": "subnet-984f81fc", + "tags.#": "1", + "tags.Name": "onprem-vault - 1", + "tenancy": "default", + "user_data": "de5ec79c02b721123a7c2a1622257b425aa26e61", + "vpc_security_group_ids.#": "1", + "vpc_security_group_ids.1377395316": "sg-6a4d2f0d" + }, + "meta": { + "schema_version": "1" + } + } + }, + "aws_instance.vault.2": { + "type": "aws_instance", + "depends_on": [ + "aws_security_group.vault", + "template_cloudinit_config.config" + ], + "primary": { + "id": "i-b6d5e96c", + "attributes": { + "ami": "ami-abcd1234", + "availability_zone": "us-west-2c", + "ebs_block_device.#": "0", + "ebs_optimized": "false", + "ephemeral_block_device.#": "0", + "iam_instance_profile": "", + "id": "i-b6d5e96c", + "instance_state": "running", + "instance_type": "t2.small", + "key_name": "onprem", + "monitoring": "false", + "private_dns": "ip-10-201-3-230.us-west-2.compute.internal", + "private_ip": "10.201.3.230", + "public_dns": "", + "public_ip": "", + "root_block_device.#": "1", + "root_block_device.0.delete_on_termination": "true", + "root_block_device.0.iops": "24", + "root_block_device.0.volume_size": "8", + "root_block_device.0.volume_type": "gp2", + "security_groups.#": "0", + "source_dest_check": "true", + "subnet_id": "subnet-776d532e", + "tags.#": "1", + "tags.Name": "onprem-vault - 2", + "tenancy": "default", + "user_data": "7ecdafc11c715866578ab5441bb27abbae97c850", + "vpc_security_group_ids.#": "1", + "vpc_security_group_ids.1377395316": "sg-6a4d2f0d" + }, + "meta": { + "schema_version": "1" + } + } + }, + "aws_security_group.elb": { + "type": "aws_security_group", + "primary": { + "id": "sg-6b4d2f0c", + "attributes": { + "description": "Managed by Terraform", + "egress.#": "1", + "egress.482069346.cidr_blocks.#": "1", + "egress.482069346.cidr_blocks.0": "0.0.0.0/0", + "egress.482069346.from_port": "0", + "egress.482069346.protocol": "-1", + "egress.482069346.security_groups.#": "0", + "egress.482069346.self": "false", + "egress.482069346.to_port": "0", + "id": "sg-6b4d2f0c", + "ingress.#": "2", + "ingress.2915022413.cidr_blocks.#": "1", + "ingress.2915022413.cidr_blocks.0": "10.201.0.0/16", + "ingress.2915022413.from_port": "80", + "ingress.2915022413.protocol": "tcp", + "ingress.2915022413.security_groups.#": "0", + "ingress.2915022413.self": "false", + "ingress.2915022413.to_port": "80", + "ingress.382081576.cidr_blocks.#": "1", + "ingress.382081576.cidr_blocks.0": "10.201.0.0/16", + "ingress.382081576.from_port": "443", + "ingress.382081576.protocol": "tcp", + "ingress.382081576.security_groups.#": "0", + "ingress.382081576.self": "false", + "ingress.382081576.to_port": "443", + "name": "onprem-vault-elb", + "owner_id": "209146746714", + "tags.#": "0", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_security_group.vault": { + "type": "aws_security_group", + "primary": { + "id": "sg-6a4d2f0d", + "attributes": { + "description": "Managed by Terraform", + "egress.#": "1", + "egress.482069346.cidr_blocks.#": "1", + "egress.482069346.cidr_blocks.0": "0.0.0.0/0", + "egress.482069346.from_port": "0", + "egress.482069346.protocol": "-1", + "egress.482069346.security_groups.#": "0", + "egress.482069346.self": 
"false", + "egress.482069346.to_port": "0", + "id": "sg-6a4d2f0d", + "ingress.#": "1", + "ingress.2546146930.cidr_blocks.#": "1", + "ingress.2546146930.cidr_blocks.0": "10.201.0.0/16", + "ingress.2546146930.from_port": "0", + "ingress.2546146930.protocol": "-1", + "ingress.2546146930.security_groups.#": "0", + "ingress.2546146930.self": "false", + "ingress.2546146930.to_port": "0", + "name": "onprem-vault", + "owner_id": "209146746714", + "tags.#": "0", + "vpc_id": "vpc-65814701" + } + } + } + } + }, + { + "path": [ + "root", + "network-core", + "igw" + ], + "outputs": { + "id": "igw-d06c48b5" + }, + "resources": { + "aws_internet_gateway.main_igw": { + "type": "aws_internet_gateway", + "primary": { + "id": "igw-d06c48b5", + "attributes": { + "id": "igw-d06c48b5", + "vpc_id": "vpc-65814701" + } + } + } + } + }, + { + "path": [ + "root", + "network-core", + "private-subnets" + ], + "outputs": { + "az1_subnet_id": "subnet-d558bba3", + "az2_subnet_id": "subnet-984f81fc", + "az3_subnet_id": "subnet-776d532e" + }, + "resources": { + "aws_subnet.subnet_az1_private": { + "type": "aws_subnet", + "primary": { + "id": "subnet-d558bba3", + "attributes": { + "availability_zone": "us-west-2a", + "cidr_block": "10.201.1.0/24", + "id": "subnet-d558bba3", + "map_public_ip_on_launch": "false", + "tags.#": "1", + "tags.Name": "onprem-private", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_subnet.subnet_az2_private": { + "type": "aws_subnet", + "primary": { + "id": "subnet-984f81fc", + "attributes": { + "availability_zone": "us-west-2b", + "cidr_block": "10.201.2.0/24", + "id": "subnet-984f81fc", + "map_public_ip_on_launch": "false", + "tags.#": "1", + "tags.Name": "onprem-private", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_subnet.subnet_az3_private": { + "type": "aws_subnet", + "primary": { + "id": "subnet-776d532e", + "attributes": { + "availability_zone": "us-west-2c", + "cidr_block": "10.201.3.0/24", + "id": "subnet-776d532e", + "map_public_ip_on_launch": "false", + "tags.#": "1", + "tags.Name": "onprem-private", + "vpc_id": "vpc-65814701" + } + } + } + } + }, + { + "path": [ + "root", + "network-core", + "public-subnets" + ], + "outputs": { + "az1_nat_gateway_id": "nat-05ca7f2d5f1f96693", + "az1_subnet_id": "subnet-d658bba0", + "az2_nat_gateway_id": "nat-03223582301f75a08", + "az2_subnet_id": "subnet-9f4f81fb", + "az3_nat_gateway_id": "nat-0f2710d577d3f32ee", + "az3_subnet_id": "subnet-756d532c" + }, + "resources": { + "aws_eip.eip_nat_az1": { + "type": "aws_eip", + "primary": { + "id": "eipalloc-5f7bd73b", + "attributes": { + "association_id": "", + "domain": "vpc", + "id": "eipalloc-5f7bd73b", + "instance": "", + "network_interface": "", + "private_ip": "", + "public_ip": "52.37.99.10", + "vpc": "true" + } + } + }, + "aws_eip.eip_nat_az2": { + "type": "aws_eip", + "primary": { + "id": "eipalloc-927bd7f6", + "attributes": { + "association_id": "", + "domain": "vpc", + "id": "eipalloc-927bd7f6", + "instance": "", + "network_interface": "", + "private_ip": "", + "public_ip": "52.36.32.86", + "vpc": "true" + } + } + }, + "aws_eip.eip_nat_az3": { + "type": "aws_eip", + "primary": { + "id": "eipalloc-fe76da9a", + "attributes": { + "association_id": "", + "domain": "vpc", + "id": "eipalloc-fe76da9a", + "instance": "", + "network_interface": "", + "private_ip": "", + "public_ip": "52.25.71.124", + "vpc": "true" + } + } + }, + "aws_nat_gateway.nat_gw_az1": { + "type": "aws_nat_gateway", + "depends_on": [ + "aws_eip.eip_nat_az1", + "aws_subnet.subnet_az1_public" + ], + "primary": { + "id": 
"nat-05ca7f2d5f1f96693", + "attributes": { + "allocation_id": "eipalloc-5f7bd73b", + "id": "nat-05ca7f2d5f1f96693", + "network_interface_id": "eni-c3ff6089", + "private_ip": "10.201.101.229", + "public_ip": "52.37.99.10", + "subnet_id": "subnet-d658bba0" + } + } + }, + "aws_nat_gateway.nat_gw_az2": { + "type": "aws_nat_gateway", + "depends_on": [ + "aws_eip.eip_nat_az2", + "aws_subnet.subnet_az2_public" + ], + "primary": { + "id": "nat-03223582301f75a08", + "attributes": { + "allocation_id": "eipalloc-927bd7f6", + "id": "nat-03223582301f75a08", + "network_interface_id": "eni-db22f0a0", + "private_ip": "10.201.102.214", + "public_ip": "52.36.32.86", + "subnet_id": "subnet-9f4f81fb" + } + } + }, + "aws_nat_gateway.nat_gw_az3": { + "type": "aws_nat_gateway", + "depends_on": [ + "aws_eip.eip_nat_az3", + "aws_subnet.subnet_az3_public" + ], + "primary": { + "id": "nat-0f2710d577d3f32ee", + "attributes": { + "allocation_id": "eipalloc-fe76da9a", + "id": "nat-0f2710d577d3f32ee", + "network_interface_id": "eni-e0cd4dbd", + "private_ip": "10.201.103.58", + "public_ip": "52.25.71.124", + "subnet_id": "subnet-756d532c" + } + } + }, + "aws_route_table.route_table_public": { + "type": "aws_route_table", + "primary": { + "id": "rtb-838f29e7", + "attributes": { + "id": "rtb-838f29e7", + "propagating_vgws.#": "0", + "route.#": "1", + "route.1250083285.cidr_block": "0.0.0.0/0", + "route.1250083285.gateway_id": "igw-d06c48b5", + "route.1250083285.instance_id": "", + "route.1250083285.nat_gateway_id": "", + "route.1250083285.network_interface_id": "", + "route.1250083285.vpc_peering_connection_id": "", + "tags.#": "1", + "tags.Name": "onprem-public", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_route_table_association.route_table_az1": { + "type": "aws_route_table_association", + "depends_on": [ + "aws_route_table.route_table_public", + "aws_subnet.subnet_az1_public" + ], + "primary": { + "id": "rtbassoc-a5d6abc1", + "attributes": { + "id": "rtbassoc-a5d6abc1", + "route_table_id": "rtb-838f29e7", + "subnet_id": "subnet-d658bba0" + } + } + }, + "aws_route_table_association.route_table_az2": { + "type": "aws_route_table_association", + "depends_on": [ + "aws_route_table.route_table_public", + "aws_subnet.subnet_az2_public" + ], + "primary": { + "id": "rtbassoc-a0d6abc4", + "attributes": { + "id": "rtbassoc-a0d6abc4", + "route_table_id": "rtb-838f29e7", + "subnet_id": "subnet-9f4f81fb" + } + } + }, + "aws_route_table_association.route_table_az3": { + "type": "aws_route_table_association", + "depends_on": [ + "aws_route_table.route_table_public", + "aws_subnet.subnet_az3_public" + ], + "primary": { + "id": "rtbassoc-a7d6abc3", + "attributes": { + "id": "rtbassoc-a7d6abc3", + "route_table_id": "rtb-838f29e7", + "subnet_id": "subnet-756d532c" + } + } + }, + "aws_subnet.subnet_az1_public": { + "type": "aws_subnet", + "primary": { + "id": "subnet-d658bba0", + "attributes": { + "availability_zone": "us-west-2a", + "cidr_block": "10.201.101.0/24", + "id": "subnet-d658bba0", + "map_public_ip_on_launch": "true", + "tags.#": "1", + "tags.Name": "onprem-public", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_subnet.subnet_az2_public": { + "type": "aws_subnet", + "primary": { + "id": "subnet-9f4f81fb", + "attributes": { + "availability_zone": "us-west-2b", + "cidr_block": "10.201.102.0/24", + "id": "subnet-9f4f81fb", + "map_public_ip_on_launch": "true", + "tags.#": "1", + "tags.Name": "onprem-public", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_subnet.subnet_az3_public": { + "type": "aws_subnet", + "primary": { + "id": 
"subnet-756d532c", + "attributes": { + "availability_zone": "us-west-2c", + "cidr_block": "10.201.103.0/24", + "id": "subnet-756d532c", + "map_public_ip_on_launch": "true", + "tags.#": "1", + "tags.Name": "onprem-public", + "vpc_id": "vpc-65814701" + } + } + } + } + }, + { + "path": [ + "root", + "network-core", + "restricted-subnets" + ], + "outputs": { + "az1_subnet_id": "subnet-d758bba1", + "az2_subnet_id": "subnet-994f81fd", + "az3_subnet_id": "subnet-746d532d" + }, + "resources": { + "aws_subnet.subnet_az1_private": { + "type": "aws_subnet", + "primary": { + "id": "subnet-d758bba1", + "attributes": { + "availability_zone": "us-west-2a", + "cidr_block": "10.201.220.0/24", + "id": "subnet-d758bba1", + "map_public_ip_on_launch": "false", + "tags.#": "1", + "tags.Name": "onprem-restricted", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_subnet.subnet_az2_private": { + "type": "aws_subnet", + "primary": { + "id": "subnet-994f81fd", + "attributes": { + "availability_zone": "us-west-2b", + "cidr_block": "10.201.221.0/24", + "id": "subnet-994f81fd", + "map_public_ip_on_launch": "false", + "tags.#": "1", + "tags.Name": "onprem-restricted", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_subnet.subnet_az3_private": { + "type": "aws_subnet", + "primary": { + "id": "subnet-746d532d", + "attributes": { + "availability_zone": "us-west-2c", + "cidr_block": "10.201.222.0/24", + "id": "subnet-746d532d", + "map_public_ip_on_launch": "false", + "tags.#": "1", + "tags.Name": "onprem-restricted", + "vpc_id": "vpc-65814701" + } + } + } + } + }, + { + "path": [ + "root", + "network-core", + "routing-private" + ], + "outputs": { + "az1_route_table_id": "rtb-828f29e6", + "az2_route_table_id": "rtb-808f29e4", + "az3_route_table_id": "rtb-818f29e5" + }, + "resources": { + "aws_route.route_table_az1_private_default": { + "type": "aws_route", + "depends_on": [ + "aws_route_table.route_table_az1_private", + "aws_route_table.route_table_az1_private" + ], + "primary": { + "id": "r-rtb-828f29e61080289494", + "attributes": { + "destination_cidr_block": "0.0.0.0/0", + "destination_prefix_list_id": "", + "gateway_id": "", + "id": "r-rtb-828f29e61080289494", + "instance_id": "", + "instance_owner_id": "", + "nat_gateway_id": "nat-05ca7f2d5f1f96693", + "network_interface_id": "", + "origin": "CreateRoute", + "route_table_id": "rtb-828f29e6", + "state": "active", + "vpc_peering_connection_id": "" + } + } + }, + "aws_route.route_table_az2_private_default": { + "type": "aws_route", + "depends_on": [ + "aws_route_table.route_table_az2_private", + "aws_route_table.route_table_az2_private" + ], + "primary": { + "id": "r-rtb-808f29e41080289494", + "attributes": { + "destination_cidr_block": "0.0.0.0/0", + "destination_prefix_list_id": "", + "gateway_id": "", + "id": "r-rtb-808f29e41080289494", + "instance_id": "", + "instance_owner_id": "", + "nat_gateway_id": "nat-03223582301f75a08", + "network_interface_id": "", + "origin": "CreateRoute", + "route_table_id": "rtb-808f29e4", + "state": "active", + "vpc_peering_connection_id": "" + } + } + }, + "aws_route.route_table_az3_private_default": { + "type": "aws_route", + "depends_on": [ + "aws_route_table.route_table_az3_private", + "aws_route_table.route_table_az3_private" + ], + "primary": { + "id": "r-rtb-818f29e51080289494", + "attributes": { + "destination_cidr_block": "0.0.0.0/0", + "destination_prefix_list_id": "", + "gateway_id": "", + "id": "r-rtb-818f29e51080289494", + "instance_id": "", + "instance_owner_id": "", + "nat_gateway_id": "nat-0f2710d577d3f32ee", + 
"network_interface_id": "", + "origin": "CreateRoute", + "route_table_id": "rtb-818f29e5", + "state": "active", + "vpc_peering_connection_id": "" + } + } + }, + "aws_route_table.route_table_az1_private": { + "type": "aws_route_table", + "primary": { + "id": "rtb-828f29e6", + "attributes": { + "id": "rtb-828f29e6", + "propagating_vgws.#": "0", + "route.#": "0", + "tags.#": "1", + "tags.Name": "onprem-routing-private", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_route_table.route_table_az2_private": { + "type": "aws_route_table", + "primary": { + "id": "rtb-808f29e4", + "attributes": { + "id": "rtb-808f29e4", + "propagating_vgws.#": "0", + "route.#": "0", + "tags.#": "1", + "tags.Name": "onprem-routing-private", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_route_table.route_table_az3_private": { + "type": "aws_route_table", + "primary": { + "id": "rtb-818f29e5", + "attributes": { + "id": "rtb-818f29e5", + "propagating_vgws.#": "0", + "route.#": "0", + "tags.#": "1", + "tags.Name": "onprem-routing-private", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_route_table_association.route_table_az1": { + "type": "aws_route_table_association", + "depends_on": [ + "aws_route_table.route_table_az1_private", + "aws_route_table.route_table_az1_private" + ], + "primary": { + "id": "rtbassoc-a4d6abc0", + "attributes": { + "id": "rtbassoc-a4d6abc0", + "route_table_id": "rtb-828f29e6", + "subnet_id": "subnet-d558bba3" + } + } + }, + "aws_route_table_association.route_table_az2": { + "type": "aws_route_table_association", + "depends_on": [ + "aws_route_table.route_table_az2_private", + "aws_route_table.route_table_az2_private" + ], + "primary": { + "id": "rtbassoc-d9d6abbd", + "attributes": { + "id": "rtbassoc-d9d6abbd", + "route_table_id": "rtb-808f29e4", + "subnet_id": "subnet-984f81fc" + } + } + }, + "aws_route_table_association.route_table_az3": { + "type": "aws_route_table_association", + "depends_on": [ + "aws_route_table.route_table_az3_private", + "aws_route_table.route_table_az3_private" + ], + "primary": { + "id": "rtbassoc-dbd6abbf", + "attributes": { + "id": "rtbassoc-dbd6abbf", + "route_table_id": "rtb-818f29e5", + "subnet_id": "subnet-776d532e" + } + } + }, + "aws_vpc_endpoint.vpe_s3_az1_private": { + "type": "aws_vpc_endpoint", + "depends_on": [ + "aws_route_table.route_table_az1_private", + "aws_route_table.route_table_az1_private" + ], + "primary": { + "id": "vpce-94e70afd", + "attributes": { + "id": "vpce-94e70afd", + "policy": "{\"Statement\":[{\"Action\":\"*\",\"Effect\":\"Allow\",\"Principal\":\"*\",\"Resource\":\"*\",\"Sid\":\"\"}],\"Version\":\"2008-10-17\"}", + "route_table_ids.#": "1", + "route_table_ids.1792300572": "rtb-828f29e6", + "service_name": "com.amazonaws.us-west-2.s3", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_vpc_endpoint.vpe_s3_az2_private": { + "type": "aws_vpc_endpoint", + "depends_on": [ + "aws_route_table.route_table_az2_private", + "aws_route_table.route_table_az2_private" + ], + "primary": { + "id": "vpce-95e70afc", + "attributes": { + "id": "vpce-95e70afc", + "policy": "{\"Statement\":[{\"Action\":\"*\",\"Effect\":\"Allow\",\"Principal\":\"*\",\"Resource\":\"*\",\"Sid\":\"\"}],\"Version\":\"2008-10-17\"}", + "route_table_ids.#": "1", + "route_table_ids.323298841": "rtb-808f29e4", + "service_name": "com.amazonaws.us-west-2.s3", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_vpc_endpoint.vpe_s3_az3_private": { + "type": "aws_vpc_endpoint", + "depends_on": [ + "aws_route_table.route_table_az3_private", + "aws_route_table.route_table_az3_private" + ], + 
"primary": { + "id": "vpce-97e70afe", + "attributes": { + "id": "vpce-97e70afe", + "policy": "{\"Statement\":[{\"Action\":\"*\",\"Effect\":\"Allow\",\"Principal\":\"*\",\"Resource\":\"*\",\"Sid\":\"\"}],\"Version\":\"2008-10-17\"}", + "route_table_ids.#": "1", + "route_table_ids.3258260795": "rtb-818f29e5", + "service_name": "com.amazonaws.us-west-2.s3", + "vpc_id": "vpc-65814701" + } + } + } + } + }, + { + "path": [ + "root", + "network-core", + "routing-restricted" + ], + "outputs": {}, + "resources": { + "aws_route_table.route_table_az1_restricted": { + "type": "aws_route_table", + "primary": { + "id": "rtb-428c2a26", + "attributes": { + "id": "rtb-428c2a26", + "propagating_vgws.#": "0", + "route.#": "1", + "route.1020029083.cidr_block": "0.0.0.0/0", + "route.1020029083.gateway_id": "", + "route.1020029083.instance_id": "", + "route.1020029083.nat_gateway_id": "nat-05ca7f2d5f1f96693", + "route.1020029083.network_interface_id": "", + "route.1020029083.vpc_peering_connection_id": "", + "tags.#": "1", + "tags.Name": "onprem-routing-restricted", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_route_table.route_table_az2_restricted": { + "type": "aws_route_table", + "primary": { + "id": "rtb-2d8c2a49", + "attributes": { + "id": "rtb-2d8c2a49", + "propagating_vgws.#": "0", + "route.#": "0", + "tags.#": "1", + "tags.Name": "onprem-routing-restricted", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_route_table.route_table_az3_restricted": { + "type": "aws_route_table", + "primary": { + "id": "rtb-4a8c2a2e", + "attributes": { + "id": "rtb-4a8c2a2e", + "propagating_vgws.#": "0", + "route.#": "1", + "route.3346134226.cidr_block": "0.0.0.0/0", + "route.3346134226.gateway_id": "", + "route.3346134226.instance_id": "", + "route.3346134226.nat_gateway_id": "nat-0f2710d577d3f32ee", + "route.3346134226.network_interface_id": "", + "route.3346134226.vpc_peering_connection_id": "", + "tags.#": "1", + "tags.Name": "onprem-routing-restricted", + "vpc_id": "vpc-65814701" + } + } + }, + "aws_route_table_association.route_table_az1": { + "type": "aws_route_table_association", + "depends_on": [ + "aws_route_table.route_table_az1_restricted" + ], + "primary": { + "id": "rtbassoc-76d1ac12", + "attributes": { + "id": "rtbassoc-76d1ac12", + "route_table_id": "rtb-428c2a26", + "subnet_id": "subnet-d758bba1" + } + } + }, + "aws_route_table_association.route_table_az2": { + "type": "aws_route_table_association", + "depends_on": [ + "aws_route_table.route_table_az2_restricted" + ], + "primary": { + "id": "rtbassoc-21d1ac45", + "attributes": { + "id": "rtbassoc-21d1ac45", + "route_table_id": "rtb-2d8c2a49", + "subnet_id": "subnet-994f81fd" + } + } + }, + "aws_route_table_association.route_table_az3": { + "type": "aws_route_table_association", + "depends_on": [ + "aws_route_table.route_table_az3_restricted" + ], + "primary": { + "id": "rtbassoc-45d1ac21", + "attributes": { + "id": "rtbassoc-45d1ac21", + "route_table_id": "rtb-4a8c2a2e", + "subnet_id": "subnet-746d532d" + } + } + } + } + }, + { + "path": [ + "root", + "network-core", + "vpc" + ], + "outputs": { + "cidr": "10.201.0.0/16", + "id": "vpc-65814701" + }, + "resources": { + "aws_vpc.vpc": { + "type": "aws_vpc", + "primary": { + "id": "vpc-65814701", + "attributes": { + "cidr_block": "10.201.0.0/16", + "default_network_acl_id": "acl-30964254", + "default_security_group_id": "sg-604d2f07", + "dhcp_options_id": "dopt-e1afbb83", + "enable_classiclink": "false", + "enable_dns_hostnames": "true", + "id": "vpc-65814701", + "main_route_table_id": "rtb-868f29e2", + 
"tags.#": "1", + "tags.Name": "onprem" + } + } + } + } + } + ] +} diff --git a/terraform/test-fixtures/state-filter/resource-in-module-2.tfstate b/terraform/test-fixtures/state-filter/resource-in-module-2.tfstate new file mode 100644 index 0000000000..ee1d65f818 --- /dev/null +++ b/terraform/test-fixtures/state-filter/resource-in-module-2.tfstate @@ -0,0 +1,20 @@ +{ + "version": 1, + "serial": 12, + "modules": [ + { + "path": [ + "root", + "foo" + ], + "resources": { + "aws_instance.bar": { + "type": "aws_instance", + "primary": { + "id": "bar" + } + } + } + } + ] +} diff --git a/terraform/test-fixtures/state-filter/small.tfstate b/terraform/test-fixtures/state-filter/small.tfstate new file mode 100644 index 0000000000..9cb3c1d9f4 --- /dev/null +++ b/terraform/test-fixtures/state-filter/small.tfstate @@ -0,0 +1,122 @@ +{ + "version": 1, + "serial": 12, + "modules": [ + { + "path": [ + "root" + ], + "outputs": { + "public_az1_subnet_id": "subnet-d658bba0", + "region": "us-west-2", + "vpc_cidr": "10.201.0.0/16", + "vpc_id": "vpc-65814701" + }, + "resources": { + "aws_key_pair.onprem": { + "type": "aws_key_pair", + "primary": { + "id": "onprem", + "attributes": { + "id": "onprem", + "key_name": "onprem", + "public_key": "foo" + }, + "meta": { + "schema_version": "1" + } + } + } + } + }, + { + "path": [ + "root", + "bootstrap" + ], + "outputs": { + "consul_bootstrap_dns": "consul.bootstrap" + }, + "resources": { + "aws_route53_record.oasis-consul-bootstrap-a": { + "type": "aws_route53_record", + "depends_on": [ + "aws_route53_zone.oasis-consul-bootstrap" + ], + "primary": { + "id": "Z68734P5178QN_consul.bootstrap_A", + "attributes": { + "failover": "", + "fqdn": "consul.bootstrap", + "health_check_id": "", + "id": "Z68734P5178QN_consul.bootstrap_A", + "name": "consul.bootstrap", + "records.#": "6", + "records.1148461392": "10.201.3.8", + "records.1169574759": "10.201.2.8", + "records.1206973758": "10.201.1.8", + "records.1275070284": "10.201.2.4", + "records.1304587643": "10.201.3.4", + "records.1313257749": "10.201.1.4", + "set_identifier": "", + "ttl": "300", + "type": "A", + "weight": "-1", + "zone_id": "Z68734P5178QN" + } + } + }, + "aws_route53_record.oasis-consul-bootstrap-ns": { + "type": "aws_route53_record", + "depends_on": [ + "aws_route53_zone.oasis-consul-bootstrap", + "aws_route53_zone.oasis-consul-bootstrap", + "aws_route53_zone.oasis-consul-bootstrap", + "aws_route53_zone.oasis-consul-bootstrap", + "aws_route53_zone.oasis-consul-bootstrap" + ], + "primary": { + "id": "Z68734P5178QN_consul.bootstrap_NS", + "attributes": { + "failover": "", + "fqdn": "consul.bootstrap", + "health_check_id": "", + "id": "Z68734P5178QN_consul.bootstrap_NS", + "name": "consul.bootstrap", + "records.#": "4", + "records.1796532126": "ns-512.awsdns-00.net.", + "records.2728059479": "ns-1536.awsdns-00.co.uk.", + "records.4092160370": "ns-1024.awsdns-00.org.", + "records.456007465": "ns-0.awsdns-00.com.", + "set_identifier": "", + "ttl": "30", + "type": "NS", + "weight": "-1", + "zone_id": "Z68734P5178QN" + } + } + }, + "aws_route53_zone.oasis-consul-bootstrap": { + "type": "aws_route53_zone", + "primary": { + "id": "Z68734P5178QN", + "attributes": { + "comment": "Used to bootstrap consul dns", + "id": "Z68734P5178QN", + "name": "consul.bootstrap", + "name_servers.#": "4", + "name_servers.0": "ns-0.awsdns-00.com.", + "name_servers.1": "ns-1024.awsdns-00.org.", + "name_servers.2": "ns-1536.awsdns-00.co.uk.", + "name_servers.3": "ns-512.awsdns-00.net.", + "tags.#": "0", + "vpc_id": "vpc-65814701", + 
"vpc_region": "us-west-2", + "zone_id": "Z68734P5178QN" + } + } + } + } + } + ] +} diff --git a/terraform/transform_output_test.go b/terraform/transform_output_test.go index dc9ea0a764..6ba2150dcd 100644 --- a/terraform/transform_output_test.go +++ b/terraform/transform_output_test.go @@ -11,7 +11,7 @@ func TestAddOutputOrphanTransformer(t *testing.T) { Modules: []*ModuleState{ &ModuleState{ Path: RootModulePath, - Outputs: map[string]string{ + Outputs: map[string]interface{}{ "foo": "bar", "bar": "baz", }, diff --git a/terraform/version.go b/terraform/version.go index 9f0ce0b132..e781d9c259 100644 --- a/terraform/version.go +++ b/terraform/version.go @@ -1,5 +1,9 @@ package terraform +import ( + "github.com/hashicorp/go-version" +) + // The main version number that is being run at the moment. const Version = "0.7.0" @@ -7,3 +11,8 @@ const Version = "0.7.0" // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. const VersionPrerelease = "dev" + +// SemVersion is an instance of version.Version. This has the secondary +// benefit of verifying during tests and init time that our version is a +// proper semantic version, which should always be the case. +var SemVersion = version.Must(version.NewVersion(Version)) diff --git a/vendor/github.com/hashicorp/go-plugin/.gitignore b/vendor/github.com/hashicorp/go-plugin/.gitignore new file mode 100644 index 0000000000..e43b0f9889 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/.gitignore @@ -0,0 +1 @@ +.DS_Store diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE new file mode 100644 index 0000000000..82b4de97c7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. 
any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md new file mode 100644 index 0000000000..2058cfb68d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -0,0 +1,161 @@ +# Go Plugin System over RPC + +`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system +that has been in use by HashiCorp tooling for over 3 years. While initially +created for [Packer](https://www.packer.io), it has since been used by +[Terraform](https://www.terraform.io) and [Otto](https://www.ottoproject.io), +with plans to also use it for [Nomad](https://www.nomadproject.io) and +[Vault](https://www.vaultproject.io). + +While the plugin system is over RPC, it is currently only designed to work +over a local [reliable] network. Plugins over a real network are not supported +and will lead to unexpected behavior. + +This plugin system has been used on millions of machines across many different +projects and has proven to be battle hardened and ready for production use. + +## Features + +The HashiCorp plugin system supports a number of features: + +**Plugins are Go interface implementations.** This makes writing and consuming +plugins feel very natural. To a plugin author: you just implement an +interface as if it were going to run in the same process. For a plugin user: +you just use and call functions on an interface as if it were in the same +process. This plugin system handles the communication in between. + +**Complex arguments and return values are supported.** This library +provides APIs for handling complex arguments and return values such +as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library +(`MuxBroker`) for creating new connections between the client/server to +serve additional interfaces or transfer raw data. + +**Bidirectional communication.** Because the plugin system supports +complex arguments, the host process can send it interface implementations +and the plugin can call back into the host process. + +**Built-in Logging.** Any plugins that use the `log` standard library +will have log data automatically sent to the host process. The host +process will mirror this output prefixed with the path to the plugin +binary. This makes debugging with plugins simple. + +**Protocol Versioning.** A very basic "protocol version" is supported that +can be incremented to invalidate any previous plugins. This is useful when +interface signatures are changing, protocol level changes are necessary, +etc. When a protocol version is incompatible, a human friendly error +message is shown to the end user. + +**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue +to use stdout/stderr as usual and the output will get mirrored back to +the host process. The host process can control what `io.Writer` these +streams go to to prevent this from happening. + +**TTY Preservation.** Plugin subprocesses are connected to the identical +stdin file descriptor as the host process, allowing software that requires +a TTY to work. For example, a plugin can execute `ssh` and even though there +are multiple subprocesses and RPC happening, it will look and act perfectly +to the end user. 
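
To make the "Go interface implementations" model described above concrete, here is a minimal, hedged sketch of a plugin type with its two RPC halves; the remaining features below build on this same model. The `Greeter` interface and all `Greeter*` names are hypothetical illustrations, not part of this library; only the `plugin.Plugin` shape (a `Server`/`Client` pair built around `*plugin.MuxBroker` and `*rpc.Client`) and the `Plugin.Greet` service name follow the pattern used in the library's `examples/` directory.

```go
package shared

import (
	"net/rpc"

	"github.com/hashicorp/go-plugin"
)

// Greeter is the interface exposed across the plugin boundary (hypothetical).
type Greeter interface {
	Greet(name string) string
}

// GreeterRPC is the client half: it runs in the host process and forwards
// calls over net/rpc to the plugin subprocess.
type GreeterRPC struct{ client *rpc.Client }

func (g *GreeterRPC) Greet(name string) string {
	var resp string
	if err := g.client.Call("Plugin.Greet", name, &resp); err != nil {
		// A real implementation would surface the error; kept small for the sketch.
		return ""
	}
	return resp
}

// GreeterRPCServer is the server half: it runs inside the plugin process and
// dispatches incoming RPC calls onto a concrete Greeter implementation.
type GreeterRPCServer struct{ Impl Greeter }

func (s *GreeterRPCServer) Greet(name string, resp *string) error {
	*resp = s.Impl.Greet(name)
	return nil
}

// GreeterPlugin ties the two halves together so go-plugin can build either
// side depending on whether it is running in the host or the plugin.
type GreeterPlugin struct{ Impl Greeter }

func (p *GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
	return &GreeterRPCServer{Impl: p.Impl}, nil
}

func (p *GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
	return &GreeterRPC{client: c}, nil
}
```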
+ +**Host upgrade while a plugin is running.** Plugins can be "reattached" +so that the host process can be upgraded while the plugin is still running. +This requires the host/plugin to know this is possible and daemonize +properly. `NewClient` takes a `ReattachConfig` to determine if and how to +reattach. + +## Architecture + +The HashiCorp plugin system works by launching subprocesses and communicating +over RPC (using standard `net/rpc`). A single connection is made between +any plugin and the host process, and we use a +[connection multiplexing](https://github.com/hashicorp/yamux) +library to multiplex any other connections on top. + +This architecture has a number of benefits: + + * Plugins can't crash your host process: A panic in a plugin doesn't + panic the plugin user. + + * Plugins are very easy to write: just write a Go application and `go build`. + Theoretically you could also use another language as long as it can + communicate the Go `net/rpc` protocol but this hasn't yet been tried. + + * Plugins are very easy to install: just put the binary in a location where + the host will find it (depends on the host but this library also provides + helpers), and the plugin host handles the rest. + + * Plugins can be relatively secure: The plugin only has access to the + interfaces and args given to it, not to the entire memory space of the + process. More security features are planned (see the coming soon section + below). + +## Usage + +To use the plugin system, you must take the following steps. These are +high-level steps that must be done. Examples are available in the +`examples/` directory. + + 1. Choose the interface(s) you want to expose for plugins. + + 2. For each interface, implement an implementation of that interface + that communicates over an `*rpc.Client` (from the standard `net/rpc` + package) for every function call. Likewise, implement the RPC server + struct this communicates to which is then communicating to a real, + concrete implementation. + + 3. Create a `Plugin` implementation that knows how to create the RPC + client/server for a given plugin type. + + 4. Plugin authors call `plugin.Serve` to serve a plugin from the + `main` function. + + 5. Plugin users use `plugin.Client` to launch a subprocess and request + an interface implementation over RPC. + +That's it! In practice, step 2 is the most tedious and time consuming step. +Even so, it isn't very difficult and you can see examples in the `examples/` +directory as well as throughout our various open source projects. + +For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin). + +## Roadmap + +Our plugin system is constantly evolving. As we use the plugin system for +new projects or for new features in existing projects, we constantly find +improvements we can make. + +At this point in time, the roadmap for the plugin system is: + +**Cryptographically Secure Plugins.** We'll implement signing plugins +and loading signed plugins in order to allow Vault to make use of multi-process +in a secure way. + +**Semantic Versioning.** Plugins will be able to implement a semantic version. +This plugin system will give host processes a system for constraining +versions. This is in addition to the protocol versioning already present +which is more for larger underlying changes. + +**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter) +to support automatic download + install of plugins. 
Paired with cryptographically +secure plugins (above), we can make this a safe operation for an amazing +user experience. + +## What About Shared Libraries? + +When we started using plugins (late 2012, early 2013), plugins over RPC +were the only option since Go didn't support dynamic library loading. Today, +Go still doesn't support dynamic library loading, but they do intend to. +Since 2012, our plugin system has stabilized from millions of users using it, +and has many benefits we've come to value greatly. + +For example, we intend to use this plugin system in +[Vault](https://www.vaultproject.io), and dynamic library loading will +simply never be acceptable in Vault for security reasons. That is an extreme +example, but we believe our library system has more upsides than downsides +over dynamic library loading and since we've had it built and tested for years, +we'll likely continue to use it. + +Shared libraries have one major advantage over our system which is much +higher performance. In real world scenarios across our various tools, +we've never required any more performance out of our plugin system and it +has seen very high throughput, so this isn't a concern for us at the moment. + diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go new file mode 100644 index 0000000000..e559f713ee --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -0,0 +1,533 @@ +package plugin + +import ( + "bufio" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode" +) + +// If this is 1, then we've called CleanupClients. This can be used +// by plugin RPC implementations to change error behavior since you +// can expected network connection errors at this point. This should be +// read by using sync/atomic. +var Killed uint32 = 0 + +// This is a slice of the "managed" clients which are cleaned up when +// calling Cleanup +var managedClients = make([]*Client, 0, 5) + +// Error types +var ( + // ErrProcessNotFound is returned when a client is instantiated to + // reattach to an existing process and it isn't found. + ErrProcessNotFound = errors.New("Reattachment process not found") +) + +// Client handles the lifecycle of a plugin application. It launches +// plugins, connects to them, dispenses interface implementations, and handles +// killing the process. +// +// Plugin hosts should use one Client for each plugin executable. To +// dispense a plugin type, use the `Client.Client` function, and then +// cal `Dispense`. This awkward API is mostly historical but is used to split +// the client that deals with subprocess management and the client that +// does RPC management. +// +// See NewClient and ClientConfig for using a Client. +type Client struct { + config *ClientConfig + exited bool + doneLogging chan struct{} + l sync.Mutex + address net.Addr + process *os.Process + client *RPCClient +} + +// ClientConfig is the configuration used to initialize a new +// plugin client. After being used to initialize a plugin client, +// that configuration must not be modified again. +type ClientConfig struct { + // HandshakeConfig is the configuration that must match servers. + HandshakeConfig + + // Plugins are the plugins that can be consumed. + Plugins map[string]Plugin + + // One of the following must be set, but not both. + // + // Cmd is the unstarted subprocess for starting the plugin. 
If this is + // set, then the Client starts the plugin process on its own and connects + // to it. + // + // Reattach is configuration for reattaching to an existing plugin process + // that is already running. This isn't common. + Cmd *exec.Cmd + Reattach *ReattachConfig + + // Managed represents if the client should be managed by the + // plugin package or not. If true, then by calling CleanupClients, + // it will automatically be cleaned up. Otherwise, the client + // user is fully responsible for making sure to Kill all plugin + // clients. By default the client is _not_ managed. + Managed bool + + // The minimum and maximum port to use for communicating with + // the subprocess. If not set, this defaults to 10,000 and 25,000 + // respectively. + MinPort, MaxPort uint + + // StartTimeout is the timeout to wait for the plugin to say it + // has started successfully. + StartTimeout time.Duration + + // If non-nil, then the stderr of the client will be written to here + // (as well as the log). This is the original os.Stderr of the subprocess. + // This isn't the output of synced stderr. + Stderr io.Writer + + // SyncStdout, SyncStderr can be set to override the + // respective os.Std* values in the plugin. Care should be taken to + // avoid races here. If these are nil, then this will automatically be + // hooked up to os.Stdin, Stdout, and Stderr, respectively. + // + // If the default values (nil) are used, then this package will not + // sync any of these streams. + SyncStdout io.Writer + SyncStderr io.Writer +} + +// ReattachConfig is used to configure a client to reattach to an +// already-running plugin process. You can retrieve this information by +// calling ReattachConfig on Client. +type ReattachConfig struct { + Addr net.Addr + Pid int +} + +// This makes sure all the managed subprocesses are killed and properly +// logged. This should be called before the parent process running the +// plugins exits. +// +// This must only be called _once_. +func CleanupClients() { + // Set the killed to true so that we don't get unexpected panics + atomic.StoreUint32(&Killed, 1) + + // Kill all the managed clients in parallel and use a WaitGroup + // to wait for them all to finish up. + var wg sync.WaitGroup + for _, client := range managedClients { + wg.Add(1) + + go func(client *Client) { + client.Kill() + wg.Done() + }(client) + } + + log.Println("[DEBUG] plugin: waiting for all plugin processes to complete...") + wg.Wait() +} + +// Creates a new plugin client which manages the lifecycle of an external +// plugin and gets the address for the RPC connection. +// +// The client must be cleaned up at some point by calling Kill(). If +// the client is a managed client (created with NewManagedClient) you +// can just call CleanupClients at the end of your program and they will +// be properly cleaned. +func NewClient(config *ClientConfig) (c *Client) { + if config.MinPort == 0 && config.MaxPort == 0 { + config.MinPort = 10000 + config.MaxPort = 25000 + } + + if config.StartTimeout == 0 { + config.StartTimeout = 1 * time.Minute + } + + if config.Stderr == nil { + config.Stderr = ioutil.Discard + } + + if config.SyncStdout == nil { + config.SyncStdout = ioutil.Discard + } + if config.SyncStderr == nil { + config.SyncStderr = ioutil.Discard + } + + c = &Client{config: config} + if config.Managed { + managedClients = append(managedClients, c) + } + + return +} + +// Client returns an RPC client for the plugin. +// +// Subsequent calls to this will return the same RPC client. 
+func (c *Client) Client() (*RPCClient, error) { + addr, err := c.Start() + if err != nil { + return nil, err + } + + c.l.Lock() + defer c.l.Unlock() + + if c.client != nil { + return c.client, nil + } + + // Connect to the client + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + // Create the actual RPC client + c.client, err = NewRPCClient(conn, c.config.Plugins) + if err != nil { + conn.Close() + return nil, err + } + + // Begin the stream syncing so that stdin, out, err work properly + err = c.client.SyncStreams( + c.config.SyncStdout, + c.config.SyncStderr) + if err != nil { + c.client.Close() + c.client = nil + return nil, err + } + + return c.client, nil +} + +// Tells whether or not the underlying process has exited. +func (c *Client) Exited() bool { + c.l.Lock() + defer c.l.Unlock() + return c.exited +} + +// End the executing subprocess (if it is running) and perform any cleanup +// tasks necessary such as capturing any remaining logs and so on. +// +// This method blocks until the process successfully exits. +// +// This method can safely be called multiple times. +func (c *Client) Kill() { + if c.process == nil { + return + } + + // Kill the process + c.process.Kill() + + // Wait for the client to finish logging so we have a complete log + <-c.doneLogging +} + +// Starts the underlying subprocess, communicating with it to negotiate +// a port for RPC connections, and returning the address to connect via RPC. +// +// This method is safe to call multiple times. Subsequent calls have no effect. +// Once a client has been started once, it cannot be started again, even if +// it was killed. +func (c *Client) Start() (addr net.Addr, err error) { + c.l.Lock() + defer c.l.Unlock() + + if c.address != nil { + return c.address, nil + } + + // If one of cmd or reattach isn't set, then it is an error. We wrap + // this in a {} for scoping reasons, and hopeful that the escape + // analysis will pop the stock here. + { + cmdSet := c.config.Cmd != nil + attachSet := c.config.Reattach != nil + if cmdSet == attachSet { + return nil, fmt.Errorf("Only one of Cmd or Reattach must be set") + } + } + + // Create the logging channel for when we kill + c.doneLogging = make(chan struct{}) + + if c.config.Reattach != nil { + // Verify the process still exists. If not, then it is an error + p, err := os.FindProcess(c.config.Reattach.Pid) + if err != nil { + return nil, err + } + + // Attempt to connect to the addr since on Unix systems FindProcess + // doesn't actually return an error if it can't find the process. 
+ conn, err := net.Dial( + c.config.Reattach.Addr.Network(), + c.config.Reattach.Addr.String()) + if err != nil { + p.Kill() + return nil, ErrProcessNotFound + } + conn.Close() + + // Goroutine to mark exit status + go func(pid int) { + // Wait for the process to die + pidWait(pid) + + // Log so we can see it + log.Printf("[DEBUG] plugin: reattached plugin process exited\n") + + // Mark it + c.l.Lock() + defer c.l.Unlock() + c.exited = true + + // Close the logging channel since that doesn't work on reattach + close(c.doneLogging) + }(p.Pid) + + // Set the address and process + c.address = c.config.Reattach.Addr + c.process = p + + return c.address, nil + } + + env := []string{ + fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue), + fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort), + fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), + } + + stdout_r, stdout_w := io.Pipe() + stderr_r, stderr_w := io.Pipe() + + cmd := c.config.Cmd + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, env...) + cmd.Stdin = os.Stdin + cmd.Stderr = stderr_w + cmd.Stdout = stdout_w + + log.Printf("[DEBUG] plugin: starting plugin: %s %#v", cmd.Path, cmd.Args) + err = cmd.Start() + if err != nil { + return + } + + // Set the process + c.process = cmd.Process + + // Make sure the command is properly cleaned up if there is an error + defer func() { + r := recover() + + if err != nil || r != nil { + cmd.Process.Kill() + } + + if r != nil { + panic(r) + } + }() + + // Start goroutine to wait for process to exit + exitCh := make(chan struct{}) + go func() { + // Make sure we close the write end of our stderr/stdout so + // that the readers send EOF properly. + defer stderr_w.Close() + defer stdout_w.Close() + + // Wait for the command to end. + cmd.Wait() + + // Log and make sure to flush the logs write away + log.Printf("[DEBUG] plugin: %s: plugin process exited\n", cmd.Path) + os.Stderr.Sync() + + // Mark that we exited + close(exitCh) + + // Set that we exited, which takes a lock + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }() + + // Start goroutine that logs the stderr + go c.logStderr(stderr_r) + + // Start a goroutine that is going to be reading the lines + // out of stdout + linesCh := make(chan []byte) + go func() { + defer close(linesCh) + + buf := bufio.NewReader(stdout_r) + for { + line, err := buf.ReadBytes('\n') + if line != nil { + linesCh <- line + } + + if err == io.EOF { + return + } + } + }() + + // Make sure after we exit we read the lines from stdout forever + // so they don't block since it is an io.Pipe + defer func() { + go func() { + for _ = range linesCh { + } + }() + }() + + // Some channels for the next step + timeout := time.After(c.config.StartTimeout) + + // Start looking for the address + log.Printf("[DEBUG] plugin: waiting for RPC address for: %s", cmd.Path) + select { + case <-timeout: + err = errors.New("timeout while waiting for plugin to start") + case <-exitCh: + err = errors.New("plugin exited before we could connect") + case lineBytes := <-linesCh: + // Trim the line and split by "|" in order to get the parts of + // the output. + line := strings.TrimSpace(string(lineBytes)) + parts := strings.SplitN(line, "|", 4) + if len(parts) < 4 { + err = fmt.Errorf( + "Unrecognized remote plugin message: %s\n\n"+ + "This usually means that the plugin is either invalid or simply\n"+ + "needs to be recompiled to support the latest protocol.", line) + return + } + + // Check the core protocol. Wrapped in a {} for scoping. 
+ { + var coreProtocol int64 + coreProtocol, err = strconv.ParseInt(parts[0], 10, 0) + if err != nil { + err = fmt.Errorf("Error parsing core protocol version: %s", err) + return + } + + if int(coreProtocol) != CoreProtocolVersion { + err = fmt.Errorf("Incompatible core API version with plugin. "+ + "Plugin version: %s, Ours: %d\n\n"+ + "To fix this, the plugin usually only needs to be recompiled.\n"+ + "Please report this to the plugin author.", parts[0], CoreProtocolVersion) + return + } + } + + // Parse the protocol version + var protocol int64 + protocol, err = strconv.ParseInt(parts[1], 10, 0) + if err != nil { + err = fmt.Errorf("Error parsing protocol version: %s", err) + return + } + + // Test the API version + if uint(protocol) != c.config.ProtocolVersion { + err = fmt.Errorf("Incompatible API version with plugin. "+ + "Plugin version: %s, Ours: %d", parts[1], c.config.ProtocolVersion) + return + } + + switch parts[2] { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", parts[3]) + case "unix": + addr, err = net.ResolveUnixAddr("unix", parts[3]) + default: + err = fmt.Errorf("Unknown address type: %s", parts[3]) + } + } + + c.address = addr + return +} + +// ReattachConfig returns the information that must be provided to NewClient +// to reattach to the plugin process that this client started. This is +// useful for plugins that detach from their parent process. +// +// If this returns nil then the process hasn't been started yet. Please +// call Start or Client before calling this. +func (c *Client) ReattachConfig() *ReattachConfig { + c.l.Lock() + defer c.l.Unlock() + + if c.address == nil { + return nil + } + + if c.config.Cmd != nil && c.config.Cmd.Process == nil { + return nil + } + + // If we connected via reattach, just return the information as-is + if c.config.Reattach != nil { + return c.config.Reattach + } + + return &ReattachConfig{ + Addr: c.address, + Pid: c.config.Cmd.Process.Pid, + } +} + +func (c *Client) logStderr(r io.Reader) { + bufR := bufio.NewReader(r) + for { + line, err := bufR.ReadString('\n') + if line != "" { + c.config.Stderr.Write([]byte(line)) + + line = strings.TrimRightFunc(line, unicode.IsSpace) + log.Printf("[DEBUG] plugin: %s: %s", filepath.Base(c.config.Cmd.Path), line) + } + + if err == io.EOF { + break + } + } + + // Flag that we've completed logging for others + close(c.doneLogging) +} diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go new file mode 100644 index 0000000000..d22c566ed5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/discover.go @@ -0,0 +1,28 @@ +package plugin + +import ( + "path/filepath" +) + +// Discover discovers plugins that are in a given directory. +// +// The directory doesn't need to be absolute. For example, "." will work fine. +// +// This currently assumes any file matching the glob is a plugin. +// In the future this may be smarter about checking that a file is +// executable and so on. 
+// +// TODO: test +func Discover(glob, dir string) ([]string, error) { + var err error + + // Make the directory absolute if it isn't already + if !filepath.IsAbs(dir) { + dir, err = filepath.Abs(dir) + if err != nil { + return nil, err + } + } + + return filepath.Glob(filepath.Join(dir, glob)) +} diff --git a/rpc/error.go b/vendor/github.com/hashicorp/go-plugin/error.go similarity index 83% rename from rpc/error.go rename to vendor/github.com/hashicorp/go-plugin/error.go index c3ab7b1a4c..22a7baa6a0 100644 --- a/rpc/error.go +++ b/vendor/github.com/hashicorp/go-plugin/error.go @@ -1,4 +1,4 @@ -package rpc +package plugin // This is a type that wraps error types so that they can be messaged // across RPC channels. Since "error" is an interface, we can't always @@ -8,6 +8,9 @@ type BasicError struct { Message string } +// NewBasicError is used to create a BasicError. +// +// err is allowed to be nil. func NewBasicError(err error) *BasicError { if err == nil { return nil diff --git a/rpc/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go similarity index 64% rename from rpc/mux_broker.go rename to vendor/github.com/hashicorp/go-plugin/mux_broker.go index 639902a825..01c45ad7c6 100644 --- a/rpc/mux_broker.go +++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go @@ -1,8 +1,9 @@ -package rpc +package plugin import ( "encoding/binary" "fmt" + "log" "net" "sync" "sync/atomic" @@ -11,12 +12,21 @@ import ( "github.com/hashicorp/yamux" ) -// muxBroker is responsible for brokering multiplexed connections by unique ID. +// MuxBroker is responsible for brokering multiplexed connections by unique ID. +// +// It is used by plugins to multiplex multiple RPC connections and data +// streams on top of a single connection between the plugin process and the +// host process. // // This allows a plugin to request a channel with a specific ID to connect to // or accept a connection from, and the broker handles the details of // holding these channels open while they're being negotiated. -type muxBroker struct { +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new multiplexed streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type MuxBroker struct { nextId uint32 session *yamux.Session streams map[uint32]*muxBrokerPending @@ -29,8 +39,8 @@ type muxBrokerPending struct { doneCh chan struct{} } -func newMuxBroker(s *yamux.Session) *muxBroker { - return &muxBroker{ +func newMuxBroker(s *yamux.Session) *MuxBroker { + return &MuxBroker{ session: s, streams: make(map[uint32]*muxBrokerPending), } @@ -39,7 +49,7 @@ func newMuxBroker(s *yamux.Session) *muxBroker { // Accept accepts a connection by ID. // // This should not be called multiple times with the same ID at one time. -func (m *muxBroker) Accept(id uint32) (net.Conn, error) { +func (m *MuxBroker) Accept(id uint32) (net.Conn, error) { var c net.Conn p := m.getStream(id) select { @@ -62,13 +72,28 @@ func (m *muxBroker) Accept(id uint32) (net.Conn, error) { return c, nil } +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve an RPC server on that stream ID. This is used to easily serve +// complex arguments. +// +// The served interface is always registered to the "Plugin" name. 
+func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) { + conn, err := m.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + + serve(conn, "Plugin", v) +} + // Close closes the connection and all sub-connections. -func (m *muxBroker) Close() error { +func (m *MuxBroker) Close() error { return m.session.Close() } // Dial opens a connection by ID. -func (m *muxBroker) Dial(id uint32) (net.Conn, error) { +func (m *MuxBroker) Dial(id uint32) (net.Conn, error) { // Open the stream stream, err := m.session.OpenStream() if err != nil { @@ -96,13 +121,20 @@ func (m *muxBroker) Dial(id uint32) (net.Conn, error) { } // NextId returns a unique ID to use next. -func (m *muxBroker) NextId() uint32 { +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of RPC calls. In practice +// we've never seen it happen. +func (m *MuxBroker) NextId() uint32 { return atomic.AddUint32(&m.nextId, 1) } // Run starts the brokering and should be executed in a goroutine, since it // blocks forever, or until the session closes. -func (m *muxBroker) Run() { +// +// Uses of MuxBroker never need to call this. It is called internally by +// the plugin host/client. +func (m *MuxBroker) Run() { for { stream, err := m.session.AcceptStream() if err != nil { @@ -129,7 +161,7 @@ func (m *muxBroker) Run() { } } -func (m *muxBroker) getStream(id uint32) *muxBrokerPending { +func (m *MuxBroker) getStream(id uint32) *muxBrokerPending { m.Lock() defer m.Unlock() @@ -145,7 +177,7 @@ func (m *muxBroker) getStream(id uint32) *muxBrokerPending { return m.streams[id] } -func (m *muxBroker) timeoutWait(id uint32, p *muxBrokerPending) { +func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) { // Wait for the stream to either be picked up and connected, or // for a timeout. timeout := false diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go new file mode 100644 index 0000000000..37c8fd653f --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/plugin.go @@ -0,0 +1,25 @@ +// The plugin package exposes functions and helpers for communicating to +// plugins which are implemented as standalone binary applications. +// +// plugin.Client fully manages the lifecycle of executing the application, +// connecting to it, and returning the RPC client for dispensing plugins. +// +// plugin.Serve fully manages listeners to expose an RPC server from a binary +// that plugin.Client can connect to. +package plugin + +import ( + "net/rpc" +) + +// Plugin is the interface that is implemented to serve/connect to an +// inteface implementation. +type Plugin interface { + // Server should return the RPC server compatible struct to serve + // the methods that the Client calls over net/rpc. + Server(*MuxBroker) (interface{}, error) + + // Client returns an interface implementation for the plugin you're + // serving that communicates to the server end of the plugin. + Client(*MuxBroker, *rpc.Client) (interface{}, error) +} diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go new file mode 100644 index 0000000000..88c999a580 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process.go @@ -0,0 +1,24 @@ +package plugin + +import ( + "time" +) + +// pidAlive checks whether a pid is alive. +func pidAlive(pid int) bool { + return _pidAlive(pid) +} + +// pidWait blocks for a process to exit. 
+func pidWait(pid int) error { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for range ticker.C { + if !pidAlive(pid) { + break + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go new file mode 100644 index 0000000000..70ba546bf6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go @@ -0,0 +1,19 @@ +// +build !windows + +package plugin + +import ( + "os" + "syscall" +) + +// _pidAlive tests whether a process is alive or not by sending it Signal 0, +// since Go otherwise has no way to test this. +func _pidAlive(pid int) bool { + proc, err := os.FindProcess(pid) + if err == nil { + err = proc.Signal(syscall.Signal(0)) + } + + return err == nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go new file mode 100644 index 0000000000..9f7b018090 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_windows.go @@ -0,0 +1,29 @@ +package plugin + +import ( + "syscall" +) + +const ( + // Weird name but matches the MSDN docs + exit_STILL_ACTIVE = 259 + + processDesiredAccess = syscall.STANDARD_RIGHTS_READ | + syscall.PROCESS_QUERY_INFORMATION | + syscall.SYNCHRONIZE +) + +// _pidAlive tests whether a process is alive or not +func _pidAlive(pid int) bool { + h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid)) + if err != nil { + return false + } + + var ec uint32 + if e := syscall.GetExitCodeProcess(h, &ec); e != nil { + return false + } + + return ec == exit_STILL_ACTIVE +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go new file mode 100644 index 0000000000..e6d613bc61 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -0,0 +1,110 @@ +package plugin + +import ( + "fmt" + "io" + "net" + "net/rpc" + + "github.com/hashicorp/yamux" +) + +// RPCClient connects to an RPCServer over net/rpc to dispense plugin types. +type RPCClient struct { + broker *MuxBroker + control *rpc.Client + plugins map[string]Plugin + + // These are the streams used for the various stdout/err overrides + stdout, stderr net.Conn +} + +// NewRPCClient creates a client from an already-open connection-like value. +// Dial is typically used instead. +func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { + // Create the yamux client so we can multiplex + mux, err := yamux.Client(conn, nil) + if err != nil { + conn.Close() + return nil, err + } + + // Connect to the control stream. + control, err := mux.Open() + if err != nil { + mux.Close() + return nil, err + } + + // Connect stdout, stderr streams + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Open() + if err != nil { + mux.Close() + return nil, err + } + } + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Build the client using our broker and control channel. + return &RPCClient{ + broker: broker, + control: rpc.NewClient(control), + plugins: plugins, + stdout: stdstream[0], + stderr: stdstream[1], + }, nil +} + +// SyncStreams should be called to enable syncing of stdout, +// stderr with the plugin. +// +// This will return immediately and the syncing will continue to happen +// in the background. You do not need to launch this in a goroutine itself. +// +// This should never be called multiple times. 
+func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error { + go copyStream("stdout", stdout, c.stdout) + go copyStream("stderr", stderr, c.stderr) + return nil +} + +// Close closes the connection. The client is no longer usable after this +// is called. +func (c *RPCClient) Close() error { + if err := c.control.Close(); err != nil { + return err + } + if err := c.stdout.Close(); err != nil { + return err + } + if err := c.stderr.Close(); err != nil { + return err + } + + return c.broker.Close() +} + +func (c *RPCClient) Dispense(name string) (interface{}, error) { + p, ok := c.plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + var id uint32 + if err := c.control.Call( + "Dispenser.Dispense", name, &id); err != nil { + return nil, err + } + + conn, err := c.broker.Dial(id) + if err != nil { + return nil, err + } + + return p.Client(c.broker, rpc.NewClient(conn)) +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go new file mode 100644 index 0000000000..714b047dc5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -0,0 +1,143 @@ +package plugin + +import ( + "errors" + "fmt" + "io" + "log" + "net" + "net/rpc" + + "github.com/hashicorp/yamux" +) + +// RPCServer listens for network connections and then dispenses interface +// implementations over net/rpc. +type RPCServer struct { + Plugins map[string]Plugin + + // Stdout, Stderr are what this server will use instead of the + // normal stdin/out/err. This is because due to the multi-process nature + // of our plugin system, we can't use the normal process values so we + // make our own custom one we pipe across. + Stdout io.Reader + Stderr io.Reader +} + +// Accept accepts connections on a listener and serves requests for +// each incoming connection. Accept blocks; the caller typically invokes +// it in a go statement. +func (s *RPCServer) Accept(lis net.Listener) { + for { + conn, err := lis.Accept() + if err != nil { + log.Printf("[ERR] plugin: plugin server: %s", err) + return + } + + go s.ServeConn(conn) + } +} + +// ServeConn runs a single connection. +// +// ServeConn blocks, serving the connection until the client hangs up. +func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) { + // First create the yamux server to wrap this connection + mux, err := yamux.Server(conn, nil) + if err != nil { + conn.Close() + log.Printf("[ERR] plugin: error creating yamux server: %s", err) + return + } + + // Accept the control connection + control, err := mux.Accept() + if err != nil { + mux.Close() + if err != io.EOF { + log.Printf("[ERR] plugin: error accepting control connection: %s", err) + } + + return + } + + // Connect the stdstreams (in, out, err) + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Accept() + if err != nil { + mux.Close() + log.Printf("[ERR] plugin: accepting stream %d: %s", i, err) + return + } + } + + // Copy std streams out to the proper place + go copyStream("stdout", stdstream[0], s.Stdout) + go copyStream("stderr", stdstream[1], s.Stderr) + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Use the control connection to build the dispenser and serve the + // connection. 
+ server := rpc.NewServer() + server.RegisterName("Dispenser", &dispenseServer{ + broker: broker, + plugins: s.Plugins, + }) + server.ServeConn(control) +} + +// dispenseServer dispenses variousinterface implementations for Terraform. +type dispenseServer struct { + broker *MuxBroker + plugins map[string]Plugin +} + +func (d *dispenseServer) Dispense( + name string, response *uint32) error { + // Find the function to create this implementation + p, ok := d.plugins[name] + if !ok { + return fmt.Errorf("unknown plugin type: %s", name) + } + + // Create the implementation first so we know if there is an error. + impl, err := p.Server(d.broker) + if err != nil { + // We turn the error into an errors error so that it works across RPC + return errors.New(err.Error()) + } + + // Reserve an ID for our implementation + id := d.broker.NextId() + *response = id + + // Run the rest in a goroutine since it can only happen once this RPC + // call returns. We wait for a connection for the plugin implementation + // and serve it. + go func() { + conn, err := d.broker.Accept(id) + if err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err) + return + } + + serve(conn, "Plugin", impl) + }() + + return nil +} + +func serve(conn io.ReadWriteCloser, name string, v interface{}) { + server := rpc.NewServer() + if err := server.RegisterName(name, v); err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s", err) + return + } + + server.ServeConn(conn) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go new file mode 100644 index 0000000000..4e3a552c51 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -0,0 +1,187 @@ +package plugin + +import ( + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "os/signal" + "runtime" + "strconv" + "sync/atomic" +) + +// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. +// We will increment this whenever we change any protocol behavior. This +// will invalidate any prior plugins but will at least allow us to iterate +// on the core in a safe way. We will do our best to do this very +// infrequently. +const CoreProtocolVersion = 1 + +// HandshakeConfig is the configuration used by client and servers to +// handshake before starting a plugin connection. This is embedded by +// both ServeConfig and ClientConfig. +// +// In practice, the plugin host creates a HandshakeConfig that is exported +// and plugins then can easily consume it. +type HandshakeConfig struct { + // ProtocolVersion is the version that clients must match on to + // agree they can communicate. This should match the ProtocolVersion + // set on ClientConfig when using a plugin. + ProtocolVersion uint + + // MagicCookieKey and value are used as a very basic verification + // that a plugin is intended to be launched. This is not a security + // measure, just a UX feature. If the magic cookie doesn't match, + // we show human-friendly output. + MagicCookieKey string + MagicCookieValue string +} + +// ServeConfig configures what sorts of plugins are served. +type ServeConfig struct { + // HandshakeConfig is the configuration that must match clients. + HandshakeConfig + + // Plugins are the plugins that are served. + Plugins map[string]Plugin +} + +// Serve serves the plugins given by ServeConfig. +// +// Serve doesn't return until the plugin is done being executed. Any +// errors will be outputted to the log. 
+// +// This is the method that plugins should call in their main() functions. +func Serve(opts *ServeConfig) { + // Validate the handshake config + if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { + fmt.Fprintf(os.Stderr, + "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ + "key or value was set. Please notify the plugin author and report\n"+ + "this as a bug.\n") + os.Exit(1) + } + + // First check the cookie + if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { + fmt.Fprintf(os.Stderr, + "This binary is a plugin. These are not meant to be executed directly.\n"+ + "Please execute the program that consumes these plugins, which will\n"+ + "load any plugins automatically\n") + os.Exit(1) + } + + // Logging goes to the original stderr + log.SetOutput(os.Stderr) + + // Create our new stdout, stderr files. These will override our built-in + // stdout/stderr so that it works across the stream boundary. + stdout_r, stdout_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + stderr_r, stderr_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + + // Register a listener so we can accept a connection + listener, err := serverListener() + if err != nil { + log.Printf("[ERR] plugin: plugin init: %s", err) + return + } + defer listener.Close() + + // Create the RPC server to dispense + server := &RPCServer{ + Plugins: opts.Plugins, + Stdout: stdout_r, + Stderr: stderr_r, + } + + // Output the address and service name to stdout so that core can bring it up. + log.Printf("[DEBUG] plugin: plugin address: %s %s\n", + listener.Addr().Network(), listener.Addr().String()) + fmt.Printf("%d|%d|%s|%s\n", + CoreProtocolVersion, + opts.ProtocolVersion, + listener.Addr().Network(), + listener.Addr().String()) + os.Stdout.Sync() + + // Eat the interrupts + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + go func() { + var count int32 = 0 + for { + <-ch + newCount := atomic.AddInt32(&count, 1) + log.Printf( + "[DEBUG] plugin: received interrupt signal (count: %d). Ignoring.", + newCount) + } + }() + + // Set our new out, err + os.Stdout = stdout_w + os.Stderr = stderr_w + + // Serve + server.Accept(listener) +} + +func serverListener() (net.Listener, error) { + if runtime.GOOS == "windows" { + return serverListener_tcp() + } + + return serverListener_unix() +} + +func serverListener_tcp() (net.Listener, error) { + minPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MIN_PORT"), 10, 32) + if err != nil { + return nil, err + } + + maxPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MAX_PORT"), 10, 32) + if err != nil { + return nil, err + } + + for port := minPort; port <= maxPort; port++ { + address := fmt.Sprintf("127.0.0.1:%d", port) + listener, err := net.Listen("tcp", address) + if err == nil { + return listener, nil + } + } + + return nil, errors.New("Couldn't bind plugin TCP listener") +} + +func serverListener_unix() (net.Listener, error) { + tf, err := ioutil.TempFile("", "plugin") + if err != nil { + return nil, err + } + path := tf.Name() + + // Close the file and remove it because it has to not exist for + // the domain socket. 
+ if err := tf.Close(); err != nil { + return nil, err + } + if err := os.Remove(path); err != nil { + return nil, err + } + + return net.Listen("unix", path) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go new file mode 100644 index 0000000000..033079ea0f --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go @@ -0,0 +1,31 @@ +package plugin + +import ( + "fmt" + "os" +) + +// ServeMuxMap is the type that is used to configure ServeMux +type ServeMuxMap map[string]*ServeConfig + +// ServeMux is like Serve, but serves multiple types of plugins determined +// by the argument given on the command-line. +// +// This command doesn't return until the plugin is done being executed. Any +// errors are logged or output to stderr. +func ServeMux(m ServeMuxMap) { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, + "Invoked improperly. This is an internal command that shouldn't\n"+ + "be manually invoked.\n") + os.Exit(1) + } + + opts, ok := m[os.Args[1]] + if !ok { + fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1]) + os.Exit(1) + } + + Serve(opts) +} diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go new file mode 100644 index 0000000000..1d547aaaab --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/stream.go @@ -0,0 +1,18 @@ +package plugin + +import ( + "io" + "log" +) + +func copyStream(name string, dst io.Writer, src io.Reader) { + if src == nil { + panic(name + ": src is nil") + } + if dst == nil { + panic(name + ": dst is nil") + } + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err) + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go new file mode 100644 index 0000000000..9086a1b45f --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -0,0 +1,76 @@ +package plugin + +import ( + "bytes" + "net" + "net/rpc" + "testing" +) + +// The testing file contains test helpers that you can use outside of +// this package for making it easier to test plugins themselves. + +// TestConn is a helper function for returning a client and server +// net.Conn connected to each other. +func TestConn(t *testing.T) (net.Conn, net.Conn) { + // Listen to any local port. This listener will be closed + // after a single connection is established. + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start a goroutine to accept our client connection + var serverConn net.Conn + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + defer l.Close() + var err error + serverConn, err = l.Accept() + if err != nil { + t.Fatalf("err: %s", err) + } + }() + + // Connect to the server + clientConn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Wait for the server side to acknowledge it has connected + <-doneCh + + return clientConn, serverConn +} + +// TestRPCConn returns a rpc client and server connected to each other. +func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) { + clientConn, serverConn := TestConn(t) + + server := rpc.NewServer() + go server.ServeConn(serverConn) + + client := rpc.NewClient(clientConn) + return client, server +} + +// TestPluginRPCConn returns a plugin RPC client and server that are connected +// together and configured. 
+func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) { + // Create two net.Conns we can use to shuttle our control connection + clientConn, serverConn := TestConn(t) + + // Start up the server + server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} + go server.ServeConn(serverConn) + + // Connect the client to the server + client, err := NewRPCClient(clientConn, ps) + if err != nil { + t.Fatalf("err: %s", err) + } + + return client, server +} diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go index c52e2f3054..58fd9892a1 100644 --- a/vendor/github.com/hashicorp/hil/convert.go +++ b/vendor/github.com/hashicorp/hil/convert.go @@ -16,23 +16,6 @@ func InterfaceToVariable(input interface{}) (ast.Variable, error) { }, nil } - var sliceVal []interface{} - if err := mapstructure.WeakDecode(input, &sliceVal); err == nil { - elements := make([]ast.Variable, len(sliceVal)) - for i, element := range sliceVal { - varElement, err := InterfaceToVariable(element) - if err != nil { - return ast.Variable{}, err - } - elements[i] = varElement - } - - return ast.Variable{ - Type: ast.TypeList, - Value: elements, - }, nil - } - var mapVal map[string]interface{} if err := mapstructure.WeakDecode(input, &mapVal); err == nil { elements := make(map[string]ast.Variable) @@ -50,5 +33,22 @@ func InterfaceToVariable(input interface{}) (ast.Variable, error) { }, nil } + var sliceVal []interface{} + if err := mapstructure.WeakDecode(input, &sliceVal); err == nil { + elements := make([]ast.Variable, len(sliceVal)) + for i, element := range sliceVal { + varElement, err := InterfaceToVariable(element) + if err != nil { + return ast.Variable{}, err + } + elements[i] = varElement + } + + return ast.Variable{ + Type: ast.TypeList, + Value: elements, + }, nil + } + return ast.Variable{}, fmt.Errorf("value for conversion must be a string, interface{} or map[string]interface: got %T", input) } diff --git a/vendor/github.com/mitchellh/cli/cli.go b/vendor/github.com/mitchellh/cli/cli.go index e871e6136f..81b14f1fcf 100644 --- a/vendor/github.com/mitchellh/cli/cli.go +++ b/vendor/github.com/mitchellh/cli/cli.go @@ -122,20 +122,11 @@ func (c *CLI) Run() (int, error) { return 1, nil } - // If there is an invalid flag, then error - if len(c.topFlags) > 0 { - c.HelpWriter.Write([]byte( - "Invalid flags before the subcommand. If these flags are for\n" + - "the subcommand, please put them after the subcommand.\n\n")) - c.HelpWriter.Write([]byte(c.HelpFunc(c.Commands) + "\n")) - return 1, nil - } - // Attempt to get the factory function for creating the command // implementation. If the command is invalid or blank, it is an error. raw, ok := c.commandTree.Get(c.Subcommand()) if !ok { - c.HelpWriter.Write([]byte(c.HelpFunc(c.Commands) + "\n")) + c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.subcommandParent())) + "\n")) return 1, nil } @@ -150,6 +141,15 @@ func (c *CLI) Run() (int, error) { return 1, nil } + // If there is an invalid flag, then error + if len(c.topFlags) > 0 { + c.HelpWriter.Write([]byte( + "Invalid flags before the subcommand. 
If these flags are for\n" + + "the subcommand, please put them after the subcommand.\n\n")) + c.commandHelp(command) + return 1, nil + } + code := command.Run(c.SubcommandArgs()) if code == RunResultHelp { // Requesting help @@ -175,6 +175,27 @@ func (c *CLI) SubcommandArgs() []string { return c.subcommandArgs } +// subcommandParent returns the parent of this subcommand, if there is one. +// If there isn't on, "" is returned. +func (c *CLI) subcommandParent() string { + // Get the subcommand, if it is "" alread just return + sub := c.Subcommand() + if sub == "" { + return sub + } + + // Clear any trailing spaces and find the last space + sub = strings.TrimRight(sub, " ") + idx := strings.LastIndex(sub, " ") + + if idx == -1 { + // No space means our parent is root + return "" + } + + return sub[:idx] +} + func (c *CLI) init() { if c.HelpFunc == nil { c.HelpFunc = BasicHelpFunc("app") @@ -268,15 +289,14 @@ func (c *CLI) commandHelp(command Command) { } // Build subcommand list if we have it - var subcommands []map[string]interface{} + var subcommandsTpl []map[string]interface{} if c.commandNested { // Get the matching keys - var keys []string - prefix := c.Subcommand() + " " - c.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool { + subcommands := c.helpCommands(c.Subcommand()) + keys := make([]string, 0, len(subcommands)) + for k, _ := range subcommands { keys = append(keys, k) - return false - }) + } // Sort the keys sort.Strings(keys) @@ -290,34 +310,30 @@ func (c *CLI) commandHelp(command Command) { } // Go through and create their structures - subcommands = make([]map[string]interface{}, len(keys)) - for i, k := range keys { - raw, ok := c.commandTree.Get(k) - if !ok { - // We just checked that it should be here above. If it is - // isn't, there are serious problems. - panic("value is missing") - } - + subcommandsTpl = make([]map[string]interface{}, 0, len(subcommands)) + for k, raw := range subcommands { // Get the command - sub, err := raw.(CommandFactory)() + sub, err := raw() if err != nil { c.HelpWriter.Write([]byte(fmt.Sprintf( "Error instantiating %q: %s", k, err))) } - // Determine some info - name := strings.TrimPrefix(k, prefix) + // Find the last space and make sure we only include that last part + name := k + if idx := strings.LastIndex(k, " "); idx > -1 { + name = name[idx+1:] + } - subcommands[i] = map[string]interface{}{ + subcommandsTpl = append(subcommandsTpl, map[string]interface{}{ "Name": name, "NameAligned": name + strings.Repeat(" ", longest-len(k)), "Help": sub.Help(), "Synopsis": sub.Synopsis(), - } + }) } } - data["Subcommands"] = subcommands + data["Subcommands"] = subcommandsTpl // Write err = t.Execute(c.HelpWriter, data) @@ -330,6 +346,40 @@ func (c *CLI) commandHelp(command Command) { "Internal error rendering help: %s", err))) } +// helpCommands returns the subcommands for the HelpFunc argument. +// This will only contain immediate subcommands. +func (c *CLI) helpCommands(prefix string) map[string]CommandFactory { + // If our prefix isn't empty, make sure it ends in ' ' + if prefix != "" && prefix[len(prefix)-1] != ' ' { + prefix += " " + } + + // Get all the subkeys of this command + var keys []string + c.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool { + // Ignore any sub-sub keys, i.e. 
"foo bar baz" when we want "foo bar" + if !strings.Contains(k[len(prefix):], " ") { + keys = append(keys, k) + } + + return false + }) + + // For each of the keys return that in the map + result := make(map[string]CommandFactory, len(keys)) + for _, k := range keys { + raw, ok := c.commandTree.Get(k) + if !ok { + // We just got it via WalkPrefix above, so we just panic + panic("not found: " + k) + } + + result[k] = raw.(CommandFactory) + } + + return result +} + func (c *CLI) processArgs() { for i, arg := range c.Args { if c.subcommand == "" { diff --git a/vendor/github.com/ryanuber/columnize/.travis.yml b/vendor/github.com/ryanuber/columnize/.travis.yml new file mode 100644 index 0000000000..1a0bbea6c7 --- /dev/null +++ b/vendor/github.com/ryanuber/columnize/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: + - tip diff --git a/vendor/github.com/ryanuber/columnize/COPYING b/vendor/github.com/ryanuber/columnize/COPYING new file mode 100644 index 0000000000..86f4501489 --- /dev/null +++ b/vendor/github.com/ryanuber/columnize/COPYING @@ -0,0 +1,20 @@ +MIT LICENSE + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/ryanuber/columnize/README.md b/vendor/github.com/ryanuber/columnize/README.md new file mode 100644 index 0000000000..6852911fcc --- /dev/null +++ b/vendor/github.com/ryanuber/columnize/README.md @@ -0,0 +1,75 @@ +Columnize +========= + +Easy column-formatted output for golang + +[![Build Status](https://travis-ci.org/ryanuber/columnize.svg)](https://travis-ci.org/ryanuber/columnize) + +Columnize is a really small Go package that makes building CLI's a little bit +easier. In some CLI designs, you want to output a number similar items in a +human-readable way with nicely aligned columns. However, figuring out how wide +to make each column is a boring problem to solve and eats your valuable time. + +Here is an example: + +```go +package main + +import ( + "fmt" + "github.com/ryanuber/columnize" +) + +func main() { + output := []string{ + "Name | Gender | Age", + "Bob | Male | 38", + "Sally | Female | 26", + } + result := columnize.SimpleFormat(output) + fmt.Println(result) +} +``` + +As you can see, you just pass in a list of strings. And the result: + +``` +Name Gender Age +Bob Male 38 +Sally Female 26 +``` + +Columnize is tolerant of missing or empty fields, or even empty lines, so +passing in extra lines for spacing should show up as you would expect. 
+ +Configuration +============= + +Columnize is configured using a `Config`, which can be obtained by calling the +`DefaultConfig()` method. You can then tweak the settings in the resulting +`Config`: + +``` +config := columnize.DefaultConfig() +config.Delim = "|" +config.Glue = " " +config.Prefix = "" +config.Empty = "" +``` + +* `Delim` is the string by which columns of **input** are delimited +* `Glue` is the string by which columns of **output** are delimited +* `Prefix` is a string by which each line of **output** is prefixed +* `Empty` is a string used to replace blank values found in output + +You can then pass the `Config` in using the `Format` method (signature below) to +have text formatted to your liking. + +Usage +===== + +```go +SimpleFormat(intput []string) string + +Format(input []string, config *Config) string +``` diff --git a/vendor/github.com/ryanuber/columnize/columnize.go b/vendor/github.com/ryanuber/columnize/columnize.go new file mode 100644 index 0000000000..d87785940c --- /dev/null +++ b/vendor/github.com/ryanuber/columnize/columnize.go @@ -0,0 +1,134 @@ +package columnize + +import ( + "fmt" + "strings" +) + +type Config struct { + // The string by which the lines of input will be split. + Delim string + + // The string by which columns of output will be separated. + Glue string + + // The string by which columns of output will be prefixed. + Prefix string + + // A replacement string to replace empty fields + Empty string +} + +// Returns a Config with default values. +func DefaultConfig() *Config { + return &Config{ + Delim: "|", + Glue: " ", + Prefix: "", + } +} + +// Returns a list of elements, each representing a single item which will +// belong to a column of output. +func getElementsFromLine(config *Config, line string) []interface{} { + elements := make([]interface{}, 0) + for _, field := range strings.Split(line, config.Delim) { + value := strings.TrimSpace(field) + if value == "" && config.Empty != "" { + value = config.Empty + } + elements = append(elements, value) + } + return elements +} + +// Examines a list of strings and determines how wide each column should be +// considering all of the elements that need to be printed within it. +func getWidthsFromLines(config *Config, lines []string) []int { + var widths []int + + for _, line := range lines { + elems := getElementsFromLine(config, line) + for i := 0; i < len(elems); i++ { + l := len(elems[i].(string)) + if len(widths) <= i { + widths = append(widths, l) + } else if widths[i] < l { + widths[i] = l + } + } + } + return widths +} + +// Given a set of column widths and the number of columns in the current line, +// returns a sprintf-style format string which can be used to print output +// aligned properly with other lines using the same widths set. +func (c *Config) getStringFormat(widths []int, columns int) string { + // Start with the prefix, if any was given. + stringfmt := c.Prefix + + // Create the format string from the discovered widths + for i := 0; i < columns && i < len(widths); i++ { + if i == columns-1 { + stringfmt += "%s\n" + } else { + stringfmt += fmt.Sprintf("%%-%ds%s", widths[i], c.Glue) + } + } + return stringfmt +} + +// MergeConfig merges two config objects together and returns the resulting +// configuration. Values from the right take precedence over the left side. 
+func MergeConfig(a, b *Config) *Config { + var result Config = *a + + // Return quickly if either side was nil + if a == nil || b == nil { + return &result + } + + if b.Delim != "" { + result.Delim = b.Delim + } + if b.Glue != "" { + result.Glue = b.Glue + } + if b.Prefix != "" { + result.Prefix = b.Prefix + } + if b.Empty != "" { + result.Empty = b.Empty + } + + return &result +} + +// Format is the public-facing interface that takes either a plain string +// or a list of strings and returns nicely aligned output. +func Format(lines []string, config *Config) string { + var result string + + conf := MergeConfig(DefaultConfig(), config) + widths := getWidthsFromLines(conf, lines) + + // Create the formatted output using the format string + for _, line := range lines { + elems := getElementsFromLine(conf, line) + stringfmt := conf.getStringFormat(widths, len(elems)) + result += fmt.Sprintf(stringfmt, elems...) + } + + // Remove trailing newline without removing leading/trailing space + if n := len(result); n > 0 && result[n-1] == '\n' { + result = result[:n-1] + } + + return result +} + +// Convenience function for using Columnize as easy as possible. +func SimpleFormat(lines []string) string { + return Format(lines, nil) +} diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss index 37c7f18163..9c7afec524 100755 --- a/website/source/assets/stylesheets/_docs.scss +++ b/website/source/assets/stylesheets/_docs.scss @@ -6,6 +6,7 @@ body.page-sub{ background-color: $light-black; } +body.layout-commands-state, body.layout-atlas, body.layout-aws, body.layout-azure, diff --git a/website/source/docs/commands/index.html.markdown b/website/source/docs/commands/index.html.markdown index f6a9d9118b..b02bef75d3 100644 --- a/website/source/docs/commands/index.html.markdown +++ b/website/source/docs/commands/index.html.markdown @@ -55,4 +55,3 @@ Usage: terraform graph [options] PATH read this format is GraphViz, but many web services are also available to read this format. ``` - diff --git a/website/source/docs/commands/state/addressing.html.md b/website/source/docs/commands/state/addressing.html.md new file mode 100644 index 0000000000..0b1dee2bc5 --- /dev/null +++ b/website/source/docs/commands/state/addressing.html.md @@ -0,0 +1,20 @@ +--- +layout: "commands-state" +page_title: "Command: state resource addressing" +sidebar_current: "docs-state-address" +description: |- + The `terraform state` command is used for advanced state management. +--- + +# Resource Addressing + +The `terraform state` subcommands make heavy use of resource addressing +for targeting and filtering specific resources and modules within the state. + +Resource addressing is a common feature of Terraform that is used in +multiple locations. For example, resource addressing syntax is also used for +the `-target` flag for apply and plan commands. + +Because resource addressing is unified across Terraform, it is documented +in a single place rather than duplicating it in multiple locations. You +can find the [resource addressing documentation here](/docs/internals/resource-addressing.html). 
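+
+For orientation, these are the kinds of addresses the `terraform state`
+subcommands accept (taken from the `state list` and `state show` examples
+elsewhere in these docs):
+
+```
+aws_instance.foo
+aws_instance.bar[0]
+module.elb.aws_elb.main
+module.foo.packet_device.worker[0]
+```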
diff --git a/website/source/docs/commands/state/index.html.md b/website/source/docs/commands/state/index.html.md new file mode 100644 index 0000000000..5edd084c09 --- /dev/null +++ b/website/source/docs/commands/state/index.html.md @@ -0,0 +1,54 @@ +--- +layout: "commands-state" +page_title: "Command: state" +sidebar_current: "docs-state-index" +description: |- + The `terraform state` command is used for advanced state management. +--- + +# State Command + +The `terraform state` command is used for advanced state management. +As your Terraform usage becomes more advanced, there are some cases where +you may need to modify the [Terraform state](/docs/state/index.html). +Rather than modify the state directly, the `terraform state` commands can +be used in many cases instead. + +This command is a nested subcommand, meaning that it has further subcommands. +These subcommands are listed to the left. + +## Usage + +Usage: `terraform state [options] [args]` + +Please click a subcommand to the left for more information. + +## Remote State + +The Terraform state subcommands all work with remote state just as if it +was local state. Reads and writes may take longer than normal as each read +and each write do a full network roundtrip. Otherwise, backups are still +written to disk and the CLI usage is the same as if it were local state. + +## Backups + +All `terraform state` subcommands that modify the state write backup +files. The path of these backup file can be controlled with `-backup`. + +Subcommands that are read-only (such as [list](/docs/commands/state/list.html)) +do not write any backup files since they aren't modifying the state. + +Note that backups for state modification _can not be disabled_. Due to +the sensitivity of the state file, Terraform forces every state modification +command to write a backup file. You'll have to remove these files manually +if you don't want to keep them around. + +## Command-Line Friendly + +The output and command-line structure of the state subcommands is +designed to be easy to use with Unix command-line tools such as grep, awk, +etc. Consequently, the output is also friendly to the equivalent PowerShell +commands within Windows. + +For advanced filtering and modification, we recommend piping Terraform +state subcommands together with other command line tools. diff --git a/website/source/docs/commands/state/list.html.md b/website/source/docs/commands/state/list.html.md new file mode 100644 index 0000000000..e4350d4830 --- /dev/null +++ b/website/source/docs/commands/state/list.html.md @@ -0,0 +1,63 @@ +--- +layout: "commands-state" +page_title: "Command: state list" +sidebar_current: "docs-state-sub-list" +description: |- + The terraform state list command is used to list resources within a Terraform state. +--- + +# Command: state list + +The `terraform state list` command is used to list resources within a +[Terraform state](/docs/state/index.html). + +## Usage + +Usage: `terraform state list [options] [address...]` + +The command will list all resources in the state file matching the given +addresses (if any). If no addresses are given, all resources are listed. + +The resources listed are sorted according to module depth order followed +by alphabetical. This means that resources that are in your immediate +configuration are listed first, and resources that are more deeply nested +within modules are listed last. + +For complex infrastructures, the state can contain thousands of resources. +To filter these, provide one or more patterns to the command. 
+in [resource addressing format](/docs/commands/state/addressing.html).
+
+The command-line flags are all optional. The list of available flags is:
+
+* `-state=path` - Path to the state file. Defaults to "terraform.tfstate".
+
+## Example: All Resources
+
+This example will list all resources, including modules:
+
+```
+$ terraform state list
+aws_instance.foo
+aws_instance.bar[0]
+aws_instance.bar[1]
+module.elb.aws_elb.main
+```
+
+## Example: Filtering by Resource
+
+This example will only list resources for the given name:
+
+```
+$ terraform state list aws_instance.bar
+aws_instance.bar[0]
+aws_instance.bar[1]
+```
+
+## Example: Filtering by Module
+
+This example will only list resources in the given module:
+
+```
+$ terraform state list module.elb
+module.elb.aws_elb.main
+```
diff --git a/website/source/docs/commands/state/show.html.md b/website/source/docs/commands/state/show.html.md
new file mode 100644
index 0000000000..c55b90018b
--- /dev/null
+++ b/website/source/docs/commands/state/show.html.md
@@ -0,0 +1,47 @@
+---
+layout: "commands-state"
+page_title: "Command: state show"
+sidebar_current: "docs-state-sub-show"
+description: |-
+  The `terraform state show` command is used to show the attributes of a single resource in the Terraform state.
+---
+
+# Command: state show
+
+The `terraform state show` command is used to show the attributes of a
+single resource in the
+[Terraform state](/docs/state/index.html).
+
+## Usage
+
+Usage: `terraform state show [options] ADDRESS`
+
+The command will show the attributes of a single resource in the
+state file that matches the given address.
+
+The attributes are listed in alphabetical order (with the exception of "id",
+which is always at the top). They are output in a way that is easy
+to parse on the command line.
+
+This command requires an address that points to a single resource in the
+state. Addresses are
+in [resource addressing format](/docs/commands/state/addressing.html).
+
+The command-line flags are all optional. The list of available flags is:
+
+* `-state=path` - Path to the state file. Defaults to "terraform.tfstate".
+
+## Example: Show a Resource
+
+The example below shows a resource:
+
+```
+$ terraform state show module.foo.packet_device.worker[0]
+id = 6015bg2b-b8c4-4925-aad2-f0671d5d3b13
+billing_cycle = hourly
+created = 2015-12-17T00:06:56Z
+facility = ewr1
+hostname = prod-xyz01
+locked = false
+...
+```
diff --git a/website/source/docs/internals/internal-plugins.html.md b/website/source/docs/internals/internal-plugins.html.md
new file mode 100644
index 0000000000..bb99f80949
--- /dev/null
+++ b/website/source/docs/internals/internal-plugins.html.md
@@ -0,0 +1,75 @@
+---
+layout: "docs"
+page_title: "Internal Plugins"
+sidebar_current: "docs-internals-plugins"
+description: |-
+  Terraform includes many popular plugins compiled into the main binary.
+---
+
+# Internal Plugins
+
+Terraform providers and provisioners are provided via plugins. Each plugin provides an implementation for a specific service (such as AWS) or provisioner (such as bash). Plugins are executed as a separate process and communicate with the main Terraform binary over an RPC interface.
+
+## Upgrading From Versions Earlier Than 0.7
+
+In versions of Terraform prior to 0.7, each plugin shipped as a separate binary. In versions of Terraform >= 0.7, all of the official plugins are shipped as a single binary. This saves a lot of disk space and makes downloads faster for you!
+
+However, when you upgrade you will need to manually delete the old plugins from disk. You can do this with a command like the following, depending on where you installed `terraform`:
+
+    rm /usr/local/bin/terraform-*
+
+If you don't do this you will see an error message like the following:
+
+```
+[WARN] /usr/local/bin/terraform-provisioner-file overrides an internal plugin for file-provisioner.
+  If you did not expect to see this message you will need to remove the old plugin.
+  See https://www.terraform.io/docs/internals/plugins.html
+Error configuring: 2 error(s) occurred:
+
+* Unrecognized remote plugin message: 2|unix|/var/folders/pj/66q7ztvd17v_vgfg8c99gm1m0000gn/T/tf-plugin604337945
+
+This usually means that the plugin is either invalid or simply
+needs to be recompiled to support the latest protocol.
+* Unrecognized remote plugin message: 2|unix|/var/folders/pj/66q7ztvd17v_vgfg8c99gm1m0000gn/T/tf-plugin647987867
+
+This usually means that the plugin is either invalid or simply
+needs to be recompiled to support the latest protocol.
+```
+
+## Why Does This Happen?
+
+In previous versions of Terraform, all of the plugins were included in a zip file. For example, when you upgraded from 0.6.12 to 0.6.15, the newer version of each plugin file would have replaced the older one on disk, and you would have ended up with the latest version of each plugin.
+
+Going forward there is only one file in the distribution, so you will need to perform a one-time cleanup when upgrading from Terraform < 0.7 to Terraform 0.7 or higher.
+
+If you're curious about the low-level details, keep reading!
+
+## Go Plugin Architecture
+
+Terraform is written in the Go programming language. One of Go's interesting properties is that it produces statically-compiled binaries. This means that it does not need to find libraries on your computer to run, and in general only needs to be compatible with your operating system (to make system calls) and with your CPU architecture (so the assembly instructions match the CPU you're running on).
+
+Another property of Go is that it does not support dynamic libraries. It _only_ supports static binaries. This is part of Go's overall design and is the reason it produces statically-compiled binaries in the first place -- once you have a Go binary for your platform it should _Just Work_.
+
+In other languages, plugins are built using dynamic libraries. Since this is not an option for us in Go, we use a network RPC interface instead. This means that each plugin is an independent program, and instead of communicating via shared memory, the main process communicates with the plugin process over HTTP. When you start Terraform, it identifies the plugin you want to use, finds it on disk, runs the other binary, and does some handshaking to make sure they can talk to each other (the error you may see after upgrading is a handshake failure in the RPC code).
+
+### Downsides
+
+There is a significant downside to this approach. Statically-compiled binaries are much larger than dynamically-linked binaries because they include everything they need to run. And because Terraform shares a lot of code with its plugins, there is a lot of binary data duplicated between each of these programs.
+
+In Terraform 0.6.15 there were 42 programs in total, using around 750MB on disk. And it turns out that about 600MB of this is duplicate data! This uses up a lot of space on your hard drive and a lot of bandwidth on our CDN. Fortunately, there is a way to resolve this problem.
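The handshake-and-dispense flow described above is what the `hashicorp/go-plugin` library provides. The sketch below shows roughly what the host side of that flow can look like; it is an illustrative example rather than Terraform's actual plugin code, and the `Greeter` interface, the `"greeter"` plugin name, and the `./example-plugin` binary are hypothetical.

```go
package main

import (
	"fmt"
	"net/rpc"
	"os/exec"

	"github.com/hashicorp/go-plugin"
)

// handshakeConfig must match between the host process and the plugin binary;
// a mismatch surfaces as an "Unrecognized remote plugin message" error.
var handshakeConfig = plugin.HandshakeConfig{
	ProtocolVersion:  1,
	MagicCookieKey:   "EXAMPLE_PLUGIN",
	MagicCookieValue: "example",
}

// Greeter is the interface the host expects the plugin process to serve.
type Greeter interface {
	Greet() string
}

// greeterRPC is the host-side stub that forwards calls over net/rpc.
type greeterRPC struct{ client *rpc.Client }

func (g *greeterRPC) Greet() string {
	var resp string
	if err := g.client.Call("Plugin.Greet", new(interface{}), &resp); err != nil {
		return "error: " + err.Error()
	}
	return resp
}

// greeterPlugin adapts Greeter to go-plugin's Plugin interface.
type greeterPlugin struct{}

func (greeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
	// The server half lives in the plugin binary, not in the host process.
	return nil, fmt.Errorf("not implemented in the host process")
}

func (greeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
	return &greeterRPC{client: c}, nil
}

func main() {
	// Launch the plugin as a child process and perform the handshake.
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: handshakeConfig,
		Plugins:         map[string]plugin.Plugin{"greeter": greeterPlugin{}},
		Cmd:             exec.Command("./example-plugin"),
	})
	defer client.Kill()

	// Connect over RPC and ask for the named plugin implementation.
	rpcClient, err := client.Client()
	if err != nil {
		panic(err)
	}
	raw, err := rpcClient.Dispense("greeter")
	if err != nil {
		panic(err)
	}

	fmt.Println(raw.(Greeter).Greet())
}
```

With internal plugins, the `Cmd` can simply re-invoke the same binary with extra arguments instead of launching a separate program, which is roughly how the single-binary approach described in the next section works.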
+
+### Our Solution
+
+In Terraform 0.7 we merged all of the programs into the same binary. We do this by using a special command, `terraform internal-plugin`, which allows us to invoke a plugin just by calling the same Terraform binary with extra arguments. In essence, Terraform now just calls itself in order to activate the special behavior in each plugin.
+
+### Supporting Our Community
+
+> Why would you do this? Why not just eliminate the network RPC interface and simplify everything?
+
+Terraform is an open source project with a large community, and while we maintain a wide range of plugins as part of the core distribution, we also want to make it easy for people anywhere to write and use their own plugins.
+
+By using the network RPC interface, you can build and distribute a plugin for Terraform without having to rebuild Terraform itself. This makes it easy for you to build a Terraform plugin for your organization's internal use, for a proprietary API that you don't want to open source, or to prototype something before contributing it back to the main project.
+
+In theory, because the plugin interface is HTTP, you could even develop a plugin using a completely different programming language! (Disclaimer: you would also have to re-implement the plugin API, which is not a trivial amount of work.)
+
+So to conclude, with the RPC interface _and_ internal plugins, we get the best of all of these features: binaries that _Just Work_, savings from shared code, and extensibility through plugins. We hope you enjoy using these features in Terraform.
\ No newline at end of file
diff --git a/website/source/docs/state/index.html.md b/website/source/docs/state/index.html.md
index f1847702d3..3a78fc7087 100644
--- a/website/source/docs/state/index.html.md
+++ b/website/source/docs/state/index.html.md
@@ -25,6 +25,23 @@ state file with the real infrastructure if the file didn't exist. But
 currently, Terraform state is a mixture of both a cache and required
 configuration and isn't optional.
 
+## Inspection and Modification
+
+While the format of the state files is just JSON, direct file editing
+of the state is discouraged. Terraform provides the
+[terraform state](/docs/commands/state/index.html) command to perform
+basic modifications of the state using the CLI.
+
+The CLI usage and output of the state commands are structured to be
+friendly for Unix tools such as grep, awk, etc. Additionally, the CLI
+insulates users from any format changes within the state itself. The Terraform
+project will keep the CLI working while the state format underneath it may
+shift.
+
+Finally, the CLI manages backups for you automatically. If you make a mistake
+modifying your state, the state CLI will always have a backup available for
+you that you can restore.
+
 ## Format
 
 The state is in JSON format and Terraform will promise backwards compatibility
diff --git a/website/source/downloads.html.erb b/website/source/downloads.html.erb
index bf89b14413..87179f12ea 100644
--- a/website/source/downloads.html.erb
+++ b/website/source/downloads.html.erb
@@ -31,6 +31,10 @@ description: |-
+

+ Note: Terraform now ships as a single binary. When upgrading from Terraform < 0.7.0
+ you will need to remove the old terraform-* plugins from your installation path.

diff --git a/website/source/intro/getting-started/variables.html.md b/website/source/intro/getting-started/variables.html.md
index a9dcc15db9..0b2668259c 100644
--- a/website/source/intro/getting-started/variables.html.md
+++ b/website/source/intro/getting-started/variables.html.md
@@ -122,6 +122,7 @@ support for the "us-west-2" region as well:
 ```
 variable "amis" {
+  type = "map"
   default = {
     us-east-1 = "ami-b8b061d0"
     us-west-2 = "ami-ef5e24df"
@@ -129,8 +130,8 @@ variable "amis" {
 }
 ```
 
-A variable becomes a mapping when it has a default value that is a
-map like above. There is no way to create a required map.
+A variable becomes a mapping when it has a type of "map" assigned, or has a
+default value that is a map like above.
 
 Then, replace the "aws\_instance" with the following:
 
@@ -148,7 +149,7 @@ variables is the key.
 
 While we don't use it in our example, it is worth noting that you can
 also do a static lookup of a mapping directly with
-`${var.amis.us-east-1}`.
+`${var.amis["us-east-1"]}`.
 
 ## Assigning Mappings
 
diff --git a/website/source/layouts/commands-state.erb b/website/source/layouts/commands-state.erb
new file mode 100644
index 0000000000..e182f0d2e7
--- /dev/null
+++ b/website/source/layouts/commands-state.erb
@@ -0,0 +1,34 @@
+<% wrap_layout :inner do %>
+  <% content_for :sidebar do %>
+
+  <% end %>
+
+  <%= yield %>
+<% end %>
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb
index d357c6d53c..bb94f405ba 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -107,6 +107,10 @@ show + > + state + + > taint
@@ -269,7 +273,7 @@ TLS - > + > Triton
@@ -343,6 +347,10 @@ > Provider + + > + Internals +
@@ -364,6 +372,10 @@ > Resource Addressing + + > + Internal Plugins +

Checkout the v<%= latest_version %> CHANGELOG for information on the latest release.