From 8e7fc240f91e659c6a3f27d2ae6c54299865789d Mon Sep 17 00:00:00 2001
From: Fatih Arslan
Date: Wed, 16 Sep 2015 23:26:27 +0300
Subject: [PATCH 001/664] schema: delete non-existent values
We need to set the value to an empty value so the state file does
indeed change the value. Otherwise the obsolete value stays
intact and never gets changed, which means `terraform show`
still displays the obsolete value even though it no longer
exists. This happens because the AWS API returns null
instead of an empty string.
---
helper/schema/field_writer_map.go | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/helper/schema/field_writer_map.go b/helper/schema/field_writer_map.go
index 3e9b047192..ea877e147e 100644
--- a/helper/schema/field_writer_map.go
+++ b/helper/schema/field_writer_map.go
@@ -207,7 +207,8 @@ func (w *MapFieldWriter) setPrimitive(
k := strings.Join(addr, ".")
if v == nil {
- delete(w.result, k)
+ // The empty string here means the value is removed.
+ w.result[k] = ""
return nil
}
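As a quick illustration of the new behaviour, here is a minimal sketch (assuming the exported `MapFieldWriter` API that the test in the next patch exercises): writing a nil value now keeps the key in the map with an empty string instead of dropping it, so the stale entry in the state gets overwritten.
```
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/helper/schema"
)

func main() {
	w := &schema.MapFieldWriter{
		Schema: map[string]*schema.Schema{
			"string": {Type: schema.TypeString},
		},
	}

	// Writing nil used to delete the key entirely; after this patch the key
	// is kept and set to "", so the obsolete value gets overwritten.
	if err := w.WriteField([]string{"string"}, nil); err != nil {
		log.Fatal(err)
	}

	fmt.Println(w.Map()) // map[string:]
}
```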
From f269d4fc8ce9e389ec5355df05a53fcd907e907c Mon Sep 17 00:00:00 2001
From: Fatih Arslan
Date: Wed, 16 Sep 2015 23:35:10 +0300
Subject: [PATCH 002/664] schema: add test for nil string case
---
helper/schema/field_writer_map_test.go | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/helper/schema/field_writer_map_test.go b/helper/schema/field_writer_map_test.go
index 8cf8100f2c..c9373da5dd 100644
--- a/helper/schema/field_writer_map_test.go
+++ b/helper/schema/field_writer_map_test.go
@@ -97,6 +97,15 @@ func TestMapFieldWriter(t *testing.T) {
},
},
+ "string nil": {
+ []string{"string"},
+ nil,
+ false,
+ map[string]string{
+ "string": "",
+ },
+ },
+
"list of resources": {
[]string{"listResource"},
[]interface{}{
From 545b8a3cd0b731e60137bbb1f8735ec621a01ce0 Mon Sep 17 00:00:00 2001
From: Fatih Arslan
Date: Thu, 17 Sep 2015 13:26:38 +0300
Subject: [PATCH 003/664] aws: store and read instance state
This allows us to store the instance state into the state file. This
means we can now easily see the instance state with `terraform show`.
---
builtin/providers/aws/resource_aws_instance.go | 17 +++++++++++++----
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_instance.go b/builtin/providers/aws/resource_aws_instance.go
index 093b6ae86b..6752f8e690 100644
--- a/builtin/providers/aws/resource_aws_instance.go
+++ b/builtin/providers/aws/resource_aws_instance.go
@@ -132,6 +132,11 @@ func resourceAwsInstance() *schema.Resource {
Computed: true,
},
+ "instance_state": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
"private_dns": &schema.Schema{
Type: schema.TypeString,
Computed: true,
@@ -449,10 +454,14 @@ func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error {
instance := resp.Reservations[0].Instances[0]
- // If the instance is terminated, then it is gone
- if *instance.State.Name == "terminated" {
- d.SetId("")
- return nil
+ if instance.State != nil {
+ // If the instance is terminated, then it is gone
+ if *instance.State.Name == "terminated" {
+ d.SetId("")
+ return nil
+ }
+
+ d.Set("instance_state", instance.State.Name)
}
if instance.Placement != nil {
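For completeness, a small sketch of how the new attribute could be asserted from an acceptance test, following the check style used elsewhere in this series; the resource address and the expected "running" value are assumptions, not part of the patch.
```
package aws

import "github.com/hashicorp/terraform/helper/resource"

// Hypothetical helper: asserts that the instance_state attribute stored by
// the Read function matches the state we expect the test instance to be in,
// e.g. testAccCheckInstanceStateAttr("aws_instance.foo", "running") inside a
// resource.ComposeTestCheckFunc.
func testAccCheckInstanceStateAttr(n, expected string) resource.TestCheckFunc {
	return resource.TestCheckResourceAttr(n, "instance_state", expected)
}
```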
From 0b66da1cd0fb6d83884ac28b0fc0ce66ad5bdd79 Mon Sep 17 00:00:00 2001
From: Ross McFarland
Date: Sat, 26 Sep 2015 12:23:50 -0700
Subject: [PATCH 004/664] Avoid nil map assign in aws instance
migrateStateV0toV1
---
builtin/providers/aws/resource_aws_instance_migrate.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/builtin/providers/aws/resource_aws_instance_migrate.go b/builtin/providers/aws/resource_aws_instance_migrate.go
index 5d7075f759..3208e40456 100644
--- a/builtin/providers/aws/resource_aws_instance_migrate.go
+++ b/builtin/providers/aws/resource_aws_instance_migrate.go
@@ -24,7 +24,7 @@ func resourceAwsInstanceMigrateState(
}
func migrateStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
- if is.Empty() {
+ if is.Empty() || is.Attributes == nil {
log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
return is, nil
}
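A minimal sketch of the case this guards against, written as a hypothetical test in the same package (the instance ID is a placeholder): an `InstanceState` whose `Attributes` map was never initialised gets past the `Empty()` check, and the migration then assigns into the nil map.
```
package aws

import (
	"testing"

	"github.com/hashicorp/terraform/terraform"
)

// Hypothetical regression test: the ID is set, so is.Empty() is false, but
// Attributes is nil. Before this patch the migration panicked on the nil map
// assignment; with the extra guard it now returns the state untouched.
func TestAWSInstanceMigrateState_nilAttributes(t *testing.T) {
	is := &terraform.InstanceState{ID: "i-abc123"} // placeholder ID
	if _, err := migrateStateV0toV1(is); err != nil {
		t.Fatalf("err: %s", err)
	}
}
```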
From ba8f1fa1f0ef564c24503709e69f1bd8cb02c8f1 Mon Sep 17 00:00:00 2001
From: Kazunori Kojima
Date: Fri, 31 Jul 2015 16:09:28 +0900
Subject: [PATCH 005/664] Add support for S3 server-side encryption with KMS.
* Example
```
terraform remote config \
-backend=s3 \
-backend-config="bucket=bucket-tfstate" \
-backend-config="key=terraform.tfstate" \
-backend-config="region=ap-northeast-1" \
-backend-config="encrypt=1" \
-backend-config="kmsKeyID=arn:aws:kms:ap-northeast-1:123456789:key/ac54dbd2-f301-42c1-bab9-88e6a84292a9"
```
---
state/remote/s3.go | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/state/remote/s3.go b/state/remote/s3.go
index 26330d1126..dcf9a3b800 100644
--- a/state/remote/s3.go
+++ b/state/remote/s3.go
@@ -50,6 +50,7 @@ func s3Factory(conf map[string]string) (Client, error) {
if raw, ok := conf["acl"]; ok {
acl = raw
}
+ kmsKeyID := conf["kmsKeyID"]
accessKeyId := conf["access_key"]
secretAccessKey := conf["secret_key"]
@@ -84,6 +85,7 @@ func s3Factory(conf map[string]string) (Client, error) {
keyName: keyName,
serverSideEncryption: serverSideEncryption,
acl: acl,
+ kmsKeyID: kmsKeyID,
}, nil
}
@@ -93,6 +95,7 @@ type S3Client struct {
keyName string
serverSideEncryption bool
acl string
+ kmsKeyID string
}
func (c *S3Client) Get() (*Payload, error) {
@@ -145,7 +148,12 @@ func (c *S3Client) Put(data []byte) error {
}
if c.serverSideEncryption {
- i.ServerSideEncryption = aws.String("AES256")
+ if c.kmsKeyID != "" {
+ i.SSEKMSKeyID = &c.kmsKeyID
+ i.ServerSideEncryption = aws.String("aws:kms")
+ } else {
+ i.ServerSideEncryption = aws.String("AES256")
+ }
}
if c.acl != "" {
From 576b2d11093f0b750cd0402ebb1a55eaf30a3b9e Mon Sep 17 00:00:00 2001
From: Kazunori Kojima
Date: Wed, 7 Oct 2015 23:09:03 +0900
Subject: [PATCH 006/664] Change KMS Key ID configuration name to match other settings
---
state/remote/s3.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/state/remote/s3.go b/state/remote/s3.go
index dcf9a3b800..f9d95c7180 100644
--- a/state/remote/s3.go
+++ b/state/remote/s3.go
@@ -50,7 +50,7 @@ func s3Factory(conf map[string]string) (Client, error) {
if raw, ok := conf["acl"]; ok {
acl = raw
}
- kmsKeyID := conf["kmsKeyID"]
+ kmsKeyID := conf["kms_key_id"]
accessKeyId := conf["access_key"]
secretAccessKey := conf["secret_key"]
From 9186c29dd8816e06f0283a4d0364a6caefae572c Mon Sep 17 00:00:00 2001
From: Kazunori Kojima
Date: Wed, 7 Oct 2015 23:39:08 +0900
Subject: [PATCH 007/664] Fix typo
---
state/remote/s3.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/state/remote/s3.go b/state/remote/s3.go
index f9d95c7180..cfe2c570e5 100644
--- a/state/remote/s3.go
+++ b/state/remote/s3.go
@@ -149,7 +149,7 @@ func (c *S3Client) Put(data []byte) error {
if c.serverSideEncryption {
if c.kmsKeyID != "" {
- i.SSEKMSKeyID = &c.kmsKeyID
+ i.SSEKMSKeyId = &c.kmsKeyID
i.ServerSideEncryption = aws.String("aws:kms")
} else {
i.ServerSideEncryption = aws.String("AES256")
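Putting the three S3 changes together, a rough sketch of the request the client now builds when a KMS key is configured; the bucket, key and KMS key ARN below are placeholders.
```
package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	data := []byte("placeholder state payload")
	kmsKeyID := "arn:aws:kms:ap-northeast-1:123456789:key/placeholder"

	i := &s3.PutObjectInput{
		Bucket: aws.String("bucket-tfstate"),
		Key:    aws.String("terraform.tfstate"),
		Body:   bytes.NewReader(data),
	}

	// Mirrors the Put() branch above: prefer SSE-KMS when a key is
	// configured, otherwise fall back to AES256 managed encryption.
	if kmsKeyID != "" {
		i.SSEKMSKeyId = aws.String(kmsKeyID)
		i.ServerSideEncryption = aws.String("aws:kms")
	} else {
		i.ServerSideEncryption = aws.String("AES256")
	}

	fmt.Println(*i.ServerSideEncryption) // aws:kms
}
```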
From 31767accac0c524c928cf65fa63de10bd32d60bc Mon Sep 17 00:00:00 2001
From: Joshua Semar
Date: Tue, 27 Oct 2015 21:30:11 -0500
Subject: [PATCH 008/664] get profile name even if profile path exists
---
builtin/providers/aws/resource_aws_instance.go | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/aws/resource_aws_instance.go b/builtin/providers/aws/resource_aws_instance.go
index d096a45d6f..13c406de24 100644
--- a/builtin/providers/aws/resource_aws_instance.go
+++ b/builtin/providers/aws/resource_aws_instance.go
@@ -1082,5 +1082,6 @@ func iamInstanceProfileArnToName(ip *ec2.IamInstanceProfile) string {
if ip == nil || ip.Arn == nil {
return ""
}
- return strings.Split(*ip.Arn, "/")[1]
+ parts := strings.Split(*ip.Arn, "/")
+ return parts[len(parts)-1]
}
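The difference is easiest to see on an instance profile ARN that carries a path; a small sketch (the ARN below is made up):
```
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical instance profile ARN whose profile lives under a path.
	arn := "arn:aws:iam::123456789012:instance-profile/my/path/webserver"

	parts := strings.Split(arn, "/")
	fmt.Println(parts[1])            // "my" (what the old code returned)
	fmt.Println(parts[len(parts)-1]) // "webserver" (the actual profile name)
}
```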
From c3f863f4c5a4cf61eef49dc31db3830116e18694 Mon Sep 17 00:00:00 2001
From: "John E. Vincent"
Date: Thu, 29 Oct 2015 09:33:09 -0400
Subject: [PATCH 009/664] add artifactory remote state storage
---
state/remote/artifactory.go | 117 +++++++++++++++++++++++++++++++
state/remote/artifactory_test.go | 55 +++++++++++++++
state/remote/remote.go | 13 ++--
3 files changed, 179 insertions(+), 6 deletions(-)
create mode 100644 state/remote/artifactory.go
create mode 100644 state/remote/artifactory_test.go
diff --git a/state/remote/artifactory.go b/state/remote/artifactory.go
new file mode 100644
index 0000000000..727e9faf03
--- /dev/null
+++ b/state/remote/artifactory.go
@@ -0,0 +1,117 @@
+package remote
+
+import (
+ "crypto/md5"
+ "fmt"
+ "os"
+ "strings"
+
+ artifactory "github.com/lusis/go-artifactory/src/artifactory.v401"
+)
+
+const ARTIF_TFSTATE_NAME = "terraform.tfstate"
+
+func artifactoryFactory(conf map[string]string) (Client, error) {
+ userName, ok := conf["username"]
+ if !ok {
+ userName = os.Getenv("ARTIFACTORY_USERNAME")
+ if userName == "" {
+ return nil, fmt.Errorf(
+ "missing 'username' configuration or ARTIFACTORY_USERNAME environment variable")
+ }
+ }
+ password, ok := conf["password"]
+ if !ok {
+ password = os.Getenv("ARTIFACTORY_PASSWORD")
+ if password == "" {
+ return nil, fmt.Errorf(
+ "missing 'password' configuration or ARTIFACTORY_PASSWORD environment variable")
+ }
+ }
+ url, ok := conf["url"]
+ if !ok {
+ url = os.Getenv("ARTIFACTORY_URL")
+ if url == "" {
+ return nil, fmt.Errorf(
+ "missing 'url' configuration or ARTIFACTORY_URL environment variable")
+ }
+ }
+ repo, ok := conf["repo"]
+ if !ok {
+ return nil, fmt.Errorf(
+ "missing 'repo' configuration")
+ }
+ subpath, ok := conf["subpath"]
+ if !ok {
+ return nil, fmt.Errorf(
+ "missing 'subpath' configuration")
+ }
+
+ clientConf := &artifactory.ClientConfig{
+ BaseURL: url,
+ Username: userName,
+ Password: password,
+ }
+ nativeClient := artifactory.NewClient(clientConf)
+
+ return &ArtifactoryClient{
+ nativeClient: &nativeClient,
+ userName: userName,
+ password: password,
+ url: url,
+ repo: repo,
+ subpath: subpath,
+ }, nil
+
+}
+
+type ArtifactoryClient struct {
+ nativeClient *artifactory.ArtifactoryClient
+ userName string
+ password string
+ url string
+ repo string
+ subpath string
+}
+
+func (c *ArtifactoryClient) Get() (*Payload, error) {
+ p := fmt.Sprintf("%s/%s/%s", c.repo, c.subpath, ARTIF_TFSTATE_NAME)
+ output, err := c.nativeClient.Get(p, make(map[string]string))
+ if err != nil {
+ if strings.Contains(err.Error(), "404") {
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ // TODO: migrate to using X-Checksum-Md5 header from artifactory
+ // needs to be exposed by go-artifactory first
+
+ hash := md5.Sum(output)
+ payload := &Payload{
+ Data: output,
+ MD5: hash[:md5.Size],
+ }
+
+ // If there was no data, then return nil
+ if len(payload.Data) == 0 {
+ return nil, nil
+ }
+
+ return payload, nil
+}
+
+func (c *ArtifactoryClient) Put(data []byte) error {
+ p := fmt.Sprintf("%s/%s/%s", c.repo, c.subpath, ARTIF_TFSTATE_NAME)
+ if _, err := c.nativeClient.Put(p, string(data), make(map[string]string)); err == nil {
+ return nil
+ } else {
+ return fmt.Errorf("Failed to upload state: %v", err)
+ }
+}
+
+func (c *ArtifactoryClient) Delete() error {
+ p := fmt.Sprintf("%s/%s/%s", c.repo, c.subpath, ARTIF_TFSTATE_NAME)
+ err := c.nativeClient.Delete(p)
+ return err
+}
diff --git a/state/remote/artifactory_test.go b/state/remote/artifactory_test.go
new file mode 100644
index 0000000000..74197fa916
--- /dev/null
+++ b/state/remote/artifactory_test.go
@@ -0,0 +1,55 @@
+package remote
+
+import (
+ "testing"
+)
+
+func TestArtifactoryClient_impl(t *testing.T) {
+ var _ Client = new(ArtifactoryClient)
+}
+
+func TestArtifactoryFactory(t *testing.T) {
+ // This test just instantiates the client. Shouldn't make any actual
+ // requests nor incur any costs.
+
+ config := make(map[string]string)
+
+ // Empty config is an error
+ _, err := artifactoryFactory(config)
+ if err == nil {
+ t.Fatalf("Empty config should be error")
+ }
+
+ config["url"] = "http://artifactory.local:8081/artifactory"
+ config["repo"] = "terraform-repo"
+ config["subpath"] = "myproject"
+
+ // For this test we'll provide the credentials as config. The
+ // acceptance tests implicitly test passing credentials as
+ // environment variables.
+ config["username"] = "test"
+ config["password"] = "testpass"
+
+ client, err := artifactoryFactory(config)
+ if err != nil {
+ t.Fatalf("Error for valid config")
+ }
+
+ artifactoryClient := client.(*ArtifactoryClient)
+
+ if artifactoryClient.nativeClient.Config.BaseURL != "http://artifactory.local:8081/artifactory" {
+ t.Fatalf("Incorrect url was populated")
+ }
+ if artifactoryClient.nativeClient.Config.Username != "test" {
+ t.Fatalf("Incorrect username was populated")
+ }
+ if artifactoryClient.nativeClient.Config.Password != "testpass" {
+ t.Fatalf("Incorrect password was populated")
+ }
+ if artifactoryClient.repo != "terraform-repo" {
+ t.Fatalf("Incorrect repo was populated")
+ }
+ if artifactoryClient.subpath != "myproject" {
+ t.Fatalf("Incorrect subpath was populated")
+ }
+}
diff --git a/state/remote/remote.go b/state/remote/remote.go
index 5337ad7b7b..4074c2c64e 100644
--- a/state/remote/remote.go
+++ b/state/remote/remote.go
@@ -36,12 +36,13 @@ func NewClient(t string, conf map[string]string) (Client, error) {
// BuiltinClients is the list of built-in clients that can be used with
// NewClient.
var BuiltinClients = map[string]Factory{
- "atlas": atlasFactory,
- "consul": consulFactory,
- "etcd": etcdFactory,
- "http": httpFactory,
- "s3": s3Factory,
- "swift": swiftFactory,
+ "atlas": atlasFactory,
+ "consul": consulFactory,
+ "etcd": etcdFactory,
+ "http": httpFactory,
+ "s3": s3Factory,
+ "swift": swiftFactory,
+ "artifactory": artifactoryFactory,
// This is used for development purposes only.
"_local": fileFactory,
From c1bb852390869c3626b87cc8bb6913ff02bc9139 Mon Sep 17 00:00:00 2001
From: AJ Bahnken
Date: Thu, 29 Oct 2015 10:27:50 -0700
Subject: [PATCH 010/664] Added measure_latency option to Route 53 Health Check
resource.
Related to #3273
---
.../providers/aws/resource_aws_route53_health_check.go | 10 ++++++++++
.../aws/resource_aws_route53_health_check_test.go | 1 +
2 files changed, 11 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_route53_health_check.go b/builtin/providers/aws/resource_aws_route53_health_check.go
index 1850401d91..16ad47c810 100644
--- a/builtin/providers/aws/resource_aws_route53_health_check.go
+++ b/builtin/providers/aws/resource_aws_route53_health_check.go
@@ -55,6 +55,11 @@ func resourceAwsRoute53HealthCheck() *schema.Resource {
Type: schema.TypeString,
Optional: true,
},
+ "measure_latency": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: false,
+ },
"tags": tagsSchema(),
},
}
@@ -128,6 +133,10 @@ func resourceAwsRoute53HealthCheckCreate(d *schema.ResourceData, meta interface{
healthConfig.ResourcePath = aws.String(v.(string))
}
+ if v, ok := d.GetOk("measure_latency"); ok {
+ healthConfig.MeasureLatency = aws.Bool(v.(bool))
+ }
+
input := &route53.CreateHealthCheckInput{
CallerReference: aws.String(time.Now().Format(time.RFC3339Nano)),
HealthCheckConfig: healthConfig,
@@ -174,6 +183,7 @@ func resourceAwsRoute53HealthCheckRead(d *schema.ResourceData, meta interface{})
d.Set("ip_address", updated.IPAddress)
d.Set("port", updated.Port)
d.Set("resource_path", updated.ResourcePath)
+ d.Set("measure_latency", updated.MeasureLatency)
// read the tags
req := &route53.ListTagsForResourceInput{
diff --git a/builtin/providers/aws/resource_aws_route53_health_check_test.go b/builtin/providers/aws/resource_aws_route53_health_check_test.go
index 9b14419637..0886b7ba3c 100644
--- a/builtin/providers/aws/resource_aws_route53_health_check_test.go
+++ b/builtin/providers/aws/resource_aws_route53_health_check_test.go
@@ -124,6 +124,7 @@ resource "aws_route53_health_check" "foo" {
resource_path = "/"
failure_threshold = "2"
request_interval = "30"
+ measure_latency = true
tags = {
Name = "tf-test-health-check"
From ab273bb2ee0cb8655d9f78c7d3903b1615fa2b7f Mon Sep 17 00:00:00 2001
From: AJ Bahnken
Date: Thu, 29 Oct 2015 12:50:02 -0700
Subject: [PATCH 011/664] Fixed up measure_latency option in r53 health checks.
* Ignore the param when Type is CALCULATED
* Added ForceNew to the measure_latency schema item
* Added a check to the test
---
builtin/providers/aws/resource_aws_route53_health_check.go | 7 +++++--
.../aws/resource_aws_route53_health_check_test.go | 2 ++
2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_route53_health_check.go b/builtin/providers/aws/resource_aws_route53_health_check.go
index 16ad47c810..3f4a2ae6f2 100644
--- a/builtin/providers/aws/resource_aws_route53_health_check.go
+++ b/builtin/providers/aws/resource_aws_route53_health_check.go
@@ -59,6 +59,7 @@ func resourceAwsRoute53HealthCheck() *schema.Resource {
Type: schema.TypeBool,
Optional: true,
Default: false,
+ ForceNew: true,
},
"tags": tagsSchema(),
},
@@ -133,8 +134,10 @@ func resourceAwsRoute53HealthCheckCreate(d *schema.ResourceData, meta interface{
healthConfig.ResourcePath = aws.String(v.(string))
}
- if v, ok := d.GetOk("measure_latency"); ok {
- healthConfig.MeasureLatency = aws.Bool(v.(bool))
+ if *healthConfig.Type != route53.HealthCheckTypeCalculated {
+ if v, ok := d.GetOk("measure_latency"); ok {
+ healthConfig.MeasureLatency = aws.Bool(v.(bool))
+ }
}
input := &route53.CreateHealthCheckInput{
diff --git a/builtin/providers/aws/resource_aws_route53_health_check_test.go b/builtin/providers/aws/resource_aws_route53_health_check_test.go
index 0886b7ba3c..f6f837c926 100644
--- a/builtin/providers/aws/resource_aws_route53_health_check_test.go
+++ b/builtin/providers/aws/resource_aws_route53_health_check_test.go
@@ -20,6 +20,8 @@ func TestAccAWSRoute53HealthCheck_basic(t *testing.T) {
Config: testAccRoute53HealthCheckConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckRoute53HealthCheckExists("aws_route53_health_check.foo"),
+ resource.TestCheckResourceAttr(
+ "aws_route53_health_check.foo", "measure_latency", "true"),
),
},
resource.TestStep{
From 006cac56a2ff3f21a2507332101d631af347319b Mon Sep 17 00:00:00 2001
From: Sunil K Chopra
Date: Fri, 30 Oct 2015 16:45:19 -0500
Subject: [PATCH 012/664] added placement group as an option for autoscaling
groups
---
.../providers/aws/resource_aws_autoscaling_group.go | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_autoscaling_group.go b/builtin/providers/aws/resource_aws_autoscaling_group.go
index f457e6dcd1..b74b2b6cc3 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_group.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_group.go
@@ -95,6 +95,13 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
Set: schema.HashString,
},
+ "placement_group": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ },
+
"load_balancers": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
@@ -175,6 +182,11 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{})
autoScalingGroupOpts.HealthCheckGracePeriod = aws.Int64(int64(v.(int)))
}
+ if v, ok := d.GetOk("placement_group"); ok && v.(*schema.Set).Len() > 0 {
+ autoScalingGroupOpts.PlacementGroup = expandStringList(
+ v.(*schema.Set).List())
+ }
+
if v, ok := d.GetOk("load_balancers"); ok && v.(*schema.Set).Len() > 0 {
autoScalingGroupOpts.LoadBalancerNames = expandStringList(
v.(*schema.Set).List())
From 8780bd269ae1678319be0129af6e79c4cf407e72 Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Mon, 26 Oct 2015 14:45:48 +0000
Subject: [PATCH 013/664] Added vCloud Director provider with tests and
provider documentation
---
builtin/bins/provider-vcd/main.go | 12 +
builtin/providers/vcd/config.go | 32 ++
builtin/providers/vcd/provider.go | 69 ++++
builtin/providers/vcd/provider_test.go | 50 +++
builtin/providers/vcd/resource_vcd_dnat.go | 171 +++++++++
.../providers/vcd/resource_vcd_dnat_test.go | 120 ++++++
.../vcd/resource_vcd_firewall_rules.go | 236 ++++++++++++
.../vcd/resource_vcd_firewall_rules_test.go | 105 ++++++
builtin/providers/vcd/resource_vcd_network.go | 263 +++++++++++++
.../vcd/resource_vcd_network_test.go | 107 ++++++
builtin/providers/vcd/resource_vcd_snat.go | 161 ++++++++
.../providers/vcd/resource_vcd_snat_test.go | 119 ++++++
builtin/providers/vcd/resource_vcd_vapp.go | 355 ++++++++++++++++++
.../providers/vcd/resource_vcd_vapp_test.go | 180 +++++++++
builtin/providers/vcd/structure.go | 103 +++++
website/source/assets/stylesheets/_docs.scss | 1 +
.../docs/providers/vcd/index.html.markdown | 54 +++
.../docs/providers/vcd/r/dnat.html.markdown | 32 ++
.../vcd/r/firewall_rules.html.markdown | 63 ++++
.../providers/vcd/r/network.html.markdown | 57 +++
.../docs/providers/vcd/r/snat.html.markdown | 30 ++
.../docs/providers/vcd/r/vapp.html.markdown | 59 +++
website/source/layouts/docs.erb | 4 +
website/source/layouts/vcd.erb | 38 ++
24 files changed, 2421 insertions(+)
create mode 100644 builtin/bins/provider-vcd/main.go
create mode 100644 builtin/providers/vcd/config.go
create mode 100644 builtin/providers/vcd/provider.go
create mode 100644 builtin/providers/vcd/provider_test.go
create mode 100644 builtin/providers/vcd/resource_vcd_dnat.go
create mode 100644 builtin/providers/vcd/resource_vcd_dnat_test.go
create mode 100644 builtin/providers/vcd/resource_vcd_firewall_rules.go
create mode 100644 builtin/providers/vcd/resource_vcd_firewall_rules_test.go
create mode 100644 builtin/providers/vcd/resource_vcd_network.go
create mode 100644 builtin/providers/vcd/resource_vcd_network_test.go
create mode 100644 builtin/providers/vcd/resource_vcd_snat.go
create mode 100644 builtin/providers/vcd/resource_vcd_snat_test.go
create mode 100644 builtin/providers/vcd/resource_vcd_vapp.go
create mode 100644 builtin/providers/vcd/resource_vcd_vapp_test.go
create mode 100644 builtin/providers/vcd/structure.go
create mode 100644 website/source/docs/providers/vcd/index.html.markdown
create mode 100644 website/source/docs/providers/vcd/r/dnat.html.markdown
create mode 100644 website/source/docs/providers/vcd/r/firewall_rules.html.markdown
create mode 100644 website/source/docs/providers/vcd/r/network.html.markdown
create mode 100644 website/source/docs/providers/vcd/r/snat.html.markdown
create mode 100644 website/source/docs/providers/vcd/r/vapp.html.markdown
create mode 100644 website/source/layouts/vcd.erb
diff --git a/builtin/bins/provider-vcd/main.go b/builtin/bins/provider-vcd/main.go
new file mode 100644
index 0000000000..7e040dd432
--- /dev/null
+++ b/builtin/bins/provider-vcd/main.go
@@ -0,0 +1,12 @@
+package main
+
+import (
+ "github.com/hashicorp/terraform/builtin/providers/vcd"
+ "github.com/hashicorp/terraform/plugin"
+)
+
+func main() {
+ plugin.Serve(&plugin.ServeOpts{
+ ProviderFunc: vcd.Provider,
+ })
+}
diff --git a/builtin/providers/vcd/config.go b/builtin/providers/vcd/config.go
new file mode 100644
index 0000000000..0768bbc3db
--- /dev/null
+++ b/builtin/providers/vcd/config.go
@@ -0,0 +1,32 @@
+package vcd
+
+import (
+ "fmt"
+ "net/url"
+
+ "github.com/opencredo/vmware-govcd"
+)
+
+type Config struct {
+ User string
+ Password string
+ Org string
+ Href string
+ VDC string
+}
+
+func (c *Config) Client() (*govcd.VCDClient, error) {
+ u, err := url.ParseRequestURI(c.Href)
+ if err != nil {
+ return nil, fmt.Errorf("Something went wrong: %s", err)
+ }
+
+ vcdclient := govcd.NewVCDClient(*u)
+ org, vcd, err := vcdclient.Authenticate(c.User, c.Password, c.Org, c.VDC)
+ if err != nil {
+ return nil, fmt.Errorf("Something went wrong: %s", err)
+ }
+ vcdclient.Org = org
+ vcdclient.OrgVdc = vcd
+ return vcdclient, nil
+}
diff --git a/builtin/providers/vcd/provider.go b/builtin/providers/vcd/provider.go
new file mode 100644
index 0000000000..c9849be356
--- /dev/null
+++ b/builtin/providers/vcd/provider.go
@@ -0,0 +1,69 @@
+package vcd
+
+import (
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Provider returns a terraform.ResourceProvider.
+func Provider() terraform.ResourceProvider {
+ return &schema.Provider{
+ Schema: map[string]*schema.Schema{
+ "user": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: schema.EnvDefaultFunc("VCD_USER", nil),
+ Description: "The user name for vcd API operations.",
+ },
+
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: schema.EnvDefaultFunc("VCD_PASSWORD", nil),
+ Description: "The user password for vcd API operations.",
+ },
+
+ "org": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: schema.EnvDefaultFunc("VCD_ORG", nil),
+ Description: "The vcd org for API operations",
+ },
+
+ "url": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: schema.EnvDefaultFunc("VCD_URL", nil),
+ Description: "The vcd url for vcd API operations.",
+ },
+ "vdc": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ DefaultFunc: schema.EnvDefaultFunc("VCD_VDC", ""),
+ Description: "The name of the VDC to run operations on",
+ },
+ },
+
+ ResourcesMap: map[string]*schema.Resource{
+ "vcd_network": resourceVcdNetwork(),
+ "vcd_vapp": resourceVcdVApp(),
+ "vcd_firewall_rules": resourceVcdFirewallRules(),
+ "vcd_dnat": resourceVcdDNAT(),
+ "vcd_snat": resourceVcdSNAT(),
+ },
+
+ ConfigureFunc: providerConfigure,
+ }
+}
+
+func providerConfigure(d *schema.ResourceData) (interface{}, error) {
+ config := Config{
+ User: d.Get("user").(string),
+ Password: d.Get("password").(string),
+ Org: d.Get("org").(string),
+ Href: d.Get("url").(string),
+ VDC: d.Get("vdc").(string),
+ }
+
+ return config.Client()
+}
diff --git a/builtin/providers/vcd/provider_test.go b/builtin/providers/vcd/provider_test.go
new file mode 100644
index 0000000000..48ee207219
--- /dev/null
+++ b/builtin/providers/vcd/provider_test.go
@@ -0,0 +1,50 @@
+package vcd
+
+import (
+ "os"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+var testAccProviders map[string]terraform.ResourceProvider
+var testAccProvider *schema.Provider
+
+func init() {
+ testAccProvider = Provider().(*schema.Provider)
+ testAccProviders = map[string]terraform.ResourceProvider{
+ "vcd": testAccProvider,
+ }
+}
+
+func TestProvider(t *testing.T) {
+ if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestProvider_impl(t *testing.T) {
+ var _ terraform.ResourceProvider = Provider()
+}
+
+func testAccPreCheck(t *testing.T) {
+ if v := os.Getenv("VCD_USER"); v == "" {
+ t.Fatal("VCD_USER must be set for acceptance tests")
+ }
+ if v := os.Getenv("VCD_PASSWORD"); v == "" {
+ t.Fatal("VCD_PASSWORD must be set for acceptance tests")
+ }
+ if v := os.Getenv("VCD_ORG"); v == "" {
+ t.Fatal("VCD_ORG must be set for acceptance tests")
+ }
+ if v := os.Getenv("VCD_URL"); v == "" {
+ t.Fatal("VCD_URL must be set for acceptance tests")
+ }
+ if v := os.Getenv("VCD_EDGE_GATEWAY"); v == "" {
+ t.Fatal("VCD_EDGE_GATEWAY must be set for acceptance tests")
+ }
+ if v := os.Getenv("VCD_VDC"); v == "" {
+ t.Fatal("VCD_VDC must be set for acceptance tests")
+ }
+}
diff --git a/builtin/providers/vcd/resource_vcd_dnat.go b/builtin/providers/vcd/resource_vcd_dnat.go
new file mode 100644
index 0000000000..dd1c67e338
--- /dev/null
+++ b/builtin/providers/vcd/resource_vcd_dnat.go
@@ -0,0 +1,171 @@
+package vcd
+
+import (
+ "fmt"
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/opencredo/vmware-govcd"
+ "regexp"
+ "strings"
+ "time"
+)
+
+func resourceVcdDNAT() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceVcdDNATCreate,
+ Update: resourceVcdDNATUpdate,
+ Delete: resourceVcdDNATDelete,
+ Read: resourceVcdDNATRead,
+
+ Schema: map[string]*schema.Schema{
+ "edge_gateway": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "external_ip": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "port": &schema.Schema{
+ Type: schema.TypeInt,
+ Required: true,
+ },
+
+ "internal_ip": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ },
+ }
+}
+
+func resourceVcdDNATCreate(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+ // Multiple VCD components need to run operations on the Edge Gateway, as
+ // the edge gateway will throw back an error if it is already performing an
+ // operation, we must wait until we can acquire a lock on the client
+ vcd_client.Mutex.Lock()
+ defer vcd_client.Mutex.Unlock()
+ var task govcd.Task
+ portString := getPortString(d.Get("port").(int))
+
+ // Creating a loop to offer further protection from the edge gateway erroring
+ // due to being busy, e.g. another person is using another client so wouldn't be
+ // constrained by our lock. If the edge gateway returns with a busy error, wait
+ // 3 seconds and then try again. Continue until a non-busy error or success
+ for {
+ err := vcd_client.OrgVdc.Refresh()
+ if err != nil {
+ return fmt.Errorf("Error refreshing vdc: %#v", err)
+ }
+
+ edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+
+ if err != nil {
+ return fmt.Errorf("Unable to find edge gateway: %#v", err)
+ }
+
+ task, err = edgeGateway.AddNATMapping("DNAT", d.Get("external_ip").(string),
+ d.Get("internal_ip").(string),
+ portString)
+
+ if err != nil {
+ if v, _ := regexp.MatchString("is busy completing an operation.$", err.Error()); v {
+ time.Sleep(3 * time.Second)
+ continue
+ } else {
+ return fmt.Errorf("Error setting DNAT rules: %#v", err)
+ }
+ }
+ break
+ }
+
+ err := task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+
+ d.SetId(d.Get("external_ip").(string) + "_" + portString)
+ return nil
+}
+
+func resourceVcdDNATUpdate(d *schema.ResourceData, meta interface{}) error {
+ return nil
+}
+
+func resourceVcdDNATRead(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+ e, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+
+ if err != nil {
+ return fmt.Errorf("Unable to find edge gateway: %#v", err)
+ }
+
+ idSplit := strings.Split(d.Id(), "_")
+ var found bool
+
+ for _, r := range e.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.NatService.NatRule {
+ if r.RuleType == "DNAT" &&
+ r.GatewayNatRule.OriginalIP == idSplit[0] &&
+ r.GatewayNatRule.OriginalPort == idSplit[1] {
+ found = true
+ d.Set("internal_ip", r.GatewayNatRule.TranslatedIP)
+ }
+ }
+
+ if !found {
+ d.SetId("")
+ }
+
+ return nil
+}
+
+func resourceVcdDNATDelete(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+ // Multiple VCD components need to run operations on the Edge Gateway, as
+ // the edge gateway will throw back an error if it is already performing an
+ // operation, we must wait until we can acquire a lock on the client
+ vcd_client.Mutex.Lock()
+ defer vcd_client.Mutex.Unlock()
+ var task govcd.Task
+ portString := getPortString(d.Get("port").(int))
+
+ // Creating a loop to offer further protection from the edge gateway erroring
+ // due to being busy, e.g. another person is using another client so wouldn't be
+ // constrained by our lock. If the edge gateway returns with a busy error, wait
+ // 3 seconds and then try again. Continue until a non-busy error or success
+ for {
+ err := vcd_client.OrgVdc.Refresh()
+ if err != nil {
+ return fmt.Errorf("Error refreshing vdc: %#v", err)
+ }
+
+ edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+
+ if err != nil {
+ return fmt.Errorf("Unable to find edge gateway: %#v", err)
+ }
+
+ task, err = edgeGateway.RemoveNATMapping("DNAT", d.Get("external_ip").(string),
+ d.Get("internal_ip").(string),
+ portString)
+
+ if err != nil {
+ if v, _ := regexp.MatchString("is busy completing an operation.$", err.Error()); v {
+ time.Sleep(3 * time.Second)
+ continue
+ } else {
+ return fmt.Errorf("Error setting DNAT rules: %#v", err)
+ }
+ }
+ break
+ }
+
+ err := task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+ return nil
+}
diff --git a/builtin/providers/vcd/resource_vcd_dnat_test.go b/builtin/providers/vcd/resource_vcd_dnat_test.go
new file mode 100644
index 0000000000..ba4bfce134
--- /dev/null
+++ b/builtin/providers/vcd/resource_vcd_dnat_test.go
@@ -0,0 +1,120 @@
+package vcd
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/opencredo/vmware-govcd"
+)
+
+func TestAccVcdDNAT_Basic(t *testing.T) {
+ if v := os.Getenv("VCD_EXTERNAL_IP"); v == "" {
+ t.Skip("Environment variable VCD_EXTERNAL_IP must be set to run DNAT tests")
+ return
+ }
+
+ var e govcd.EdgeGateway
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckVcdDNATDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: fmt.Sprintf(testAccCheckVcdDnat_basic, os.Getenv("VCD_EDGE_GATEWAY"), os.Getenv("VCD_EXTERNAL_IP")),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVcdDNATExists("vcd_dnat.bar", &e),
+ resource.TestCheckResourceAttr(
+ "vcd_dnat.bar", "external_ip", os.Getenv("VCD_EXTERNAL_IP")),
+ resource.TestCheckResourceAttr(
+ "vcd_dnat.bar", "port", "77"),
+ resource.TestCheckResourceAttr(
+ "vcd_dnat.bar", "internal_ip", "10.10.102.60"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckVcdDNATExists(n string, gateway *govcd.EdgeGateway) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No DNAT ID is set")
+ }
+
+ conn := testAccProvider.Meta().(*govcd.VCDClient)
+
+ gatewayName := rs.Primary.Attributes["edge_gateway"]
+ edgeGateway, err := conn.OrgVdc.FindEdgeGateway(gatewayName)
+
+ if err != nil {
+ return fmt.Errorf("Could not find edge gateway")
+ }
+
+ var found bool
+ for _, v := range edgeGateway.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.NatService.NatRule {
+ if v.RuleType == "DNAT" &&
+ v.GatewayNatRule.OriginalIP == os.Getenv("VCD_EXTERNAL_IP") &&
+ v.GatewayNatRule.OriginalPort == "77" &&
+ v.GatewayNatRule.TranslatedIP == "10.10.102.60" {
+ found = true
+ }
+ }
+ if !found {
+ return fmt.Errorf("DNAT rule was not found")
+ }
+
+ *gateway = edgeGateway
+
+ return nil
+ }
+}
+
+func testAccCheckVcdDNATDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*govcd.VCDClient)
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "vcd_dnat" {
+ continue
+ }
+
+ gatewayName := rs.Primary.Attributes["edge_gateway"]
+ edgeGateway, err := conn.OrgVdc.FindEdgeGateway(gatewayName)
+
+ if err != nil {
+ return fmt.Errorf("Could not find edge gateway")
+ }
+
+ var found bool
+ for _, v := range edgeGateway.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.NatService.NatRule {
+ if v.RuleType == "DNAT" &&
+ v.GatewayNatRule.OriginalIP == os.Getenv("VCD_EXTERNAL_IP") &&
+ v.GatewayNatRule.OriginalPort == "77" &&
+ v.GatewayNatRule.TranslatedIP == "10.10.102.60" {
+ found = true
+ }
+ }
+
+ if found {
+ return fmt.Errorf("DNAT rule still exists.")
+ }
+ }
+
+ return nil
+}
+
+const testAccCheckVcdDnat_basic = `
+resource "vcd_dnat" "bar" {
+ edge_gateway = "%s"
+ external_ip = "%s"
+ port = 77
+ internal_ip = "10.10.102.60"
+}
+`
diff --git a/builtin/providers/vcd/resource_vcd_firewall_rules.go b/builtin/providers/vcd/resource_vcd_firewall_rules.go
new file mode 100644
index 0000000000..e025b143f6
--- /dev/null
+++ b/builtin/providers/vcd/resource_vcd_firewall_rules.go
@@ -0,0 +1,236 @@
+package vcd
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/opencredo/vmware-govcd"
+ types "github.com/opencredo/vmware-govcd/types/v56"
+ "strings"
+)
+
+func resourceVcdFirewallRules() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceVcdFirewallRulesCreate,
+ Delete: resourceFirewallRulesDelete,
+ Read: resourceFirewallRulesRead,
+
+ Schema: map[string]*schema.Schema{
+ "edge_gateway": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "default_action": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "rule": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "policy": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "protocol": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "destination_port": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "destination_ip": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "source_port": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "source_ip": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ },
+ },
+ Set: resourceVcdNetworkFirewallRuleHash,
+ },
+ },
+ }
+}
+
+func resourceVcdFirewallRulesCreate(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+ vcd_client.Mutex.Lock()
+ defer vcd_client.Mutex.Unlock()
+
+ edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+
+ firewallRules, _ := expandFirewallRules(d.Get("rule").(*schema.Set).List(), edgeGateway.EdgeGateway)
+
+ task, err := edgeGateway.CreateFirewallRules(d.Get("default_action").(string), firewallRules)
+ if err != nil {
+ return fmt.Errorf("Error setting firewall rules: %#v", err)
+ }
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+
+ d.SetId(d.Get("edge_gateway").(string))
+
+ return resourceFirewallRulesRead(d, meta)
+}
+
+func resourceFirewallRulesDelete(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+ vcd_client.Mutex.Lock()
+ defer vcd_client.Mutex.Unlock()
+
+ edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+
+ firewallRules := deleteFirewallRules(d.Get("rule").(*schema.Set).List(), edgeGateway.EdgeGateway)
+ defaultAction := edgeGateway.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.DefaultAction
+ task, err := edgeGateway.CreateFirewallRules(defaultAction, firewallRules)
+ if err != nil {
+ return fmt.Errorf("Error deleting firewall rules: %#v", err)
+ }
+
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+
+ return nil
+}
+
+func resourceFirewallRulesRead(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+
+ edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+ if err != nil {
+ return fmt.Errorf("Error finding edge gateway: %#v", err)
+ }
+ firewallRules := *edgeGateway.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService
+ d.Set("rule", resourceVcdFirewallRulesGather(firewallRules.FirewallRule, d.Get("rule").(*schema.Set).List()))
+ d.Set("default_action", firewallRules.DefaultAction)
+
+ return nil
+}
+
+func deleteFirewallRules(configured []interface{}, gateway *types.EdgeGateway) []*types.FirewallRule {
+ firewallRules := gateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.FirewallRule
+ fwrules := make([]*types.FirewallRule, 0, len(firewallRules)-len(configured))
+
+ for _, f := range firewallRules {
+ keep := true
+ for _, r := range configured {
+ data := r.(map[string]interface{})
+ if data["id"].(string) != f.ID {
+ continue
+ }
+ keep = false
+ }
+ if keep {
+ fwrules = append(fwrules, f)
+ }
+ }
+ return fwrules
+}
+
+func resourceVcdFirewallRulesGather(rules []*types.FirewallRule, configured []interface{}) []map[string]interface{} {
+ fwrules := make([]map[string]interface{}, 0, len(configured))
+
+ for i := len(configured) - 1; i >= 0; i-- {
+ data := configured[i].(map[string]interface{})
+ rule, err := matchFirewallRule(data, rules)
+ if err != nil {
+ continue
+ }
+ fwrules = append(fwrules, rule)
+ }
+ return fwrules
+}
+
+func matchFirewallRule(data map[string]interface{}, rules []*types.FirewallRule) (map[string]interface{}, error) {
+ rule := make(map[string]interface{})
+ for _, m := range rules {
+ if data["id"].(string) == "" {
+ if data["description"].(string) == m.Description &&
+ data["policy"].(string) == m.Policy &&
+ data["protocol"].(string) == getProtocol(*m.Protocols) &&
+ data["destination_port"].(string) == getPortString(m.Port) &&
+ strings.ToLower(data["destination_ip"].(string)) == strings.ToLower(m.DestinationIP) &&
+ data["source_port"].(string) == getPortString(m.SourcePort) &&
+ strings.ToLower(data["source_ip"].(string)) == strings.ToLower(m.SourceIP) {
+ rule["id"] = m.ID
+ rule["description"] = m.Description
+ rule["policy"] = m.Policy
+ rule["protocol"] = getProtocol(*m.Protocols)
+ rule["destination_port"] = getPortString(m.Port)
+ rule["destination_ip"] = strings.ToLower(m.DestinationIP)
+ rule["source_port"] = getPortString(m.SourcePort)
+ rule["source_ip"] = strings.ToLower(m.SourceIP)
+ return rule, nil
+ }
+ } else {
+ if data["id"].(string) == m.ID {
+ rule["id"] = m.ID
+ rule["description"] = m.Description
+ rule["policy"] = m.Policy
+ rule["protocol"] = getProtocol(*m.Protocols)
+ rule["destination_port"] = getPortString(m.Port)
+ rule["destination_ip"] = strings.ToLower(m.DestinationIP)
+ rule["source_port"] = getPortString(m.SourcePort)
+ rule["source_ip"] = strings.ToLower(m.SourceIP)
+ return rule, nil
+ }
+ }
+ }
+ return rule, fmt.Errorf("Unable to find rule")
+}
+
+func resourceVcdNetworkFirewallRuleHash(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+ buf.WriteString(fmt.Sprintf("%s-",
+ strings.ToLower(m["description"].(string))))
+ buf.WriteString(fmt.Sprintf("%s-",
+ strings.ToLower(m["policy"].(string))))
+ buf.WriteString(fmt.Sprintf("%s-",
+ strings.ToLower(m["protocol"].(string))))
+ buf.WriteString(fmt.Sprintf("%s-",
+ strings.ToLower(m["destination_port"].(string))))
+ buf.WriteString(fmt.Sprintf("%s-",
+ strings.ToLower(m["destination_ip"].(string))))
+ buf.WriteString(fmt.Sprintf("%s-",
+ strings.ToLower(m["source_port"].(string))))
+ buf.WriteString(fmt.Sprintf("%s-",
+ strings.ToLower(m["source_ip"].(string))))
+
+ return hashcode.String(buf.String())
+}
diff --git a/builtin/providers/vcd/resource_vcd_firewall_rules_test.go b/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
new file mode 100644
index 0000000000..96e2c3e3d7
--- /dev/null
+++ b/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
@@ -0,0 +1,105 @@
+package vcd
+
+import (
+ "fmt"
+ "testing"
+ //"regexp"
+ "log"
+ "os"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/opencredo/vmware-govcd"
+)
+
+func TestAccVcdFirewallRules_basic(t *testing.T) {
+
+ var existingRules, fwRules govcd.EdgeGateway
+ newConfig := createFirewallRulesConfigs(&existingRules)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: newConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVcdFirewallRulesExists("vcd_firewall_rules.bar", &fwRules),
+ testAccCheckVcdFirewallRulesAttributes(&fwRules, &existingRules),
+ ),
+ },
+ },
+ })
+
+}
+
+func testAccCheckVcdFirewallRulesExists(n string, gateway *govcd.EdgeGateway) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No Record ID is set")
+ }
+
+ conn := testAccProvider.Meta().(*govcd.VCDClient)
+
+ resp, err := conn.OrgVdc.FindEdgeGateway(rs.Primary.ID)
+ if err != nil {
+ return fmt.Errorf("Edge Gateway does not exist.")
+ }
+
+ *gateway = resp
+
+ return nil
+ }
+}
+
+func testAccCheckVcdFirewallRulesAttributes(newRules, existingRules *govcd.EdgeGateway) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+
+ if len(newRules.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.FirewallRule) != len(existingRules.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.FirewallRule)+1 {
+ return fmt.Errorf("New firewall rule not added: %d != %d",
+ len(newRules.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.FirewallRule),
+ len(existingRules.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.FirewallRule)+1)
+ }
+
+ return nil
+ }
+}
+
+func createFirewallRulesConfigs(existingRules *govcd.EdgeGateway) string {
+ config := Config{
+ User: os.Getenv("VCD_USER"),
+ Password: os.Getenv("VCD_PASSWORD"),
+ Org: os.Getenv("VCD_ORG"),
+ Href: os.Getenv("VCD_URL"),
+ VDC: os.Getenv("VCD_VDC"),
+ }
+ conn, _ := config.Client()
+ edgeGateway, _ := conn.OrgVdc.FindEdgeGateway(os.Getenv("VCD_EDGE_GATEWAY"))
+ *existingRules = edgeGateway
+ log.Printf("[DEBUG] Edge gateway: %#v", edgeGateway)
+ firewallRules := *edgeGateway.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService
+ return fmt.Sprintf(testAccCheckVcdFirewallRules_add, os.Getenv("VCD_EDGE_GATEWAY"), firewallRules.DefaultAction)
+}
+
+const testAccCheckVcdFirewallRules_add = `
+resource "vcd_firewall_rules" "bar" {
+ edge_gateway = "%s"
+ default_action = "%s"
+
+ rule {
+ description = "Test rule"
+ policy = "allow"
+ protocol = "any"
+ destination_port = "any"
+ destination_ip = "any"
+ source_port = "any"
+ source_ip = "any"
+ }
+}
+`
diff --git a/builtin/providers/vcd/resource_vcd_network.go b/builtin/providers/vcd/resource_vcd_network.go
new file mode 100644
index 0000000000..3196b73065
--- /dev/null
+++ b/builtin/providers/vcd/resource_vcd_network.go
@@ -0,0 +1,263 @@
+package vcd
+
+import (
+ "log"
+
+ "bytes"
+ "fmt"
+ "github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/opencredo/vmware-govcd"
+ types "github.com/opencredo/vmware-govcd/types/v56"
+ "strings"
+ "time"
+)
+
+func resourceVcdNetwork() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceVcdNetworkCreate,
+ Update: resourceVcdNetworkUpdate,
+ Read: resourceVcdNetworkRead,
+ Delete: resourceVcdNetworkDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "fence_mode": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "natRouted",
+ },
+
+ "edge_gateway": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "netmask": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "255.255.255.0",
+ },
+
+ "gateway": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "dns1": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "8.8.8.8",
+ },
+
+ "dns2": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "8.8.4.4",
+ },
+
+ "dns_suffix": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+
+ "href": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+
+ "dhcp_pool": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "start_address": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "end_address": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ },
+ },
+ Set: resourceVcdNetworkIpAddressHash,
+ },
+ "static_ip_pool": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "start_address": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "end_address": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ },
+ },
+ Set: resourceVcdNetworkIpAddressHash,
+ },
+ },
+ }
+}
+
+func resourceVcdNetworkCreate(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+ log.Printf("[TRACE] CLIENT: %#v", vcd_client)
+ vcd_client.Mutex.Lock()
+ defer vcd_client.Mutex.Unlock()
+
+ edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+
+ ipRanges, err := expandIpRange(d.Get("static_ip_pool").(*schema.Set).List())
+ if err != nil {
+ fmt.Printf("error: %v\n", err)
+ }
+
+ newnetwork := &types.OrgVDCNetwork{
+ Xmlns: "http://www.vmware.com/vcloud/v1.5",
+ Name: d.Get("name").(string),
+ Configuration: &types.NetworkConfiguration{
+ FenceMode: d.Get("fence_mode").(string),
+ IPScopes: &types.IPScopes{
+ IPScope: types.IPScope{
+ IsInherited: false,
+ Gateway: d.Get("gateway").(string),
+ Netmask: d.Get("netmask").(string),
+ DNS1: d.Get("dns1").(string),
+ DNS2: d.Get("dns2").(string),
+ DNSSuffix: d.Get("dns_suffix").(string),
+ IPRanges: &ipRanges,
+ },
+ },
+ BackwardCompatibilityMode: true,
+ },
+ EdgeGateway: &types.Reference{
+ HREF: edgeGateway.EdgeGateway.HREF,
+ },
+ IsShared: false,
+ }
+
+ log.Printf("[INFO] NETWORK: %#v", newnetwork)
+ err = vcd_client.OrgVdc.CreateOrgVDCNetwork(newnetwork)
+
+ if err != nil {
+ return fmt.Errorf("Error: %#v", err)
+ }
+
+ if dhcp, ok := d.GetOk("dhcp_pool"); ok {
+ err := vcd_client.OrgVdc.Refresh()
+ if err != nil {
+ return fmt.Errorf("Error refreshing vdc: %#v", err)
+ }
+
+ network, err := vcd_client.OrgVdc.FindVDCNetwork(d.Get("name").(string))
+ if err != nil {
+ return fmt.Errorf("Error finding network: %#v", err)
+ }
+
+ task, err := edgeGateway.AddDhcpPool(network.OrgVDCNetwork, dhcp.(*schema.Set).List())
+
+ if err != nil {
+ return fmt.Errorf("Error adding DHCP pool: %#v", err)
+ }
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+
+ }
+
+ d.SetId(d.Get("name").(string))
+
+ return resourceVcdNetworkRead(d, meta)
+}
+
+func resourceVcdNetworkUpdate(d *schema.ResourceData, meta interface{}) error {
+
+ vcd_client := meta.(*govcd.VCDClient)
+
+ log.Printf("[DEBUG] VCD Client configuration: %#v", vcd_client)
+ return nil
+}
+
+func resourceVcdNetworkRead(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+ log.Printf("[DEBUG] VCD Client configuration: %#v", vcd_client)
+ log.Printf("[DEBUG] VCD Client configuration: %#v", vcd_client.OrgVdc)
+
+ err := vcd_client.OrgVdc.Refresh()
+ if err != nil {
+ return fmt.Errorf("Error refreshing vdc: %#v", err)
+ }
+
+ network, err := vcd_client.OrgVdc.FindVDCNetwork(d.Id())
+ if err != nil {
+ return fmt.Errorf("Error finding network: %#v", err)
+ }
+
+ d.Set("name", network.OrgVDCNetwork.Name)
+ d.Set("href", network.OrgVDCNetwork.HREF)
+ d.Set("fence_mode", network.OrgVDCNetwork.Configuration.FenceMode)
+ d.Set("gateway", network.OrgVDCNetwork.Configuration.IPScopes.IPScope.Gateway)
+ d.Set("netmask", network.OrgVDCNetwork.Configuration.IPScopes.IPScope.Netmask)
+ d.Set("dns1", network.OrgVDCNetwork.Configuration.IPScopes.IPScope.DNS1)
+ d.Set("dns2", network.OrgVDCNetwork.Configuration.IPScopes.IPScope.DNS2)
+
+ return nil
+}
+
+func resourceVcdNetworkDelete(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+ vcd_client.Mutex.Lock()
+ defer vcd_client.Mutex.Unlock()
+ err := vcd_client.OrgVdc.Refresh()
+ if err != nil {
+ return fmt.Errorf("Error refreshing vdc: %#v", err)
+ }
+
+ network, err := vcd_client.OrgVdc.FindVDCNetwork(d.Id())
+ if err != nil {
+ return fmt.Errorf("Error finding network: %#v", err)
+ }
+
+ err = resource.Retry(3*time.Minute, func() error {
+ task, err := network.Delete()
+ if err != nil {
+ return fmt.Errorf("Error Deleting Network: %#v", err)
+ }
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func resourceVcdNetworkIpAddressHash(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+ buf.WriteString(fmt.Sprintf("%s-",
+ strings.ToLower(m["start_address"].(string))))
+ buf.WriteString(fmt.Sprintf("%s-",
+ strings.ToLower(m["end_address"].(string))))
+
+ return hashcode.String(buf.String())
+}
diff --git a/builtin/providers/vcd/resource_vcd_network_test.go b/builtin/providers/vcd/resource_vcd_network_test.go
new file mode 100644
index 0000000000..6bfd840bb0
--- /dev/null
+++ b/builtin/providers/vcd/resource_vcd_network_test.go
@@ -0,0 +1,107 @@
+package vcd
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/opencredo/vmware-govcd"
+)
+
+func TestAccVcdNetwork_Basic(t *testing.T) {
+ var network govcd.OrgVDCNetwork
+ generatedHrefRegexp := regexp.MustCompile("^https://")
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckVcdNetworkDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: fmt.Sprintf(testAccCheckVcdNetwork_basic, os.Getenv("VCD_EDGE_GATEWAY")),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVcdNetworkExists("vcd_network.foonet", &network),
+ testAccCheckVcdNetworkAttributes(&network),
+ resource.TestCheckResourceAttr(
+ "vcd_network.foonet", "name", "foonet"),
+ resource.TestCheckResourceAttr(
+ "vcd_network.foonet", "static_ip_pool.#", "1"),
+ resource.TestCheckResourceAttr(
+ "vcd_network.foonet", "gateway", "10.10.102.1"),
+ resource.TestMatchResourceAttr(
+ "vcd_network.foonet", "href", generatedHrefRegexp),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckVcdNetworkExists(n string, network *govcd.OrgVDCNetwork) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No VAPP ID is set")
+ }
+
+ conn := testAccProvider.Meta().(*govcd.VCDClient)
+
+ resp, err := conn.OrgVdc.FindVDCNetwork(rs.Primary.ID)
+ if err != nil {
+ return fmt.Errorf("Network does not exist.")
+ }
+
+ *network = resp
+
+ return nil
+ }
+}
+
+func testAccCheckVcdNetworkDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*govcd.VCDClient)
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "vcd_network" {
+ continue
+ }
+
+ _, err := conn.OrgVdc.FindVDCNetwork(rs.Primary.ID)
+
+ if err == nil {
+ return fmt.Errorf("Network still exists.")
+ }
+
+ return nil
+ }
+
+ return nil
+}
+
+func testAccCheckVcdNetworkAttributes(network *govcd.OrgVDCNetwork) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+
+ if network.OrgVDCNetwork.Name != "foonet" {
+ return fmt.Errorf("Bad name: %s", network.OrgVDCNetwork.Name)
+ }
+
+ return nil
+ }
+}
+
+const testAccCheckVcdNetwork_basic = `
+resource "vcd_network" "foonet" {
+ name = "foonet"
+ edge_gateway = "%s"
+ gateway = "10.10.102.1"
+ static_ip_pool {
+ start_address = "10.10.102.2"
+ end_address = "10.10.102.254"
+ }
+}
+`
diff --git a/builtin/providers/vcd/resource_vcd_snat.go b/builtin/providers/vcd/resource_vcd_snat.go
new file mode 100644
index 0000000000..b9627e03a4
--- /dev/null
+++ b/builtin/providers/vcd/resource_vcd_snat.go
@@ -0,0 +1,161 @@
+package vcd
+
+import (
+ "fmt"
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/opencredo/vmware-govcd"
+ "regexp"
+ "time"
+)
+
+func resourceVcdSNAT() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceVcdSNATCreate,
+ Update: resourceVcdSNATUpdate,
+ Delete: resourceVcdSNATDelete,
+ Read: resourceVcdSNATRead,
+
+ Schema: map[string]*schema.Schema{
+ "edge_gateway": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "external_ip": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "internal_ip": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ },
+ }
+}
+
+func resourceVcdSNATCreate(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+ // Multiple VCD components need to run operations on the Edge Gateway, as
+ // the edge gateway will throw back an error if it is already performing an
+ // operation, we must wait until we can acquire a lock on the client
+ vcd_client.Mutex.Lock()
+ defer vcd_client.Mutex.Unlock()
+ var task govcd.Task
+
+ // Creating a loop to offer further protection from the edge gateway erroring
+ // due to being busy, e.g. another person using another client that wouldn't be
+ // constrained by our lock. If the edge gateway returns a busy error, wait
+ // 3 seconds and then try again. Continue until a non-busy error or success
+ for {
+ err := vcd_client.OrgVdc.Refresh()
+ if err != nil {
+ return fmt.Errorf("Error refreshing vdc: %#v", err)
+ }
+
+ edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+
+ if err != nil {
+ return fmt.Errorf("Unable to find edge gateway: %#v", err)
+ }
+
+ task, err = edgeGateway.AddNATMapping("SNAT", d.Get("internal_ip").(string),
+ d.Get("external_ip").(string),
+ "any")
+
+ if err != nil {
+ if v, _ := regexp.MatchString("is busy completing an operation.$", err.Error()); v {
+ time.Sleep(3 * time.Second)
+ continue
+ } else {
+ return fmt.Errorf("Error setting SNAT rules: %#v", err)
+ }
+ }
+ break
+ }
+
+ err := task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+
+ d.SetId(d.Get("internal_ip").(string))
+ return nil
+}
+
+func resourceVcdSNATUpdate(d *schema.ResourceData, meta interface{}) error {
+ return nil
+}
+
+func resourceVcdSNATRead(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+ e, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+
+ if err != nil {
+ return fmt.Errorf("Unable to find edge gateway: %#v", err)
+ }
+
+ var found bool
+
+ for _, r := range e.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.NatService.NatRule {
+ if r.RuleType == "SNAT" &&
+ r.GatewayNatRule.OriginalIP == d.Id() {
+ found = true
+ d.Set("external_ip", r.GatewayNatRule.TranslatedIP)
+ }
+ }
+
+ if !found {
+ d.SetId("")
+ }
+
+ return nil
+}
+
+func resourceVcdSNATDelete(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+ // Multiple VCD components need to run operations on the Edge Gateway, as
+ // the edge gateway will throw back an error if it is already performing an
+ // operation, so we must wait until we can acquire a lock on the client
+ vcd_client.Mutex.Lock()
+ defer vcd_client.Mutex.Unlock()
+ var task govcd.Task
+
+ // Creating a loop to offer further protection from the edge gateway erroring
+ // due to being busy, e.g. another person using another client that wouldn't be
+ // constrained by our lock. If the edge gateway returns a busy error, wait
+ // 3 seconds and then try again. Continue until a non-busy error or success
+ for {
+ err := vcd_client.OrgVdc.Refresh()
+ if err != nil {
+ return fmt.Errorf("Error refreshing vdc: %#v", err)
+ }
+
+ edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+
+ if err != nil {
+ return fmt.Errorf("Unable to find edge gateway: %#v", err)
+ }
+
+ task, err = edgeGateway.RemoveNATMapping("SNAT", d.Get("internal_ip").(string),
+ d.Get("external_ip").(string),
+ "")
+
+ if err != nil {
+ if v, _ := regexp.MatchString("is busy completing an operation.$", err.Error()); v {
+ time.Sleep(3 * time.Second)
+ continue
+ } else {
+ return fmt.Errorf("Error setting SNAT rules: %#v", err)
+ }
+ }
+ break
+ }
+
+ err := task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+ return nil
+}
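
Both resourceVcdSNATCreate and resourceVcdSNATDelete above hand-roll the same busy-retry loop around their edge gateway call. The sketch below is illustrative only (the helper name is hypothetical, not part of this patch) and factors that loop out using the same busy-error match and three-second back-off shown above:

```
package vcd

import (
	"fmt"
	"regexp"
	"time"
)

// retryWhileBusy is a hypothetical helper mirroring the loop in the SNAT
// create/delete functions: run op, and while the edge gateway reports it is
// busy, wait three seconds and try again; any other error (or success) ends
// the loop.
func retryWhileBusy(op func() error) error {
	busy := regexp.MustCompile("is busy completing an operation.$")
	for {
		err := op()
		if err == nil {
			return nil
		}
		if busy.MatchString(err.Error()) {
			time.Sleep(3 * time.Second)
			continue
		}
		return fmt.Errorf("Error performing edge gateway operation: %#v", err)
	}
}
```

A later patch in this series (019) replaces these loops with a `retryCall` wrapper around `resource.Retry`, which bounds the retries with a timeout instead of looping indefinitely.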
diff --git a/builtin/providers/vcd/resource_vcd_snat_test.go b/builtin/providers/vcd/resource_vcd_snat_test.go
new file mode 100644
index 0000000000..bf3eced14b
--- /dev/null
+++ b/builtin/providers/vcd/resource_vcd_snat_test.go
@@ -0,0 +1,119 @@
+package vcd
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/opencredo/vmware-govcd"
+)
+
+func TestAccVcdSNAT_Basic(t *testing.T) {
+ if v := os.Getenv("VCD_EXTERNAL_IP"); v == "" {
+ t.Skip("Environment variable VCD_EXTERNAL_IP must be set to run SNAT tests")
+ return
+ }
+
+ var e govcd.EdgeGateway
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckVcdSNATDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: fmt.Sprintf(testAccCheckVcdSnat_basic, os.Getenv("VCD_EDGE_GATWEWAY"), os.Getenv("VCD_EXTERNAL_IP")),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVcdSNATExists("vcd_snat.bar", &e),
+ resource.TestCheckResourceAttr(
+ "vcd_snat.bar", "external_ip", os.Getenv("VCD_EXTERNAL_IP")),
+ resource.TestCheckResourceAttr(
+ "vcd_snat.bar", "internal_ip", "10.10.102.0/24"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckVcdSNATExists(n string, gateway *govcd.EdgeGateway) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ //return fmt.Errorf("Check this: %#v", rs.Primary)
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No SNAT ID is set")
+ }
+
+ conn := testAccProvider.Meta().(*govcd.VCDClient)
+
+ gatewayName := rs.Primary.Attributes["edge_gateway"]
+ edgeGateway, err := conn.OrgVdc.FindEdgeGateway(gatewayName)
+
+ if err != nil {
+ return fmt.Errorf("Could not find edge gateway")
+ }
+
+ var found bool
+ for _, v := range edgeGateway.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.NatService.NatRule {
+ if v.RuleType == "SNAT" &&
+ v.GatewayNatRule.OriginalIP == "10.10.102.0/24" &&
+ v.GatewayNatRule.OriginalPort == "" &&
+ v.GatewayNatRule.TranslatedIP == os.Getenv("VCD_EXTERNAL_IP") {
+ found = true
+ }
+ }
+ if !found {
+ return fmt.Errorf("SNAT rule was not found")
+ }
+
+ *gateway = edgeGateway
+
+ return nil
+ }
+}
+
+func testAccCheckVcdSNATDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*govcd.VCDClient)
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "vcd_snat" {
+ continue
+ }
+
+ gatewayName := rs.Primary.Attributes["edge_gateway"]
+ edgeGateway, err := conn.OrgVdc.FindEdgeGateway(gatewayName)
+
+ if err != nil {
+ return fmt.Errorf("Could not find edge gateway")
+ }
+
+ var found bool
+ for _, v := range edgeGateway.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.NatService.NatRule {
+ if v.RuleType == "SNAT" &&
+ v.GatewayNatRule.OriginalIP == "10.10.102.0/24" &&
+ v.GatewayNatRule.OriginalPort == "" &&
+ v.GatewayNatRule.TranslatedIP == os.Getenv("VCD_EXTERNAL_IP") {
+ found = true
+ }
+ }
+
+ if found {
+ return fmt.Errorf("SNAT rule still exists.")
+ }
+ }
+
+ return nil
+}
+
+const testAccCheckVcdSnat_basic = `
+resource "vcd_snat" "bar" {
+ edge_gateway = "%s"
+ external_ip = "%s"
+ internal_ip = "10.10.102.0/24"
+}
+`
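
The exists and destroy checks above duplicate the same scan over the gateway's NAT rules. As a sketch only (the helper and its name are not part of the patch), that scan could be shared, using just the fields the test already reads:

```
package vcd

import "github.com/opencredo/vmware-govcd"

// hasSNATRule is a hypothetical helper: report whether the edge gateway holds
// a SNAT rule with no original port that translates originalIP to
// translatedIP, mirroring the loop in both test check functions above.
func hasSNATRule(gw govcd.EdgeGateway, originalIP, translatedIP string) bool {
	for _, v := range gw.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.NatService.NatRule {
		if v.RuleType == "SNAT" &&
			v.GatewayNatRule.OriginalIP == originalIP &&
			v.GatewayNatRule.OriginalPort == "" &&
			v.GatewayNatRule.TranslatedIP == translatedIP {
			return true
		}
	}
	return false
}
```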
diff --git a/builtin/providers/vcd/resource_vcd_vapp.go b/builtin/providers/vcd/resource_vcd_vapp.go
new file mode 100644
index 0000000000..7e760ac1dd
--- /dev/null
+++ b/builtin/providers/vcd/resource_vcd_vapp.go
@@ -0,0 +1,355 @@
+package vcd
+
+import (
+ "fmt"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/opencredo/vmware-govcd"
+ types "github.com/opencredo/vmware-govcd/types/v56"
+ "log"
+ "time"
+)
+
+func resourceVcdVApp() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceVcdVAppCreate,
+ Update: resourceVcdVAppUpdate,
+ Read: resourceVcdVAppRead,
+ Delete: resourceVcdVAppDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "template_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "catalog_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "network_href": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+
+ "network_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "memory": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+ "cpus": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+ "ip": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+ "initscript": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ "metadata": &schema.Schema{
+ Type: schema.TypeMap,
+ Optional: true,
+ },
+ "href": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+ "power_on": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: true,
+ },
+ },
+ }
+}
+
+func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+
+ catalog, err := vcd_client.Org.FindCatalog(d.Get("catalog_name").(string))
+ if err != nil {
+ return fmt.Errorf("Error finding catalog: %#v", err)
+ }
+
+ catalogitem, err := catalog.FindCatalogItem(d.Get("template_name").(string))
+ if err != nil {
+ return fmt.Errorf("Error finding catelog item: %#v", err)
+ }
+
+ vapptemplate, err := catalogitem.GetVAppTemplate()
+ if err != nil {
+ return fmt.Errorf("Error finding VAppTemplate: %#v", err)
+ }
+
+ log.Printf("[DEBUG] VAppTemplate: %#v", vapptemplate)
+ var networkHref string
+ net, err := vcd_client.OrgVdc.FindVDCNetwork(d.Get("network_name").(string))
+ if err != nil {
+ return fmt.Errorf("Error finding OrgVCD Network: %#v", err)
+ }
+ if attr, ok := d.GetOk("network_href"); ok {
+ networkHref = attr.(string)
+ } else {
+ networkHref = net.OrgVDCNetwork.HREF
+ }
+ // vapptemplate := govcd.NewVAppTemplate(&vcd_client.Client)
+ //
+ createvapp := &types.InstantiateVAppTemplateParams{
+ Ovf: "http://schemas.dmtf.org/ovf/envelope/1",
+ Xmlns: "http://www.vmware.com/vcloud/v1.5",
+ Name: d.Get("name").(string),
+ InstantiationParams: &types.InstantiationParams{
+ NetworkConfigSection: &types.NetworkConfigSection{
+ Info: "Configuration parameters for logical networks",
+ NetworkConfig: &types.VAppNetworkConfiguration{
+ NetworkName: d.Get("network_name").(string),
+ Configuration: &types.NetworkConfiguration{
+ ParentNetwork: &types.Reference{
+ HREF: networkHref,
+ },
+ FenceMode: "bridged",
+ },
+ },
+ },
+ },
+ Source: &types.Reference{
+ HREF: vapptemplate.VAppTemplate.HREF,
+ },
+ }
+
+ err = resource.Retry(4*time.Minute, func() error {
+ err = vcd_client.OrgVdc.InstantiateVAppTemplate(createvapp)
+
+ if err != nil {
+ return fmt.Errorf("Error: %#v", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ err = vcd_client.OrgVdc.Refresh()
+ if err != nil {
+ return fmt.Errorf("Error: %#v", err)
+ }
+
+ vapp, err := vcd_client.OrgVdc.FindVAppByName(d.Get("name").(string))
+ task, err := vapp.ChangeMemorySize(d.Get("memory").(int))
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error changing memory size: %#v", err)
+ }
+
+ task, err = vapp.ChangeCPUcount(d.Get("cpus").(int))
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error changing cpu count: %#v", err)
+ }
+
+ task, err = vapp.ChangeVMName(d.Get("name").(string))
+ if err != nil {
+ return fmt.Errorf("Error with vm name change: %#v", err)
+ }
+
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error changing vmname: %#v", err)
+ }
+
+ task, err = vapp.ChangeNetworkConfig(d.Get("network_name").(string), d.Get("ip").(string))
+ if err != nil {
+ return fmt.Errorf("Error with Networking change: %#v", err)
+ }
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error changing network: %#v", err)
+ }
+
+ metadata := d.Get("metadata").(map[string]interface{})
+ for k, v := range metadata {
+ task, err = vapp.AddMetadata(k, v.(string))
+ if err != nil {
+ return fmt.Errorf("Error adding metadata: %#v", err)
+ }
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+ }
+
+ if initscript, ok := d.GetOk("initscript"); ok {
+ task, err = vapp.RunCustomizationScript(d.Get("name").(string), initscript.(string))
+ if err != nil {
+ return fmt.Errorf("Error with setting init script: %#v", err)
+ }
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+ }
+
+ if d.Get("power_on").(bool) {
+ task, err = vapp.PowerOn()
+ if err != nil {
+ return fmt.Errorf("Error Powering Up: %#v", err)
+ }
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+ }
+
+ d.SetId(d.Get("name").(string))
+
+ return resourceVcdVAppRead(d, meta)
+ //return nil
+}
+
+func resourceVcdVAppUpdate(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+ vapp, err := vcd_client.OrgVdc.FindVAppByName(d.Id())
+
+ if err != nil {
+ return fmt.Errorf("Error finding VApp: %#v", err)
+ }
+
+ status, err := vapp.GetStatus()
+ if err != nil {
+ return fmt.Errorf("Error getting VApp status: %#v", err)
+ }
+
+ if d.HasChange("metadata") {
+ oraw, nraw := d.GetChange("metadata")
+ metadata := oraw.(map[string]interface{})
+ for k := range metadata {
+ task, err := vapp.DeleteMetadata(k)
+ if err != nil {
+ return fmt.Errorf("Error deleting metadata: %#v", err)
+ }
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+ }
+ metadata = nraw.(map[string]interface{})
+ for k, v := range metadata {
+ task, err := vapp.AddMetadata(k, v.(string))
+ if err != nil {
+ return fmt.Errorf("Error adding metadata: %#v", err)
+ }
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+ }
+
+ }
+
+ if d.HasChange("memory") || d.HasChange("cpus") || d.HasChange("power_on") {
+ if status != "POWERED_OFF" {
+ task, err := vapp.PowerOff()
+ if err != nil {
+ return fmt.Errorf("Error Powering Off: %#v", err)
+ }
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+ }
+
+ if d.HasChange("memory") {
+ task, err := vapp.ChangeMemorySize(d.Get("memory").(int))
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error changing memory size: %#v", err)
+ }
+ }
+
+ if d.HasChange("cpus") {
+ task, err := vapp.ChangeCPUcount(d.Get("cpus").(int))
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error changing cpu count: %#v", err)
+ }
+ }
+
+ if d.Get("power_on").(bool) {
+ task, err := vapp.PowerOn()
+ if err != nil {
+ return fmt.Errorf("Error Powering Up: %#v", err)
+ }
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+ }
+
+ }
+
+ return resourceVcdVAppRead(d, meta)
+}
+
+func resourceVcdVAppRead(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+
+ err := vcd_client.OrgVdc.Refresh()
+ if err != nil {
+ return fmt.Errorf("Error refreshing vdc: %#v", err)
+ }
+
+ vapp, err := vcd_client.OrgVdc.FindVAppByName(d.Id())
+ if err != nil {
+ return fmt.Errorf("Error finding vapp: %#v", err)
+ }
+ d.Set("ip", vapp.VApp.Children.VM[0].NetworkConnectionSection.NetworkConnection.IPAddress)
+
+ return nil
+}
+
+func resourceVcdVAppDelete(d *schema.ResourceData, meta interface{}) error {
+ vcd_client := meta.(*govcd.VCDClient)
+ vapp, err := vcd_client.OrgVdc.FindVAppByName(d.Id())
+
+ if err != nil {
+ return fmt.Errorf("error finding vdc: %s", err)
+ }
+
+ task, err := vapp.Undeploy()
+ if err != nil {
+ return fmt.Errorf("Error Powering Off: %#v", err)
+ }
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+
+ task, err = vapp.Delete()
+ if err != nil {
+ return fmt.Errorf("Error Powering Off: %#v", err)
+ }
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
+
+ return nil
+}
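
resourceVcdVAppCreate repeats one step for memory, CPU count, VM name, networking, metadata, the init script and power-on: start a govcd task, then block until vCloud Director finishes it. A minimal sketch of that step as a helper follows; the function is illustrative only (not part of the patch) and would also make it harder to drop the error check between starting a task and waiting on it, which the memory and CPU changes above currently do:

```
package vcd

import (
	"fmt"

	"github.com/opencredo/vmware-govcd"
)

// runTask is a hypothetical helper showing the pattern repeated throughout
// resourceVcdVAppCreate: kick off a govcd task and wait for it to complete.
func runTask(what string, start func() (govcd.Task, error)) error {
	task, err := start()
	if err != nil {
		return fmt.Errorf("Error %s: %#v", what, err)
	}
	return task.WaitTaskCompletion()
}
```

A call would then read, for example, `runTask("changing memory size", func() (govcd.Task, error) { return vapp.ChangeMemorySize(d.Get("memory").(int)) })`.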
diff --git a/builtin/providers/vcd/resource_vcd_vapp_test.go b/builtin/providers/vcd/resource_vcd_vapp_test.go
new file mode 100644
index 0000000000..bb6e9874a7
--- /dev/null
+++ b/builtin/providers/vcd/resource_vcd_vapp_test.go
@@ -0,0 +1,180 @@
+package vcd
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/opencredo/vmware-govcd"
+)
+
+func TestAccVcdVApp_PowerOff(t *testing.T) {
+ var vapp govcd.VApp
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckVcdVAppDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: fmt.Sprintf(testAccCheckVcdVApp_basic, os.Getenv("VCD_EDGE_GATWEWAY")),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVcdVAppExists("vcd_vapp.foobar", &vapp),
+ testAccCheckVcdVAppAttributes(&vapp),
+ resource.TestCheckResourceAttr(
+ "vcd_vapp.foobar", "name", "foobar"),
+ resource.TestCheckResourceAttr(
+ "vcd_vapp.foobar", "ip", "10.10.102.160"),
+ resource.TestCheckResourceAttr(
+ "vcd_vapp.foobar", "power_on", "true"),
+ ),
+ },
+ resource.TestStep{
+ Config: testAccCheckVcdVApp_powerOff,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVcdVAppExists("vcd_vapp.foobar", &vapp),
+ testAccCheckVcdVAppAttributes_off(&vapp),
+ resource.TestCheckResourceAttr(
+ "vcd_vapp.foobar", "name", "foobar"),
+ resource.TestCheckResourceAttr(
+ "vcd_vapp.foobar", "ip", "10.10.102.160"),
+ resource.TestCheckResourceAttr(
+ "vcd_vapp.foobar", "power_on", "false"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckVcdVAppExists(n string, vapp *govcd.VApp) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No VAPP ID is set")
+ }
+
+ conn := testAccProvider.Meta().(*govcd.VCDClient)
+
+ resp, err := conn.OrgVdc.FindVAppByName(rs.Primary.ID)
+ if err != nil {
+ return err
+ }
+
+ *vapp = resp
+
+ return nil
+ }
+}
+
+func testAccCheckVcdVAppDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*govcd.VCDClient)
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "vcd_vapp" {
+ continue
+ }
+
+ _, err := conn.OrgVdc.FindVAppByName(rs.Primary.ID)
+
+ if err == nil {
+ return fmt.Errorf("VPCs still exist.")
+ }
+
+ return nil
+ }
+
+ return nil
+}
+
+func testAccCheckVcdVAppAttributes(vapp *govcd.VApp) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+
+ if vapp.VApp.Name != "foobar" {
+ return fmt.Errorf("Bad name: %s", vapp.VApp.Name)
+ }
+
+ if vapp.VApp.Name != vapp.VApp.Children.VM[0].Name {
+ return fmt.Errorf("VApp and VM names do not match. %s != %s",
+ vapp.VApp.Name, vapp.VApp.Children.VM[0].Name)
+ }
+
+ status, _ := vapp.GetStatus()
+ if status != "POWERED_ON" {
+ return fmt.Errorf("VApp is not powered on")
+ }
+
+ return nil
+ }
+}
+
+func testAccCheckVcdVAppAttributes_off(vapp *govcd.VApp) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+
+ if vapp.VApp.Name != "foobar" {
+ return fmt.Errorf("Bad name: %s", vapp.VApp.Name)
+ }
+
+ if vapp.VApp.Name != vapp.VApp.Children.VM[0].Name {
+ return fmt.Errorf("VApp and VM names do not match. %s != %s",
+ vapp.VApp.Name, vapp.VApp.Children.VM[0].Name)
+ }
+
+ status, _ := vapp.GetStatus()
+ if status != "POWERED_OFF" {
+ return fmt.Errorf("VApp is still powered on")
+ }
+
+ return nil
+ }
+}
+
+const testAccCheckVcdVApp_basic = `
+resource "vcd_network" "foonet" {
+ name = "foonet"
+ edge_gateway = "%s"
+ gateway = "10.10.102.1"
+ static_ip_pool {
+ start_address = "10.10.102.2"
+ end_address = "10.10.102.254"
+ }
+}
+
+resource "vcd_vapp" "foobar" {
+ name = "foobar"
+ template_name = "base-centos-7.0-x86_64_v-0.1_b-74"
+ catalog_name = "NubesLab"
+ network_name = "${vcd_network.foonet.name}"
+ memory = 1024
+ cpus = 1
+ ip = "10.10.102.160"
+}
+`
+
+const testAccCheckVcdVApp_powerOff = `
+resource "vcd_network" "foonet" {
+ name = "foonet"
+ edge_gateway = "%s"
+ gateway = "10.10.102.1"
+ static_ip_pool {
+ start_address = "10.10.102.2"
+ end_address = "10.10.102.254"
+ }
+}
+
+resource "vcd_vapp" "foobar" {
+ name = "foobar"
+ template_name = "base-centos-7.0-x86_64_v-0.1_b-74"
+ catalog_name = "NubesLab"
+ network_name = "${vcd_network.foonet.name}"
+ memory = 1024
+ cpus = 1
+ ip = "10.10.102.160"
+ power_on = false
+}
+`
diff --git a/builtin/providers/vcd/structure.go b/builtin/providers/vcd/structure.go
new file mode 100644
index 0000000000..9cd5fb281b
--- /dev/null
+++ b/builtin/providers/vcd/structure.go
@@ -0,0 +1,103 @@
+package vcd
+
+import (
+ types "github.com/opencredo/vmware-govcd/types/v56"
+ "strconv"
+)
+
+func expandIpRange(configured []interface{}) (types.IPRanges, error) {
+ ipRange := make([]*types.IPRange, 0, len(configured))
+
+ for _, ipRaw := range configured {
+ data := ipRaw.(map[string]interface{})
+
+ ip := types.IPRange{
+ StartAddress: data["start_address"].(string),
+ EndAddress: data["end_address"].(string),
+ }
+
+ ipRange = append(ipRange, &ip)
+ }
+
+ ipRanges := types.IPRanges{
+ IPRange: ipRange,
+ }
+
+ return ipRanges, nil
+}
+
+func expandFirewallRules(configured []interface{}, gateway *types.EdgeGateway) ([]*types.FirewallRule, error) {
+ //firewallRules := make([]*types.FirewallRule, 0, len(configured))
+ firewallRules := gateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.FirewallRule
+
+ for i := len(configured) - 1; i >= 0; i-- {
+ data := configured[i].(map[string]interface{})
+
+ var protocol *types.FirewallRuleProtocols
+ switch data["protocol"].(string) {
+ case "tcp":
+ protocol = &types.FirewallRuleProtocols{
+ TCP: true,
+ }
+ case "udp":
+ protocol = &types.FirewallRuleProtocols{
+ UDP: true,
+ }
+ case "icmp":
+ protocol = &types.FirewallRuleProtocols{
+ ICMP: true,
+ }
+ default:
+ protocol = &types.FirewallRuleProtocols{
+ Any: true,
+ }
+ }
+ rule := &types.FirewallRule{
+ //ID: strconv.Itoa(len(configured) - i),
+ IsEnabled: true,
+ MatchOnTranslate: false,
+ Description: data["description"].(string),
+ Policy: data["policy"].(string),
+ Protocols: protocol,
+ Port: getNumericPort(data["destination_port"]),
+ DestinationPortRange: data["destination_port"].(string),
+ DestinationIP: data["destination_ip"].(string),
+ SourcePort: getNumericPort(data["source_port"]),
+ SourcePortRange: data["source_port"].(string),
+ SourceIP: data["source_ip"].(string),
+ EnableLogging: false,
+ }
+ firewallRules = append(firewallRules, rule)
+ }
+
+ return firewallRules, nil
+}
+
+func getProtocol(protocol types.FirewallRuleProtocols) string {
+ if protocol.TCP {
+ return "tcp"
+ }
+ if protocol.UDP {
+ return "udp"
+ }
+ if protocol.ICMP {
+ return "icmp"
+ }
+ return "any"
+}
+
+func getNumericPort(portrange interface{}) int {
+ i, err := strconv.Atoi(portrange.(string))
+ if err != nil {
+ return -1
+ }
+ return i
+}
+
+func getPortString(port int) string {
+ if port == -1 {
+ return "any"
+ }
+ portstring := strconv.Itoa(port)
+ return portstring
+}
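
The two port helpers at the bottom of structure.go encode the string "any" as -1 and decode -1 back to "any", so a rule's port survives the round trip through the numeric port field while numeric strings pass through unchanged. A small illustrative snippet (not part of the patch):

```
package vcd

import "fmt"

// examplePortRoundTrip demonstrates the intended round trip of the helpers
// above: numeric strings pass through unchanged, anything non-numeric
// (such as "any") becomes -1 and is rendered back as "any".
func examplePortRoundTrip() {
	fmt.Println(getPortString(getNumericPort("80")))  // prints "80"
	fmt.Println(getPortString(getNumericPort("any"))) // prints "any"
}
```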
diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss
index 0defd251a5..ab9dae99cd 100755
--- a/website/source/assets/stylesheets/_docs.scss
+++ b/website/source/assets/stylesheets/_docs.scss
@@ -23,6 +23,7 @@ body.layout-openstack,
body.layout-packet,
body.layout-rundeck,
body.layout-template,
+body.layout-vcd,
body.layout-vsphere,
body.layout-docs,
body.layout-downloads,
diff --git a/website/source/docs/providers/vcd/index.html.markdown b/website/source/docs/providers/vcd/index.html.markdown
new file mode 100644
index 0000000000..45cb0df58a
--- /dev/null
+++ b/website/source/docs/providers/vcd/index.html.markdown
@@ -0,0 +1,54 @@
+---
+layout: "vcd"
+page_title: "Provider: vCloudDirector"
+sidebar_current: "docs-vcd-index"
+description: |-
+ The vCloud Director provider is used to interact with the resources supported by vCloud
+ Director. The provider needs to be configured with the proper credentials before it can be used.
+---
+
+# vCloud Director Provider
+
+The vCloud Director provider is used to interact with the resources supported by vCloud
+Director. The provider needs to be configured with the proper credentials before it can be used.
+
+Use the navigation to the left to read about the available resources.
+
+~> **NOTE:** The vCloud Director Provider currently represents _initial support_ and
+therefore may undergo significant changes as the community improves it.
+
+## Example Usage
+
+```
+# Configure the vCloud Director Provider
+provider "vcd" {
+ user = "${var.vcd_user}"
+ password = "${var.vcd_pass}"
+ org = "${var.vcd_org}"
+ url = "${var.vcd_url}"
+ vdc = "${var.vcd_vdc}"
+}
+
+# Create a new network
+resource "vcd_network" "net" {
+ ...
+}
+```
+
+## Argument Reference
+
+The following arguments are used to configure the vCloud Director Provider:
+
+* `user` - (Required) This is the username for vCloud Director API operations. Can also
+ be specified with the `VCD_USER` environment variable.
+* `password` - (Required) This is the password for vCloud Director API operations. Can
+ also be specified with the `VCD_PASSWORD` environment variable.
+* `org` - (Required) This is the vCloud Director Org on which to run API
+ operations. Can also be specified with the `VCD_ORG` environment
+ variable.
+* `url` - (Required) This is the URL for the vCloud Director API.
+ Can also be specified with the `VCD_URL` environment variable.
+* `vdc` - (Optional) This is the virtual datacenter within vCloud Director to run
+ API operations against. If not set, the plugin will select the first virtual
+ datacenter available to your Org. Can also be specified with the `VCD_VDC` environment
+ variable.
diff --git a/website/source/docs/providers/vcd/r/dnat.html.markdown b/website/source/docs/providers/vcd/r/dnat.html.markdown
new file mode 100644
index 0000000000..dcaed4baab
--- /dev/null
+++ b/website/source/docs/providers/vcd/r/dnat.html.markdown
@@ -0,0 +1,32 @@
+---
+layout: "vcd"
+page_title: "vCloudDirector: vcd_dnat"
+sidebar_current: "docs-vcd-resource-dnat"
+description: |-
+ Provides a vCloud Director DNAT resource. This can be used to create, modify, and delete destination NATs to map external IPs to a VM.
+---
+
+# vcd\_dnat
+
+Provides a vCloud Director DNAT resource. This can be used to create, modify,
+and delete destination NATs to map external IPs to a VM.
+
+## Example Usage
+
+```
+resource "vcd_dnat" "web" {
+ edge_gateway = "Edge Gateway Name"
+ external_ip = "78.101.10.20"
+ port = 80
+ internal_ip = "10.10.0.5"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `edge_gateway` - (Required) The name of the edge gateway on which to apply the DNAT
+* `external_ip` - (Required) One of the external IPs available on your Edge Gateway
+* `port` - (Required) The port number to map
+* `internal_ip` - (Required) The IP of the VM to map to
diff --git a/website/source/docs/providers/vcd/r/firewall_rules.html.markdown b/website/source/docs/providers/vcd/r/firewall_rules.html.markdown
new file mode 100644
index 0000000000..e8fb4401d4
--- /dev/null
+++ b/website/source/docs/providers/vcd/r/firewall_rules.html.markdown
@@ -0,0 +1,63 @@
+---
+layout: "vcd"
+page_title: "vCloudDirector: vcd_firewall_rules"
+sidebar_current: "docs-vcd-resource-firewall-rules"
+description: |-
+ Provides a vCloud Director Firewall resource. This can be used to create, modify, and delete firewall settings and rules.
+---
+
+# vcd\_firewall\_rules
+
+Provides a vCloud Director Firewall resource. This can be used to create,
+modify, and delete firewall settings and rules.
+
+## Example Usage
+
+```
+resource "vcd_firewall_rules" "fw" {
+ edge_gateway = "Edge Gateway Name"
+ default_action = "drop"
+
+ rule {
+ description = "allow-web"
+ policy = "allow"
+ protocol = "tcp"
+ destination_port = "80"
+ destination_ip = "10.10.0.5"
+ source_port = "any"
+ source_ip = "any"
+ }
+
+ rule {
+ description = "allow-outbound"
+ policy = "allow"
+ protocol = "any"
+ destination_port = "any"
+ destination_ip = "any"
+ source_port = "any"
+ source_ip = "10.10.0.0/24"
+ }
+
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `edge_gateway` - (Required) The name of the edge gateway on which to apply the Firewall Rules
+* `default_action` - (Required) Either "allow" or "deny". Specifies what to do should none of the rules match
+* `rule` - (Optional) Configures a firewall rule; see [Rules](#rules) below for details.
+
+
+## Rules
+
+Each firewall rule supports the following attributes:
+
+* `description` - (Required) Description of the firewall rule
+* `policy` - (Required) Specifies what to do when this rule is matched. Either "allow" or "deny"
+* `protocol` - (Required) The protocol to match. One of "tcp", "udp", "icmp" or "any"
+* `destination_port` - (Required) The destination port to match. Either a port number or "any"
+* `destination_ip` - (Required) The destination IP to match. Either an IP address, IP range or "any"
+* `source_port` - (Required) The source port to match. Either a port number or "any"
+* `source_ip` - (Required) The source IP to match. Either an IP address, IP range or "any"
diff --git a/website/source/docs/providers/vcd/r/network.html.markdown b/website/source/docs/providers/vcd/r/network.html.markdown
new file mode 100644
index 0000000000..eead8c58ea
--- /dev/null
+++ b/website/source/docs/providers/vcd/r/network.html.markdown
@@ -0,0 +1,57 @@
+---
+layout: "vcd"
+page_title: "vCloudDirector: vcd_network"
+sidebar_current: "docs-vcd-resource-network"
+description: |-
+ Provides a vCloud Director VDC Network. This can be used to create, modify, and delete internal networks for vApps to connect.
+---
+
+# vcd\_network
+
+Provides a vCloud Director VDC Network. This can be used to create,
+modify, and delete internal networks for vApps to connect.
+
+## Example Usage
+
+```
+resource "vcd_network" "net" {
+ name = "my-net"
+ edge_gateway = "Edge Gateway Name"
+ gateway = "10.10.0.1"
+
+ dhcp_pool {
+ start_address = "10.10.0.2"
+ end_address = "10.10.0.100"
+ }
+
+ static_ip_pool {
+ start_address = "10.10.0.152"
+ end_address = "10.10.0.254"
+ }
+
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) A unique name for the network
+* `edge_gateway` - (Required) The name of the edge gateway
+* `netmask` - (Optional) The netmask for the new network. Defaults to `255.255.255.0`
+* `gateway` - (Required) The gateway for this network
+* `dns1` - (Optional) First DNS server to use. Defaults to `8.8.8.8`
+* `dns2` - (Optional) Second DNS server to use. Defaults to `8.8.4.4`
+* `dns_suffix` - (Optional) A FQDN for the virtual machines on this network
+* `dhcp_pool` - (Optional) A range of IPs to issue to virtual machines that don't
+ have a static IP; see [IP Pools](#ip-pools) below for details.
+* `static_ip_pool` - (Optional) A range of IPs permitted to be used as static IPs for
+ virtual machines; see [IP Pools](#ip-pools) below for details.
+
+
+## IP Pools
+
+IP pools support the following attributes:
+
+* `start_address` - (Required) The first address in the IP Range
+* `end_address` - (Required) The final address in the IP Range
diff --git a/website/source/docs/providers/vcd/r/snat.html.markdown b/website/source/docs/providers/vcd/r/snat.html.markdown
new file mode 100644
index 0000000000..dc8b567c7c
--- /dev/null
+++ b/website/source/docs/providers/vcd/r/snat.html.markdown
@@ -0,0 +1,30 @@
+---
+layout: "vcd"
+page_title: "vCloudDirector: vcd_snat"
+sidebar_current: "docs-vcd-resource-snat"
+description: |-
+ Provides a vCloud Director SNAT resource. This can be used to create, modify, and delete source NATs to allow vApps to send external traffic.
+---
+
+# vcd\_snat
+
+Provides a vCloud Director SNAT resource. This can be used to create, modify,
+and delete source NATs to allow vApps to send external traffic.
+
+## Example Usage
+
+```
+resource "vcd_snat" "outbound" {
+ edge_gateway = "Edge Gateway Name"
+ external_ip = "78.101.10.20"
+ internal_ip = "10.10.0.0/24"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `edge_gateway` - (Required) The name of the edge gateway on which to apply the SNAT
+* `external_ip` - (Required) One of the external IPs available on your Edge Gateway
+* `internal_ip` - (Required) The IP or IP Range of the VM(s) to map from
diff --git a/website/source/docs/providers/vcd/r/vapp.html.markdown b/website/source/docs/providers/vcd/r/vapp.html.markdown
new file mode 100644
index 0000000000..0a2a2e234e
--- /dev/null
+++ b/website/source/docs/providers/vcd/r/vapp.html.markdown
@@ -0,0 +1,59 @@
+---
+layout: "vcd"
+page_title: "vCloudDirector: vcd_vapp"
+sidebar_current: "docs-vcd-resource-vapp"
+description: |-
+ Provides a vCloud Director vApp resource. This can be used to create, modify, and delete vApps.
+---
+
+# vcd\_vapp
+
+Provides a vCloud Director vApp resource. This can be used to create,
+modify, and delete vApps.
+
+## Example Usage
+
+```
+resource "vcd_network" "net" {
+ ...
+}
+
+resource "vcd_vapp" "web" {
+ name = "web"
+ catalog_name = "Boxes"
+ template_name = "lampstack-1.10.1-ubuntu-10.04"
+ memory = 2048
+ cpus = 1
+
+ network_name = "${vcd_network.net.name}"
+ network_href = "${vcd_network.net.href}"
+ ip = "10.10.104.160"
+
+ metadata {
+ role = "web"
+ env = "staging"
+ version = "v1"
+ }
+
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) A unique name for the vApp
+* `catalog_name` - (Required) The catalog name in which to find the given vApp Template
+* `template_name` - (Required) The name of the vApp Template to use
+* `memory` - (Optional) The amount of RAM (in MB) to allocate to the vApp
+* `cpus` - (Optional) The number of virtual CPUs to allocate to the vApp
+* `initscript` - (Optional) A script to be run only on initial boot
+* `network_name` - (Required) Name of the network this vApp should join
+* `network_href` - (Optional) The vCloud Director generated href of the network this vApp
+ should join. If empty, the network name is used to query vCloud Director and discover
+ the href
+* `ip` - (Optional) The IP to assign to this vApp. If given, the address must be within the `static_ip_pool`
+ set for the network. If left blank and the network has a `dhcp_pool` with at least one available IP,
+ the address will be assigned by DHCP
+* `metadata` - (Optional) Key value map of metadata to assign to this vApp
+* `power_on` - (Optional) A boolean value stating if this vApp should be powered on. Defaults to `true`
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb
index 937c120de4..0e3923b86f 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -189,6 +189,10 @@
Template
+ >
+ vCloud Director
+
+
>
vSphere
diff --git a/website/source/layouts/vcd.erb b/website/source/layouts/vcd.erb
new file mode 100644
index 0000000000..ebfd9b7d92
--- /dev/null
+++ b/website/source/layouts/vcd.erb
@@ -0,0 +1,38 @@
+<% wrap_layout :inner do %>
+ <% content_for :sidebar do %>
+
+ <% end %>
+
+ <%= yield %>
+<% end %>
From 0d2007e8bd788e434ed2d4c5ea907d8f7c0dbcdc Mon Sep 17 00:00:00 2001
From: Sunil K Chopra
Date: Mon, 2 Nov 2015 09:26:25 -0600
Subject: [PATCH 014/664] as per advice from stack72, simplified
---
builtin/providers/aws/resource_aws_autoscaling_group.go | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_autoscaling_group.go b/builtin/providers/aws/resource_aws_autoscaling_group.go
index b74b2b6cc3..6c2716b545 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_group.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_group.go
@@ -96,10 +96,8 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
},
"placement_group": &schema.Schema{
- Type: schema.TypeSet,
+ Type: schema.TypeString,
Optional: true,
- Elem: &schema.Schema{Type: schema.TypeString},
- Set: schema.HashString,
},
"load_balancers": &schema.Schema{
From 68c7baa20e870e1db3a92d89416c7f31de974617 Mon Sep 17 00:00:00 2001
From: Sunil K Chopra
Date: Mon, 2 Nov 2015 09:33:35 -0600
Subject: [PATCH 015/664] as per advice from stack72 to stick to strings
---
builtin/providers/aws/resource_aws_autoscaling_group.go | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_autoscaling_group.go b/builtin/providers/aws/resource_aws_autoscaling_group.go
index 6c2716b545..626bd7b5dd 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_group.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_group.go
@@ -180,9 +180,8 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{})
autoScalingGroupOpts.HealthCheckGracePeriod = aws.Int64(int64(v.(int)))
}
- if v, ok := d.GetOk("placement_group"); ok && v.(*schema.Set).Len() > 0 {
- autoScalingGroupOpts.PlacementGroup = expandStringList(
- v.(*schema.Set).List())
+ if v, ok := d.GetOk("placement_group"); ok {
+ autoScalingGroupOpts.PlacementGroup = aws.String(v.(string))
}
if v, ok := d.GetOk("load_balancers"); ok && v.(*schema.Set).Len() > 0 {
From c7b02d9fdb8aa7b219fb814f45c55ddb5588c19b Mon Sep 17 00:00:00 2001
From: Sunil K Chopra
Date: Mon, 2 Nov 2015 09:33:46 -0600
Subject: [PATCH 016/664] handling updates
---
builtin/providers/aws/resource_aws_autoscaling_group.go | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_autoscaling_group.go b/builtin/providers/aws/resource_aws_autoscaling_group.go
index 626bd7b5dd..4f166ce503 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_group.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_group.go
@@ -231,6 +231,7 @@ func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) e
d.Set("load_balancers", g.LoadBalancerNames)
d.Set("min_size", g.MinSize)
d.Set("max_size", g.MaxSize)
+ d.Set("placement_group", g.PlacementGroup)
d.Set("name", g.AutoScalingGroupName)
d.Set("tag", g.Tags)
d.Set("vpc_zone_identifier", strings.Split(*g.VPCZoneIdentifier, ","))
@@ -285,6 +286,10 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{})
}
}
+ if d.HasChange("placement_group") {
+ opts.PlacementGroup = aws.String(d.Get("placement_group").(string))
+ }
+
if d.HasChange("termination_policies") {
// If the termination policy is set to null, we need to explicitly set
// it back to "Default", or the API won't reset it for us.
From df41f10d1d09af6d5b217c4e3cdebfd94d5c0cd0 Mon Sep 17 00:00:00 2001
From: Sunil K Chopra
Date: Mon, 2 Nov 2015 09:37:09 -0600
Subject: [PATCH 017/664] tests! yes! (thanks stack72)
---
builtin/providers/aws/resource_aws_autoscaling_group_test.go | 3 +++
1 file changed, 3 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_autoscaling_group_test.go b/builtin/providers/aws/resource_aws_autoscaling_group_test.go
index 1a25c9dea9..bf8b56c08e 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_group_test.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_group_test.go
@@ -48,6 +48,8 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) {
"aws_autoscaling_group.bar", "termination_policies.0", "OldestInstance"),
resource.TestCheckResourceAttr(
"aws_autoscaling_group.bar", "termination_policies.1", "ClosestToNextInstanceHour"),
+ resource.TestCheckResourceAttr(
+ "aws_autoscaling_group.bar", "placement_group", "test"),
),
},
@@ -364,6 +366,7 @@ resource "aws_autoscaling_group" "bar" {
desired_capacity = 4
force_delete = true
termination_policies = ["OldestInstance","ClosestToNextInstanceHour"]
+ placement_group = "test"
launch_configuration = "${aws_launch_configuration.foobar.name}"
From 16b0e0d6190541d21cb7b2bcbee5ce2237641bdd Mon Sep 17 00:00:00 2001
From: Sunil K Chopra
Date: Mon, 2 Nov 2015 09:44:29 -0600
Subject: [PATCH 018/664] documentation, thanks again to stack72
---
.../docs/providers/aws/r/autoscaling_group.html.markdown | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown
index 7cb1661426..290bcabbed 100644
--- a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown
+++ b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown
@@ -13,6 +13,11 @@ Provides an AutoScaling Group resource.
## Example Usage
```
+resource "aws_placement_group" "test" {
+ name = "test"
+ strategy = "cluster"
+}
+
resource "aws_autoscaling_group" "bar" {
availability_zones = ["us-east-1a"]
name = "foobar3-terraform-test"
@@ -22,6 +27,7 @@ resource "aws_autoscaling_group" "bar" {
health_check_type = "ELB"
desired_capacity = 4
force_delete = true
+ placement_group = "${aws_placement_group.test.id}"
launch_configuration = "${aws_launch_configuration.foobar.name}"
tag {
@@ -48,7 +54,7 @@ The following arguments are supported:
* `availability_zones` - (Optional) A list of AZs to launch resources in.
Required only if you do not specify any `vpc_zone_identifier`
* `launch_configuration` - (Required) The name of the launch configuration to use.
-* `health_check_grace_period` - (Optional) Time after instance comes into service before checking health.
+* `health_check_grace_period` - (Optional) Time after instance comes into service before checking health.
* `health_check_type` - (Optional) "EC2" or "ELB". Controls how health checking is done.
* `desired_capacity` - (Optional) The number of Amazon EC2 instances that
should be running in the group. (See also [Waiting for
@@ -66,6 +72,7 @@ The following arguments are supported:
* `vpc_zone_identifier` (Optional) A list of subnet IDs to launch resources in.
* `termination_policies` (Optional) A list of policies to decide how the instances in the auto scale group should be terminated.
* `tag` (Optional) A list of tag blocks. Tags documented below.
+* `placement_group` (Optional) The name of the placement group into which you'll launch your instances, if any.
* `wait_for_capacity_timeout` (Default: "10m") A maximum
[duration](https://golang.org/pkg/time/#ParseDuration) that Terraform should
wait for ASG instances to be healthy before timing out. (See also [Waiting
From 965882bfdf669be18e3ffc48cefc6bee1937be71 Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Mon, 2 Nov 2015 16:39:56 +0000
Subject: [PATCH 019/664] Added protection for API limiting
---
builtin/providers/vcd/resource_vcd_dnat.go | 73 +++-------
builtin/providers/vcd/resource_vcd_network.go | 46 +++----
builtin/providers/vcd/resource_vcd_snat.go | 83 ++++--------
builtin/providers/vcd/resource_vcd_vapp.go | 127 +++++++++++-------
builtin/providers/vcd/structure.go | 6 +
5 files changed, 155 insertions(+), 180 deletions(-)
diff --git a/builtin/providers/vcd/resource_vcd_dnat.go b/builtin/providers/vcd/resource_vcd_dnat.go
index dd1c67e338..e949f1da86 100644
--- a/builtin/providers/vcd/resource_vcd_dnat.go
+++ b/builtin/providers/vcd/resource_vcd_dnat.go
@@ -4,9 +4,7 @@ import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
"github.com/opencredo/vmware-govcd"
- "regexp"
"strings"
- "time"
)
func resourceVcdDNAT() *schema.Resource {
@@ -48,41 +46,30 @@ func resourceVcdDNATCreate(d *schema.ResourceData, meta interface{}) error {
 // operation, so we must wait until we can acquire a lock on the client
vcd_client.Mutex.Lock()
defer vcd_client.Mutex.Unlock()
- var task govcd.Task
portString := getPortString(d.Get("port").(int))
+ edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+
+ if err != nil {
+ return fmt.Errorf("Unable to find edge gateway: %#v", err)
+ }
+
// Creating a loop to offer further protection from the edge gateway erroring
 // due to being busy, e.g. another person using another client that wouldn't be
 // constrained by our lock. If the edge gateway returns a busy error, wait
// 3 seconds and then try again. Continue until a non-busy error or success
- for {
- err := vcd_client.OrgVdc.Refresh()
- if err != nil {
- return fmt.Errorf("Error refreshing vdc: %#v", err)
- }
- edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
-
- if err != nil {
- return fmt.Errorf("Unable to find edge gateway: %#v", err)
- }
-
- task, err = edgeGateway.AddNATMapping("DNAT", d.Get("external_ip").(string),
+ err = retryCall(4, func() error {
+ task, err := edgeGateway.AddNATMapping("DNAT", d.Get("external_ip").(string),
d.Get("internal_ip").(string),
portString)
-
if err != nil {
- if v, _ := regexp.MatchString("is busy completing an operation.$", err.Error()); v {
- time.Sleep(3 * time.Second)
- continue
- } else {
- return fmt.Errorf("Error setting DNAT rules: %#v", err)
- }
+ return fmt.Errorf("Error setting DNAT rules: %#v", err)
}
- break
- }
- err := task.WaitTaskCompletion()
+ return task.WaitTaskCompletion()
+ })
+
if err != nil {
return fmt.Errorf("Error completing tasks: %#v", err)
}
@@ -129,41 +116,23 @@ func resourceVcdDNATDelete(d *schema.ResourceData, meta interface{}) error {
 // operation, so we must wait until we can acquire a lock on the client
vcd_client.Mutex.Lock()
defer vcd_client.Mutex.Unlock()
- var task govcd.Task
portString := getPortString(d.Get("port").(int))
- // Creating a loop to offer further protection from the edge gateway erroring
- // due to being busy, e.g. another person using another client that wouldn't be
- // constrained by our lock. If the edge gateway returns a busy error, wait
- // 3 seconds and then try again. Continue until a non-busy error or success
- for {
- err := vcd_client.OrgVdc.Refresh()
- if err != nil {
- return fmt.Errorf("Error refreshing vdc: %#v", err)
- }
+ edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
- edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
-
- if err != nil {
- return fmt.Errorf("Unable to find edge gateway: %#v", err)
- }
-
- task, err = edgeGateway.RemoveNATMapping("DNAT", d.Get("external_ip").(string),
+ if err != nil {
+ return fmt.Errorf("Unable to find edge gateway: %#v", err)
+ }
+ err = retryCall(4, func() error {
+ task, err := edgeGateway.RemoveNATMapping("DNAT", d.Get("external_ip").(string),
d.Get("internal_ip").(string),
portString)
-
if err != nil {
- if v, _ := regexp.MatchString("is busy completing an operation.$", err.Error()); v {
- time.Sleep(3 * time.Second)
- continue
- } else {
- return fmt.Errorf("Error setting DNAT rules: %#v", err)
- }
+ return fmt.Errorf("Error setting DNAT rules: %#v", err)
}
- break
- }
- err := task.WaitTaskCompletion()
+ return task.WaitTaskCompletion()
+ })
if err != nil {
return fmt.Errorf("Error completing tasks: %#v", err)
}
diff --git a/builtin/providers/vcd/resource_vcd_network.go b/builtin/providers/vcd/resource_vcd_network.go
index 3196b73065..c98527e926 100644
--- a/builtin/providers/vcd/resource_vcd_network.go
+++ b/builtin/providers/vcd/resource_vcd_network.go
@@ -6,12 +6,10 @@ import (
"bytes"
"fmt"
"github.com/hashicorp/terraform/helper/hashcode"
- "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/opencredo/vmware-govcd"
types "github.com/opencredo/vmware-govcd/types/v56"
"strings"
- "time"
)
func resourceVcdNetwork() *schema.Resource {
@@ -151,29 +149,33 @@ func resourceVcdNetworkCreate(d *schema.ResourceData, meta interface{}) error {
}
log.Printf("[INFO] NETWORK: %#v", newnetwork)
- err = vcd_client.OrgVdc.CreateOrgVDCNetwork(newnetwork)
+ err = retryCall(4, func() error {
+ return vcd_client.OrgVdc.CreateOrgVDCNetwork(newnetwork)
+ })
if err != nil {
return fmt.Errorf("Error: %#v", err)
}
+ err = vcd_client.OrgVdc.Refresh()
+ if err != nil {
+ return fmt.Errorf("Error refreshing vdc: %#v", err)
+ }
+
+ network, err := vcd_client.OrgVdc.FindVDCNetwork(d.Get("name").(string))
+ if err != nil {
+ return fmt.Errorf("Error finding network: %#v", err)
+ }
+
if dhcp, ok := d.GetOk("dhcp_pool"); ok {
- err := vcd_client.OrgVdc.Refresh()
- if err != nil {
- return fmt.Errorf("Error refreshing vdc: %#v", err)
- }
+ err = retryCall(4, func() error {
+ task, err := edgeGateway.AddDhcpPool(network.OrgVDCNetwork, dhcp.(*schema.Set).List())
+ if err != nil {
+ return fmt.Errorf("Error adding DHCP pool: %#v", err)
+ }
- network, err := vcd_client.OrgVdc.FindVDCNetwork(d.Get("name").(string))
- if err != nil {
- return fmt.Errorf("Error finding network: %#v", err)
- }
-
- task, err := edgeGateway.AddDhcpPool(network.OrgVDCNetwork, dhcp.(*schema.Set).List())
-
- if err != nil {
- return fmt.Errorf("Error adding DHCP pool: %#v", err)
- }
- err = task.WaitTaskCompletion()
+ return task.WaitTaskCompletion()
+ })
if err != nil {
return fmt.Errorf("Error completing tasks: %#v", err)
}
@@ -233,16 +235,12 @@ func resourceVcdNetworkDelete(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("Error finding network: %#v", err)
}
- err = resource.Retry(3*time.Minute, func() error {
+ err = retryCall(4, func() error {
task, err := network.Delete()
if err != nil {
return fmt.Errorf("Error Deleting Network: %#v", err)
}
- err = task.WaitTaskCompletion()
- if err != nil {
- return fmt.Errorf("Error completing tasks: %#v", err)
- }
- return nil
+ return task.WaitTaskCompletion()
})
if err != nil {
return err
diff --git a/builtin/providers/vcd/resource_vcd_snat.go b/builtin/providers/vcd/resource_vcd_snat.go
index b9627e03a4..08a9b4023a 100644
--- a/builtin/providers/vcd/resource_vcd_snat.go
+++ b/builtin/providers/vcd/resource_vcd_snat.go
@@ -4,8 +4,6 @@ import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
"github.com/opencredo/vmware-govcd"
- "regexp"
- "time"
)
func resourceVcdSNAT() *schema.Resource {
@@ -42,42 +40,27 @@ func resourceVcdSNATCreate(d *schema.ResourceData, meta interface{}) error {
 // operation, so we must wait until we can acquire a lock on the client
vcd_client.Mutex.Lock()
defer vcd_client.Mutex.Unlock()
- var task govcd.Task
// Creating a loop to offer further protection from the edge gateway erroring
 // due to being busy, e.g. another person using another client that wouldn't be
 // constrained by our lock. If the edge gateway returns a busy error, wait
// 3 seconds and then try again. Continue until a non-busy error or success
- for {
- err := vcd_client.OrgVdc.Refresh()
- if err != nil {
- return fmt.Errorf("Error refreshing vdc: %#v", err)
- }
-
- edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
-
- if err != nil {
- return fmt.Errorf("Unable to find edge gateway: %#v", err)
- }
-
- task, err = edgeGateway.AddNATMapping("SNAT", d.Get("internal_ip").(string),
- d.Get("external_ip").(string),
- "any")
-
- if err != nil {
- if v, _ := regexp.MatchString("is busy completing an operation.$", err.Error()); v {
- time.Sleep(3 * time.Second)
- continue
- } else {
- return fmt.Errorf("Error setting SNAT rules: %#v", err)
- }
- }
- break
+ edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+ if err != nil {
+ return fmt.Errorf("Unable to find edge gateway: %#v", err)
}
- err := task.WaitTaskCompletion()
+ err = retryCall(4, func() error {
+ task, err := edgeGateway.AddNATMapping("SNAT", d.Get("internal_ip").(string),
+ d.Get("external_ip").(string),
+ "any")
+ if err != nil {
+ return fmt.Errorf("Error setting SNAT rules: %#v", err)
+ }
+ return task.WaitTaskCompletion()
+ })
if err != nil {
- return fmt.Errorf("Error completing tasks: %#v", err)
+ return err
}
d.SetId(d.Get("internal_ip").(string))
@@ -120,42 +103,24 @@ func resourceVcdSNATDelete(d *schema.ResourceData, meta interface{}) error {
 // operation, so we must wait until we can acquire a lock on the client
vcd_client.Mutex.Lock()
defer vcd_client.Mutex.Unlock()
- var task govcd.Task
- // Creating a loop to offer further protection from the edge gateway erroring
- // due to being busy, e.g. another person using another client that wouldn't be
- // constrained by our lock. If the edge gateway returns a busy error, wait
- // 3 seconds and then try again. Continue until a non-busy error or success
- for {
- err := vcd_client.OrgVdc.Refresh()
- if err != nil {
- return fmt.Errorf("Error refreshing vdc: %#v", err)
- }
+ edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+ if err != nil {
+ return fmt.Errorf("Unable to find edge gateway: %#v", err)
+ }
- edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
-
- if err != nil {
- return fmt.Errorf("Unable to find edge gateway: %#v", err)
- }
-
- task, err = edgeGateway.RemoveNATMapping("SNAT", d.Get("internal_ip").(string),
+ err = retryCall(4, func() error {
+ task, err := edgeGateway.RemoveNATMapping("SNAT", d.Get("internal_ip").(string),
d.Get("external_ip").(string),
"")
-
if err != nil {
- if v, _ := regexp.MatchString("is busy completing an operation.$", err.Error()); v {
- time.Sleep(3 * time.Second)
- continue
- } else {
- return fmt.Errorf("Error setting SNAT rules: %#v", err)
- }
+ return fmt.Errorf("Error setting SNAT rules: %#v", err)
}
- break
+ return task.WaitTaskCompletion()
+ })
+ if err != nil {
+ return err
}
- err := task.WaitTaskCompletion()
- if err != nil {
- return fmt.Errorf("Error completing tasks: %#v", err)
- }
return nil
}
diff --git a/builtin/providers/vcd/resource_vcd_vapp.go b/builtin/providers/vcd/resource_vcd_vapp.go
index 7e760ac1dd..1add00412a 100644
--- a/builtin/providers/vcd/resource_vcd_vapp.go
+++ b/builtin/providers/vcd/resource_vcd_vapp.go
@@ -2,12 +2,10 @@ package vcd
import (
"fmt"
- "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/opencredo/vmware-govcd"
types "github.com/opencredo/vmware-govcd/types/v56"
"log"
- "time"
)
func resourceVcdVApp() *schema.Resource {
@@ -135,11 +133,16 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
},
}
- err = resource.Retry(4*time.Minute, func() error {
- err = vcd_client.OrgVdc.InstantiateVAppTemplate(createvapp)
+ err = retryCall(4, func() error {
+ e := vcd_client.OrgVdc.InstantiateVAppTemplate(createvapp)
- if err != nil {
- return fmt.Errorf("Error: %#v", err)
+ if e != nil {
+ return fmt.Errorf("Error: %#v", e)
+ }
+
+ e = vcd_client.OrgVdc.Refresh()
+ if e != nil {
+ return fmt.Errorf("Error: %#v", e)
}
return nil
})
@@ -147,72 +150,106 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
return err
}
- err = vcd_client.OrgVdc.Refresh()
- if err != nil {
- return fmt.Errorf("Error: %#v", err)
- }
+ // err = resource.Retry(4*time.Minute, func() error {
+ // err = vcd_client.OrgVdc.InstantiateVAppTemplate(createvapp)
+ //
+ // if err != nil {
+ // return fmt.Errorf("Error: %#v", err)
+ // }
+ // return nil
+ // })
+ // if err != nil {
+ // return err
+ // }
vapp, err := vcd_client.OrgVdc.FindVAppByName(d.Get("name").(string))
- task, err := vapp.ChangeMemorySize(d.Get("memory").(int))
- err = task.WaitTaskCompletion()
+
+ err = retryCall(4, func() error {
+ task, err := vapp.ChangeMemorySize(d.Get("memory").(int))
+ if err != nil {
+ return fmt.Errorf("Error changing memory size: %#v", err)
+ }
+
+ return task.WaitTaskCompletion()
+ })
if err != nil {
- return fmt.Errorf("Error changing memory size: %#v", err)
+ return err
}
- task, err = vapp.ChangeCPUcount(d.Get("cpus").(int))
- err = task.WaitTaskCompletion()
+ err = retryCall(4, func() error {
+ task, err := vapp.ChangeCPUcount(d.Get("cpus").(int))
+ if err != nil {
+ return fmt.Errorf("Error changing cpu count: %#v", err)
+ }
+
+ return task.WaitTaskCompletion()
+ })
if err != nil {
- return fmt.Errorf("Error changing cpu count: %#v", err)
+ return fmt.Errorf("Error completing task: %#v", err)
}
- task, err = vapp.ChangeVMName(d.Get("name").(string))
- if err != nil {
- return fmt.Errorf("Error with vm name change: %#v", err)
- }
+ err = retryCall(4, func() error {
+ task, err := vapp.ChangeVMName(d.Get("name").(string))
+ if err != nil {
+ return fmt.Errorf("Error with vm name change: %#v", err)
+ }
- err = task.WaitTaskCompletion()
+ return task.WaitTaskCompletion()
+ })
if err != nil {
return fmt.Errorf("Error changing vmname: %#v", err)
}
- task, err = vapp.ChangeNetworkConfig(d.Get("network_name").(string), d.Get("ip").(string))
- if err != nil {
- return fmt.Errorf("Error with Networking change: %#v", err)
- }
- err = task.WaitTaskCompletion()
+ err = retryCall(4, func() error {
+ task, err := vapp.ChangeNetworkConfig(d.Get("network_name").(string), d.Get("ip").(string))
+ if err != nil {
+ return fmt.Errorf("Error with Networking change: %#v", err)
+ }
+ return task.WaitTaskCompletion()
+ })
if err != nil {
return fmt.Errorf("Error changing network: %#v", err)
}
- metadata := d.Get("metadata").(map[string]interface{})
- for k, v := range metadata {
- task, err = vapp.AddMetadata(k, v.(string))
- if err != nil {
- return fmt.Errorf("Error adding metadata: %#v", err)
- }
- err = task.WaitTaskCompletion()
- if err != nil {
- return fmt.Errorf("Error completing tasks: %#v", err)
+ err = retryCall(4, func() error {
+ metadata := d.Get("metadata").(map[string]interface{})
+ for k, v := range metadata {
+ task, err := vapp.AddMetadata(k, v.(string))
+ if err != nil {
+ return fmt.Errorf("Error adding metadata: %#v", err)
+ }
+ err = task.WaitTaskCompletion()
+ if err != nil {
+ return fmt.Errorf("Error completing tasks: %#v", err)
+ }
}
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("Error adding metadata: %#v", err)
}
if initscript, ok := d.GetOk("initscript"); ok {
- task, err = vapp.RunCustomizationScript(d.Get("name").(string), initscript.(string))
- if err != nil {
- return fmt.Errorf("Error with setting init script: %#v", err)
- }
- err = task.WaitTaskCompletion()
+ err = retryCall(4, func() error {
+ task, err := vapp.RunCustomizationScript(d.Get("name").(string), initscript.(string))
+ if err != nil {
+ return fmt.Errorf("Error with setting init script: %#v", err)
+ }
+ return task.WaitTaskCompletion()
+ })
if err != nil {
return fmt.Errorf("Error completing tasks: %#v", err)
}
}
if d.Get("power_on").(bool) {
- task, err = vapp.PowerOn()
- if err != nil {
- return fmt.Errorf("Error Powering Up: %#v", err)
- }
- err = task.WaitTaskCompletion()
+ err = retryCall(4, func() error {
+ task, err := vapp.PowerOn()
+ if err != nil {
+ return fmt.Errorf("Error Powering Up: %#v", err)
+ }
+ return task.WaitTaskCompletion()
+ })
if err != nil {
return fmt.Errorf("Error completing tasks: %#v", err)
}
diff --git a/builtin/providers/vcd/structure.go b/builtin/providers/vcd/structure.go
index 9cd5fb281b..b243cdf8bf 100644
--- a/builtin/providers/vcd/structure.go
+++ b/builtin/providers/vcd/structure.go
@@ -1,8 +1,10 @@
package vcd
import (
+ "github.com/hashicorp/terraform/helper/resource"
types "github.com/opencredo/vmware-govcd/types/v56"
"strconv"
+ "time"
)
func expandIpRange(configured []interface{}) (types.IPRanges, error) {
@@ -101,3 +103,7 @@ func getPortString(port int) string {
portstring := strconv.Itoa(port)
return portstring
}
+
+func retryCall(min int, f resource.RetryFunc) error {
+ return resource.Retry(time.Duration(min)*time.Minute, f)
+}
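
The `retryCall` helper above simply converts a minute count into a `resource.Retry` timeout; each vCD API call plus its task wait is wrapped in one closure, so a transient failure re-runs the whole step. A minimal, self-contained sketch of the same pattern follows; `retryFor` and the fake task are illustrative stand-ins, not the terraform `helper/resource` API.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// retryFor repeatedly invokes f until it returns nil or the deadline passes.
// It is a simplified stand-in for the retryCall/resource.Retry pairing above.
func retryFor(minutes int, f func() error) error {
	deadline := time.Now().Add(time.Duration(minutes) * time.Minute)
	var err error
	for time.Now().Before(deadline) {
		if err = f(); err == nil {
			return nil
		}
		time.Sleep(2 * time.Second) // back off between attempts
	}
	return err
}

func main() {
	attempts := 0
	// The API call and its task wait live inside one closure, so a transient
	// failure re-runs the whole step instead of leaving a half-finished task.
	err := retryFor(1, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient vCD error")
		}
		return nil // task completed
	})
	fmt.Println(err, attempts)
}
```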
From 95ec9a9fbfad0f3cfe80737183f336bfa89854f8 Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Mon, 2 Nov 2015 19:09:24 +0000
Subject: [PATCH 020/664] Refresh firewall rules after each failure before
trying to append new rules
---
.../vcd/resource_vcd_firewall_rules.go | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/builtin/providers/vcd/resource_vcd_firewall_rules.go b/builtin/providers/vcd/resource_vcd_firewall_rules.go
index e025b143f6..3581f1179c 100644
--- a/builtin/providers/vcd/resource_vcd_firewall_rules.go
+++ b/builtin/providers/vcd/resource_vcd_firewall_rules.go
@@ -89,14 +89,19 @@ func resourceVcdFirewallRulesCreate(d *schema.ResourceData, meta interface{}) er
defer vcd_client.Mutex.Unlock()
edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
-
- firewallRules, _ := expandFirewallRules(d.Get("rule").(*schema.Set).List(), edgeGateway.EdgeGateway)
-
- task, err := edgeGateway.CreateFirewallRules(d.Get("default_action").(string), firewallRules)
if err != nil {
- return fmt.Errorf("Error setting firewall rules: %#v", err)
+ return fmt.Errorf("Unable to find edge gateway: %s", err)
}
- err = task.WaitTaskCompletion()
+
+ err = retryCall(5, func() error {
+ edgeGateway.Refresh()
+ firewallRules, _ := expandFirewallRules(d.Get("rule").(*schema.Set).List(), edgeGateway.EdgeGateway)
+ task, err := edgeGateway.CreateFirewallRules(d.Get("default_action").(string), firewallRules)
+ if err != nil {
+ return fmt.Errorf("Error setting firewall rules: %#v", err)
+ }
+ return task.WaitTaskCompletion()
+ })
if err != nil {
return fmt.Errorf("Error completing tasks: %#v", err)
}
From fc7dcb824fcae118608914274460832fc5d934a7 Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Mon, 2 Nov 2015 19:22:48 +0000
Subject: [PATCH 021/664] Minor change to documentation
---
website/source/docs/providers/vcd/r/dnat.html.markdown | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/docs/providers/vcd/r/dnat.html.markdown b/website/source/docs/providers/vcd/r/dnat.html.markdown
index dcaed4baab..dd6fb92b0a 100644
--- a/website/source/docs/providers/vcd/r/dnat.html.markdown
+++ b/website/source/docs/providers/vcd/r/dnat.html.markdown
@@ -9,7 +9,7 @@ description: |-
# vcd\_dnat
Provides a vCloud Director DNAT resource. This can be used to create, modify,
-and delete destination NATs to map external IPs to a VM.
+and delete destination NATs to map an external IP/port to a VM.
## Example Usage
From 843c04e917f083c031aac211bc69acc7183bfe53 Mon Sep 17 00:00:00 2001
From: Lars Wander
Date: Tue, 3 Nov 2015 10:30:36 -0500
Subject: [PATCH 022/664] provider/google: Improve Container Scope Example
Documentation
---
.../google/r/container_cluster.html.markdown | 23 +++++++++++++------
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/website/source/docs/providers/google/r/container_cluster.html.markdown b/website/source/docs/providers/google/r/container_cluster.html.markdown
index 5a66ec9aaf..a3ebad49e1 100644
--- a/website/source/docs/providers/google/r/container_cluster.html.markdown
+++ b/website/source/docs/providers/google/r/container_cluster.html.markdown
@@ -14,14 +14,23 @@ description: |-
```
resource "google_container_cluster" "primary" {
- name = "marcellus-wallace"
- zone = "us-central1-a"
- initial_node_count = 3
+ name = "marcellus-wallace"
+ zone = "us-central1-a"
+ initial_node_count = 3
- master_auth {
- username = "mr.yoda"
- password = "adoy.rm"
- }
+ master_auth {
+ username = "mr.yoda"
+ password = "adoy.rm"
+ }
+
+ node_config {
+ oauth_scopes = [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/logging.write",
+ "https://www.googleapis.com/auth/monitoring"
+ ]
+ }
}
```
From 0ded14f160af4e99b4f47b0473e4b85dc4358690 Mon Sep 17 00:00:00 2001
From: ryane
Date: Mon, 26 Oct 2015 17:24:48 -0400
Subject: [PATCH 023/664] entrypoint support for docker_container resource
---
.../docker/resource_docker_container.go | 7 +++
.../docker/resource_docker_container_funcs.go | 4 ++
.../docker/resource_docker_container_test.go | 49 ++++++++++++++++++-
.../docker/r/container.html.markdown | 5 ++
4 files changed, 63 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/docker/resource_docker_container.go b/builtin/providers/docker/resource_docker_container.go
index 59e65b9c16..4fe63650ed 100644
--- a/builtin/providers/docker/resource_docker_container.go
+++ b/builtin/providers/docker/resource_docker_container.go
@@ -71,6 +71,13 @@ func resourceDockerContainer() *schema.Resource {
Elem: &schema.Schema{Type: schema.TypeString},
},
+ "entrypoint": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+
"dns": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
diff --git a/builtin/providers/docker/resource_docker_container_funcs.go b/builtin/providers/docker/resource_docker_container_funcs.go
index aa74a4e1d8..24df694906 100644
--- a/builtin/providers/docker/resource_docker_container_funcs.go
+++ b/builtin/providers/docker/resource_docker_container_funcs.go
@@ -54,6 +54,10 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
createOpts.Config.Cmd = stringListToStringSlice(v.([]interface{}))
}
+ if v, ok := d.GetOk("entrypoint"); ok {
+ createOpts.Config.Entrypoint = stringListToStringSlice(v.([]interface{}))
+ }
+
exposedPorts := map[dc.Port]struct{}{}
portBindings := map[dc.Port][]dc.PortBinding{}
diff --git a/builtin/providers/docker/resource_docker_container_test.go b/builtin/providers/docker/resource_docker_container_test.go
index 29ecc4bb3f..e888c67da7 100644
--- a/builtin/providers/docker/resource_docker_container_test.go
+++ b/builtin/providers/docker/resource_docker_container_test.go
@@ -10,6 +10,7 @@ import (
)
func TestAccDockerContainer_basic(t *testing.T) {
+ var c dc.Container
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@@ -17,14 +18,42 @@ func TestAccDockerContainer_basic(t *testing.T) {
resource.TestStep{
Config: testAccDockerContainerConfig,
Check: resource.ComposeTestCheckFunc(
- testAccContainerRunning("docker_container.foo"),
+ testAccContainerRunning("docker_container.foo", &c),
),
},
},
})
}
-func testAccContainerRunning(n string) resource.TestCheckFunc {
+func TestAccDockerContainer_entrypoint(t *testing.T) {
+ var c dc.Container
+
+ testCheck := func(*terraform.State) error {
+ if len(c.Config.Entrypoint) < 3 ||
+ (c.Config.Entrypoint[0] != "/bin/bash" &&
+ c.Config.Entrypoint[1] != "-c" &&
+ c.Config.Entrypoint[2] != "ping localhost") {
+ return fmt.Errorf("Container wrong entrypoint: %s", c.Config.Entrypoint)
+ }
+ return nil
+ }
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccDockerContainerEntrypointConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccContainerRunning("docker_container.foo", &c),
+ testCheck,
+ ),
+ },
+ },
+ })
+}
+
+func testAccContainerRunning(n string, container *dc.Container) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
@@ -43,6 +72,11 @@ func testAccContainerRunning(n string) resource.TestCheckFunc {
for _, c := range containers {
if c.ID == rs.Primary.ID {
+ inspected, err := client.InspectContainer(c.ID)
+ if err != nil {
+ return fmt.Errorf("Container could not be inspected: %s", err)
+ }
+ *container = *inspected
return nil
}
}
@@ -61,3 +95,14 @@ resource "docker_container" "foo" {
image = "${docker_image.foo.latest}"
}
`
+const testAccDockerContainerEntrypointConfig = `
+resource "docker_image" "foo" {
+ name = "nginx:latest"
+}
+
+resource "docker_container" "foo" {
+ name = "tf-test"
+ image = "${docker_image.foo.latest}"
+ entrypoint = ["/bin/bash", "-c", "ping localhost"]
+}
+`
diff --git a/website/source/docs/providers/docker/r/container.html.markdown b/website/source/docs/providers/docker/r/container.html.markdown
index 91a4714b7a..f1f9707bf1 100644
--- a/website/source/docs/providers/docker/r/container.html.markdown
+++ b/website/source/docs/providers/docker/r/container.html.markdown
@@ -37,6 +37,11 @@ The following arguments are supported:
* `command` - (Optional, list of strings) The command to use to start the
container. For example, to run `/usr/bin/myprogram -f baz.conf` set the
command to be `["/usr/bin/myprogram", "-f", "baz.conf"]`.
+* `entrypoint` - (Optional, list of strings) The command to use as the
+ Entrypoint for the container. The Entrypoint allows you to configure a
+ container to run as an executable. For example, to run `/usr/bin/myprogram`
+ when starting a container, set the entrypoint to be
+ `["/usr/bin/myprogram"]`.
* `dns` - (Optional, set of strings) Set of DNS servers.
* `env` - (Optional, set of strings) Environmental variables to set.
* `links` - (Optional, set of strings) Set of links for link based
From 17d185808e5e83cde6d4a535b20e4a4fc6770b00 Mon Sep 17 00:00:00 2001
From: ryane
Date: Tue, 27 Oct 2015 12:08:57 -0400
Subject: [PATCH 024/664] restart policy support for docker_container
---
.../docker/resource_docker_container.go | 22 +++++++++++++++++++
.../docker/resource_docker_container_funcs.go | 4 ++++
.../docker/resource_docker_container_test.go | 17 +++++++++++---
.../docker/r/container.html.markdown | 4 ++++
4 files changed, 44 insertions(+), 3 deletions(-)
diff --git a/builtin/providers/docker/resource_docker_container.go b/builtin/providers/docker/resource_docker_container.go
index 4fe63650ed..7d2fa34cdc 100644
--- a/builtin/providers/docker/resource_docker_container.go
+++ b/builtin/providers/docker/resource_docker_container.go
@@ -6,6 +6,7 @@ import (
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
+ "regexp"
)
func resourceDockerContainer() *schema.Resource {
@@ -92,6 +93,27 @@ func resourceDockerContainer() *schema.Resource {
ForceNew: true,
},
+ "restart": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Default: "no",
+ ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+ value := v.(string)
+ if !regexp.MustCompile(`^(no|on-failure|always)$`).MatchString(value) {
+ es = append(es, fmt.Errorf(
+ "%q must be one of \"no\", \"on-failure\", or \"always\"", k))
+ }
+ return
+ },
+ },
+
+ "max_retry_count": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+
"volumes": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
diff --git a/builtin/providers/docker/resource_docker_container_funcs.go b/builtin/providers/docker/resource_docker_container_funcs.go
index 24df694906..800f0f8abf 100644
--- a/builtin/providers/docker/resource_docker_container_funcs.go
+++ b/builtin/providers/docker/resource_docker_container_funcs.go
@@ -95,6 +95,10 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
hostConfig := &dc.HostConfig{
Privileged: d.Get("privileged").(bool),
PublishAllPorts: d.Get("publish_all_ports").(bool),
+ RestartPolicy: dc.RestartPolicy{
+ Name: d.Get("restart").(string),
+ MaximumRetryCount: d.Get("max_retry_count").(int),
+ },
}
if len(portBindings) != 0 {
diff --git a/builtin/providers/docker/resource_docker_container_test.go b/builtin/providers/docker/resource_docker_container_test.go
index e888c67da7..0d0fe734fc 100644
--- a/builtin/providers/docker/resource_docker_container_test.go
+++ b/builtin/providers/docker/resource_docker_container_test.go
@@ -25,7 +25,7 @@ func TestAccDockerContainer_basic(t *testing.T) {
})
}
-func TestAccDockerContainer_entrypoint(t *testing.T) {
+func TestAccDockerContainer_customized(t *testing.T) {
var c dc.Container
testCheck := func(*terraform.State) error {
@@ -35,6 +35,15 @@ func TestAccDockerContainer_entrypoint(t *testing.T) {
c.Config.Entrypoint[2] != "ping localhost") {
return fmt.Errorf("Container wrong entrypoint: %s", c.Config.Entrypoint)
}
+
+ if c.HostConfig.RestartPolicy.Name == "on-failure" {
+ if c.HostConfig.RestartPolicy.MaximumRetryCount != 5 {
+ return fmt.Errorf("Container has wrong restart policy max retry count: %d", c.HostConfig.RestartPolicy.MaximumRetryCount)
+ }
+ } else {
+ return fmt.Errorf("Container has wrong restart policy: %s", c.HostConfig.RestartPolicy.Name)
+ }
+
return nil
}
@@ -43,7 +52,7 @@ func TestAccDockerContainer_entrypoint(t *testing.T) {
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccDockerContainerEntrypointConfig,
+ Config: testAccDockerContainerCustomizedConfig,
Check: resource.ComposeTestCheckFunc(
testAccContainerRunning("docker_container.foo", &c),
testCheck,
@@ -95,7 +104,7 @@ resource "docker_container" "foo" {
image = "${docker_image.foo.latest}"
}
`
-const testAccDockerContainerEntrypointConfig = `
+const testAccDockerContainerCustomizedConfig = `
resource "docker_image" "foo" {
name = "nginx:latest"
}
@@ -104,5 +113,7 @@ resource "docker_container" "foo" {
name = "tf-test"
image = "${docker_image.foo.latest}"
entrypoint = ["/bin/bash", "-c", "ping localhost"]
+ restart = "on-failure"
+ max_retry_count = 5
}
`
diff --git a/website/source/docs/providers/docker/r/container.html.markdown b/website/source/docs/providers/docker/r/container.html.markdown
index f1f9707bf1..91c5a8659d 100644
--- a/website/source/docs/providers/docker/r/container.html.markdown
+++ b/website/source/docs/providers/docker/r/container.html.markdown
@@ -48,6 +48,10 @@ The following arguments are supported:
connectivity between containers that are running on the same host.
* `hostname` - (Optional, string) Hostname of the container.
* `domainname` - (Optional, string) Domain name of the container.
+* `restart` - (Optional, string) The restart policy for the container. Must be
+ one of "no", "on-failure", "always".
+* `max_retry_count` - (Optional, int) The maximum number of times to attempt
+  a restart when `restart` is set to "on-failure".
* `must_run` - (Optional, bool) If true, then the Docker container will be
kept running. If false, then as long as the container exists, Terraform
assumes it is successful.
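
The `restart` argument is validated up front with a small regular expression (see the `ValidateFunc` in the schema hunk above), so an unsupported policy fails at plan time rather than at the Docker API. A self-contained sketch of that check, with illustrative function names rather than the provider's own:

```go
package main

import (
	"fmt"
	"regexp"
)

// validRestart mirrors the ValidateFunc above: only "no", "on-failure",
// and "always" are accepted for the restart argument.
var validRestart = regexp.MustCompile(`^(no|on-failure|always)$`)

func checkRestart(value string) error {
	if !validRestart.MatchString(value) {
		return fmt.Errorf("%q must be one of \"no\", \"on-failure\", or \"always\"", value)
	}
	return nil
}

func main() {
	fmt.Println(checkRestart("on-failure"))     // <nil>
	fmt.Println(checkRestart("unless-stopped")) // rejected by this version of the schema
}
```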
From 6842c32d03b5a48fa066efe1c00c3a01a903354b Mon Sep 17 00:00:00 2001
From: ryane
Date: Tue, 27 Oct 2015 19:53:49 -0400
Subject: [PATCH 025/664] add basic runtime constraints to docker_container
---
.../docker/resource_docker_container.go | 18 ++++++++++++++
.../docker/resource_docker_container_funcs.go | 24 +++++++++++++++++++
.../docker/resource_docker_container_test.go | 14 +++++++++++
.../docker/r/container.html.markdown | 4 ++++
4 files changed, 60 insertions(+)
diff --git a/builtin/providers/docker/resource_docker_container.go b/builtin/providers/docker/resource_docker_container.go
index 7d2fa34cdc..48eac9a4d9 100644
--- a/builtin/providers/docker/resource_docker_container.go
+++ b/builtin/providers/docker/resource_docker_container.go
@@ -171,6 +171,24 @@ func resourceDockerContainer() *schema.Resource {
Optional: true,
ForceNew: true,
},
+
+ "memory": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "memory_swap": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "cpu_shares": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
},
}
}
diff --git a/builtin/providers/docker/resource_docker_container_funcs.go b/builtin/providers/docker/resource_docker_container_funcs.go
index 800f0f8abf..0f1a9d9e0d 100644
--- a/builtin/providers/docker/resource_docker_container_funcs.go
+++ b/builtin/providers/docker/resource_docker_container_funcs.go
@@ -120,6 +120,30 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
hostConfig.Links = stringSetToStringSlice(v.(*schema.Set))
}
+ if v, ok := d.GetOk("memory"); ok {
+ memory := int64(v.(int))
+ if memory > 0 {
+ hostConfig.Memory = memory * 1024 * 1024
+ }
+ }
+
+ if v, ok := d.GetOk("memory_swap"); ok {
+ swap := int64(v.(int))
+ if swap != 0 {
+ if swap > 0 { // only convert positive #s to bytes
+ swap = swap * 1024 * 1024
+ }
+ hostConfig.MemorySwap = swap
+ }
+ }
+
+ if v, ok := d.GetOk("cpu_shares"); ok {
+ shares := int64(v.(int))
+ if shares > 0 {
+ hostConfig.CPUShares = shares
+ }
+ }
+
creationTime = time.Now()
if err := client.StartContainer(retContainer.ID, hostConfig); err != nil {
return fmt.Errorf("Unable to start container: %s", err)
diff --git a/builtin/providers/docker/resource_docker_container_test.go b/builtin/providers/docker/resource_docker_container_test.go
index 0d0fe734fc..1402f129f3 100644
--- a/builtin/providers/docker/resource_docker_container_test.go
+++ b/builtin/providers/docker/resource_docker_container_test.go
@@ -44,6 +44,17 @@ func TestAccDockerContainer_customized(t *testing.T) {
return fmt.Errorf("Container has wrong restart policy: %s", c.HostConfig.RestartPolicy.Name)
}
+ if c.HostConfig.Memory != (128 * 1024 * 1024) {
+ return fmt.Errorf("Container has wrong memory setting: %d", c.HostConfig.Memory)
+ }
+
+ if c.HostConfig.MemorySwap != (128 * 1024 * 1024) {
+ return fmt.Errorf("Container has wrong memory swap setting: %d", c.HostConfig.Memory)
+ }
+
+ if c.HostConfig.CPUShares != 512 {
+ return fmt.Errorf("Container has wrong cpu shares setting: %d", c.HostConfig.CPUShares)
+ }
return nil
}
@@ -115,5 +126,8 @@ resource "docker_container" "foo" {
entrypoint = ["/bin/bash", "-c", "ping localhost"]
restart = "on-failure"
max_retry_count = 5
+ memory = 128
+ memory_swap = 128
+ cpu_shares = 512
}
`
diff --git a/website/source/docs/providers/docker/r/container.html.markdown b/website/source/docs/providers/docker/r/container.html.markdown
index 91c5a8659d..c1a728f1a6 100644
--- a/website/source/docs/providers/docker/r/container.html.markdown
+++ b/website/source/docs/providers/docker/r/container.html.markdown
@@ -59,6 +59,10 @@ The following arguments are supported:
* `privileged` - (Optional, bool) Run container in privileged mode.
* `publish_all_ports` - (Optional, bool) Publish all ports of the container.
* `volumes` - (Optional) See [Volumes](#volumes) below for details.
+* `memory` - (Optional, int) The memory limit for the container in MBs.
+* `memory_swap` - (Optional, int) The total memory limit (memory + swap) for the
+ container in MBs.
+* `cpu_shares` - (Optional, int) CPU shares (relative weight) for the container.
## Ports
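
Note that the limits are expressed in megabytes in the schema and converted to bytes before being handed to Docker (see the HostConfig hunk above); in this version a negative `memory_swap` such as -1 is passed through unconverted. A small sketch of that conversion, using an illustrative helper name:

```go
package main

import "fmt"

// toBytes mirrors the conversion above: positive megabyte values become bytes,
// while zero or negative values (e.g. -1 for unlimited swap) pass through.
func toBytes(mb int64) int64 {
	if mb > 0 {
		return mb * 1024 * 1024
	}
	return mb
}

func main() {
	fmt.Println(toBytes(128)) // 134217728 bytes for memory = 128
	fmt.Println(toBytes(-1))  // -1, left as-is
}
```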
From 4531866d8da30aba88dfc7498648c1c15d15c756 Mon Sep 17 00:00:00 2001
From: ryane
Date: Tue, 3 Nov 2015 15:20:58 -0500
Subject: [PATCH 026/664] add label support to docker container resource
---
.../providers/docker/resource_docker_container.go | 6 ++++++
.../docker/resource_docker_container_funcs.go | 12 ++++++++++++
.../docker/resource_docker_container_test.go | 9 +++++++++
.../docs/providers/docker/r/container.html.markdown | 1 +
4 files changed, 28 insertions(+)
diff --git a/builtin/providers/docker/resource_docker_container.go b/builtin/providers/docker/resource_docker_container.go
index 48eac9a4d9..0a29ab73d8 100644
--- a/builtin/providers/docker/resource_docker_container.go
+++ b/builtin/providers/docker/resource_docker_container.go
@@ -172,6 +172,12 @@ func resourceDockerContainer() *schema.Resource {
ForceNew: true,
},
+ "labels": &schema.Schema{
+ Type: schema.TypeMap,
+ Optional: true,
+ ForceNew: true,
+ },
+
"memory": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
diff --git a/builtin/providers/docker/resource_docker_container_funcs.go b/builtin/providers/docker/resource_docker_container_funcs.go
index 0f1a9d9e0d..4a617480e8 100644
--- a/builtin/providers/docker/resource_docker_container_funcs.go
+++ b/builtin/providers/docker/resource_docker_container_funcs.go
@@ -82,6 +82,10 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
createOpts.Config.Volumes = volumes
}
+ if v, ok := d.GetOk("labels"); ok {
+ createOpts.Config.Labels = mapLabels(v.(map[string]interface{}))
+ }
+
var retContainer *dc.Container
if retContainer, err = client.CreateContainer(createOpts); err != nil {
return fmt.Errorf("Unable to create container: %s", err)
@@ -255,6 +259,14 @@ func stringSetToStringSlice(stringSet *schema.Set) []string {
return ret
}
+func mapLabels(labels map[string]interface{}) map[string]string {
+ mapped := make(map[string]string, len(labels))
+ for k, v := range labels {
+ mapped[k] = v.(string)
+ }
+ return mapped
+}
+
func fetchDockerContainer(name string, client *dc.Client) (*dc.APIContainers, error) {
apiContainers, err := client.ListContainers(dc.ListContainersOptions{All: true})
diff --git a/builtin/providers/docker/resource_docker_container_test.go b/builtin/providers/docker/resource_docker_container_test.go
index 1402f129f3..e194d1a10c 100644
--- a/builtin/providers/docker/resource_docker_container_test.go
+++ b/builtin/providers/docker/resource_docker_container_test.go
@@ -55,6 +55,11 @@ func TestAccDockerContainer_customized(t *testing.T) {
if c.HostConfig.CPUShares != 512 {
return fmt.Errorf("Container has wrong cpu shares setting: %d", c.HostConfig.CPUShares)
}
+
+ if c.Config.Labels["env"] != "prod" || c.Config.Labels["role"] != "test" {
+ return fmt.Errorf("Container does not have the correct labels")
+ }
+
return nil
}
@@ -129,5 +134,9 @@ resource "docker_container" "foo" {
memory = 128
memory_swap = 128
cpu_shares = 512
+ labels {
+ env = "prod"
+ role = "test"
+ }
}
`
diff --git a/website/source/docs/providers/docker/r/container.html.markdown b/website/source/docs/providers/docker/r/container.html.markdown
index c1a728f1a6..b83387aef0 100644
--- a/website/source/docs/providers/docker/r/container.html.markdown
+++ b/website/source/docs/providers/docker/r/container.html.markdown
@@ -44,6 +44,7 @@ The following arguments are supported:
`["/usr/bin/myprogram"]`.
* `dns` - (Optional, set of strings) Set of DNS servers.
* `env` - (Optional, set of strings) Environmental variables to set.
+* `labels` - (Optional) Key/value pairs to set as labels on the container.
* `links` - (Optional, set of strings) Set of links for link based
connectivity between containers that are running on the same host.
* `hostname` - (Optional, string) Hostname of the container.
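
Because `labels` is a `schema.TypeMap`, the provider receives it as `map[string]interface{}` and asserts each value to a string before passing it to Docker (the `mapLabels` helper above, renamed `mapTypeMapValsToString` in the next patch). A self-contained sketch of that conversion, with an illustrative function name:

```go
package main

import "fmt"

// toStringMap mirrors mapLabels above: schema.TypeMap values arrive as
// map[string]interface{} and each value is asserted to a string.
func toStringMap(in map[string]interface{}) map[string]string {
	out := make(map[string]string, len(in))
	for k, v := range in {
		out[k] = v.(string)
	}
	return out
}

func main() {
	labels := toStringMap(map[string]interface{}{"env": "prod", "role": "test"})
	fmt.Println(labels["env"], labels["role"])
}
```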
From 72c86a62c0a22fa6faf3e1effbbcb3a1e4bd0ad3 Mon Sep 17 00:00:00 2001
From: ryane
Date: Wed, 4 Nov 2015 12:42:55 -0500
Subject: [PATCH 027/664] support for log driver + config in docker container
---
.../docker/resource_docker_container.go | 21 +++++++++++++++++++
.../docker/resource_docker_container_funcs.go | 15 +++++++++----
.../docker/resource_docker_container_test.go | 17 +++++++++++++++
.../docker/r/container.html.markdown | 4 ++++
4 files changed, 53 insertions(+), 4 deletions(-)
diff --git a/builtin/providers/docker/resource_docker_container.go b/builtin/providers/docker/resource_docker_container.go
index 0a29ab73d8..92331fc795 100644
--- a/builtin/providers/docker/resource_docker_container.go
+++ b/builtin/providers/docker/resource_docker_container.go
@@ -195,6 +195,27 @@ func resourceDockerContainer() *schema.Resource {
Optional: true,
ForceNew: true,
},
+
+ "log_driver": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Default: "json-file",
+ ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+ value := v.(string)
+ if !regexp.MustCompile(`^(json-file|syslog|journald|gelf|fluentd)$`).MatchString(value) {
+ es = append(es, fmt.Errorf(
+ "%q must be one of \"json-file\", \"syslog\", \"journald\", \"gelf\", or \"fluentd\"", k))
+ }
+ return
+ },
+ },
+
+ "log_opts": &schema.Schema{
+ Type: schema.TypeMap,
+ Optional: true,
+ ForceNew: true,
+ },
},
}
}
diff --git a/builtin/providers/docker/resource_docker_container_funcs.go b/builtin/providers/docker/resource_docker_container_funcs.go
index 4a617480e8..443f9ef3fb 100644
--- a/builtin/providers/docker/resource_docker_container_funcs.go
+++ b/builtin/providers/docker/resource_docker_container_funcs.go
@@ -83,7 +83,7 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
}
if v, ok := d.GetOk("labels"); ok {
- createOpts.Config.Labels = mapLabels(v.(map[string]interface{}))
+ createOpts.Config.Labels = mapTypeMapValsToString(v.(map[string]interface{}))
}
var retContainer *dc.Container
@@ -103,6 +103,9 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
Name: d.Get("restart").(string),
MaximumRetryCount: d.Get("max_retry_count").(int),
},
+ LogConfig: dc.LogConfig{
+ Type: d.Get("log_driver").(string),
+ },
}
if len(portBindings) != 0 {
@@ -148,6 +151,10 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
}
}
+ if v, ok := d.GetOk("log_opts"); ok {
+ hostConfig.LogConfig.Config = mapTypeMapValsToString(v.(map[string]interface{}))
+ }
+
creationTime = time.Now()
if err := client.StartContainer(retContainer.ID, hostConfig); err != nil {
return fmt.Errorf("Unable to start container: %s", err)
@@ -259,9 +266,9 @@ func stringSetToStringSlice(stringSet *schema.Set) []string {
return ret
}
-func mapLabels(labels map[string]interface{}) map[string]string {
- mapped := make(map[string]string, len(labels))
- for k, v := range labels {
+func mapTypeMapValsToString(typeMap map[string]interface{}) map[string]string {
+ mapped := make(map[string]string, len(typeMap))
+ for k, v := range typeMap {
mapped[k] = v.(string)
}
return mapped
diff --git a/builtin/providers/docker/resource_docker_container_test.go b/builtin/providers/docker/resource_docker_container_test.go
index e194d1a10c..4b3dfce9a2 100644
--- a/builtin/providers/docker/resource_docker_container_test.go
+++ b/builtin/providers/docker/resource_docker_container_test.go
@@ -60,6 +60,18 @@ func TestAccDockerContainer_customized(t *testing.T) {
return fmt.Errorf("Container does not have the correct labels")
}
+ if c.HostConfig.LogConfig.Type != "json-file" {
+ return fmt.Errorf("Container does not have the correct log config: %s", c.HostConfig.LogConfig.Type)
+ }
+
+ if c.HostConfig.LogConfig.Config["max-size"] != "10m" {
+ return fmt.Errorf("Container does not have the correct max-size log option: %v", c.HostConfig.LogConfig.Config["max-size"])
+ }
+
+ if c.HostConfig.LogConfig.Config["max-file"] != "20" {
+ return fmt.Errorf("Container does not have the correct max-file log option: %v", c.HostConfig.LogConfig.Config["max-file"])
+ }
+
return nil
}
@@ -138,5 +150,10 @@ resource "docker_container" "foo" {
env = "prod"
role = "test"
}
+ log_driver = "json-file"
+ log_opts = {
+ max-size = "10m"
+ max-file = 20
+ }
}
`
diff --git a/website/source/docs/providers/docker/r/container.html.markdown b/website/source/docs/providers/docker/r/container.html.markdown
index b83387aef0..920288eb25 100644
--- a/website/source/docs/providers/docker/r/container.html.markdown
+++ b/website/source/docs/providers/docker/r/container.html.markdown
@@ -64,6 +64,10 @@ The following arguments are supported:
* `memory_swap` - (Optional, int) The total memory limit (memory + swap) for the
container in MBs.
* `cpu_shares` - (Optional, int) CPU shares (relative weight) for the container.
+* `log_driver` - (Optional, string) The logging driver to use for the container.
+ Defaults to "json-file".
+* `log_opts` - (Optional) Key/value pairs to use as options for the logging
+ driver.
## Ports
From 1f739d31da0987622a02e476845e672422fa40a9 Mon Sep 17 00:00:00 2001
From: ryane
Date: Wed, 4 Nov 2015 15:46:24 -0500
Subject: [PATCH 028/664] fix resource constraint specs
---
.../docker/resource_docker_container_test.go | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/builtin/providers/docker/resource_docker_container_test.go b/builtin/providers/docker/resource_docker_container_test.go
index 4b3dfce9a2..df8ba0cb8a 100644
--- a/builtin/providers/docker/resource_docker_container_test.go
+++ b/builtin/providers/docker/resource_docker_container_test.go
@@ -44,15 +44,15 @@ func TestAccDockerContainer_customized(t *testing.T) {
return fmt.Errorf("Container has wrong restart policy: %s", c.HostConfig.RestartPolicy.Name)
}
- if c.HostConfig.Memory != (128 * 1024 * 1024) {
+ if c.HostConfig.Memory != (512 * 1024 * 1024) {
return fmt.Errorf("Container has wrong memory setting: %d", c.HostConfig.Memory)
}
- if c.HostConfig.MemorySwap != (128 * 1024 * 1024) {
- return fmt.Errorf("Container has wrong memory swap setting: %d", c.HostConfig.Memory)
+ if c.HostConfig.MemorySwap != (2048 * 1024 * 1024) {
+ return fmt.Errorf("Container has wrong memory swap setting: %d", c.HostConfig.MemorySwap)
}
- if c.HostConfig.CPUShares != 512 {
+ if c.HostConfig.CPUShares != 32 {
return fmt.Errorf("Container has wrong cpu shares setting: %d", c.HostConfig.CPUShares)
}
@@ -143,9 +143,9 @@ resource "docker_container" "foo" {
entrypoint = ["/bin/bash", "-c", "ping localhost"]
restart = "on-failure"
max_retry_count = 5
- memory = 128
- memory_swap = 128
- cpu_shares = 512
+ memory = 512
+ memory_swap = 2048
+ cpu_shares = 32
labels {
env = "prod"
role = "test"
From b5ae355a990720fa78a625d4b623420c5e11de55 Mon Sep 17 00:00:00 2001
From: ryane
Date: Wed, 4 Nov 2015 15:46:41 -0500
Subject: [PATCH 029/664] include hostconfig when creating docker_container
---
.../docker/resource_docker_container_funcs.go | 22 ++++++++++---------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/builtin/providers/docker/resource_docker_container_funcs.go b/builtin/providers/docker/resource_docker_container_funcs.go
index 443f9ef3fb..2b0259bc96 100644
--- a/builtin/providers/docker/resource_docker_container_funcs.go
+++ b/builtin/providers/docker/resource_docker_container_funcs.go
@@ -86,16 +86,6 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
createOpts.Config.Labels = mapTypeMapValsToString(v.(map[string]interface{}))
}
- var retContainer *dc.Container
- if retContainer, err = client.CreateContainer(createOpts); err != nil {
- return fmt.Errorf("Unable to create container: %s", err)
- }
- if retContainer == nil {
- return fmt.Errorf("Returned container is nil")
- }
-
- d.SetId(retContainer.ID)
-
hostConfig := &dc.HostConfig{
Privileged: d.Get("privileged").(bool),
PublishAllPorts: d.Get("publish_all_ports").(bool),
@@ -155,6 +145,18 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
hostConfig.LogConfig.Config = mapTypeMapValsToString(v.(map[string]interface{}))
}
+ createOpts.HostConfig = hostConfig
+
+ var retContainer *dc.Container
+ if retContainer, err = client.CreateContainer(createOpts); err != nil {
+ return fmt.Errorf("Unable to create container: %s", err)
+ }
+ if retContainer == nil {
+ return fmt.Errorf("Returned container is nil")
+ }
+
+ d.SetId(retContainer.ID)
+
creationTime = time.Now()
if err := client.StartContainer(retContainer.ID, hostConfig); err != nil {
return fmt.Errorf("Unable to start container: %s", err)
From 5f90a4bc7e96ca0715629d769889357db42088a3 Mon Sep 17 00:00:00 2001
From: Matt Morrison
Date: Thu, 5 Nov 2015 12:38:17 +1300
Subject: [PATCH 030/664] Issue #3742 - terraform destroy fails if Google
Compute Instance no longer exists
---
builtin/providers/google/resource_compute_instance.go | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go
index 808c5de789..ce56b17e9d 100644
--- a/builtin/providers/google/resource_compute_instance.go
+++ b/builtin/providers/google/resource_compute_instance.go
@@ -8,6 +8,7 @@ import (
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
+ "strings"
)
func stringHashcode(v interface{}) int {
@@ -285,9 +286,10 @@ func getInstance(config *Config, d *schema.ResourceData) (*compute.Instance, err
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
// The resource doesn't exist anymore
+ id := d.Id()
d.SetId("")
- return nil, fmt.Errorf("Resource %s no longer exists", config.Project)
+ return nil, fmt.Errorf("Resource %s no longer exists", id)
}
return nil, fmt.Errorf("Error reading instance: %s", err)
@@ -549,6 +551,9 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
instance, err := getInstance(config, d)
if err != nil {
+ if strings.Contains(err.Error(), "no longer exists") {
+ return nil
+ }
return err
}
From ede5ebb368bdf0a07e3ce57d1af6fb89332f0cb7 Mon Sep 17 00:00:00 2001
From: Matt Morrison
Date: Fri, 6 Nov 2015 10:15:35 +1300
Subject: [PATCH 031/664] Add logging when instance no longer exists
---
builtin/providers/google/resource_compute_instance.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go
index ce56b17e9d..3359c4d649 100644
--- a/builtin/providers/google/resource_compute_instance.go
+++ b/builtin/providers/google/resource_compute_instance.go
@@ -549,9 +549,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
+ id := d.Id()
instance, err := getInstance(config, d)
if err != nil {
if strings.Contains(err.Error(), "no longer exists") {
+ log.Printf("[WARN] Google Compute Instance (%s) not found", id)
return nil
}
return err
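
Together, these two patches make Read treat a 404 from the API as "the instance is gone": the ID is cleared, a warning is logged, and Read returns nil so Terraform drops the resource from state instead of failing the refresh or destroy. A self-contained sketch of that control flow, with a local error value standing in for the googleapi 404 path:

```go
package main

import (
	"errors"
	"fmt"
	"log"
	"strings"
)

// errGone stands in for the "no longer exists" error produced by getInstance.
var errGone = errors.New("Resource my-instance no longer exists")

// readInstance mirrors the flow above: a "no longer exists" error is logged
// and swallowed so the resource is simply removed from state.
func readInstance(id string) error {
	err := errGone // stand-in for getInstance returning the 404 path
	if err != nil {
		if strings.Contains(err.Error(), "no longer exists") {
			log.Printf("[WARN] Google Compute Instance (%s) not found", id)
			return nil
		}
		return err
	}
	return nil
}

func main() {
	fmt.Println(readInstance("my-instance")) // <nil>: treated as deleted
}
```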
From 7c09f9653dbd5ff103ec295e4f17143b3f9b3cf4 Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Fri, 6 Nov 2015 10:19:59 +0000
Subject: [PATCH 032/664] Changed vmware-govcd dependency to pull from hmrc
---
builtin/providers/vcd/config.go | 2 +-
builtin/providers/vcd/resource_vcd_dnat.go | 2 +-
builtin/providers/vcd/resource_vcd_dnat_test.go | 2 +-
builtin/providers/vcd/resource_vcd_firewall_rules.go | 4 ++--
builtin/providers/vcd/resource_vcd_firewall_rules_test.go | 2 +-
builtin/providers/vcd/resource_vcd_network.go | 4 ++--
builtin/providers/vcd/resource_vcd_network_test.go | 2 +-
builtin/providers/vcd/resource_vcd_snat.go | 2 +-
builtin/providers/vcd/resource_vcd_snat_test.go | 2 +-
builtin/providers/vcd/resource_vcd_vapp.go | 4 ++--
builtin/providers/vcd/resource_vcd_vapp_test.go | 2 +-
builtin/providers/vcd/structure.go | 2 +-
12 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/builtin/providers/vcd/config.go b/builtin/providers/vcd/config.go
index 0768bbc3db..b5da76dba5 100644
--- a/builtin/providers/vcd/config.go
+++ b/builtin/providers/vcd/config.go
@@ -4,7 +4,7 @@ import (
"fmt"
"net/url"
- "github.com/opencredo/vmware-govcd"
+ "github.com/hmrc/vmware-govcd"
)
type Config struct {
diff --git a/builtin/providers/vcd/resource_vcd_dnat.go b/builtin/providers/vcd/resource_vcd_dnat.go
index e949f1da86..b0ffc196cd 100644
--- a/builtin/providers/vcd/resource_vcd_dnat.go
+++ b/builtin/providers/vcd/resource_vcd_dnat.go
@@ -3,7 +3,7 @@ package vcd
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
- "github.com/opencredo/vmware-govcd"
+ "github.com/hmrc/vmware-govcd"
"strings"
)
diff --git a/builtin/providers/vcd/resource_vcd_dnat_test.go b/builtin/providers/vcd/resource_vcd_dnat_test.go
index ba4bfce134..6e073905b1 100644
--- a/builtin/providers/vcd/resource_vcd_dnat_test.go
+++ b/builtin/providers/vcd/resource_vcd_dnat_test.go
@@ -7,7 +7,7 @@ import (
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
- "github.com/opencredo/vmware-govcd"
+ "github.com/hmrc/vmware-govcd"
)
func TestAccVcdDNAT_Basic(t *testing.T) {
diff --git a/builtin/providers/vcd/resource_vcd_firewall_rules.go b/builtin/providers/vcd/resource_vcd_firewall_rules.go
index 3581f1179c..0af03009a4 100644
--- a/builtin/providers/vcd/resource_vcd_firewall_rules.go
+++ b/builtin/providers/vcd/resource_vcd_firewall_rules.go
@@ -5,8 +5,8 @@ import (
"fmt"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
- "github.com/opencredo/vmware-govcd"
- types "github.com/opencredo/vmware-govcd/types/v56"
+ "github.com/hmrc/vmware-govcd"
+ types "github.com/hmrc/vmware-govcd/types/v56"
"strings"
)
diff --git a/builtin/providers/vcd/resource_vcd_firewall_rules_test.go b/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
index 96e2c3e3d7..3b7a4e90a1 100644
--- a/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
+++ b/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
@@ -9,7 +9,7 @@ import (
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
- "github.com/opencredo/vmware-govcd"
+ "github.com/hmrc/vmware-govcd"
)
func TestAccVcdFirewallRules_basic(t *testing.T) {
diff --git a/builtin/providers/vcd/resource_vcd_network.go b/builtin/providers/vcd/resource_vcd_network.go
index c98527e926..c984d708ec 100644
--- a/builtin/providers/vcd/resource_vcd_network.go
+++ b/builtin/providers/vcd/resource_vcd_network.go
@@ -7,8 +7,8 @@ import (
"fmt"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
- "github.com/opencredo/vmware-govcd"
- types "github.com/opencredo/vmware-govcd/types/v56"
+ "github.com/hmrc/vmware-govcd"
+ types "github.com/hmrc/vmware-govcd/types/v56"
"strings"
)
diff --git a/builtin/providers/vcd/resource_vcd_network_test.go b/builtin/providers/vcd/resource_vcd_network_test.go
index 6bfd840bb0..2d260bc03b 100644
--- a/builtin/providers/vcd/resource_vcd_network_test.go
+++ b/builtin/providers/vcd/resource_vcd_network_test.go
@@ -8,7 +8,7 @@ import (
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
- "github.com/opencredo/vmware-govcd"
+ "github.com/hmrc/vmware-govcd"
)
func TestAccVcdNetwork_Basic(t *testing.T) {
diff --git a/builtin/providers/vcd/resource_vcd_snat.go b/builtin/providers/vcd/resource_vcd_snat.go
index 08a9b4023a..afae155505 100644
--- a/builtin/providers/vcd/resource_vcd_snat.go
+++ b/builtin/providers/vcd/resource_vcd_snat.go
@@ -3,7 +3,7 @@ package vcd
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
- "github.com/opencredo/vmware-govcd"
+ "github.com/hmrc/vmware-govcd"
)
func resourceVcdSNAT() *schema.Resource {
diff --git a/builtin/providers/vcd/resource_vcd_snat_test.go b/builtin/providers/vcd/resource_vcd_snat_test.go
index bf3eced14b..66351f2a15 100644
--- a/builtin/providers/vcd/resource_vcd_snat_test.go
+++ b/builtin/providers/vcd/resource_vcd_snat_test.go
@@ -7,7 +7,7 @@ import (
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
- "github.com/opencredo/vmware-govcd"
+ "github.com/hmrc/vmware-govcd"
)
func TestAccVcdSNAT_Basic(t *testing.T) {
diff --git a/builtin/providers/vcd/resource_vcd_vapp.go b/builtin/providers/vcd/resource_vcd_vapp.go
index 1add00412a..ee50d63ea2 100644
--- a/builtin/providers/vcd/resource_vcd_vapp.go
+++ b/builtin/providers/vcd/resource_vcd_vapp.go
@@ -3,8 +3,8 @@ package vcd
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
- "github.com/opencredo/vmware-govcd"
- types "github.com/opencredo/vmware-govcd/types/v56"
+ "github.com/hmrc/vmware-govcd"
+ types "github.com/hmrc/vmware-govcd/types/v56"
"log"
)
diff --git a/builtin/providers/vcd/resource_vcd_vapp_test.go b/builtin/providers/vcd/resource_vcd_vapp_test.go
index bb6e9874a7..e4e44647a3 100644
--- a/builtin/providers/vcd/resource_vcd_vapp_test.go
+++ b/builtin/providers/vcd/resource_vcd_vapp_test.go
@@ -7,7 +7,7 @@ import (
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
- "github.com/opencredo/vmware-govcd"
+ "github.com/hmrc/vmware-govcd"
)
func TestAccVcdVApp_PowerOff(t *testing.T) {
diff --git a/builtin/providers/vcd/structure.go b/builtin/providers/vcd/structure.go
index b243cdf8bf..2893514696 100644
--- a/builtin/providers/vcd/structure.go
+++ b/builtin/providers/vcd/structure.go
@@ -2,7 +2,7 @@ package vcd
import (
"github.com/hashicorp/terraform/helper/resource"
- types "github.com/opencredo/vmware-govcd/types/v56"
+ types "github.com/hmrc/vmware-govcd/types/v56"
"strconv"
"time"
)
From b6abb91b8374b4df2f8f15f54edb8685597374c0 Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Fri, 6 Nov 2015 16:39:40 +0000
Subject: [PATCH 033/664] Treat a resource that is in tfstate but cannot be
 found on the provider as deleted
---
builtin/providers/vcd/resource_vcd_network.go | 4 +++-
builtin/providers/vcd/resource_vcd_vapp.go | 4 +++-
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/vcd/resource_vcd_network.go b/builtin/providers/vcd/resource_vcd_network.go
index c984d708ec..b247be5da6 100644
--- a/builtin/providers/vcd/resource_vcd_network.go
+++ b/builtin/providers/vcd/resource_vcd_network.go
@@ -207,7 +207,9 @@ func resourceVcdNetworkRead(d *schema.ResourceData, meta interface{}) error {
network, err := vcd_client.OrgVdc.FindVDCNetwork(d.Id())
if err != nil {
- return fmt.Errorf("Error finding network: %#v", err)
+ log.Printf("[DEBUG] Network no longer exists. Removing from tfstate")
+ d.SetId("")
+ return nil
}
d.Set("name", network.OrgVDCNetwork.Name)
diff --git a/builtin/providers/vcd/resource_vcd_vapp.go b/builtin/providers/vcd/resource_vcd_vapp.go
index ee50d63ea2..c500378683 100644
--- a/builtin/providers/vcd/resource_vcd_vapp.go
+++ b/builtin/providers/vcd/resource_vcd_vapp.go
@@ -355,7 +355,9 @@ func resourceVcdVAppRead(d *schema.ResourceData, meta interface{}) error {
vapp, err := vcd_client.OrgVdc.FindVAppByName(d.Id())
if err != nil {
- return fmt.Errorf("Error finding vapp: %#v", err)
+ log.Printf("[DEBUG] Unable to find vapp. Removing from tfstate")
+ d.SetId("")
+ return nil
}
d.Set("ip", vapp.VApp.Children.VM[0].NetworkConnectionSection.NetworkConnection.IPAddress)
From 725a735c0d2c398bc7432a3adfbeadd76df55c4a Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Sun, 8 Nov 2015 18:02:07 -0700
Subject: [PATCH 034/664] adding capability to set custom configuration values
 in virtual machines
---
.../resource_vsphere_virtual_machine.go | 84 +++++++++++++++----
1 file changed, 69 insertions(+), 15 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index ac15cd97f6..07d84367a3 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -41,21 +41,22 @@ type hardDisk struct {
}
type virtualMachine struct {
- name string
- datacenter string
- cluster string
- resourcePool string
- datastore string
- vcpu int
- memoryMb int64
- template string
- networkInterfaces []networkInterface
- hardDisks []hardDisk
- gateway string
- domain string
- timeZone string
- dnsSuffixes []string
- dnsServers []string
+ name string
+ datacenter string
+ cluster string
+ resourcePool string
+ datastore string
+ vcpu int
+ memoryMb int64
+ template string
+ networkInterfaces []networkInterface
+ hardDisks []hardDisk
+ gateway string
+ domain string
+ timeZone string
+ dnsSuffixes []string
+ dnsServers []string
+ customConfigurations map[string](types.AnyType)
}
func resourceVSphereVirtualMachine() *schema.Resource {
@@ -135,6 +136,12 @@ func resourceVSphereVirtualMachine() *schema.Resource {
ForceNew: true,
},
+ "custom_configuration_parameters": &schema.Schema{
+ Type: schema.TypeMap,
+ Optional: true,
+ ForceNew: true,
+ },
+
"network_interface": &schema.Schema{
Type: schema.TypeList,
Required: true,
@@ -261,6 +268,12 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
vm.dnsServers = DefaultDNSServers
}
+ if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
+ if custom_configs, ok := vL.(map[string]types.AnyType); ok {
+ vm.customConfigurations = custom_configs
+ }
+ }
+
if vL, ok := d.GetOk("network_interface"); ok {
networks := make([]networkInterface, len(vL.([]interface{})))
for i, v := range vL.([]interface{}) {
@@ -418,6 +431,15 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("datacenter", dc)
d.Set("memory", mvm.Summary.Config.MemorySizeMB)
d.Set("cpu", mvm.Summary.Config.NumCpu)
+
+ if mvm.Config && len(mvm.Config.ExtraConfig) > 0 {
+ custom_configs := make(map[string]string)
+ for _, v := range mvm.Config.ExtraConfig {
+ value := v.GetOptionValue()
+ custom_configs[value.Key] = value.Value
+ }
+ d.Set("custom_configuration_parameters", custom_configs)
+ }
d.Set("datastore", rootDatastore)
// Initialize the connection info
@@ -802,6 +824,22 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
}
log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
+ // make ExtraConfig
+ if len(vm.customConfigurations) > 0 {
+ var ov []types.BaseOptionValue
+ for k, v := range vm.customConfigurations {
+ key := k
+ value := v
+ o := types.OptionValue{
+ Key: key,
+ Value: &value,
+ }
+ ov = append(ov, &o)
+ }
+ configSpec.ExtraConfig = ov
+ log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
+ }
+
var datastore *object.Datastore
if vm.datastore == "" {
datastore, err = finder.DefaultDatastore(context.TODO())
@@ -1003,6 +1041,22 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
}
log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
+ // make ExtraConfig
+ if len(vm.customConfigurations) > 0 {
+ var ov []types.BaseOptionValue
+ for k, v := range vm.customConfigurations {
+ key := k
+ value := v
+ o := types.OptionValue{
+ Key: key,
+ Value: &value,
+ }
+ ov = append(ov, &o)
+ }
+ configSpec.ExtraConfig = ov
+ log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
+ }
+
// create CustomizationSpec
customSpec := types.CustomizationSpec{
Identity: &types.CustomizationLinuxPrep{
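
The custom parameters are turned into vSphere `ExtraConfig` entries by building one `types.OptionValue` per key/value pair; each value is copied into a fresh variable before its address is taken, so every option points at its own copy rather than the shared loop variable. A simplified, self-contained sketch with a local stand-in type instead of govmomi's `types.OptionValue`:

```go
package main

import "fmt"

// optionValue is a local stand-in for govmomi's types.OptionValue; the real
// patch appends *types.OptionValue entries to configSpec.ExtraConfig.
type optionValue struct {
	Key   string
	Value interface{}
}

// buildExtraConfig mirrors the loop above: each custom configuration pair
// becomes one option value. The per-iteration copies match the real code,
// where &value is stored and must not alias the loop variable.
func buildExtraConfig(custom map[string]interface{}) []optionValue {
	var ov []optionValue
	for k, v := range custom {
		key := k
		value := v
		ov = append(ov, optionValue{Key: key, Value: value})
	}
	return ov
}

func main() {
	fmt.Println(buildExtraConfig(map[string]interface{}{"foo": "bar", "num": 42}))
}
```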
From b5ca1466433c6a3eed350f6d6f9e163ddcb7b032 Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Sun, 8 Nov 2015 18:21:17 -0700
Subject: [PATCH 035/664] fixing if and AnyTypes
---
builtin/providers/vsphere/resource_vsphere_virtual_machine.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index 07d84367a3..cf636e79da 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -432,8 +432,8 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("memory", mvm.Summary.Config.MemorySizeMB)
d.Set("cpu", mvm.Summary.Config.NumCpu)
- if mvm.Config && len(mvm.Config.ExtraConfig) > 0 {
- custom_configs := make(map[string]string)
+ if len(mvm.Config.ExtraConfig) > 0 {
+ custom_configs := make(map[string]types.AnyType)
for _, v := range mvm.Config.ExtraConfig {
value := v.GetOptionValue()
custom_configs[value.Key] = value.Value
From 01adcb18f3416be7e7fc02a9bec750375915968b Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Sun, 8 Nov 2015 18:49:49 -0700
Subject: [PATCH 036/664] adding new functional test
---
.../resource_vsphere_virtual_machine_test.go | 83 ++++++++++++++++++-
1 file changed, 82 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
index 66d6ea44f8..2cae45fe48 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
@@ -127,6 +127,67 @@ func TestAccVSphereVirtualMachine_dhcp(t *testing.T) {
})
}
+func TestAccVSphereVirtualMachine_custom_configs(t *testing.T) {
+ var vm virtualMachine
+ var locationOpt string
+ var datastoreOpt string
+
+ if v := os.Getenv("VSPHERE_DATACENTER"); v != "" {
+ locationOpt += fmt.Sprintf(" datacenter = \"%s\"\n", v)
+ }
+ if v := os.Getenv("VSPHERE_CLUSTER"); v != "" {
+ locationOpt += fmt.Sprintf(" cluster = \"%s\"\n", v)
+ }
+ if v := os.Getenv("VSPHERE_RESOURCE_POOL"); v != "" {
+ locationOpt += fmt.Sprintf(" resource_pool = \"%s\"\n", v)
+ }
+ if v := os.Getenv("VSPHERE_DATASTORE"); v != "" {
+ datastoreOpt = fmt.Sprintf(" datastore = \"%s\"\n", v)
+ }
+ template := os.Getenv("VSPHERE_TEMPLATE")
+ label := os.Getenv("VSPHERE_NETWORK_LABEL_DHCP")
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckVSphereVirtualMachineDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: fmt.Sprintf(
+ testAccCheckVSphereVirtualMachineConfig_custom_configs,
+ locationOpt,
+ label,
+ datastoreOpt,
+ template,
+ ),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.bar", &vm),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "name", "terraform-test"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "vcpu", "2"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "memory", "4096"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "disk.#", "1"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "disk.0.template", template),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "network_interface.#", "1"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "custom_configuration_parameters.foo", "bar"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "custom_configuration_parameters.car", "ferrai"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "custom_configuration_parameters.num", "42"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.bar", "network_interface.0.label", label),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckVSphereVirtualMachineDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*govmomi.Client)
finder := find.NewFinder(client.Client, true)
@@ -212,7 +273,6 @@ resource "vsphere_virtual_machine" "foo" {
}
}
`
-
const testAccCheckVSphereVirtualMachineConfig_dhcp = `
resource "vsphere_virtual_machine" "bar" {
name = "terraform-test"
@@ -228,3 +288,24 @@ resource "vsphere_virtual_machine" "bar" {
}
}
`
+
+const testAccCheckVSphereVirtualMachineConfig_custom_configs = `
+resource "vsphere_virtual_machine" "car" {
+ name = "terraform-test-custom"
+%s
+ vcpu = 2
+ memory = 4096
+ network_interface {
+ label = "%s"
+ }
+ custom_configuration_parameters {
+ foo = "bar",
+ car = "ferrai",
+ num = 42
+ }
+ disk {
+%s
+ template = "%s"
+ }
+}
+`
From a5050fe471f9256555b732f6017744d50ddfbb5f Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Mon, 9 Nov 2015 04:50:09 +0000
Subject: [PATCH 037/664] working on read and more testing
---
.../resource_vsphere_virtual_machine.go | 23 ++++++++++++++++---
.../resource_vsphere_virtual_machine_test.go | 12 +++++-----
2 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index cf636e79da..338c95301f 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -269,8 +269,13 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
}
if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
- if custom_configs, ok := vL.(map[string]types.AnyType); ok {
- vm.customConfigurations = custom_configs
+ if custom_configs, ok := vL.(map[string]interface{}); ok {
+ custom := make(map[string]types.AnyType)
+ for k,v := range custom_configs {
+ custom[k] = v
+ }
+ vm.customConfigurations = custom
+ log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
}
}
@@ -432,14 +437,21 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("memory", mvm.Summary.Config.MemorySizeMB)
d.Set("cpu", mvm.Summary.Config.NumCpu)
- if len(mvm.Config.ExtraConfig) > 0 {
+ log.Printf("[DEBUG] ===============================")
+ //log.Printf("[DEBUG] Get extra config ===============================")
+ //log.Printf("[DEBUG] Get extra config %v", mvm.Config)
+ //log.Printf("[DEBUG] Get extra config %v", mvm.Config.ExtraConfig)
+ if mvm.Config != nil && mvm.Config.ExtraConfig != nil && len(mvm.Config.ExtraConfig) > 0 {
+ log.Printf("[DEBUG] reading custom configs")
custom_configs := make(map[string]types.AnyType)
for _, v := range mvm.Config.ExtraConfig {
value := v.GetOptionValue()
custom_configs[value.Key] = value.Value
+ log.Printf("[DEBUG] reading custom configs %s,%s",value.Key, value.Value)
}
d.Set("custom_configuration_parameters", custom_configs)
}
+ log.Printf("[DEBUG] ===============================")
d.Set("datastore", rootDatastore)
// Initialize the connection info
@@ -825,6 +837,7 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
// make ExtraConfig
+ log.Printf("[DEBUG] virtual machine Extra Config spec start")
if len(vm.customConfigurations) > 0 {
var ov []types.BaseOptionValue
for k, v := range vm.customConfigurations {
@@ -834,6 +847,7 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
Key: key,
Value: &value,
}
+ log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k,v)
ov = append(ov, &o)
}
configSpec.ExtraConfig = ov
@@ -1041,6 +1055,8 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
}
log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
+ log.Printf("[DEBUG] starting extra custom config spec: %v", vm.customConfigurations)
+
// make ExtraConfig
if len(vm.customConfigurations) > 0 {
var ov []types.BaseOptionValue
@@ -1149,5 +1165,6 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
return err
}
}
+ log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
return nil
}
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
index 2cae45fe48..804e1ae074 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
@@ -161,9 +161,9 @@ func TestAccVSphereVirtualMachine_custom_configs(t *testing.T) {
template,
),
Check: resource.ComposeTestCheckFunc(
- testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.bar", &vm),
+ testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.car", &vm),
resource.TestCheckResourceAttr(
- "vsphere_virtual_machine.car", "name", "terraform-test"),
+ "vsphere_virtual_machine.car", "name", "terraform-test-custom"),
resource.TestCheckResourceAttr(
"vsphere_virtual_machine.car", "vcpu", "2"),
resource.TestCheckResourceAttr(
@@ -181,7 +181,7 @@ func TestAccVSphereVirtualMachine_custom_configs(t *testing.T) {
resource.TestCheckResourceAttr(
"vsphere_virtual_machine.car", "custom_configuration_parameters.num", "42"),
resource.TestCheckResourceAttr(
- "vsphere_virtual_machine.bar", "network_interface.0.label", label),
+ "vsphere_virtual_machine.car", "network_interface.0.label", label),
),
},
},
@@ -299,9 +299,9 @@ resource "vsphere_virtual_machine" "car" {
label = "%s"
}
custom_configuration_parameters {
- foo = "bar",
- car = "ferrai",
- num = 42
+ "foo" = "bar"
+ "car" = "ferrai"
+ "num" = 42
}
disk {
%s
From 4fc60c9f89cc4ebad933bbffb0f243eb9213c183 Mon Sep 17 00:00:00 2001
From: ryane
Date: Mon, 9 Nov 2015 19:36:23 -0500
Subject: [PATCH 038/664] docker: improve validation of runtime constraints
---
.../docker/resource_docker_container.go | 21 +++++++++++++++++++
.../docker/resource_docker_container_funcs.go | 18 +++++-----------
2 files changed, 26 insertions(+), 13 deletions(-)
diff --git a/builtin/providers/docker/resource_docker_container.go b/builtin/providers/docker/resource_docker_container.go
index 92331fc795..242462e1a7 100644
--- a/builtin/providers/docker/resource_docker_container.go
+++ b/builtin/providers/docker/resource_docker_container.go
@@ -182,18 +182,39 @@ func resourceDockerContainer() *schema.Resource {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
+ ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+ value := v.(int)
+ if value < 0 {
+ es = append(es, fmt.Errorf("%q must be greater than or equal to 0", k))
+ }
+ return
+ },
},
"memory_swap": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
+ ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+ value := v.(int)
+ if value < -1 {
+ es = append(es, fmt.Errorf("%q must be greater than or equal to -1", k))
+ }
+ return
+ },
},
"cpu_shares": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
+ ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+ value := v.(int)
+ if value < 0 {
+ es = append(es, fmt.Errorf("%q must be greater than or equal to 0", k))
+ }
+ return
+ },
},
"log_driver": &schema.Schema{
diff --git a/builtin/providers/docker/resource_docker_container_funcs.go b/builtin/providers/docker/resource_docker_container_funcs.go
index 2b0259bc96..b0c262dfcd 100644
--- a/builtin/providers/docker/resource_docker_container_funcs.go
+++ b/builtin/providers/docker/resource_docker_container_funcs.go
@@ -118,27 +118,19 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
}
if v, ok := d.GetOk("memory"); ok {
- memory := int64(v.(int))
- if memory > 0 {
- hostConfig.Memory = memory * 1024 * 1024
- }
+ hostConfig.Memory = int64(v.(int)) * 1024 * 1024
}
if v, ok := d.GetOk("memory_swap"); ok {
swap := int64(v.(int))
- if swap != 0 {
- if swap > 0 { // only convert positive #s to bytes
- swap = swap * 1024 * 1024
- }
- hostConfig.MemorySwap = swap
+ if swap > 0 {
+ swap = swap * 1024 * 1024
}
+ hostConfig.MemorySwap = swap
}
if v, ok := d.GetOk("cpu_shares"); ok {
- shares := int64(v.(int))
- if shares > 0 {
- hostConfig.CPUShares = shares
- }
+ hostConfig.CPUShares = int64(v.(int))
}
if v, ok := d.GetOk("log_opts"); ok {
From a15c99e5bb8168450a3fcfba9e6eb4df92944cdd Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Tue, 10 Nov 2015 18:39:58 +0000
Subject: [PATCH 039/664] Code cleanup to address PR comments
---
builtin/providers/vcd/resource_vcd_dnat.go | 14 ++++-------
builtin/providers/vcd/resource_vcd_network.go | 24 +++++++++----------
builtin/providers/vcd/resource_vcd_snat.go | 7 ++----
builtin/providers/vcd/resource_vcd_vapp.go | 12 ----------
builtin/providers/vcd/structure.go | 4 ++--
5 files changed, 20 insertions(+), 41 deletions(-)
diff --git a/builtin/providers/vcd/resource_vcd_dnat.go b/builtin/providers/vcd/resource_vcd_dnat.go
index b0ffc196cd..edfdd69f72 100644
--- a/builtin/providers/vcd/resource_vcd_dnat.go
+++ b/builtin/providers/vcd/resource_vcd_dnat.go
@@ -4,13 +4,11 @@ import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hmrc/vmware-govcd"
- "strings"
)
func resourceVcdDNAT() *schema.Resource {
return &schema.Resource{
Create: resourceVcdDNATCreate,
- Update: resourceVcdDNATUpdate,
Delete: resourceVcdDNATDelete,
Read: resourceVcdDNATRead,
@@ -24,16 +22,19 @@ func resourceVcdDNAT() *schema.Resource {
"external_ip": &schema.Schema{
Type: schema.TypeString,
Required: true,
+ ForceNew: true,
},
"port": &schema.Schema{
Type: schema.TypeInt,
Required: true,
+ ForceNew: true,
},
"internal_ip": &schema.Schema{
Type: schema.TypeString,
Required: true,
+ ForceNew: true,
},
},
}
@@ -78,10 +79,6 @@ func resourceVcdDNATCreate(d *schema.ResourceData, meta interface{}) error {
return nil
}
-func resourceVcdDNATUpdate(d *schema.ResourceData, meta interface{}) error {
- return nil
-}
-
func resourceVcdDNATRead(d *schema.ResourceData, meta interface{}) error {
vcd_client := meta.(*govcd.VCDClient)
e, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
@@ -90,13 +87,12 @@ func resourceVcdDNATRead(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("Unable to find edge gateway: %#v", err)
}
- idSplit := strings.Split(d.Id(), "_")
var found bool
for _, r := range e.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.NatService.NatRule {
if r.RuleType == "DNAT" &&
- r.GatewayNatRule.OriginalIP == idSplit[0] &&
- r.GatewayNatRule.OriginalPort == idSplit[1] {
+ r.GatewayNatRule.OriginalIP == d.Get("external_ip").(string) &&
+ r.GatewayNatRule.OriginalPort == getPortString(d.Get("port").(int)) {
found = true
d.Set("internal_ip", r.GatewayNatRule.TranslatedIP)
}
diff --git a/builtin/providers/vcd/resource_vcd_network.go b/builtin/providers/vcd/resource_vcd_network.go
index b247be5da6..37b9d68bbc 100644
--- a/builtin/providers/vcd/resource_vcd_network.go
+++ b/builtin/providers/vcd/resource_vcd_network.go
@@ -15,7 +15,6 @@ import (
func resourceVcdNetwork() *schema.Resource {
return &schema.Resource{
Create: resourceVcdNetworkCreate,
- Update: resourceVcdNetworkUpdate,
Read: resourceVcdNetworkRead,
Delete: resourceVcdNetworkDelete,
@@ -29,51 +28,60 @@ func resourceVcdNetwork() *schema.Resource {
"fence_mode": &schema.Schema{
Type: schema.TypeString,
Optional: true,
+ ForceNew: true,
Default: "natRouted",
},
"edge_gateway": &schema.Schema{
Type: schema.TypeString,
Required: true,
+ ForceNew: true,
},
"netmask": &schema.Schema{
Type: schema.TypeString,
Optional: true,
+ ForceNew: true,
Default: "255.255.255.0",
},
"gateway": &schema.Schema{
Type: schema.TypeString,
Required: true,
+ ForceNew: true,
},
"dns1": &schema.Schema{
Type: schema.TypeString,
Optional: true,
+ ForceNew: true,
Default: "8.8.8.8",
},
"dns2": &schema.Schema{
Type: schema.TypeString,
Optional: true,
+ ForceNew: true,
Default: "8.8.4.4",
},
"dns_suffix": &schema.Schema{
Type: schema.TypeString,
Optional: true,
+ ForceNew: true,
},
"href": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
+ ForceNew: true,
},
"dhcp_pool": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
+ ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"start_address": &schema.Schema{
@@ -92,6 +100,7 @@ func resourceVcdNetwork() *schema.Resource {
"static_ip_pool": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
+ ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"start_address": &schema.Schema{
@@ -119,10 +128,7 @@ func resourceVcdNetworkCreate(d *schema.ResourceData, meta interface{}) error {
edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
- ipRanges, err := expandIpRange(d.Get("static_ip_pool").(*schema.Set).List())
- if err != nil {
- fmt.Printf("error: %v\n", err)
- }
+ ipRanges := expandIpRange(d.Get("static_ip_pool").(*schema.Set).List())
newnetwork := &types.OrgVDCNetwork{
Xmlns: "http://www.vmware.com/vcloud/v1.5",
@@ -187,14 +193,6 @@ func resourceVcdNetworkCreate(d *schema.ResourceData, meta interface{}) error {
return resourceVcdNetworkRead(d, meta)
}
-func resourceVcdNetworkUpdate(d *schema.ResourceData, meta interface{}) error {
-
- vcd_client := meta.(*govcd.VCDClient)
-
- log.Printf("[DEBUG] VCD Client configuration: %#v", vcd_client)
- return nil
-}
-
func resourceVcdNetworkRead(d *schema.ResourceData, meta interface{}) error {
vcd_client := meta.(*govcd.VCDClient)
log.Printf("[DEBUG] VCD Client configuration: %#v", vcd_client)
diff --git a/builtin/providers/vcd/resource_vcd_snat.go b/builtin/providers/vcd/resource_vcd_snat.go
index afae155505..75c78696b4 100644
--- a/builtin/providers/vcd/resource_vcd_snat.go
+++ b/builtin/providers/vcd/resource_vcd_snat.go
@@ -9,7 +9,6 @@ import (
func resourceVcdSNAT() *schema.Resource {
return &schema.Resource{
Create: resourceVcdSNATCreate,
- Update: resourceVcdSNATUpdate,
Delete: resourceVcdSNATDelete,
Read: resourceVcdSNATRead,
@@ -23,11 +22,13 @@ func resourceVcdSNAT() *schema.Resource {
"external_ip": &schema.Schema{
Type: schema.TypeString,
Required: true,
+ ForceNew: true,
},
"internal_ip": &schema.Schema{
Type: schema.TypeString,
Required: true,
+ ForceNew: true,
},
},
}
@@ -67,10 +68,6 @@ func resourceVcdSNATCreate(d *schema.ResourceData, meta interface{}) error {
return nil
}
-func resourceVcdSNATUpdate(d *schema.ResourceData, meta interface{}) error {
- return nil
-}
-
func resourceVcdSNATRead(d *schema.ResourceData, meta interface{}) error {
vcd_client := meta.(*govcd.VCDClient)
e, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
diff --git a/builtin/providers/vcd/resource_vcd_vapp.go b/builtin/providers/vcd/resource_vcd_vapp.go
index c500378683..69c4cc4c26 100644
--- a/builtin/providers/vcd/resource_vcd_vapp.go
+++ b/builtin/providers/vcd/resource_vcd_vapp.go
@@ -150,18 +150,6 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
return err
}
- // err = resource.Retry(4*time.Minute, func() error {
- // err = vcd_client.OrgVdc.InstantiateVAppTemplate(createvapp)
- //
- // if err != nil {
- // return fmt.Errorf("Error: %#v", err)
- // }
- // return nil
- // })
- // if err != nil {
- // return err
- // }
-
vapp, err := vcd_client.OrgVdc.FindVAppByName(d.Get("name").(string))
err = retryCall(4, func() error {
diff --git a/builtin/providers/vcd/structure.go b/builtin/providers/vcd/structure.go
index 2893514696..7c40f70fce 100644
--- a/builtin/providers/vcd/structure.go
+++ b/builtin/providers/vcd/structure.go
@@ -7,7 +7,7 @@ import (
"time"
)
-func expandIpRange(configured []interface{}) (types.IPRanges, error) {
+func expandIpRange(configured []interface{}) types.IPRanges {
ipRange := make([]*types.IPRange, 0, len(configured))
for _, ipRaw := range configured {
@@ -25,7 +25,7 @@ func expandIpRange(configured []interface{}) (types.IPRanges, error) {
IPRange: ipRange,
}
- return ipRanges, nil
+ return ipRanges
}
func expandFirewallRules(configured []interface{}, gateway *types.EdgeGateway) ([]*types.FirewallRule, error) {
From a05ff89a7d19c64ddec2fba02cf2fa1c5de77b67 Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Tue, 10 Nov 2015 22:49:38 +0000
Subject: [PATCH 040/664] Changed documentation to better show what can be done
with firewall rules
---
.../vcd/r/firewall_rules.html.markdown | 30 +++++++++++++++----
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/website/source/docs/providers/vcd/r/firewall_rules.html.markdown b/website/source/docs/providers/vcd/r/firewall_rules.html.markdown
index e8fb4401d4..172237322a 100644
--- a/website/source/docs/providers/vcd/r/firewall_rules.html.markdown
+++ b/website/source/docs/providers/vcd/r/firewall_rules.html.markdown
@@ -19,13 +19,13 @@ resource "vcd_firewall_rules" "fw" {
default_action = "drop"
rule {
- description = "allow-web"
- policy = "allow"
+ description = "deny-ftp-out"
+ policy = "deny"
protocol = "tcp"
- destination_port = "80"
- destination_ip = "10.10.0.5"
+ destination_port = "21"
+ destination_ip = "any"
source_port = "any"
- source_ip = "any"
+ source_ip = "10.10.0.0/24"
}
rule {
@@ -39,6 +39,26 @@ resource "vcd_firewall_rules" "fw" {
}
}
+
+resource "vcd_vapp" "web" {
+ ...
+}
+
+resource "vcd_firewall_rules" "fw-web" {
+ edge_gateway = "Edge Gateway Name"
+ default_action = "drop"
+
+ rule {
+ description = "allow-web"
+ policy = "allow"
+ protocol = "tcp"
+ destination_port = "80"
+ destination_ip = "${vcd_vapp.web.ip}"
+ source_port = "any"
+ source_ip = "any"
+ }
+}
+
```
## Argument Reference
From dc8924b537e168a850f59cf3b6453a05c70fe178 Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Tue, 10 Nov 2015 23:35:25 +0000
Subject: [PATCH 041/664] Changed vcd_vapp resource to make better use of
Update function
---
builtin/providers/vcd/resource_vcd_vapp.go | 82 +++++-----------------
1 file changed, 19 insertions(+), 63 deletions(-)
diff --git a/builtin/providers/vcd/resource_vcd_vapp.go b/builtin/providers/vcd/resource_vcd_vapp.go
index 69c4cc4c26..1e3e5a116c 100644
--- a/builtin/providers/vcd/resource_vcd_vapp.go
+++ b/builtin/providers/vcd/resource_vcd_vapp.go
@@ -152,30 +152,6 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
vapp, err := vcd_client.OrgVdc.FindVAppByName(d.Get("name").(string))
- err = retryCall(4, func() error {
- task, err := vapp.ChangeMemorySize(d.Get("memory").(int))
- if err != nil {
- return fmt.Errorf("Error changing memory size: %#v", err)
- }
-
- return task.WaitTaskCompletion()
- })
- if err != nil {
- return err
- }
-
- err = retryCall(4, func() error {
- task, err := vapp.ChangeCPUcount(d.Get("cpus").(int))
- if err != nil {
- return fmt.Errorf("Error changing cpu count: %#v", err)
- }
-
- return task.WaitTaskCompletion()
- })
- if err != nil {
- return fmt.Errorf("Error completing task: %#v", err)
- }
-
err = retryCall(4, func() error {
task, err := vapp.ChangeVMName(d.Get("name").(string))
if err != nil {
@@ -199,24 +175,6 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("Error changing network: %#v", err)
}
- err = retryCall(4, func() error {
- metadata := d.Get("metadata").(map[string]interface{})
- for k, v := range metadata {
- task, err := vapp.AddMetadata(k, v.(string))
- if err != nil {
- return fmt.Errorf("Error adding metadata: %#v", err)
- }
- err = task.WaitTaskCompletion()
- if err != nil {
- return fmt.Errorf("Error completing tasks: %#v", err)
- }
- }
- return nil
- })
- if err != nil {
- return fmt.Errorf("Error adding metadata: %#v", err)
- }
-
if initscript, ok := d.GetOk("initscript"); ok {
err = retryCall(4, func() error {
task, err := vapp.RunCustomizationScript(d.Get("name").(string), initscript.(string))
@@ -230,23 +188,9 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
}
}
- if d.Get("power_on").(bool) {
- err = retryCall(4, func() error {
- task, err := vapp.PowerOn()
- if err != nil {
- return fmt.Errorf("Error Powering Up: %#v", err)
- }
- return task.WaitTaskCompletion()
- })
- if err != nil {
- return fmt.Errorf("Error completing tasks: %#v", err)
- }
- }
-
d.SetId(d.Get("name").(string))
- return resourceVcdVAppRead(d, meta)
- //return nil
+ return resourceVcdVAppUpdate(d, meta)
}
func resourceVcdVAppUpdate(d *schema.ResourceData, meta interface{}) error {
@@ -302,18 +246,30 @@ func resourceVcdVAppUpdate(d *schema.ResourceData, meta interface{}) error {
}
if d.HasChange("memory") {
- task, err := vapp.ChangeMemorySize(d.Get("memory").(int))
- err = task.WaitTaskCompletion()
+ err = retryCall(4, func() error {
+ task, err := vapp.ChangeMemorySize(d.Get("memory").(int))
+ if err != nil {
+ return fmt.Errorf("Error changing memory size: %#v", err)
+ }
+
+ return task.WaitTaskCompletion()
+ })
if err != nil {
- return fmt.Errorf("Error changing memory size: %#v", err)
+ return err
}
}
if d.HasChange("cpus") {
- task, err := vapp.ChangeCPUcount(d.Get("cpus").(int))
- err = task.WaitTaskCompletion()
+ err = retryCall(4, func() error {
+ task, err := vapp.ChangeCPUcount(d.Get("cpus").(int))
+ if err != nil {
+ return fmt.Errorf("Error changing cpu count: %#v", err)
+ }
+
+ return task.WaitTaskCompletion()
+ })
if err != nil {
- return fmt.Errorf("Error changing cpu count: %#v", err)
+ return fmt.Errorf("Error completing task: %#v", err)
}
}
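Every vApp mutation in this resource now follows the same pattern: wrap the govcd call in retryCall and block on the returned task. A rough sketch of that pattern as a standalone helper, hypothetical and not part of the patch, assuming only the retryCall(n, func() error) signature and the task.WaitTaskCompletion() method already used above (the task type is written as govcd.Task here for illustration):

package vcd

import (
	"fmt"

	govcd "github.com/hmrc/vmware-govcd"
)

// runTaskWithRetry wraps a task-producing govcd call: retry it via retryCall
// (n is passed straight through, 4 everywhere above), then wait for the task.
func runTaskWithRetry(n int, what string, call func() (govcd.Task, error)) error {
	return retryCall(n, func() error {
		task, err := call()
		if err != nil {
			return fmt.Errorf("Error %s: %#v", what, err)
		}
		return task.WaitTaskCompletion()
	})
}

With such a wrapper, each of the memory, cpus, and name changes above collapses to one runTaskWithRetry call around the corresponding govcd method.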
From 5df9d22a6adbfaff6e111ea6ed8c339ef479c149 Mon Sep 17 00:00:00 2001
From: Nicki Watt
Date: Wed, 11 Nov 2015 07:43:36 +0000
Subject: [PATCH 042/664] Minor doc updates
---
.../docs/providers/vcd/index.html.markdown | 17 +++++++----------
website/source/layouts/docs.erb | 2 +-
website/source/layouts/vcd.erb | 2 +-
3 files changed, 9 insertions(+), 12 deletions(-)
diff --git a/website/source/docs/providers/vcd/index.html.markdown b/website/source/docs/providers/vcd/index.html.markdown
index 45cb0df58a..385dbcd8fc 100644
--- a/website/source/docs/providers/vcd/index.html.markdown
+++ b/website/source/docs/providers/vcd/index.html.markdown
@@ -1,26 +1,23 @@
---
layout: "vcd"
-page_title: "Provider: vCloudDirector"
+page_title: "Provider: VMware vCloudDirector"
sidebar_current: "docs-vcd-index"
description: |-
- The vCloud Director provider is used to interact with the resources supported by vCloud
- Director. The provider needs to be configured with the proper credentials before it can be used.
+ The VMware vCloud Director provider is used to interact with the resources supported by VMware vCloud Director. The provider needs to be configured with the proper credentials before it can be used.
---
-# vCloud Director Provider
+# VMware vCloud Director Provider
-The vCloud Director provider is used to interact with the resources supported by vCloud
-Director. The provider needs to be configured with the proper credentials before it can be used.
+The VMware vCloud Director provider is used to interact with the resources supported by VMware vCloud Director. The provider needs to be configured with the proper credentials before it can be used.
Use the navigation to the left to read about the available resources.
-~> **NOTE:** The vCloud Director Provider currently represents _initial support_ and
-therefore may undergo significant changes as the community improves it.
+~> **NOTE:** The VMware vCloud Director Provider currently represents _initial support_ and therefore may undergo significant changes as the community improves it.
## Example Usage
```
-# Configure the vCloud Director Provider
+# Configure the VMware vCloud Director Provider
provider "vcd" {
user = "${var.vcd_user}"
password = "${var.vcd_pass}"
@@ -37,7 +34,7 @@ resource "vcd_network" "net" {
## Argument Reference
-The following arguments are used to configure the vCloud Director Provider:
+The following arguments are used to configure the VMware vCloud Director Provider:
* `user` - (Required) This is the username for vCloud Director API operations. Can also
be specified with the `VCD_USER` environment variable.
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb
index b4a611eb72..462ec0b4f1 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -194,7 +194,7 @@
>
- vCloud Director
+ VMware vCloud Director
>
diff --git a/website/source/layouts/vcd.erb b/website/source/layouts/vcd.erb
index ebfd9b7d92..8bafe26497 100644
--- a/website/source/layouts/vcd.erb
+++ b/website/source/layouts/vcd.erb
@@ -7,7 +7,7 @@
>
- vCloudDirector Provider
+ VMware vCloudDirector Provider
>
From 3f37884721c4d7e27f6524fc1e6cdbc7781414c0 Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Sun, 8 Nov 2015 18:02:07 -0700
Subject: [PATCH 043/664] adding capability to set custom configuration value
in virtual machines
---
.../resource_vsphere_virtual_machine.go | 84 +++++++++++++++----
1 file changed, 69 insertions(+), 15 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index ac15cd97f6..07d84367a3 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -41,21 +41,22 @@ type hardDisk struct {
}
type virtualMachine struct {
- name string
- datacenter string
- cluster string
- resourcePool string
- datastore string
- vcpu int
- memoryMb int64
- template string
- networkInterfaces []networkInterface
- hardDisks []hardDisk
- gateway string
- domain string
- timeZone string
- dnsSuffixes []string
- dnsServers []string
+ name string
+ datacenter string
+ cluster string
+ resourcePool string
+ datastore string
+ vcpu int
+ memoryMb int64
+ template string
+ networkInterfaces []networkInterface
+ hardDisks []hardDisk
+ gateway string
+ domain string
+ timeZone string
+ dnsSuffixes []string
+ dnsServers []string
+ customConfigurations map[string](types.AnyType)
}
func resourceVSphereVirtualMachine() *schema.Resource {
@@ -135,6 +136,12 @@ func resourceVSphereVirtualMachine() *schema.Resource {
ForceNew: true,
},
+ "custom_configuration_parameters": &schema.Schema{
+ Type: schema.TypeMap,
+ Optional: true,
+ ForceNew: true,
+ },
+
"network_interface": &schema.Schema{
Type: schema.TypeList,
Required: true,
@@ -261,6 +268,12 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
vm.dnsServers = DefaultDNSServers
}
+ if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
+ if custom_configs, ok := vL.(map[string]types.AnyType); ok {
+ vm.customConfigurations = custom_configs
+ }
+ }
+
if vL, ok := d.GetOk("network_interface"); ok {
networks := make([]networkInterface, len(vL.([]interface{})))
for i, v := range vL.([]interface{}) {
@@ -418,6 +431,15 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("datacenter", dc)
d.Set("memory", mvm.Summary.Config.MemorySizeMB)
d.Set("cpu", mvm.Summary.Config.NumCpu)
+
+ if mvm.Config && len(mvm.Config.ExtraConfig) > 0 {
+ custom_configs := make(map[string]string)
+ for _, v := range mvm.Config.ExtraConfig {
+ value := v.GetOptionValue()
+ custom_configs[value.Key] = value.Value
+ }
+ d.Set("custom_configuration_parameters", custom_configs)
+ }
d.Set("datastore", rootDatastore)
// Initialize the connection info
@@ -802,6 +824,22 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
}
log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
+ // make ExtraConfig
+ if len(vm.customConfigurations) > 0 {
+ var ov []types.BaseOptionValue
+ for k, v := range vm.customConfigurations {
+ key := k
+ value := v
+ o := types.OptionValue{
+ Key: key,
+ Value: &value,
+ }
+ ov = append(ov, &o)
+ }
+ configSpec.ExtraConfig = ov
+ log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
+ }
+
var datastore *object.Datastore
if vm.datastore == "" {
datastore, err = finder.DefaultDatastore(context.TODO())
@@ -1003,6 +1041,22 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
}
log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
+ // make ExtraConfig
+ if len(vm.customConfigurations) > 0 {
+ var ov []types.BaseOptionValue
+ for k, v := range vm.customConfigurations {
+ key := k
+ value := v
+ o := types.OptionValue{
+ Key: key,
+ Value: &value,
+ }
+ ov = append(ov, &o)
+ }
+ configSpec.ExtraConfig = ov
+ log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
+ }
+
// create CustomizationSpec
customSpec := types.CustomizationSpec{
Identity: &types.CustomizationLinuxPrep{
From 6e19c3f0e0047e4f40a43c7eba34b5914aa1495b Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Sun, 8 Nov 2015 18:21:17 -0700
Subject: [PATCH 044/664] fixing if and AnyTypes
---
builtin/providers/vsphere/resource_vsphere_virtual_machine.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index 07d84367a3..cf636e79da 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -432,8 +432,8 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("memory", mvm.Summary.Config.MemorySizeMB)
d.Set("cpu", mvm.Summary.Config.NumCpu)
- if mvm.Config && len(mvm.Config.ExtraConfig) > 0 {
- custom_configs := make(map[string]string)
+ if len(mvm.Config.ExtraConfig) > 0 {
+ custom_configs := make(map[string]types.AnyType)
for _, v := range mvm.Config.ExtraConfig {
value := v.GetOptionValue()
custom_configs[value.Key] = value.Value
From 106b1264485b1bafc0e8f6672ca81491e91791b7 Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Sun, 8 Nov 2015 18:49:49 -0700
Subject: [PATCH 045/664] adding new functional test
---
.../resource_vsphere_virtual_machine_test.go | 83 ++++++++++++++++++-
1 file changed, 82 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
index 66d6ea44f8..2cae45fe48 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
@@ -127,6 +127,67 @@ func TestAccVSphereVirtualMachine_dhcp(t *testing.T) {
})
}
+func TestAccVSphereVirtualMachine_custom_configs(t *testing.T) {
+ var vm virtualMachine
+ var locationOpt string
+ var datastoreOpt string
+
+ if v := os.Getenv("VSPHERE_DATACENTER"); v != "" {
+ locationOpt += fmt.Sprintf(" datacenter = \"%s\"\n", v)
+ }
+ if v := os.Getenv("VSPHERE_CLUSTER"); v != "" {
+ locationOpt += fmt.Sprintf(" cluster = \"%s\"\n", v)
+ }
+ if v := os.Getenv("VSPHERE_RESOURCE_POOL"); v != "" {
+ locationOpt += fmt.Sprintf(" resource_pool = \"%s\"\n", v)
+ }
+ if v := os.Getenv("VSPHERE_DATASTORE"); v != "" {
+ datastoreOpt = fmt.Sprintf(" datastore = \"%s\"\n", v)
+ }
+ template := os.Getenv("VSPHERE_TEMPLATE")
+ label := os.Getenv("VSPHERE_NETWORK_LABEL_DHCP")
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckVSphereVirtualMachineDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: fmt.Sprintf(
+ testAccCheckVSphereVirtualMachineConfig_custom_configs,
+ locationOpt,
+ label,
+ datastoreOpt,
+ template,
+ ),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.bar", &vm),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "name", "terraform-test"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "vcpu", "2"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "memory", "4096"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "disk.#", "1"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "disk.0.template", template),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "network_interface.#", "1"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "custom_configuration_parameters.foo", "bar"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "custom_configuration_parameters.car", "ferrai"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "custom_configuration_parameters.num", "42"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.bar", "network_interface.0.label", label),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckVSphereVirtualMachineDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*govmomi.Client)
finder := find.NewFinder(client.Client, true)
@@ -212,7 +273,6 @@ resource "vsphere_virtual_machine" "foo" {
}
}
`
-
const testAccCheckVSphereVirtualMachineConfig_dhcp = `
resource "vsphere_virtual_machine" "bar" {
name = "terraform-test"
@@ -228,3 +288,24 @@ resource "vsphere_virtual_machine" "bar" {
}
}
`
+
+const testAccCheckVSphereVirtualMachineConfig_custom_configs = `
+resource "vsphere_virtual_machine" "car" {
+ name = "terraform-test-custom"
+%s
+ vcpu = 2
+ memory = 4096
+ network_interface {
+ label = "%s"
+ }
+ custom_configuration_parameters {
+ foo = "bar",
+ car = "ferrai",
+ num = 42
+ }
+ disk {
+%s
+ template = "%s"
+ }
+}
+`
From 0bf8ffd0430b067bd924d7eed0dcd72ae3b655c4 Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Mon, 9 Nov 2015 04:50:09 +0000
Subject: [PATCH 046/664] working on read and more testing
---
.../resource_vsphere_virtual_machine.go | 23 ++++++++++++++++---
.../resource_vsphere_virtual_machine_test.go | 12 +++++-----
2 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index cf636e79da..338c95301f 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -269,8 +269,13 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
}
if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
- if custom_configs, ok := vL.(map[string]types.AnyType); ok {
- vm.customConfigurations = custom_configs
+ if custom_configs, ok := vL.(map[string]interface{}); ok {
+ custom := make(map[string]types.AnyType)
+ for k,v := range custom_configs {
+ custom[k] = v
+ }
+ vm.customConfigurations = custom
+ log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
}
}
@@ -432,14 +437,21 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("memory", mvm.Summary.Config.MemorySizeMB)
d.Set("cpu", mvm.Summary.Config.NumCpu)
- if len(mvm.Config.ExtraConfig) > 0 {
+ log.Printf("[DEBUG] ===============================")
+ //log.Printf("[DEBUG] Get extra config ===============================")
+ //log.Printf("[DEBUG] Get extra config %v", mvm.Config)
+ //log.Printf("[DEBUG] Get extra config %v", mvm.Config.ExtraConfig)
+ if mvm.Config != nil && mvm.Config.ExtraConfig != nil && len(mvm.Config.ExtraConfig) > 0 {
+ log.Printf("[DEBUG] reading custom configs")
custom_configs := make(map[string]types.AnyType)
for _, v := range mvm.Config.ExtraConfig {
value := v.GetOptionValue()
custom_configs[value.Key] = value.Value
+ log.Printf("[DEBUG] reading custom configs %s,%s",value.Key, value.Value)
}
d.Set("custom_configuration_parameters", custom_configs)
}
+ log.Printf("[DEBUG] ===============================")
d.Set("datastore", rootDatastore)
// Initialize the connection info
@@ -825,6 +837,7 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
// make ExtraConfig
+ log.Printf("[DEBUG] virtual machine Extra Config spec start")
if len(vm.customConfigurations) > 0 {
var ov []types.BaseOptionValue
for k, v := range vm.customConfigurations {
@@ -834,6 +847,7 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
Key: key,
Value: &value,
}
+ log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k,v)
ov = append(ov, &o)
}
configSpec.ExtraConfig = ov
@@ -1041,6 +1055,8 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
}
log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
+ log.Printf("[DEBUG] starting extra custom config spec: %v", vm.customConfigurations)
+
// make ExtraConfig
if len(vm.customConfigurations) > 0 {
var ov []types.BaseOptionValue
@@ -1149,5 +1165,6 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
return err
}
}
+ log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
return nil
}
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
index 2cae45fe48..804e1ae074 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
@@ -161,9 +161,9 @@ func TestAccVSphereVirtualMachine_custom_configs(t *testing.T) {
template,
),
Check: resource.ComposeTestCheckFunc(
- testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.bar", &vm),
+ testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.car", &vm),
resource.TestCheckResourceAttr(
- "vsphere_virtual_machine.car", "name", "terraform-test"),
+ "vsphere_virtual_machine.car", "name", "terraform-test-custom"),
resource.TestCheckResourceAttr(
"vsphere_virtual_machine.car", "vcpu", "2"),
resource.TestCheckResourceAttr(
@@ -181,7 +181,7 @@ func TestAccVSphereVirtualMachine_custom_configs(t *testing.T) {
resource.TestCheckResourceAttr(
"vsphere_virtual_machine.car", "custom_configuration_parameters.num", "42"),
resource.TestCheckResourceAttr(
- "vsphere_virtual_machine.bar", "network_interface.0.label", label),
+ "vsphere_virtual_machine.car", "network_interface.0.label", label),
),
},
},
@@ -299,9 +299,9 @@ resource "vsphere_virtual_machine" "car" {
label = "%s"
}
custom_configuration_parameters {
- foo = "bar",
- car = "ferrai",
- num = 42
+ "foo" = "bar"
+ "car" = "ferrai"
+ "num" = 42
}
disk {
%s
From 54b103b9c7cf2ba5ecd19dfd5701ec7026d41fc3 Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Wed, 11 Nov 2015 22:42:36 +0000
Subject: [PATCH 047/664] testing finished
---
.../resource_vsphere_virtual_machine.go | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index 338c95301f..274a2278d2 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -381,7 +381,7 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
var mvm mo.VirtualMachine
collector := property.DefaultCollector(client.Client)
- if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore"}, &mvm); err != nil {
+ if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore","config.extraConfig"}, &mvm); err != nil {
return err
}
@@ -437,21 +437,18 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("memory", mvm.Summary.Config.MemorySizeMB)
d.Set("cpu", mvm.Summary.Config.NumCpu)
- log.Printf("[DEBUG] ===============================")
- //log.Printf("[DEBUG] Get extra config ===============================")
- //log.Printf("[DEBUG] Get extra config %v", mvm.Config)
- //log.Printf("[DEBUG] Get extra config %v", mvm.Config.ExtraConfig)
if mvm.Config != nil && mvm.Config.ExtraConfig != nil && len(mvm.Config.ExtraConfig) > 0 {
- log.Printf("[DEBUG] reading custom configs")
- custom_configs := make(map[string]types.AnyType)
+ //TODO: can only set specific custom value, not everything
+ //Would need the config here
+ //custom_configs := make(map[string]types.AnyType)
for _, v := range mvm.Config.ExtraConfig {
value := v.GetOptionValue()
- custom_configs[value.Key] = value.Value
- log.Printf("[DEBUG] reading custom configs %s,%s",value.Key, value.Value)
+ //custom_configs[value.Key] = value.Value
+ log.Printf("[DEBUG] custom configs %s,%s",value.Key, value.Value)
}
- d.Set("custom_configuration_parameters", custom_configs)
+ //d.Set("custom_configuration_parameters", custom_configs)
}
- log.Printf("[DEBUG] ===============================")
+
d.Set("datastore", rootDatastore)
// Initialize the connection info
From 09ce6b4744476b040f89a2421c88cc7d631f16b1 Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Wed, 11 Nov 2015 22:50:18 +0000
Subject: [PATCH 048/664] updating documentation
---
.../docs/providers/vsphere/r/virtual_machine.html.markdown | 1 +
1 file changed, 1 insertion(+)
diff --git a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
index d008357ecc..4062a212b8 100644
--- a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
+++ b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
@@ -48,6 +48,7 @@ The following arguments are supported:
* `network_interface` - (Required) Configures virtual network interfaces; see [Network Interfaces](#network-interfaces) below for details.
* `disk` - (Required) Configures virtual disks; see [Disks](#disks) below for details
* `boot_delay` - (Optional) Time in seconds to wait for machine network to be ready.
+* `custom_configuration_parameters` - (Optional) Map of values that are set as the virtual machine's custom configuration parameters.
## Network Interfaces
From 7be90215bcf00b05d7b7d14ebcee60cc39bfeeee Mon Sep 17 00:00:00 2001
From: Lars Wander
Date: Thu, 12 Nov 2015 15:44:31 -0500
Subject: [PATCH 049/664] provider/google: Fix instance group manager instance
restart policy
---
builtin/providers/google/compute_operation.go | 6 ++-
...resource_compute_instance_group_manager.go | 40 +++++++++++++++++++
...mpute_instance_group_manager.html.markdown | 9 ++++-
3 files changed, 53 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/google/compute_operation.go b/builtin/providers/google/compute_operation.go
index 987e983b47..66398f9f86 100644
--- a/builtin/providers/google/compute_operation.go
+++ b/builtin/providers/google/compute_operation.go
@@ -134,6 +134,10 @@ func computeOperationWaitRegion(config *Config, op *compute.Operation, region, a
}
func computeOperationWaitZone(config *Config, op *compute.Operation, zone, activity string) error {
+ return computeOperationWaitZoneTime(config, op, zone, 4, activity)
+}
+
+func computeOperationWaitZoneTime(config *Config, op *compute.Operation, zone string, minutes int, activity string) error {
w := &ComputeOperationWaiter{
Service: config.clientCompute,
Op: op,
@@ -143,7 +147,7 @@ func computeOperationWaitZone(config *Config, op *compute.Operation, zone, activ
}
state := w.Conf()
state.Delay = 10 * time.Second
- state.Timeout = 4 * time.Minute
+ state.Timeout = time.Duration(minutes) * time.Minute
state.MinTimeout = 2 * time.Second
opRaw, err := state.WaitForState()
if err != nil {
diff --git a/builtin/providers/google/resource_compute_instance_group_manager.go b/builtin/providers/google/resource_compute_instance_group_manager.go
index b0186b7070..77b7143126 100644
--- a/builtin/providers/google/resource_compute_instance_group_manager.go
+++ b/builtin/providers/google/resource_compute_instance_group_manager.go
@@ -53,6 +53,12 @@ func resourceComputeInstanceGroupManager() *schema.Resource {
Required: true,
},
+ "update_strategy": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "RESTART",
+ },
+
"target_pools": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
@@ -112,6 +118,11 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte
manager.TargetPools = s
}
+ updateStrategy := d.Get("update_strategy").(string)
+ if !(updateStrategy == "NONE" || updateStrategy == "RESTART") {
+ return fmt.Errorf("Update strategy must be \"NONE\" or \"RESTART\"")
+ }
+
log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager)
op, err := config.clientCompute.InstanceGroupManagers.Insert(
config.Project, d.Get("zone").(string), manager).Do()
@@ -209,6 +220,35 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte
return err
}
+ if d.Get("update_strategy").(string) == "RESTART" {
+ managedInstances, err := config.clientCompute.InstanceGroupManagers.ListManagedInstances(
+ config.Project, d.Get("zone").(string), d.Id()).Do()
+
+ managedInstanceCount := len(managedInstances.ManagedInstances)
+ instances := make([]string, managedInstanceCount)
+ for i, v := range managedInstances.ManagedInstances {
+ instances[i] = v.Instance
+ }
+
+ recreateInstances := &compute.InstanceGroupManagersRecreateInstancesRequest{
+ Instances: instances,
+ }
+
+ op, err = config.clientCompute.InstanceGroupManagers.RecreateInstances(
+ config.Project, d.Get("zone").(string), d.Id(), recreateInstances).Do()
+
+ if err != nil {
+ return fmt.Errorf("Error restarting instance group managers instances: %s", err)
+ }
+
+ // Wait for the operation to complete
+ err = computeOperationWaitZoneTime(config, op, d.Get("zone").(string),
+ managedInstanceCount * 4, "Restarting InstanceGroupManagers instances")
+ if err != nil {
+ return err
+ }
+ }
+
d.SetPartial("instance_template")
}
diff --git a/website/source/docs/providers/google/r/compute_instance_group_manager.html.markdown b/website/source/docs/providers/google/r/compute_instance_group_manager.html.markdown
index 30527c80ac..8bc6c15006 100644
--- a/website/source/docs/providers/google/r/compute_instance_group_manager.html.markdown
+++ b/website/source/docs/providers/google/r/compute_instance_group_manager.html.markdown
@@ -20,6 +20,7 @@ resource "google_compute_instance_group_manager" "foobar" {
description = "Terraform test instance group manager"
name = "terraform-test"
instance_template = "${google_compute_instance_template.foobar.self_link}"
+ update_strategy= "NONE"
target_pools = ["${google_compute_target_pool.foobar.self_link}"]
base_instance_name = "foobar"
zone = "us-central1-a"
@@ -41,7 +42,13 @@ instance name.
group manager.
* `instance_template` - (Required) The full URL to an instance template from
-which all new instances will be created.
+which all new instances will be created.
+
+* `update_strategy` - (Optional, Default `"RESTART"`) If the `instance_template` resource is
+modified, a value of `"NONE"` will prevent any of the managed instances from
+being restarted by Terraform. A value of `"RESTART"` will restart all of the
+instances at once. In the future, as the GCE API matures, we will support
+`"ROLLING_UPDATE"` as well.
* `name` - (Required) The name of the instance group manager. Must be 1-63
characters long and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt).
From bf88ee8ddb07149d33628c26b9de981a8052a0a4 Mon Sep 17 00:00:00 2001
From: Sunil K Chopra
Date: Fri, 13 Nov 2015 12:40:19 -0600
Subject: [PATCH 050/664] fix test to include creation of placement group
---
.../providers/aws/resource_aws_autoscaling_group_test.go | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/aws/resource_aws_autoscaling_group_test.go b/builtin/providers/aws/resource_aws_autoscaling_group_test.go
index bf8b56c08e..43f5350be2 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_group_test.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_group_test.go
@@ -356,6 +356,11 @@ resource "aws_launch_configuration" "foobar" {
instance_type = "t1.micro"
}
+resource "aws_placement_group" "test" {
+ name = "test"
+ strategy = "cluster"
+}
+
resource "aws_autoscaling_group" "bar" {
availability_zones = ["us-west-2a"]
name = "foobar3-terraform-test"
@@ -366,7 +371,7 @@ resource "aws_autoscaling_group" "bar" {
desired_capacity = 4
force_delete = true
termination_policies = ["OldestInstance","ClosestToNextInstanceHour"]
- placement_group = "test"
+ placement_group = "${aws_placement_group.test.name}"
launch_configuration = "${aws_launch_configuration.foobar.name}"
From e899a2949f92bdb1ae3fae5df1b8de68265993ef Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Sun, 8 Nov 2015 18:02:07 -0700
Subject: [PATCH 051/664] adding capability to set custom configuration value
in virtual machines
---
.../resource_vsphere_virtual_machine.go | 84 +++++++++++++++----
1 file changed, 69 insertions(+), 15 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index ac15cd97f6..07d84367a3 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -41,21 +41,22 @@ type hardDisk struct {
}
type virtualMachine struct {
- name string
- datacenter string
- cluster string
- resourcePool string
- datastore string
- vcpu int
- memoryMb int64
- template string
- networkInterfaces []networkInterface
- hardDisks []hardDisk
- gateway string
- domain string
- timeZone string
- dnsSuffixes []string
- dnsServers []string
+ name string
+ datacenter string
+ cluster string
+ resourcePool string
+ datastore string
+ vcpu int
+ memoryMb int64
+ template string
+ networkInterfaces []networkInterface
+ hardDisks []hardDisk
+ gateway string
+ domain string
+ timeZone string
+ dnsSuffixes []string
+ dnsServers []string
+ customConfigurations map[string](types.AnyType)
}
func resourceVSphereVirtualMachine() *schema.Resource {
@@ -135,6 +136,12 @@ func resourceVSphereVirtualMachine() *schema.Resource {
ForceNew: true,
},
+ "custom_configuration_parameters": &schema.Schema{
+ Type: schema.TypeMap,
+ Optional: true,
+ ForceNew: true,
+ },
+
"network_interface": &schema.Schema{
Type: schema.TypeList,
Required: true,
@@ -261,6 +268,12 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
vm.dnsServers = DefaultDNSServers
}
+ if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
+ if custom_configs, ok := vL.(map[string]types.AnyType); ok {
+ vm.customConfigurations = custom_configs
+ }
+ }
+
if vL, ok := d.GetOk("network_interface"); ok {
networks := make([]networkInterface, len(vL.([]interface{})))
for i, v := range vL.([]interface{}) {
@@ -418,6 +431,15 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("datacenter", dc)
d.Set("memory", mvm.Summary.Config.MemorySizeMB)
d.Set("cpu", mvm.Summary.Config.NumCpu)
+
+ if mvm.Config && len(mvm.Config.ExtraConfig) > 0 {
+ custom_configs := make(map[string]string)
+ for _, v := range mvm.Config.ExtraConfig {
+ value := v.GetOptionValue()
+ custom_configs[value.Key] = value.Value
+ }
+ d.Set("custom_configuration_parameters", custom_configs)
+ }
d.Set("datastore", rootDatastore)
// Initialize the connection info
@@ -802,6 +824,22 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
}
log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
+ // make ExtraConfig
+ if len(vm.customConfigurations) > 0 {
+ var ov []types.BaseOptionValue
+ for k, v := range vm.customConfigurations {
+ key := k
+ value := v
+ o := types.OptionValue{
+ Key: key,
+ Value: &value,
+ }
+ ov = append(ov, &o)
+ }
+ configSpec.ExtraConfig = ov
+ log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
+ }
+
var datastore *object.Datastore
if vm.datastore == "" {
datastore, err = finder.DefaultDatastore(context.TODO())
@@ -1003,6 +1041,22 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
}
log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
+ // make ExtraConfig
+ if len(vm.customConfigurations) > 0 {
+ var ov []types.BaseOptionValue
+ for k, v := range vm.customConfigurations {
+ key := k
+ value := v
+ o := types.OptionValue{
+ Key: key,
+ Value: &value,
+ }
+ ov = append(ov, &o)
+ }
+ configSpec.ExtraConfig = ov
+ log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
+ }
+
// create CustomizationSpec
customSpec := types.CustomizationSpec{
Identity: &types.CustomizationLinuxPrep{
From bc36ba7f3c9aac7d56e306a8e4cfc9d9a14652b4 Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Sun, 8 Nov 2015 18:21:17 -0700
Subject: [PATCH 052/664] fixing if and AnyTypes
---
builtin/providers/vsphere/resource_vsphere_virtual_machine.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index 07d84367a3..cf636e79da 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -432,8 +432,8 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("memory", mvm.Summary.Config.MemorySizeMB)
d.Set("cpu", mvm.Summary.Config.NumCpu)
- if mvm.Config && len(mvm.Config.ExtraConfig) > 0 {
- custom_configs := make(map[string]string)
+ if len(mvm.Config.ExtraConfig) > 0 {
+ custom_configs := make(map[string]types.AnyType)
for _, v := range mvm.Config.ExtraConfig {
value := v.GetOptionValue()
custom_configs[value.Key] = value.Value
From 728b2bed636630d233e8f79f290fda2c01227655 Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Sun, 8 Nov 2015 18:49:49 -0700
Subject: [PATCH 053/664] adding new functional test
---
.../resource_vsphere_virtual_machine_test.go | 83 ++++++++++++++++++-
1 file changed, 82 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
index 66d6ea44f8..2cae45fe48 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
@@ -127,6 +127,67 @@ func TestAccVSphereVirtualMachine_dhcp(t *testing.T) {
})
}
+func TestAccVSphereVirtualMachine_custom_configs(t *testing.T) {
+ var vm virtualMachine
+ var locationOpt string
+ var datastoreOpt string
+
+ if v := os.Getenv("VSPHERE_DATACENTER"); v != "" {
+ locationOpt += fmt.Sprintf(" datacenter = \"%s\"\n", v)
+ }
+ if v := os.Getenv("VSPHERE_CLUSTER"); v != "" {
+ locationOpt += fmt.Sprintf(" cluster = \"%s\"\n", v)
+ }
+ if v := os.Getenv("VSPHERE_RESOURCE_POOL"); v != "" {
+ locationOpt += fmt.Sprintf(" resource_pool = \"%s\"\n", v)
+ }
+ if v := os.Getenv("VSPHERE_DATASTORE"); v != "" {
+ datastoreOpt = fmt.Sprintf(" datastore = \"%s\"\n", v)
+ }
+ template := os.Getenv("VSPHERE_TEMPLATE")
+ label := os.Getenv("VSPHERE_NETWORK_LABEL_DHCP")
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckVSphereVirtualMachineDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: fmt.Sprintf(
+ testAccCheckVSphereVirtualMachineConfig_custom_configs,
+ locationOpt,
+ label,
+ datastoreOpt,
+ template,
+ ),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.bar", &vm),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "name", "terraform-test"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "vcpu", "2"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "memory", "4096"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "disk.#", "1"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "disk.0.template", template),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "network_interface.#", "1"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "custom_configuration_parameters.foo", "bar"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "custom_configuration_parameters.car", "ferrai"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.car", "custom_configuration_parameters.num", "42"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.bar", "network_interface.0.label", label),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckVSphereVirtualMachineDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*govmomi.Client)
finder := find.NewFinder(client.Client, true)
@@ -212,7 +273,6 @@ resource "vsphere_virtual_machine" "foo" {
}
}
`
-
const testAccCheckVSphereVirtualMachineConfig_dhcp = `
resource "vsphere_virtual_machine" "bar" {
name = "terraform-test"
@@ -228,3 +288,24 @@ resource "vsphere_virtual_machine" "bar" {
}
}
`
+
+const testAccCheckVSphereVirtualMachineConfig_custom_configs = `
+resource "vsphere_virtual_machine" "car" {
+ name = "terraform-test-custom"
+%s
+ vcpu = 2
+ memory = 4096
+ network_interface {
+ label = "%s"
+ }
+ custom_configuration_parameters {
+ foo = "bar",
+ car = "ferrai",
+ num = 42
+ }
+ disk {
+%s
+ template = "%s"
+ }
+}
+`
From 0f46b3a6c569bd85bdcfb9f02ec151a4c8ecf72f Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Mon, 9 Nov 2015 04:50:09 +0000
Subject: [PATCH 054/664] working on read and more testing
---
.../resource_vsphere_virtual_machine.go | 23 ++++++++++++++++---
.../resource_vsphere_virtual_machine_test.go | 12 +++++-----
2 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index cf636e79da..338c95301f 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -269,8 +269,13 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
}
if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
- if custom_configs, ok := vL.(map[string]types.AnyType); ok {
- vm.customConfigurations = custom_configs
+ if custom_configs, ok := vL.(map[string]interface{}); ok {
+ custom := make(map[string]types.AnyType)
+ for k,v := range custom_configs {
+ custom[k] = v
+ }
+ vm.customConfigurations = custom
+ log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
}
}
@@ -432,14 +437,21 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("memory", mvm.Summary.Config.MemorySizeMB)
d.Set("cpu", mvm.Summary.Config.NumCpu)
- if len(mvm.Config.ExtraConfig) > 0 {
+ log.Printf("[DEBUG] ===============================")
+ //log.Printf("[DEBUG] Get extra config ===============================")
+ //log.Printf("[DEBUG] Get extra config %v", mvm.Config)
+ //log.Printf("[DEBUG] Get extra config %v", mvm.Config.ExtraConfig)
+ if mvm.Config != nil && mvm.Config.ExtraConfig != nil && len(mvm.Config.ExtraConfig) > 0 {
+ log.Printf("[DEBUG] reading custom configs")
custom_configs := make(map[string]types.AnyType)
for _, v := range mvm.Config.ExtraConfig {
value := v.GetOptionValue()
custom_configs[value.Key] = value.Value
+ log.Printf("[DEBUG] reading custom configs %s,%s",value.Key, value.Value)
}
d.Set("custom_configuration_parameters", custom_configs)
}
+ log.Printf("[DEBUG] ===============================")
d.Set("datastore", rootDatastore)
// Initialize the connection info
@@ -825,6 +837,7 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
// make ExtraConfig
+ log.Printf("[DEBUG] virtual machine Extra Config spec start")
if len(vm.customConfigurations) > 0 {
var ov []types.BaseOptionValue
for k, v := range vm.customConfigurations {
@@ -834,6 +847,7 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
Key: key,
Value: &value,
}
+ log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k,v)
ov = append(ov, &o)
}
configSpec.ExtraConfig = ov
@@ -1041,6 +1055,8 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
}
log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
+ log.Printf("[DEBUG] starting extra custom config spec: %v", vm.customConfigurations)
+
// make ExtraConfig
if len(vm.customConfigurations) > 0 {
var ov []types.BaseOptionValue
@@ -1149,5 +1165,6 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
return err
}
}
+ log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
return nil
}
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
index 2cae45fe48..804e1ae074 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
@@ -161,9 +161,9 @@ func TestAccVSphereVirtualMachine_custom_configs(t *testing.T) {
template,
),
Check: resource.ComposeTestCheckFunc(
- testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.bar", &vm),
+ testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.car", &vm),
resource.TestCheckResourceAttr(
- "vsphere_virtual_machine.car", "name", "terraform-test"),
+ "vsphere_virtual_machine.car", "name", "terraform-test-custom"),
resource.TestCheckResourceAttr(
"vsphere_virtual_machine.car", "vcpu", "2"),
resource.TestCheckResourceAttr(
@@ -181,7 +181,7 @@ func TestAccVSphereVirtualMachine_custom_configs(t *testing.T) {
resource.TestCheckResourceAttr(
"vsphere_virtual_machine.car", "custom_configuration_parameters.num", "42"),
resource.TestCheckResourceAttr(
- "vsphere_virtual_machine.bar", "network_interface.0.label", label),
+ "vsphere_virtual_machine.car", "network_interface.0.label", label),
),
},
},
@@ -299,9 +299,9 @@ resource "vsphere_virtual_machine" "car" {
label = "%s"
}
custom_configuration_parameters {
- foo = "bar",
- car = "ferrai",
- num = 42
+ "foo" = "bar"
+ "car" = "ferrai"
+ "num" = 42
}
disk {
%s
From b47d1cda7c8da6cf8ad0cb69148550dcb17db37e Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Sun, 8 Nov 2015 18:02:07 -0700
Subject: [PATCH 055/664] adding capability to set custom configuration value
in virtual machines
---
.../resource_vsphere_virtual_machine.go | 35 ++++++++++---------
1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index 338c95301f..dbaa3f8843 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -269,13 +269,8 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
}
if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
- if custom_configs, ok := vL.(map[string]interface{}); ok {
- custom := make(map[string]types.AnyType)
- for k,v := range custom_configs {
- custom[k] = v
- }
- vm.customConfigurations = custom
- log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
+ if custom_configs, ok := vL.(map[string]types.AnyType); ok {
+ vm.customConfigurations = custom_configs
}
}
@@ -437,21 +432,14 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("memory", mvm.Summary.Config.MemorySizeMB)
d.Set("cpu", mvm.Summary.Config.NumCpu)
- log.Printf("[DEBUG] ===============================")
- //log.Printf("[DEBUG] Get extra config ===============================")
- //log.Printf("[DEBUG] Get extra config %v", mvm.Config)
- //log.Printf("[DEBUG] Get extra config %v", mvm.Config.ExtraConfig)
- if mvm.Config != nil && mvm.Config.ExtraConfig != nil && len(mvm.Config.ExtraConfig) > 0 {
- log.Printf("[DEBUG] reading custom configs")
- custom_configs := make(map[string]types.AnyType)
+ if mvm.Config && len(mvm.Config.ExtraConfig) > 0 {
+ custom_configs := make(map[string]string)
for _, v := range mvm.Config.ExtraConfig {
value := v.GetOptionValue()
custom_configs[value.Key] = value.Value
- log.Printf("[DEBUG] reading custom configs %s,%s",value.Key, value.Value)
}
d.Set("custom_configuration_parameters", custom_configs)
}
- log.Printf("[DEBUG] ===============================")
d.Set("datastore", rootDatastore)
// Initialize the connection info
@@ -837,6 +825,21 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
// make ExtraConfig
+ if len(vm.customConfigurations) > 0 {
+ var ov []types.BaseOptionValue
+ for k, v := range vm.customConfigurations {
+ key := k
+ value := v
+ o := types.OptionValue{
+ Key: key,
+ Value: &value,
+ }
+ ov = append(ov, &o)
+ }
+ configSpec.ExtraConfig = ov
+ log.Printf("[DEBUG] virtual machine Extra Config spec: %v", configSpec.ExtraConfig)
+ }
+
log.Printf("[DEBUG] virtual machine Extra Config spec start")
if len(vm.customConfigurations) > 0 {
var ov []types.BaseOptionValue
From cae7fd8e4ad004df1714d1d0bb93d36917926d2c Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Sun, 8 Nov 2015 18:21:17 -0700
Subject: [PATCH 056/664] fixing if and AnyTypes
---
builtin/providers/vsphere/resource_vsphere_virtual_machine.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index dbaa3f8843..468d3cc06c 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -432,8 +432,8 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("memory", mvm.Summary.Config.MemorySizeMB)
d.Set("cpu", mvm.Summary.Config.NumCpu)
- if mvm.Config && len(mvm.Config.ExtraConfig) > 0 {
- custom_configs := make(map[string]string)
+ if len(mvm.Config.ExtraConfig) > 0 {
+ custom_configs := make(map[string]types.AnyType)
for _, v := range mvm.Config.ExtraConfig {
value := v.GetOptionValue()
custom_configs[value.Key] = value.Value
From 8c47441a8bcc8ca73a3362fb4d4a316323dd8c5f Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Sun, 8 Nov 2015 18:49:49 -0700
Subject: [PATCH 057/664] adding new functional test
---
.../resource_vsphere_virtual_machine_test.go | 64 ++-----------------
1 file changed, 6 insertions(+), 58 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
index 804e1ae074..a1bd468fea 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
@@ -127,43 +127,9 @@ func TestAccVSphereVirtualMachine_dhcp(t *testing.T) {
})
}
-func TestAccVSphereVirtualMachine_custom_configs(t *testing.T) {
- var vm virtualMachine
- var locationOpt string
- var datastoreOpt string
-
- if v := os.Getenv("VSPHERE_DATACENTER"); v != "" {
- locationOpt += fmt.Sprintf(" datacenter = \"%s\"\n", v)
- }
- if v := os.Getenv("VSPHERE_CLUSTER"); v != "" {
- locationOpt += fmt.Sprintf(" cluster = \"%s\"\n", v)
- }
- if v := os.Getenv("VSPHERE_RESOURCE_POOL"); v != "" {
- locationOpt += fmt.Sprintf(" resource_pool = \"%s\"\n", v)
- }
- if v := os.Getenv("VSPHERE_DATASTORE"); v != "" {
- datastoreOpt = fmt.Sprintf(" datastore = \"%s\"\n", v)
- }
- template := os.Getenv("VSPHERE_TEMPLATE")
- label := os.Getenv("VSPHERE_NETWORK_LABEL_DHCP")
-
- resource.Test(t, resource.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
- Providers: testAccProviders,
- CheckDestroy: testAccCheckVSphereVirtualMachineDestroy,
- Steps: []resource.TestStep{
- resource.TestStep{
- Config: fmt.Sprintf(
- testAccCheckVSphereVirtualMachineConfig_custom_configs,
- locationOpt,
- label,
- datastoreOpt,
- template,
- ),
- Check: resource.ComposeTestCheckFunc(
- testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.car", &vm),
+ testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.bar", &vm),
resource.TestCheckResourceAttr(
- "vsphere_virtual_machine.car", "name", "terraform-test-custom"),
+ "vsphere_virtual_machine.car", "name", "terraform-test"),
resource.TestCheckResourceAttr(
"vsphere_virtual_machine.car", "vcpu", "2"),
resource.TestCheckResourceAttr(
@@ -181,7 +147,7 @@ func TestAccVSphereVirtualMachine_custom_configs(t *testing.T) {
resource.TestCheckResourceAttr(
"vsphere_virtual_machine.car", "custom_configuration_parameters.num", "42"),
resource.TestCheckResourceAttr(
- "vsphere_virtual_machine.car", "network_interface.0.label", label),
+ "vsphere_virtual_machine.bar", "network_interface.0.label", label),
),
},
},
@@ -288,24 +254,6 @@ resource "vsphere_virtual_machine" "bar" {
}
}
`
-
-const testAccCheckVSphereVirtualMachineConfig_custom_configs = `
-resource "vsphere_virtual_machine" "car" {
- name = "terraform-test-custom"
-%s
- vcpu = 2
- memory = 4096
- network_interface {
- label = "%s"
- }
- custom_configuration_parameters {
- "foo" = "bar"
- "car" = "ferrai"
- "num" = 42
- }
- disk {
-%s
- template = "%s"
- }
-}
-`
+ foo = "bar",
+ car = "ferrai",
+ num = 42
From 6615285d63bccf3ced0947bc38a4b3567082cac6 Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Mon, 9 Nov 2015 04:50:09 +0000
Subject: [PATCH 058/664] working on read and more testing
---
.../resource_vsphere_virtual_machine.go | 20 ++++++++++++++++---
.../resource_vsphere_virtual_machine_test.go | 19 ++++++++++++------
2 files changed, 30 insertions(+), 9 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index 468d3cc06c..ea17dc09b5 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -269,8 +269,13 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
}
if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
- if custom_configs, ok := vL.(map[string]types.AnyType); ok {
- vm.customConfigurations = custom_configs
+ if custom_configs, ok := vL.(map[string]interface{}); ok {
+ custom := make(map[string]types.AnyType)
+ for k,v := range custom_configs {
+ custom[k] = v
+ }
+ vm.customConfigurations = custom
+ log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
}
}
@@ -432,14 +437,21 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("memory", mvm.Summary.Config.MemorySizeMB)
d.Set("cpu", mvm.Summary.Config.NumCpu)
- if len(mvm.Config.ExtraConfig) > 0 {
+ log.Printf("[DEBUG] ===============================")
+ //log.Printf("[DEBUG] Get extra config ===============================")
+ //log.Printf("[DEBUG] Get extra config %v", mvm.Config)
+ //log.Printf("[DEBUG] Get extra config %v", mvm.Config.ExtraConfig)
+ if mvm.Config != nil && mvm.Config.ExtraConfig != nil && len(mvm.Config.ExtraConfig) > 0 {
+ log.Printf("[DEBUG] reading custom configs")
custom_configs := make(map[string]types.AnyType)
for _, v := range mvm.Config.ExtraConfig {
value := v.GetOptionValue()
custom_configs[value.Key] = value.Value
+ log.Printf("[DEBUG] reading custom configs %s,%s",value.Key, value.Value)
}
d.Set("custom_configuration_parameters", custom_configs)
}
+ log.Printf("[DEBUG] ===============================")
d.Set("datastore", rootDatastore)
// Initialize the connection info
@@ -825,6 +837,7 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
// make ExtraConfig
+ log.Printf("[DEBUG] virtual machine Extra Config spec start")
if len(vm.customConfigurations) > 0 {
var ov []types.BaseOptionValue
for k, v := range vm.customConfigurations {
@@ -834,6 +847,7 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
Key: key,
Value: &value,
}
+ log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k,v)
ov = append(ov, &o)
}
configSpec.ExtraConfig = ov
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
index a1bd468fea..9b0b29cae3 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
@@ -127,9 +127,9 @@ func TestAccVSphereVirtualMachine_dhcp(t *testing.T) {
})
}
- testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.bar", &vm),
+ testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.car", &vm),
resource.TestCheckResourceAttr(
- "vsphere_virtual_machine.car", "name", "terraform-test"),
+ "vsphere_virtual_machine.car", "name", "terraform-test-custom"),
resource.TestCheckResourceAttr(
"vsphere_virtual_machine.car", "vcpu", "2"),
resource.TestCheckResourceAttr(
@@ -147,7 +147,7 @@ func TestAccVSphereVirtualMachine_dhcp(t *testing.T) {
resource.TestCheckResourceAttr(
"vsphere_virtual_machine.car", "custom_configuration_parameters.num", "42"),
resource.TestCheckResourceAttr(
- "vsphere_virtual_machine.bar", "network_interface.0.label", label),
+ "vsphere_virtual_machine.car", "network_interface.0.label", label),
),
},
},
@@ -254,6 +254,13 @@ resource "vsphere_virtual_machine" "bar" {
}
}
`
- foo = "bar",
- car = "ferrai",
- num = 42
+ "foo" = "bar"
+ "car" = "ferrai"
+ "num" = 42
+ }
+ disk {
+%s
+ template = "%s"
+ }
+}
+`
From ce6f0ae5e4eaef94ee1b3449d5f7614a1c21633d Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Wed, 11 Nov 2015 22:42:36 +0000
Subject: [PATCH 059/664] testing finished
---
.../resource_vsphere_virtual_machine.go | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index ea17dc09b5..b11021e63e 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -381,7 +381,7 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
var mvm mo.VirtualMachine
collector := property.DefaultCollector(client.Client)
- if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore"}, &mvm); err != nil {
+ if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore","config.extraConfig"}, &mvm); err != nil {
return err
}
@@ -437,21 +437,18 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("memory", mvm.Summary.Config.MemorySizeMB)
d.Set("cpu", mvm.Summary.Config.NumCpu)
- log.Printf("[DEBUG] ===============================")
- //log.Printf("[DEBUG] Get extra config ===============================")
- //log.Printf("[DEBUG] Get extra config %v", mvm.Config)
- //log.Printf("[DEBUG] Get extra config %v", mvm.Config.ExtraConfig)
if mvm.Config != nil && mvm.Config.ExtraConfig != nil && len(mvm.Config.ExtraConfig) > 0 {
- log.Printf("[DEBUG] reading custom configs")
- custom_configs := make(map[string]types.AnyType)
+ //TODO: can only set specific custom value, not everything
+ //Would need the config here
+ //custom_configs := make(map[string]types.AnyType)
for _, v := range mvm.Config.ExtraConfig {
value := v.GetOptionValue()
- custom_configs[value.Key] = value.Value
- log.Printf("[DEBUG] reading custom configs %s,%s",value.Key, value.Value)
+ //custom_configs[value.Key] = value.Value
+ log.Printf("[DEBUG] custom configs %s,%s",value.Key, value.Value)
}
- d.Set("custom_configuration_parameters", custom_configs)
+ //d.Set("custom_configuration_parameters", custom_configs)
}
- log.Printf("[DEBUG] ===============================")
+
d.Set("datastore", rootDatastore)
// Initialize the connection info
From 6d13b9296b759cd34e89fa67dae661d86fbd4f9a Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Wed, 11 Nov 2015 22:50:18 +0000
Subject: [PATCH 060/664] updating documentation
---
.../docs/providers/vsphere/r/virtual_machine.html.markdown | 1 +
1 file changed, 1 insertion(+)
diff --git a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
index d008357ecc..4062a212b8 100644
--- a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
+++ b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
@@ -48,6 +48,7 @@ The following arguments are supported:
* `network_interface` - (Required) Configures virtual network interfaces; see [Network Interfaces](#network-interfaces) below for details.
* `disk` - (Required) Configures virtual disks; see [Disks](#disks) below for details
* `boot_delay` - (Optional) Time in seconds to wait for machine network to be ready.
+* `custom_configuration_parameters` - (Optional) Map of values that are set as virtual machine custom configuration parameters.
## Network Interfaces
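For readers following along, here is a minimal configuration sketch of the `custom_configuration_parameters` argument documented in the patch above. The key/value pairs mirror the acceptance-test config earlier in this series; the VM name, network label, and disk template are illustrative placeholders, not values taken from these patches.

```
resource "vsphere_virtual_machine" "example" {
  name   = "terraform-test-custom"
  vcpu   = 2
  memory = 4096

  network_interface {
    label = "VM Network"
  }

  # Arbitrary key/value pairs written into the machine's extraConfig.
  custom_configuration_parameters {
    "foo" = "bar"
    "num" = 42
  }

  disk {
    template = "centos-7-base"
  }
}
```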
From 309e697a524703f381269575ee4b67af9f5a9d5c Mon Sep 17 00:00:00 2001
From: Silas Sewell
Date: Sun, 15 Nov 2015 11:45:05 -0500
Subject: [PATCH 061/664] provider/tls: add locally signed certificates
This allows you to generate and sign certificates using a local CA.
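As a rough usage sketch (not part of the patch itself): the new resource is intended to be fed a CSR, typically produced by the existing `tls_cert_request` resource, and signed with a CA key and certificate supplied from outside Terraform. The file paths, the `RSA`/`ECDSA` algorithm choices, and the `common_name` value below are illustrative assumptions.

```
resource "tls_cert_request" "example" {
  key_algorithm   = "RSA"
  private_key_pem = "${file(\"private_key.pem\")}"

  subject {
    common_name = "example.com"
  }
}

resource "tls_locally_signed_cert" "example" {
  cert_request_pem   = "${tls_cert_request.example.cert_request_pem}"
  ca_key_algorithm   = "ECDSA"
  ca_private_key_pem = "${file(\"ca_private_key.pem\")}"
  ca_cert_pem        = "${file(\"ca_cert.pem\")}"

  validity_period_hours = 12

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "server_auth",
  ]
}
```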
---
builtin/providers/tls/provider.go | 7 +-
builtin/providers/tls/provider_test.go | 59 ++++
.../providers/tls/resource_cert_request.go | 18 +-
builtin/providers/tls/resource_certificate.go | 210 ++++++++++++++
.../tls/resource_locally_signed_cert.go | 79 ++++++
.../tls/resource_locally_signed_cert_test.go | 162 +++++++++++
.../tls/resource_self_signed_cert.go | 268 ++++--------------
builtin/providers/tls/util.go | 76 +++++
.../tls/r/locally_signed_cert.html.md | 118 ++++++++
9 files changed, 765 insertions(+), 232 deletions(-)
create mode 100644 builtin/providers/tls/resource_certificate.go
create mode 100644 builtin/providers/tls/resource_locally_signed_cert.go
create mode 100644 builtin/providers/tls/resource_locally_signed_cert_test.go
create mode 100644 builtin/providers/tls/util.go
create mode 100644 website/source/docs/providers/tls/r/locally_signed_cert.html.md
diff --git a/builtin/providers/tls/provider.go b/builtin/providers/tls/provider.go
index 69dfa0dedf..e6c1d61980 100644
--- a/builtin/providers/tls/provider.go
+++ b/builtin/providers/tls/provider.go
@@ -13,9 +13,10 @@ import (
func Provider() terraform.ResourceProvider {
return &schema.Provider{
ResourcesMap: map[string]*schema.Resource{
- "tls_private_key": resourcePrivateKey(),
- "tls_self_signed_cert": resourceSelfSignedCert(),
- "tls_cert_request": resourceCertRequest(),
+ "tls_private_key": resourcePrivateKey(),
+ "tls_locally_signed_cert": resourceLocallySignedCert(),
+ "tls_self_signed_cert": resourceSelfSignedCert(),
+ "tls_cert_request": resourceCertRequest(),
},
}
}
diff --git a/builtin/providers/tls/provider_test.go b/builtin/providers/tls/provider_test.go
index 31b014733e..7dc7af0d2f 100644
--- a/builtin/providers/tls/provider_test.go
+++ b/builtin/providers/tls/provider_test.go
@@ -34,3 +34,62 @@ DrUJcPbKUfF4VBqmmwwkpwT938Hr/iCcS6kE3hqXiN9a5XJb4vnk2FdZNPS9hf2J
rpxCHbX0xSJh0s8j7exRHMF8W16DHjjkc265YdWPXWo=
-----END RSA PRIVATE KEY-----
`
+
+var testCertRequest = `
+-----BEGIN CERTIFICATE REQUEST-----
+MIICYDCCAckCAQAwgcUxFDASBgNVBAMMC2V4YW1wbGUuY29tMQswCQYDVQQGEwJV
+UzELMAkGA1UECAwCQ0ExFjAUBgNVBAcMDVBpcmF0ZSBIYXJib3IxGTAXBgNVBAkM
+EDU4NzkgQ290dG9uIExpbmsxEzARBgNVBBEMCjk1NTU5LTEyMjcxFTATBgNVBAoM
+DEV4YW1wbGUsIEluYzEoMCYGA1UECwwfRGVwYXJ0bWVudCBvZiBUZXJyYWZvcm0g
+VGVzdGluZzEKMAgGA1UEBRMBMjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
+qLFq7Tpmlt0uDCCn5bA/oTj4v16/pXXaD+Ice2bS4rBH2UUM2gca5U4j8QCxrIxh
+91mBvloE4VS5xrIGotAwoMgwK3E2md5kzQJToDve/hm8JNOcms+OAOjfjajPc40e
++ue9roT8VjWGU0wz7ttQNuao56GXYr5kOpcfiZMs7RcCAwEAAaBaMFgGCSqGSIb3
+DQEJDjFLMEkwLwYDVR0RBCgwJoILZXhhbXBsZS5jb22CC2V4YW1wbGUubmV0hwR/
+AAABhwR/AAACMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMA0GCSqGSIb3DQEBBQUA
+A4GBAGEDWUYnGygtnvScamz3o4PuVMFubBfqIdWCu02hBgzL3Hi3/UkOEsV028GM
+M3YMB+it7U8eDdT2XjzBDlvpxWT1hXWnmJFu6z6B8N/JFk8fOkaP7U6YjZlG5N9m
+L1A4WtQz0SgXcnIujKisqIaymYrvpANnm4IsqTKsnwZD7CsQ
+-----END CERTIFICATE REQUEST-----
+`
+
+var testCAPrivateKey = `
+-----BEGIN RSA PRIVATE KEY-----
+MIICXAIBAAKBgQC7QNFtw54heoD9KL2s2Qr7utKZFM/8GXYHh3Y5/Zis9USlJ7Mc
+Lorbmm9Lopnr5zUBZULAxAgX51X0FbifK8Re3JIZvpFRyxNw8aWYBnOk/sX7UhUH
+pI139dSAhkNAMkRQd1ySpDP+4okCptgZPs7h0bXwoYmWMNFKlaRZHuAQLQIDAQAB
+AoGAQ/YwjLAU8n2t1zQ0M0nLDLYvvVOqcQskpXLq2/1Irm2OborMHQxfZXjVsBPh
+3ZbazBjec2wyq8pQjfhcO5j8+fj9zLtRNDpWEa9t/VDky0MSGezQyLL1J5+htFDJ
+JDCkKK441IWKGCMC31hoVP6PvE/3G2+vWAkrkT4U7ekLQVkCQQD1/RKMxDFJ57Qr
+Zlu1y72dnGLsGqoxeNaco6G5JXAEEcWTx8qXghKQX0uHxooeRYQRupOGLBo1Js1p
+/AZDR8inAkEAwt/J0GDsojV89RbpJ0h7C1kcxNULooCYQZs/rmJcVXSs6pUIIFdI
+oYQIEGnRsfQUPo6EUUGMKh8sSEjF6R8nCwJBAMKYuoT7a9aAYwp2RhTSIaW+oo8P
+JRZP9s8hr31tPWkqufeHdSBYOOFXUcQObxM1gR4ZUD0zRGRJ1vSB+F5fOj8CQEuG
+HZnTpoHrBuWZnnyp+33XaG3kP2EYQ2nRuClmV3CLCmTTo1WdXjmyiMmLqUg1Vw8z
+fpZbN+4vLKNLCOCjQScCQDWmNDrie4Omd5wWKV5B+LVZO8/xMlub6IEioZpMfDGZ
+q1Ov/Qw2ge3yumfO+6GzKG0k13yYEn1AcatF5lP8BYY=
+-----END RSA PRIVATE KEY-----
+`
+
+var testCACert = `
+-----BEGIN CERTIFICATE-----
+MIIDVTCCAr6gAwIBAgIJALLsVgWAcCvxMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV
+BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNUGlyYXRlIEhhcmJvcjEVMBMG
+A1UEChMMRXhhbXBsZSwgSW5jMSEwHwYDVQQLExhEZXBhcnRtZW50IG9mIENBIFRl
+c3RpbmcxDTALBgNVBAMTBHJvb3QwHhcNMTUxMTE0MTY1MTQ0WhcNMTUxMjE0MTY1
+MTQ0WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVBpcmF0
+ZSBIYXJib3IxFTATBgNVBAoTDEV4YW1wbGUsIEluYzEhMB8GA1UECxMYRGVwYXJ0
+bWVudCBvZiBDQSBUZXN0aW5nMQ0wCwYDVQQDEwRyb290MIGfMA0GCSqGSIb3DQEB
+AQUAA4GNADCBiQKBgQC7QNFtw54heoD9KL2s2Qr7utKZFM/8GXYHh3Y5/Zis9USl
+J7McLorbmm9Lopnr5zUBZULAxAgX51X0FbifK8Re3JIZvpFRyxNw8aWYBnOk/sX7
+UhUHpI139dSAhkNAMkRQd1ySpDP+4okCptgZPs7h0bXwoYmWMNFKlaRZHuAQLQID
+AQABo4HgMIHdMB0GA1UdDgQWBBQyrsMhTd85ATqm9vNybTtAbwnGkDCBrQYDVR0j
+BIGlMIGigBQyrsMhTd85ATqm9vNybTtAbwnGkKF/pH0wezELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1QaXJhdGUgSGFyYm9yMRUwEwYDVQQKEwxF
+eGFtcGxlLCBJbmMxITAfBgNVBAsTGERlcGFydG1lbnQgb2YgQ0EgVGVzdGluZzEN
+MAsGA1UEAxMEcm9vdIIJALLsVgWAcCvxMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN
+AQEFBQADgYEAuJ7JGZlSzbQOuAFz2t3c1pQzUIiS74blFbg6RPvNPSSjoBg3Ly61
+FbliR8P3qiSWA/X03/XSMTH1XkHU8re+P0uILUzLJkKBkdHJfdwfk8kifDjdO14+
+tffPaqAEFUkwhbiQUoj9aeTOOS6kEjbMV6+o7fsz5pPUHbj/l4idys0=
+-----END CERTIFICATE-----
+`
diff --git a/builtin/providers/tls/resource_cert_request.go b/builtin/providers/tls/resource_cert_request.go
index ac1f70071f..7dd1430c6b 100644
--- a/builtin/providers/tls/resource_cert_request.go
+++ b/builtin/providers/tls/resource_cert_request.go
@@ -10,6 +10,8 @@ import (
"github.com/hashicorp/terraform/helper/schema"
)
+const pemCertReqType = "CERTIFICATE REQUEST"
+
func resourceCertRequest() *schema.Resource {
return &schema.Resource{
Create: CreateCertRequest,
@@ -71,19 +73,9 @@ func resourceCertRequest() *schema.Resource {
}
func CreateCertRequest(d *schema.ResourceData, meta interface{}) error {
- keyAlgoName := d.Get("key_algorithm").(string)
- var keyFunc keyParser
- var ok bool
- if keyFunc, ok = keyParsers[keyAlgoName]; !ok {
- return fmt.Errorf("invalid key_algorithm %#v", keyAlgoName)
- }
- keyBlock, _ := pem.Decode([]byte(d.Get("private_key_pem").(string)))
- if keyBlock == nil {
- return fmt.Errorf("no PEM block found in private_key_pem")
- }
- key, err := keyFunc(keyBlock.Bytes)
+ key, err := parsePrivateKey(d, "private_key_pem", "key_algorithm")
if err != nil {
- return fmt.Errorf("failed to decode private_key_pem: %s", err)
+ return err
}
subjectConfs := d.Get("subject").([]interface{})
@@ -117,7 +109,7 @@ func CreateCertRequest(d *schema.ResourceData, meta interface{}) error {
if err != nil {
fmt.Errorf("Error creating certificate request: %s", err)
}
- certReqPem := string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: certReqBytes}))
+ certReqPem := string(pem.EncodeToMemory(&pem.Block{Type: pemCertReqType, Bytes: certReqBytes}))
d.SetId(hashForState(string(certReqBytes)))
d.Set("cert_request_pem", certReqPem)
diff --git a/builtin/providers/tls/resource_certificate.go b/builtin/providers/tls/resource_certificate.go
new file mode 100644
index 0000000000..bfdc6eea7f
--- /dev/null
+++ b/builtin/providers/tls/resource_certificate.go
@@ -0,0 +1,210 @@
+package tls
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/x509"
+ "encoding/asn1"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "math/big"
+ "time"
+
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+const pemCertType = "CERTIFICATE"
+
+var keyUsages map[string]x509.KeyUsage = map[string]x509.KeyUsage{
+ "digital_signature": x509.KeyUsageDigitalSignature,
+ "content_commitment": x509.KeyUsageContentCommitment,
+ "key_encipherment": x509.KeyUsageKeyEncipherment,
+ "data_encipherment": x509.KeyUsageDataEncipherment,
+ "key_agreement": x509.KeyUsageKeyAgreement,
+ "cert_signing": x509.KeyUsageCertSign,
+ "crl_signing": x509.KeyUsageCRLSign,
+ "encipher_only": x509.KeyUsageEncipherOnly,
+ "decipher_only": x509.KeyUsageDecipherOnly,
+}
+
+var extKeyUsages map[string]x509.ExtKeyUsage = map[string]x509.ExtKeyUsage{
+ "any_extended": x509.ExtKeyUsageAny,
+ "server_auth": x509.ExtKeyUsageServerAuth,
+ "client_auth": x509.ExtKeyUsageClientAuth,
+ "code_signing": x509.ExtKeyUsageCodeSigning,
+ "email_protection": x509.ExtKeyUsageEmailProtection,
+ "ipsec_end_system": x509.ExtKeyUsageIPSECEndSystem,
+ "ipsec_tunnel": x509.ExtKeyUsageIPSECTunnel,
+ "ipsec_user": x509.ExtKeyUsageIPSECUser,
+ "timestamping": x509.ExtKeyUsageTimeStamping,
+ "ocsp_signing": x509.ExtKeyUsageOCSPSigning,
+ "microsoft_server_gated_crypto": x509.ExtKeyUsageMicrosoftServerGatedCrypto,
+ "netscape_server_gated_crypto": x509.ExtKeyUsageNetscapeServerGatedCrypto,
+}
+
+// rsaPublicKey reflects the ASN.1 structure of a PKCS#1 public key.
+type rsaPublicKey struct {
+ N *big.Int
+ E int
+}
+
+// generateSubjectKeyID generates a SHA-1 hash of the subject public key.
+func generateSubjectKeyID(pub crypto.PublicKey) ([]byte, error) {
+ var publicKeyBytes []byte
+ var err error
+
+ switch pub := pub.(type) {
+ case *rsa.PublicKey:
+ publicKeyBytes, err = asn1.Marshal(rsaPublicKey{N: pub.N, E: pub.E})
+ if err != nil {
+ return nil, err
+ }
+ case *ecdsa.PublicKey:
+ publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
+ default:
+ return nil, errors.New("only RSA and ECDSA public keys supported")
+ }
+
+ hash := sha1.Sum(publicKeyBytes)
+ return hash[:], nil
+}
+
+func resourceCertificateCommonSchema() map[string]*schema.Schema {
+ return map[string]*schema.Schema{
+ "validity_period_hours": &schema.Schema{
+ Type: schema.TypeInt,
+ Required: true,
+ Description: "Number of hours that the certificate will remain valid for",
+ ForceNew: true,
+ },
+
+ "early_renewal_hours": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 0,
+ Description: "Number of hours before the certificates expiry when a new certificate will be generated",
+ ForceNew: true,
+ },
+
+ "is_ca_certificate": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Description: "Whether the generated certificate will be usable as a CA certificate",
+ ForceNew: true,
+ },
+
+ "allowed_uses": &schema.Schema{
+ Type: schema.TypeList,
+ Required: true,
+ Description: "Uses that are allowed for the certificate",
+ ForceNew: true,
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+
+ "cert_pem": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "validity_start_time": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "validity_end_time": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ }
+}
+
+func createCertificate(d *schema.ResourceData, template, parent *x509.Certificate, pub crypto.PublicKey, priv interface{}) error {
+ var err error
+
+ template.NotBefore = time.Now()
+ template.NotAfter = template.NotBefore.Add(time.Duration(d.Get("validity_period_hours").(int)) * time.Hour)
+
+ serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+ template.SerialNumber, err = rand.Int(rand.Reader, serialNumberLimit)
+ if err != nil {
+ return fmt.Errorf("failed to generate serial number: %s", err)
+ }
+
+ keyUsesI := d.Get("allowed_uses").([]interface{})
+ for _, keyUseI := range keyUsesI {
+ keyUse := keyUseI.(string)
+ if usage, ok := keyUsages[keyUse]; ok {
+ template.KeyUsage |= usage
+ }
+ if usage, ok := extKeyUsages[keyUse]; ok {
+ template.ExtKeyUsage = append(template.ExtKeyUsage, usage)
+ }
+ }
+
+ if d.Get("is_ca_certificate").(bool) {
+ template.IsCA = true
+
+ template.SubjectKeyId, err = generateSubjectKeyID(pub)
+ if err != nil {
+ return fmt.Errorf("failed to set subject key identifier: %s", err)
+ }
+ }
+
+ certBytes, err := x509.CreateCertificate(rand.Reader, template, parent, pub, priv)
+ if err != nil {
+ fmt.Errorf("error creating certificate: %s", err)
+ }
+ certPem := string(pem.EncodeToMemory(&pem.Block{Type: pemCertType, Bytes: certBytes}))
+
+ validFromBytes, err := template.NotBefore.MarshalText()
+ if err != nil {
+ return fmt.Errorf("error serializing validity_start_time: %s", err)
+ }
+ validToBytes, err := template.NotAfter.MarshalText()
+ if err != nil {
+ return fmt.Errorf("error serializing validity_end_time: %s", err)
+ }
+
+ d.SetId(template.SerialNumber.String())
+ d.Set("cert_pem", certPem)
+ d.Set("validity_start_time", string(validFromBytes))
+ d.Set("validity_end_time", string(validToBytes))
+
+ return nil
+}
+
+func DeleteCertificate(d *schema.ResourceData, meta interface{}) error {
+ d.SetId("")
+ return nil
+}
+
+func ReadCertificate(d *schema.ResourceData, meta interface{}) error {
+
+ endTimeStr := d.Get("validity_end_time").(string)
+ endTime := time.Now()
+ err := endTime.UnmarshalText([]byte(endTimeStr))
+ if err != nil {
+ // If end time is invalid then we'll just throw away the whole
+ // thing so we can generate a new one.
+ d.SetId("")
+ return nil
+ }
+
+ earlyRenewalPeriod := time.Duration(-d.Get("early_renewal_hours").(int)) * time.Hour
+ endTime = endTime.Add(earlyRenewalPeriod)
+
+ if time.Now().After(endTime) {
+ // Treat an expired certificate as not existing, so we'll generate
+ // a new one with the next plan.
+ d.SetId("")
+ }
+
+ return nil
+}
diff --git a/builtin/providers/tls/resource_locally_signed_cert.go b/builtin/providers/tls/resource_locally_signed_cert.go
new file mode 100644
index 0000000000..39c90022f8
--- /dev/null
+++ b/builtin/providers/tls/resource_locally_signed_cert.go
@@ -0,0 +1,79 @@
+package tls
+
+import (
+ "crypto/x509"
+
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceLocallySignedCert() *schema.Resource {
+ s := resourceCertificateCommonSchema()
+
+ s["cert_request_pem"] = &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ Description: "PEM-encoded certificate request",
+ ForceNew: true,
+ StateFunc: func(v interface{}) string {
+ return hashForState(v.(string))
+ },
+ }
+
+ s["ca_key_algorithm"] = &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ Description: "Name of the algorithm used to generate the certificate's private key",
+ ForceNew: true,
+ }
+
+ s["ca_private_key_pem"] = &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ Description: "PEM-encoded CA private key used to sign the certificate",
+ ForceNew: true,
+ StateFunc: func(v interface{}) string {
+ return hashForState(v.(string))
+ },
+ }
+
+ s["ca_cert_pem"] = &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ Description: "PEM-encoded CA certificate",
+ ForceNew: true,
+ StateFunc: func(v interface{}) string {
+ return hashForState(v.(string))
+ },
+ }
+
+ return &schema.Resource{
+ Create: CreateLocallySignedCert,
+ Delete: DeleteCertificate,
+ Read: ReadCertificate,
+ Schema: s,
+ }
+}
+
+func CreateLocallySignedCert(d *schema.ResourceData, meta interface{}) error {
+ certReq, err := parseCertificateRequest(d, "cert_request_pem")
+ if err != nil {
+ return err
+ }
+ caKey, err := parsePrivateKey(d, "ca_private_key_pem", "ca_key_algorithm")
+ if err != nil {
+ return err
+ }
+ caCert, err := parseCertificate(d, "ca_cert_pem")
+ if err != nil {
+ return err
+ }
+
+ cert := x509.Certificate{
+ Subject: certReq.Subject,
+ DNSNames: certReq.DNSNames,
+ IPAddresses: certReq.IPAddresses,
+ BasicConstraintsValid: true,
+ }
+
+ return createCertificate(d, &cert, caCert, certReq.PublicKey, caKey)
+}
diff --git a/builtin/providers/tls/resource_locally_signed_cert_test.go b/builtin/providers/tls/resource_locally_signed_cert_test.go
new file mode 100644
index 0000000000..7e9688d121
--- /dev/null
+++ b/builtin/providers/tls/resource_locally_signed_cert_test.go
@@ -0,0 +1,162 @@
+package tls
+
+import (
+ "bytes"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ r "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestLocallySignedCert(t *testing.T) {
+ r.Test(t, r.TestCase{
+ Providers: testProviders,
+ Steps: []r.TestStep{
+ r.TestStep{
+ Config: fmt.Sprintf(`
+ resource "tls_locally_signed_cert" "test" {
+ cert_request_pem = < (2 * time.Minute) {
+ return fmt.Errorf("certificate validity begins more than two minutes in the past")
+ }
+ if cert.NotAfter.Sub(cert.NotBefore) != time.Hour {
+ return fmt.Errorf("certificate validity is not one hour")
+ }
+
+ caBlock, _ := pem.Decode([]byte(testCACert))
+ caCert, err := x509.ParseCertificate(caBlock.Bytes)
+ if err != nil {
+ return fmt.Errorf("error parsing ca cert: %s", err)
+ }
+ certPool := x509.NewCertPool()
+
+ // Verify certificate
+ _, err = cert.Verify(x509.VerifyOptions{Roots: certPool})
+ if err == nil {
+ return errors.New("incorrectly verified certificate")
+ } else if _, ok := err.(x509.UnknownAuthorityError); !ok {
+ return fmt.Errorf("incorrect verify error: expected UnknownAuthorityError, got %v", err)
+ }
+ certPool.AddCert(caCert)
+ if _, err = cert.Verify(x509.VerifyOptions{Roots: certPool}); err != nil {
+ return fmt.Errorf("verify failed: %s", err)
+ }
+
+ return nil
+ },
+ },
+ },
+ })
+}
diff --git a/builtin/providers/tls/resource_self_signed_cert.go b/builtin/providers/tls/resource_self_signed_cert.go
index 4055352453..29e04154db 100644
--- a/builtin/providers/tls/resource_self_signed_cert.go
+++ b/builtin/providers/tls/resource_self_signed_cert.go
@@ -1,169 +1,72 @@
package tls
import (
- "crypto/rand"
"crypto/x509"
- "encoding/pem"
"fmt"
- "math/big"
"net"
- "time"
"github.com/hashicorp/terraform/helper/schema"
)
-var keyUsages map[string]x509.KeyUsage = map[string]x509.KeyUsage{
- "digital_signature": x509.KeyUsageDigitalSignature,
- "content_commitment": x509.KeyUsageContentCommitment,
- "key_encipherment": x509.KeyUsageKeyEncipherment,
- "data_encipherment": x509.KeyUsageDataEncipherment,
- "key_agreement": x509.KeyUsageKeyAgreement,
- "cert_signing": x509.KeyUsageCertSign,
- "crl_signing": x509.KeyUsageCRLSign,
- "encipher_only": x509.KeyUsageEncipherOnly,
- "decipher_only": x509.KeyUsageDecipherOnly,
-}
-
-var extKeyUsages map[string]x509.ExtKeyUsage = map[string]x509.ExtKeyUsage{
- "any_extended": x509.ExtKeyUsageAny,
- "server_auth": x509.ExtKeyUsageServerAuth,
- "client_auth": x509.ExtKeyUsageClientAuth,
- "code_signing": x509.ExtKeyUsageCodeSigning,
- "email_protection": x509.ExtKeyUsageEmailProtection,
- "ipsec_end_system": x509.ExtKeyUsageIPSECEndSystem,
- "ipsec_tunnel": x509.ExtKeyUsageIPSECTunnel,
- "ipsec_user": x509.ExtKeyUsageIPSECUser,
- "timestamping": x509.ExtKeyUsageTimeStamping,
- "ocsp_signing": x509.ExtKeyUsageOCSPSigning,
- "microsoft_server_gated_crypto": x509.ExtKeyUsageMicrosoftServerGatedCrypto,
- "netscape_server_gated_crypto": x509.ExtKeyUsageNetscapeServerGatedCrypto,
-}
-
func resourceSelfSignedCert() *schema.Resource {
+ s := resourceCertificateCommonSchema()
+
+ s["subject"] = &schema.Schema{
+ Type: schema.TypeList,
+ Required: true,
+ Elem: nameSchema,
+ ForceNew: true,
+ }
+
+ s["dns_names"] = &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Description: "List of DNS names to use as subjects of the certificate",
+ ForceNew: true,
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ }
+
+ s["ip_addresses"] = &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Description: "List of IP addresses to use as subjects of the certificate",
+ ForceNew: true,
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ }
+
+ s["key_algorithm"] = &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ Description: "Name of the algorithm to use to generate the certificate's private key",
+ ForceNew: true,
+ }
+
+ s["private_key_pem"] = &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ Description: "PEM-encoded private key that the certificate will belong to",
+ ForceNew: true,
+ StateFunc: func(v interface{}) string {
+ return hashForState(v.(string))
+ },
+ }
+
return &schema.Resource{
Create: CreateSelfSignedCert,
- Delete: DeleteSelfSignedCert,
- Read: ReadSelfSignedCert,
-
- Schema: map[string]*schema.Schema{
-
- "dns_names": &schema.Schema{
- Type: schema.TypeList,
- Optional: true,
- Description: "List of DNS names to use as subjects of the certificate",
- ForceNew: true,
- Elem: &schema.Schema{
- Type: schema.TypeString,
- },
- },
-
- "ip_addresses": &schema.Schema{
- Type: schema.TypeList,
- Optional: true,
- Description: "List of IP addresses to use as subjects of the certificate",
- ForceNew: true,
- Elem: &schema.Schema{
- Type: schema.TypeString,
- },
- },
-
- "validity_period_hours": &schema.Schema{
- Type: schema.TypeInt,
- Required: true,
- Description: "Number of hours that the certificate will remain valid for",
- ForceNew: true,
- },
-
- "early_renewal_hours": &schema.Schema{
- Type: schema.TypeInt,
- Optional: true,
- Default: 0,
- Description: "Number of hours before the certificates expiry when a new certificate will be generated",
- ForceNew: true,
- },
-
- "is_ca_certificate": &schema.Schema{
- Type: schema.TypeBool,
- Optional: true,
- Description: "Whether the generated certificate will be usable as a CA certificate",
- ForceNew: true,
- },
-
- "allowed_uses": &schema.Schema{
- Type: schema.TypeList,
- Required: true,
- Description: "Uses that are allowed for the certificate",
- ForceNew: true,
- Elem: &schema.Schema{
- Type: schema.TypeString,
- },
- },
-
- "key_algorithm": &schema.Schema{
- Type: schema.TypeString,
- Required: true,
- Description: "Name of the algorithm to use to generate the certificate's private key",
- ForceNew: true,
- },
-
- "private_key_pem": &schema.Schema{
- Type: schema.TypeString,
- Required: true,
- Description: "PEM-encoded private key that the certificate will belong to",
- ForceNew: true,
- StateFunc: func(v interface{}) string {
- return hashForState(v.(string))
- },
- },
-
- "subject": &schema.Schema{
- Type: schema.TypeList,
- Required: true,
- Elem: nameSchema,
- ForceNew: true,
- },
-
- "cert_pem": &schema.Schema{
- Type: schema.TypeString,
- Computed: true,
- },
-
- "validity_start_time": &schema.Schema{
- Type: schema.TypeString,
- Computed: true,
- },
-
- "validity_end_time": &schema.Schema{
- Type: schema.TypeString,
- Computed: true,
- },
- },
+ Delete: DeleteCertificate,
+ Read: ReadCertificate,
+ Schema: s,
}
}
func CreateSelfSignedCert(d *schema.ResourceData, meta interface{}) error {
- keyAlgoName := d.Get("key_algorithm").(string)
- var keyFunc keyParser
- var ok bool
- if keyFunc, ok = keyParsers[keyAlgoName]; !ok {
- return fmt.Errorf("invalid key_algorithm %#v", keyAlgoName)
- }
- keyBlock, _ := pem.Decode([]byte(d.Get("private_key_pem").(string)))
- if keyBlock == nil {
- return fmt.Errorf("no PEM block found in private_key_pem")
- }
- key, err := keyFunc(keyBlock.Bytes)
+ key, err := parsePrivateKey(d, "private_key_pem", "key_algorithm")
if err != nil {
- return fmt.Errorf("failed to decode private_key_pem: %s", err)
- }
-
- notBefore := time.Now()
- notAfter := notBefore.Add(time.Duration(d.Get("validity_period_hours").(int)) * time.Hour)
-
- serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
- serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
- if err != nil {
- return fmt.Errorf("failed to generate serial number: %s", err)
+ return err
}
subjectConfs := d.Get("subject").([]interface{})
@@ -177,24 +80,10 @@ func CreateSelfSignedCert(d *schema.ResourceData, meta interface{}) error {
}
cert := x509.Certificate{
- SerialNumber: serialNumber,
Subject: *subject,
- NotBefore: notBefore,
- NotAfter: notAfter,
BasicConstraintsValid: true,
}
- keyUsesI := d.Get("allowed_uses").([]interface{})
- for _, keyUseI := range keyUsesI {
- keyUse := keyUseI.(string)
- if usage, ok := keyUsages[keyUse]; ok {
- cert.KeyUsage |= usage
- }
- if usage, ok := extKeyUsages[keyUse]; ok {
- cert.ExtKeyUsage = append(cert.ExtKeyUsage, usage)
- }
- }
-
dnsNamesI := d.Get("dns_names").([]interface{})
for _, nameI := range dnsNamesI {
cert.DNSNames = append(cert.DNSNames, nameI.(string))
@@ -208,58 +97,5 @@ func CreateSelfSignedCert(d *schema.ResourceData, meta interface{}) error {
cert.IPAddresses = append(cert.IPAddresses, ip)
}
- if d.Get("is_ca_certificate").(bool) {
- cert.IsCA = true
- }
-
- certBytes, err := x509.CreateCertificate(rand.Reader, &cert, &cert, publicKey(key), key)
- if err != nil {
- fmt.Errorf("Error creating certificate: %s", err)
- }
- certPem := string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certBytes}))
-
- validFromBytes, err := notBefore.MarshalText()
- if err != nil {
- return fmt.Errorf("error serializing validity_start_time: %s", err)
- }
- validToBytes, err := notAfter.MarshalText()
- if err != nil {
- return fmt.Errorf("error serializing validity_end_time: %s", err)
- }
-
- d.SetId(serialNumber.String())
- d.Set("cert_pem", certPem)
- d.Set("validity_start_time", string(validFromBytes))
- d.Set("validity_end_time", string(validToBytes))
-
- return nil
-}
-
-func DeleteSelfSignedCert(d *schema.ResourceData, meta interface{}) error {
- d.SetId("")
- return nil
-}
-
-func ReadSelfSignedCert(d *schema.ResourceData, meta interface{}) error {
-
- endTimeStr := d.Get("validity_end_time").(string)
- endTime := time.Now()
- err := endTime.UnmarshalText([]byte(endTimeStr))
- if err != nil {
- // If end time is invalid then we'll just throw away the whole
- // thing so we can generate a new one.
- d.SetId("")
- return nil
- }
-
- earlyRenewalPeriod := time.Duration(-d.Get("early_renewal_hours").(int)) * time.Hour
- endTime = endTime.Add(earlyRenewalPeriod)
-
- if time.Now().After(endTime) {
- // Treat an expired certificate as not existing, so we'll generate
- // a new one with the next plan.
- d.SetId("")
- }
-
- return nil
+ return createCertificate(d, &cert, &cert, publicKey(key), key)
}
diff --git a/builtin/providers/tls/util.go b/builtin/providers/tls/util.go
new file mode 100644
index 0000000000..b1ff32e5b0
--- /dev/null
+++ b/builtin/providers/tls/util.go
@@ -0,0 +1,76 @@
+package tls
+
+import (
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func decodePEM(d *schema.ResourceData, pemKey, pemType string) (*pem.Block, error) {
+ block, _ := pem.Decode([]byte(d.Get(pemKey).(string)))
+ if block == nil {
+ return nil, fmt.Errorf("no PEM block found in %s", pemKey)
+ }
+ if pemType != "" && block.Type != pemType {
+ return nil, fmt.Errorf("invalid PEM type in %s: %s", pemKey, block.Type)
+ }
+
+ return block, nil
+}
+
+func parsePrivateKey(d *schema.ResourceData, pemKey, algoKey string) (interface{}, error) {
+ algoName := d.Get(algoKey).(string)
+
+ keyFunc, ok := keyParsers[algoName]
+ if !ok {
+ return nil, fmt.Errorf("invalid %s: %#v", algoKey, algoName)
+ }
+
+ block, err := decodePEM(d, pemKey, "")
+ if err != nil {
+ return nil, err
+ }
+
+ key, err := keyFunc(block.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode %s: %s", pemKey, err)
+ }
+
+ return key, nil
+}
+
+func parseCertificate(d *schema.ResourceData, pemKey string) (*x509.Certificate, error) {
+ block, err := decodePEM(d, pemKey, "")
+ if err != nil {
+ return nil, err
+ }
+
+ certs, err := x509.ParseCertificates(block.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse %s: %s", pemKey, err)
+ }
+ if len(certs) < 1 {
+ return nil, fmt.Errorf("no certificates found in %s", pemKey)
+ }
+ if len(certs) > 1 {
+ return nil, fmt.Errorf("multiple certificates found in %s", pemKey)
+ }
+
+ return certs[0], nil
+}
+
+func parseCertificateRequest(d *schema.ResourceData, pemKey string) (*x509.CertificateRequest, error) {
+ block, err := decodePEM(d, pemKey, pemCertReqType)
+ if err != nil {
+ return nil, err
+ }
+
+ certReq, err := x509.ParseCertificateRequest(block.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse %s: %s", pemKey, err)
+ }
+
+ return certReq, nil
+}
diff --git a/website/source/docs/providers/tls/r/locally_signed_cert.html.md b/website/source/docs/providers/tls/r/locally_signed_cert.html.md
new file mode 100644
index 0000000000..c052c5ff97
--- /dev/null
+++ b/website/source/docs/providers/tls/r/locally_signed_cert.html.md
@@ -0,0 +1,118 @@
+---
+layout: "tls"
+page_title: "TLS: tls_locally_signed_cert"
+sidebar_current: "docs-tls-resourse-locally-signed-cert"
+description: |-
+ Creates a locally-signed TLS certificate in PEM format.
+---
+
+# tls\_locally\_signed\_cert
+
+Generates a TLS certificate using a *Certificate Signing Request* (CSR) and
+signs it with a provided certificate authority (CA) private key.
+
+Locally-signed certificates are generally only trusted by client software when
+set up to use the provided CA. They are normally used in development environments
+or when deployed internally to an organization.
+
+## Example Usage
+
+```
+resource "tls_locally_signed_cert" "example" {
+ cert_request_pem = "${file(\"cert_request.pem\")}"
+
+ ca_key_algorithm = "ECDSA"
+ ca_private_key_pem = "${file(\"ca_private_key.pem\")}"
+ ca_cert_pem = "${file(\"ca_cert.pem\")}"
+
+ validity_period_hours = 12
+
+ allowed_uses = [
+ "key_encipherment",
+ "digital_signature",
+ "server_auth",
+ ]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `cert_request_pem` - (Required) PEM-encoded certificate request data.
+
+* `ca_key_algorithm` - (Required) The name of the algorithm for the key provided
+ in `ca_private_key_pem`.
+
+* `ca_private_key_pem` - (Required) PEM-encoded private key data for the CA.
+ This can be read from a separate file using the ``file`` interpolation
+ function.
+
+* `ca_cert_pem` - (Required) PEM-encoded certificate data for the CA.
+
+* `validity_period_hours` - (Required) The number of hours after initial issuing that the
+ certificate will become invalid.
+
+* `allowed_uses` - (Required) List of keywords each describing a use that is permitted
+ for the issued certificate. The valid keywords are listed below.
+
+* `early_renewal_hours` - (Optional) If set, the resource will consider the certificate to
+ have expired the given number of hours before its actual expiry time. This can be useful
+ to deploy an updated certificate in advance of the expiration of the current certificate.
+ Note however that the old certificate remains valid until its true expiration time, since
+ this resource does not (and cannot) support certificate revocation. Note also that this
+ advance update can only be performed if the Terraform configuration is applied during the
+ early renewal period.
+
+* `is_ca_certificate` - (Optional) Boolean controlling whether the CA flag will be set in the
+ generated certificate. Defaults to `false`, meaning that the certificate does not represent
+ a certificate authority.
+
+The `allowed_uses` list accepts the following keywords, combining the set of flags defined by
+both [Key Usage](https://tools.ietf.org/html/rfc5280#section-4.2.1.3) and
+[Extended Key Usage](https://tools.ietf.org/html/rfc5280#section-4.2.1.12) in
+[RFC5280](https://tools.ietf.org/html/rfc5280):
+
+* `digital_signature`
+* `content_commitment`
+* `key_encipherment`
+* `data_encipherment`
+* `key_agreement`
+* `cert_signing`
+* `encipher_only`
+* `decipher_only`
+* `any_extended`
+* `server_auth`
+* `client_auth`
+* `code_signing`
+* `email_protection`
+* `ipsec_end_system`
+* `ipsec_tunnel`
+* `ipsec_user`
+* `timestamping`
+* `ocsp_signing`
+* `microsoft_server_gated_crypto`
+* `netscape_server_gated_crypto`
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `cert_pem` - The certificate data in PEM format.
+* `validity_start_time` - The time after which the certificate is valid, as an
+ [RFC3339](https://tools.ietf.org/html/rfc3339) timestamp.
+* `validity_end_time` - The time until which the certificate will remain valid, as an
+ [RFC3339](https://tools.ietf.org/html/rfc3339) timestamp.
+
+## Automatic Renewal
+
+This resource considers its instances to have been deleted after either their validity
+periods ends or the early renewal period is reached. At this time, applying the
+Terraform configuration will cause a new certificate to be generated for the instance.
+
+Therefore, in a development environment with frequent deployments, it may be convenient
+to set a relatively short expiration time and use early renewal to automatically provision
+a new certificate when the current one is about to expire.
+
+The creation of a new certificate may of course cause dependent resources to be updated
+or replaced, depending on the lifecycle rules applying to those resources.
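To make the renewal window above concrete, a small sketch (the file paths and hour values are illustrative, not taken from this patch): the certificate below is valid for 30 days, but because of `early_renewal_hours` the resource reads as absent once fewer than 48 hours of validity remain, so any apply in that final window issues a replacement.

```
resource "tls_locally_signed_cert" "renewing" {
  cert_request_pem   = "${file(\"cert_request.pem\")}"
  ca_key_algorithm   = "ECDSA"
  ca_private_key_pem = "${file(\"ca_private_key.pem\")}"
  ca_cert_pem        = "${file(\"ca_cert.pem\")}"

  # Valid for 30 days: 30 * 24 = 720 hours.
  validity_period_hours = 720

  # ReadCertificate subtracts this from the expiry time, so from hour
  # 672 onward the resource is treated as expired and regenerated on
  # the next plan/apply.
  early_renewal_hours = 48

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "server_auth",
  ]
}
```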
From ecc4ce3657504cd885dad591f63edbc1db4097c0 Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Mon, 16 Nov 2015 20:11:05 +0000
Subject: [PATCH 062/664] Converted firewall_rules rule set to a list type.
Code tidy
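The practical effect of the set-to-list conversion below is that rules are tracked by position (`rule.0`, `rule.1`, ...) in the order they are written, which is what allows the Read function to back-fill missing IDs with lookups like `rule.%d.id`. A minimal configuration sketch under that assumption; the gateway name, ports, and the `allow`/`drop` policy strings are illustrative placeholders, not values taken from this patch.

```
resource "vcd_firewall_rules" "web" {
  edge_gateway   = "Internet_01"
  default_action = "drop"

  # Stored as rule.0 in state.
  rule {
    description      = "allow-http"
    policy           = "allow"
    protocol         = "tcp"
    destination_port = "80"
    destination_ip   = "any"
    source_port      = "any"
    source_ip        = "any"
  }

  # Stored as rule.1 in state.
  rule {
    description      = "allow-https"
    policy           = "allow"
    protocol         = "tcp"
    destination_port = "443"
    destination_ip   = "any"
    source_port      = "any"
    source_ip        = "any"
  }
}
```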
---
builtin/providers/vcd/resource_vcd_dnat.go | 20 +--
.../vcd/resource_vcd_firewall_rules.go | 142 +++++++-----------
.../vcd/resource_vcd_firewall_rules_test.go | 3 +-
builtin/providers/vcd/resource_vcd_network.go | 44 +++---
builtin/providers/vcd/resource_vcd_snat.go | 20 +--
builtin/providers/vcd/resource_vcd_vapp.go | 30 ++--
builtin/providers/vcd/structure.go | 29 ++--
7 files changed, 127 insertions(+), 161 deletions(-)
diff --git a/builtin/providers/vcd/resource_vcd_dnat.go b/builtin/providers/vcd/resource_vcd_dnat.go
index edfdd69f72..9c38b0b567 100644
--- a/builtin/providers/vcd/resource_vcd_dnat.go
+++ b/builtin/providers/vcd/resource_vcd_dnat.go
@@ -41,15 +41,15 @@ func resourceVcdDNAT() *schema.Resource {
}
func resourceVcdDNATCreate(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*govcd.VCDClient)
// Multiple VCD components need to run operations on the Edge Gateway, as
// the edge gateway will throw back an error if it is already performing an
// operation, so we must wait until we can acquire a lock on the client
- vcd_client.Mutex.Lock()
- defer vcd_client.Mutex.Unlock()
+ vcdClient.Mutex.Lock()
+ defer vcdClient.Mutex.Unlock()
portString := getPortString(d.Get("port").(int))
- edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+ edgeGateway, err := vcdClient.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
if err != nil {
return fmt.Errorf("Unable to find edge gateway: %#v", err)
@@ -80,8 +80,8 @@ func resourceVcdDNATCreate(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdDNATRead(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
- e, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+ vcdClient := meta.(*govcd.VCDClient)
+ e, err := vcdClient.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
if err != nil {
return fmt.Errorf("Unable to find edge gateway: %#v", err)
@@ -106,15 +106,15 @@ func resourceVcdDNATRead(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdDNATDelete(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*govcd.VCDClient)
// Multiple VCD components need to run operations on the Edge Gateway, as
// the edge gateway will throw back an error if it is already performing an
// operation, so we must wait until we can acquire a lock on the client
- vcd_client.Mutex.Lock()
- defer vcd_client.Mutex.Unlock()
+ vcdClient.Mutex.Lock()
+ defer vcdClient.Mutex.Unlock()
portString := getPortString(d.Get("port").(int))
- edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+ edgeGateway, err := vcdClient.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
if err != nil {
return fmt.Errorf("Unable to find edge gateway: %#v", err)
diff --git a/builtin/providers/vcd/resource_vcd_firewall_rules.go b/builtin/providers/vcd/resource_vcd_firewall_rules.go
index 0af03009a4..123f9f71ae 100644
--- a/builtin/providers/vcd/resource_vcd_firewall_rules.go
+++ b/builtin/providers/vcd/resource_vcd_firewall_rules.go
@@ -1,12 +1,11 @@
package vcd
import (
- "bytes"
"fmt"
- "github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hmrc/vmware-govcd"
types "github.com/hmrc/vmware-govcd/types/v56"
+ "log"
"strings"
)
@@ -30,7 +29,7 @@ func resourceVcdFirewallRules() *schema.Resource {
},
"rule": &schema.Schema{
- Type: schema.TypeSet,
+ Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Resource{
@@ -77,29 +76,30 @@ func resourceVcdFirewallRules() *schema.Resource {
},
},
},
- Set: resourceVcdNetworkFirewallRuleHash,
},
},
}
}
func resourceVcdFirewallRulesCreate(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
- vcd_client.Mutex.Lock()
- defer vcd_client.Mutex.Unlock()
+ vcdClient := meta.(*govcd.VCDClient)
+ vcdClient.Mutex.Lock()
+ defer vcdClient.Mutex.Unlock()
- edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+ edgeGateway, err := vcdClient.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
if err != nil {
return fmt.Errorf("Unable to find edge gateway: %s", err)
}
err = retryCall(5, func() error {
edgeGateway.Refresh()
- firewallRules, _ := expandFirewallRules(d.Get("rule").(*schema.Set).List(), edgeGateway.EdgeGateway)
+ firewallRules, _ := expandFirewallRules(d, edgeGateway.EdgeGateway)
task, err := edgeGateway.CreateFirewallRules(d.Get("default_action").(string), firewallRules)
if err != nil {
+ log.Printf("[INFO] Error setting firewall rules: %s", err)
return fmt.Errorf("Error setting firewall rules: %#v", err)
}
+
return task.WaitTaskCompletion()
})
if err != nil {
@@ -112,13 +112,13 @@ func resourceVcdFirewallRulesCreate(d *schema.ResourceData, meta interface{}) er
}
func resourceFirewallRulesDelete(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
- vcd_client.Mutex.Lock()
- defer vcd_client.Mutex.Unlock()
+ vcdClient := meta.(*govcd.VCDClient)
+ vcdClient.Mutex.Lock()
+ defer vcdClient.Mutex.Unlock()
- edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+ edgeGateway, err := vcdClient.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
- firewallRules := deleteFirewallRules(d.Get("rule").(*schema.Set).List(), edgeGateway.EdgeGateway)
+ firewallRules := deleteFirewallRules(d, edgeGateway.EdgeGateway)
defaultAction := edgeGateway.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.DefaultAction
task, err := edgeGateway.CreateFirewallRules(defaultAction, firewallRules)
if err != nil {
@@ -134,28 +134,42 @@ func resourceFirewallRulesDelete(d *schema.ResourceData, meta interface{}) error
}
func resourceFirewallRulesRead(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*govcd.VCDClient)
- edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+ edgeGateway, err := vcdClient.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
if err != nil {
return fmt.Errorf("Error finding edge gateway: %#v", err)
}
+ ruleList := d.Get("rule").([]interface{})
firewallRules := *edgeGateway.EdgeGateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService
- d.Set("rule", resourceVcdFirewallRulesGather(firewallRules.FirewallRule, d.Get("rule").(*schema.Set).List()))
+ rulesCount := d.Get("rule.#").(int)
+ for i := 0; i < rulesCount; i++ {
+ prefix := fmt.Sprintf("rule.%d", i)
+ if d.Get(prefix+".id").(string) == "" {
+ log.Printf("[INFO] Rule %d has no id. Searching...", i)
+ ruleid, err := matchFirewallRule(d, prefix, firewallRules.FirewallRule)
+ if err == nil {
+ currentRule := ruleList[i].(map[string]interface{})
+ currentRule["id"] = ruleid
+ ruleList[i] = currentRule
+ }
+ }
+ }
+ d.Set("rule", ruleList)
d.Set("default_action", firewallRules.DefaultAction)
return nil
}
-func deleteFirewallRules(configured []interface{}, gateway *types.EdgeGateway) []*types.FirewallRule {
+func deleteFirewallRules(d *schema.ResourceData, gateway *types.EdgeGateway) []*types.FirewallRule {
firewallRules := gateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.FirewallRule
- fwrules := make([]*types.FirewallRule, 0, len(firewallRules)-len(configured))
+ rulesCount := d.Get("rule.#").(int)
+ fwrules := make([]*types.FirewallRule, 0, len(firewallRules)-rulesCount)
for _, f := range firewallRules {
keep := true
- for _, r := range configured {
- data := r.(map[string]interface{})
- if data["id"].(string) != f.ID {
+ for i := 0; i < rulesCount; i++ {
+ if d.Get(fmt.Sprintf("rule.%d.id", i)).(string) != f.ID {
continue
}
keep = false
@@ -167,75 +181,25 @@ func deleteFirewallRules(configured []interface{}, gateway *types.EdgeGateway) [
return fwrules
}
-func resourceVcdFirewallRulesGather(rules []*types.FirewallRule, configured []interface{}) []map[string]interface{} {
- fwrules := make([]map[string]interface{}, 0, len(configured))
+func matchFirewallRule(d *schema.ResourceData, prefix string, rules []*types.FirewallRule) (string, error) {
- for i := len(configured) - 1; i >= 0; i-- {
- data := configured[i].(map[string]interface{})
- rule, err := matchFirewallRule(data, rules)
- if err != nil {
- continue
- }
- fwrules = append(fwrules, rule)
- }
- return fwrules
-}
-
-func matchFirewallRule(data map[string]interface{}, rules []*types.FirewallRule) (map[string]interface{}, error) {
- rule := make(map[string]interface{})
for _, m := range rules {
- if data["id"].(string) == "" {
- if data["description"].(string) == m.Description &&
- data["policy"].(string) == m.Policy &&
- data["protocol"].(string) == getProtocol(*m.Protocols) &&
- data["destination_port"].(string) == getPortString(m.Port) &&
- strings.ToLower(data["destination_ip"].(string)) == strings.ToLower(m.DestinationIP) &&
- data["source_port"].(string) == getPortString(m.SourcePort) &&
- strings.ToLower(data["source_ip"].(string)) == strings.ToLower(m.SourceIP) {
- rule["id"] = m.ID
- rule["description"] = m.Description
- rule["policy"] = m.Policy
- rule["protocol"] = getProtocol(*m.Protocols)
- rule["destination_port"] = getPortString(m.Port)
- rule["destination_ip"] = strings.ToLower(m.DestinationIP)
- rule["source_port"] = getPortString(m.SourcePort)
- rule["source_ip"] = strings.ToLower(m.SourceIP)
- return rule, nil
- }
- } else {
- if data["id"].(string) == m.ID {
- rule["id"] = m.ID
- rule["description"] = m.Description
- rule["policy"] = m.Policy
- rule["protocol"] = getProtocol(*m.Protocols)
- rule["destination_port"] = getPortString(m.Port)
- rule["destination_ip"] = strings.ToLower(m.DestinationIP)
- rule["source_port"] = getPortString(m.SourcePort)
- rule["source_ip"] = strings.ToLower(m.SourceIP)
- return rule, nil
- }
+ log.Printf("[INFO] %s - %s", d.Get(prefix+".description").(string), m.Description)
+ log.Printf("[INFO] %s - %s", d.Get(prefix+".policy").(string), m.Policy)
+ log.Printf("[INFO] %s - %s", d.Get(prefix+".protocol").(string), getProtocol(*m.Protocols))
+ log.Printf("[INFO] %s - %s", d.Get(prefix+".destination_port").(string), getPortString(m.Port))
+ log.Printf("[INFO] %s - %s", strings.ToLower(d.Get(prefix+".destination_ip").(string)), strings.ToLower(m.DestinationIP))
+ log.Printf("[INFO] %s - %s", d.Get(prefix+".source_port").(string), getPortString(m.SourcePort))
+ log.Printf("[INFO] %s - %s", strings.ToLower(d.Get(prefix+".source_ip").(string)), strings.ToLower(m.SourceIP))
+ if d.Get(prefix+".description").(string) == m.Description &&
+ d.Get(prefix+".policy").(string) == m.Policy &&
+ strings.ToLower(d.Get(prefix+".protocol").(string)) == getProtocol(*m.Protocols) &&
+ strings.ToLower(d.Get(prefix+".destination_port").(string)) == getPortString(m.Port) &&
+ strings.ToLower(d.Get(prefix+".destination_ip").(string)) == strings.ToLower(m.DestinationIP) &&
+ strings.ToLower(d.Get(prefix+".source_port").(string)) == getPortString(m.SourcePort) &&
+ strings.ToLower(d.Get(prefix+".source_ip").(string)) == strings.ToLower(m.SourceIP) {
+ return m.ID, nil
}
}
- return rule, fmt.Errorf("Unable to find rule")
-}
-
-func resourceVcdNetworkFirewallRuleHash(v interface{}) int {
- var buf bytes.Buffer
- m := v.(map[string]interface{})
- buf.WriteString(fmt.Sprintf("%s-",
- strings.ToLower(m["description"].(string))))
- buf.WriteString(fmt.Sprintf("%s-",
- strings.ToLower(m["policy"].(string))))
- buf.WriteString(fmt.Sprintf("%s-",
- strings.ToLower(m["protocol"].(string))))
- buf.WriteString(fmt.Sprintf("%s-",
- strings.ToLower(m["destination_port"].(string))))
- buf.WriteString(fmt.Sprintf("%s-",
- strings.ToLower(m["destination_ip"].(string))))
- buf.WriteString(fmt.Sprintf("%s-",
- strings.ToLower(m["source_port"].(string))))
- buf.WriteString(fmt.Sprintf("%s-",
- strings.ToLower(m["source_ip"].(string))))
-
- return hashcode.String(buf.String())
+ return "", fmt.Errorf("Unable to find rule")
}
diff --git a/builtin/providers/vcd/resource_vcd_firewall_rules_test.go b/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
index 3b7a4e90a1..ef766a20c9 100644
--- a/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
+++ b/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
@@ -2,10 +2,9 @@ package vcd
import (
"fmt"
- "testing"
- //"regexp"
"log"
"os"
+ "testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
diff --git a/builtin/providers/vcd/resource_vcd_network.go b/builtin/providers/vcd/resource_vcd_network.go
index 37b9d68bbc..a44aadb1f9 100644
--- a/builtin/providers/vcd/resource_vcd_network.go
+++ b/builtin/providers/vcd/resource_vcd_network.go
@@ -95,7 +95,7 @@ func resourceVcdNetwork() *schema.Resource {
},
},
},
- Set: resourceVcdNetworkIpAddressHash,
+ Set: resourceVcdNetworkIPAddressHash,
},
"static_ip_pool": &schema.Schema{
Type: schema.TypeSet,
@@ -114,21 +114,21 @@ func resourceVcdNetwork() *schema.Resource {
},
},
},
- Set: resourceVcdNetworkIpAddressHash,
+ Set: resourceVcdNetworkIPAddressHash,
},
},
}
}
func resourceVcdNetworkCreate(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
- log.Printf("[TRACE] CLIENT: %#v", vcd_client)
- vcd_client.Mutex.Lock()
- defer vcd_client.Mutex.Unlock()
+ vcdClient := meta.(*govcd.VCDClient)
+ log.Printf("[TRACE] CLIENT: %#v", vcdClient)
+ vcdClient.Mutex.Lock()
+ defer vcdClient.Mutex.Unlock()
- edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+ edgeGateway, err := vcdClient.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
- ipRanges := expandIpRange(d.Get("static_ip_pool").(*schema.Set).List())
+ ipRanges := expandIPRange(d.Get("static_ip_pool").(*schema.Set).List())
newnetwork := &types.OrgVDCNetwork{
Xmlns: "http://www.vmware.com/vcloud/v1.5",
@@ -157,18 +157,18 @@ func resourceVcdNetworkCreate(d *schema.ResourceData, meta interface{}) error {
log.Printf("[INFO] NETWORK: %#v", newnetwork)
err = retryCall(4, func() error {
- return vcd_client.OrgVdc.CreateOrgVDCNetwork(newnetwork)
+ return vcdClient.OrgVdc.CreateOrgVDCNetwork(newnetwork)
})
if err != nil {
return fmt.Errorf("Error: %#v", err)
}
- err = vcd_client.OrgVdc.Refresh()
+ err = vcdClient.OrgVdc.Refresh()
if err != nil {
return fmt.Errorf("Error refreshing vdc: %#v", err)
}
- network, err := vcd_client.OrgVdc.FindVDCNetwork(d.Get("name").(string))
+ network, err := vcdClient.OrgVdc.FindVDCNetwork(d.Get("name").(string))
if err != nil {
return fmt.Errorf("Error finding network: %#v", err)
}
@@ -194,16 +194,16 @@ func resourceVcdNetworkCreate(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdNetworkRead(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
- log.Printf("[DEBUG] VCD Client configuration: %#v", vcd_client)
- log.Printf("[DEBUG] VCD Client configuration: %#v", vcd_client.OrgVdc)
+ vcdClient := meta.(*govcd.VCDClient)
+ log.Printf("[DEBUG] VCD Client configuration: %#v", vcdClient)
+ log.Printf("[DEBUG] VCD Client configuration: %#v", vcdClient.OrgVdc)
- err := vcd_client.OrgVdc.Refresh()
+ err := vcdClient.OrgVdc.Refresh()
if err != nil {
return fmt.Errorf("Error refreshing vdc: %#v", err)
}
- network, err := vcd_client.OrgVdc.FindVDCNetwork(d.Id())
+ network, err := vcdClient.OrgVdc.FindVDCNetwork(d.Id())
if err != nil {
log.Printf("[DEBUG] Network no longer exists. Removing from tfstate")
d.SetId("")
@@ -222,15 +222,15 @@ func resourceVcdNetworkRead(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdNetworkDelete(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
- vcd_client.Mutex.Lock()
- defer vcd_client.Mutex.Unlock()
- err := vcd_client.OrgVdc.Refresh()
+ vcdClient := meta.(*govcd.VCDClient)
+ vcdClient.Mutex.Lock()
+ defer vcdClient.Mutex.Unlock()
+ err := vcdClient.OrgVdc.Refresh()
if err != nil {
return fmt.Errorf("Error refreshing vdc: %#v", err)
}
- network, err := vcd_client.OrgVdc.FindVDCNetwork(d.Id())
+ network, err := vcdClient.OrgVdc.FindVDCNetwork(d.Id())
if err != nil {
return fmt.Errorf("Error finding network: %#v", err)
}
@@ -249,7 +249,7 @@ func resourceVcdNetworkDelete(d *schema.ResourceData, meta interface{}) error {
return nil
}
-func resourceVcdNetworkIpAddressHash(v interface{}) int {
+func resourceVcdNetworkIPAddressHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-",
diff --git a/builtin/providers/vcd/resource_vcd_snat.go b/builtin/providers/vcd/resource_vcd_snat.go
index 75c78696b4..88a7a75a5e 100644
--- a/builtin/providers/vcd/resource_vcd_snat.go
+++ b/builtin/providers/vcd/resource_vcd_snat.go
@@ -35,18 +35,18 @@ func resourceVcdSNAT() *schema.Resource {
}
func resourceVcdSNATCreate(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*govcd.VCDClient)
// Multiple VCD components need to run operations on the Edge Gateway, as
// the edge gateway will throw back an error if it is already performing an
// operation, so we must wait until we can acquire a lock on the client
- vcd_client.Mutex.Lock()
- defer vcd_client.Mutex.Unlock()
+ vcdClient.Mutex.Lock()
+ defer vcdClient.Mutex.Unlock()
// Creating a loop to offer further protection from the edge gateway erroring
// due to being busy, e.g. another person is using another client and so wouldn't be
// constrained by our lock. If the edge gateway returns with a busy error, wait
// 3 seconds and then try again. Continue until a non-busy error or success
- edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+ edgeGateway, err := vcdClient.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
if err != nil {
return fmt.Errorf("Unable to find edge gateway: %#v", err)
}
@@ -69,8 +69,8 @@ func resourceVcdSNATCreate(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdSNATRead(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
- e, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+ vcdClient := meta.(*govcd.VCDClient)
+ e, err := vcdClient.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
if err != nil {
return fmt.Errorf("Unable to find edge gateway: %#v", err)
@@ -94,14 +94,14 @@ func resourceVcdSNATRead(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdSNATDelete(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*govcd.VCDClient)
// Multiple VCD components need to run operations on the Edge Gateway, as
// the edge gateway will throw back an error if it is already performing an
// operation, so we must wait until we can acquire a lock on the client
- vcd_client.Mutex.Lock()
- defer vcd_client.Mutex.Unlock()
+ vcdClient.Mutex.Lock()
+ defer vcdClient.Mutex.Unlock()
- edgeGateway, err := vcd_client.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
+ edgeGateway, err := vcdClient.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
if err != nil {
return fmt.Errorf("Unable to find edge gateway: %#v", err)
}
diff --git a/builtin/providers/vcd/resource_vcd_vapp.go b/builtin/providers/vcd/resource_vcd_vapp.go
index 1e3e5a116c..346d9d5443 100644
--- a/builtin/providers/vcd/resource_vcd_vapp.go
+++ b/builtin/providers/vcd/resource_vcd_vapp.go
@@ -80,9 +80,9 @@ func resourceVcdVApp() *schema.Resource {
}
func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*govcd.VCDClient)
- catalog, err := vcd_client.Org.FindCatalog(d.Get("catalog_name").(string))
+ catalog, err := vcdClient.Org.FindCatalog(d.Get("catalog_name").(string))
if err != nil {
return fmt.Errorf("Error finding catalog: %#v", err)
}
@@ -99,7 +99,7 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
log.Printf("[DEBUG] VAppTemplate: %#v", vapptemplate)
var networkHref string
- net, err := vcd_client.OrgVdc.FindVDCNetwork(d.Get("network_name").(string))
+ net, err := vcdClient.OrgVdc.FindVDCNetwork(d.Get("network_name").(string))
if err != nil {
return fmt.Errorf("Error finding OrgVCD Network: %#v", err)
}
@@ -108,7 +108,7 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
} else {
networkHref = net.OrgVDCNetwork.HREF
}
- // vapptemplate := govcd.NewVAppTemplate(&vcd_client.Client)
+ // vapptemplate := govcd.NewVAppTemplate(&vcdClient.Client)
//
createvapp := &types.InstantiateVAppTemplateParams{
Ovf: "http://schemas.dmtf.org/ovf/envelope/1",
@@ -134,13 +134,13 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
}
err = retryCall(4, func() error {
- e := vcd_client.OrgVdc.InstantiateVAppTemplate(createvapp)
+ e := vcdClient.OrgVdc.InstantiateVAppTemplate(createvapp)
if e != nil {
return fmt.Errorf("Error: %#v", e)
}
- e = vcd_client.OrgVdc.Refresh()
+ e = vcdClient.OrgVdc.Refresh()
if e != nil {
return fmt.Errorf("Error: %#v", e)
}
@@ -150,7 +150,7 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
return err
}
- vapp, err := vcd_client.OrgVdc.FindVAppByName(d.Get("name").(string))
+ vapp, err := vcdClient.OrgVdc.FindVAppByName(d.Get("name").(string))
err = retryCall(4, func() error {
task, err := vapp.ChangeVMName(d.Get("name").(string))
@@ -194,8 +194,8 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdVAppUpdate(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
- vapp, err := vcd_client.OrgVdc.FindVAppByName(d.Id())
+ vcdClient := meta.(*govcd.VCDClient)
+ vapp, err := vcdClient.OrgVdc.FindVAppByName(d.Id())
if err != nil {
return fmt.Errorf("Error finding VApp: %#v", err)
@@ -209,7 +209,7 @@ func resourceVcdVAppUpdate(d *schema.ResourceData, meta interface{}) error {
if d.HasChange("metadata") {
oraw, nraw := d.GetChange("metadata")
metadata := oraw.(map[string]interface{})
- for k, _ := range metadata {
+ for k := range metadata {
task, err := vapp.DeleteMetadata(k)
if err != nil {
return fmt.Errorf("Error deleting metadata: %#v", err)
@@ -290,14 +290,14 @@ func resourceVcdVAppUpdate(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdVAppRead(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*govcd.VCDClient)
- err := vcd_client.OrgVdc.Refresh()
+ err := vcdClient.OrgVdc.Refresh()
if err != nil {
return fmt.Errorf("Error refreshing vdc: %#v", err)
}
- vapp, err := vcd_client.OrgVdc.FindVAppByName(d.Id())
+ vapp, err := vcdClient.OrgVdc.FindVAppByName(d.Id())
if err != nil {
log.Printf("[DEBUG] Unable to find vapp. Removing from tfstate")
d.SetId("")
@@ -309,8 +309,8 @@ func resourceVcdVAppRead(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdVAppDelete(d *schema.ResourceData, meta interface{}) error {
- vcd_client := meta.(*govcd.VCDClient)
- vapp, err := vcd_client.OrgVdc.FindVAppByName(d.Id())
+ vcdClient := meta.(*govcd.VCDClient)
+ vapp, err := vcdClient.OrgVdc.FindVAppByName(d.Id())
if err != nil {
return fmt.Errorf("error finding vdc: %s", err)
diff --git a/builtin/providers/vcd/structure.go b/builtin/providers/vcd/structure.go
index 7c40f70fce..d8124687a7 100644
--- a/builtin/providers/vcd/structure.go
+++ b/builtin/providers/vcd/structure.go
@@ -1,13 +1,15 @@
package vcd
import (
+ "fmt"
"github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
types "github.com/hmrc/vmware-govcd/types/v56"
"strconv"
"time"
)
-func expandIpRange(configured []interface{}) types.IPRanges {
+func expandIPRange(configured []interface{}) types.IPRanges {
ipRange := make([]*types.IPRange, 0, len(configured))
for _, ipRaw := range configured {
@@ -28,15 +30,16 @@ func expandIpRange(configured []interface{}) types.IPRanges {
return ipRanges
}
-func expandFirewallRules(configured []interface{}, gateway *types.EdgeGateway) ([]*types.FirewallRule, error) {
+func expandFirewallRules(d *schema.ResourceData, gateway *types.EdgeGateway) ([]*types.FirewallRule, error) {
//firewallRules := make([]*types.FirewallRule, 0, len(configured))
firewallRules := gateway.Configuration.EdgeGatewayServiceConfiguration.FirewallService.FirewallRule
- for i := len(configured) - 1; i >= 0; i-- {
- data := configured[i].(map[string]interface{})
+ rulesCount := d.Get("rule.#").(int)
+ for i := 0; i < rulesCount; i++ {
+ prefix := fmt.Sprintf("rule.%d", i)
var protocol *types.FirewallRuleProtocols
- switch data["protocol"].(string) {
+ switch d.Get(prefix + ".protocol").(string) {
case "tcp":
protocol = &types.FirewallRuleProtocols{
TCP: true,
@@ -58,15 +61,15 @@ func expandFirewallRules(configured []interface{}, gateway *types.EdgeGateway) (
//ID: strconv.Itoa(len(configured) - i),
IsEnabled: true,
MatchOnTranslate: false,
- Description: data["description"].(string),
- Policy: data["policy"].(string),
+ Description: d.Get(prefix + ".description").(string),
+ Policy: d.Get(prefix + ".policy").(string),
Protocols: protocol,
- Port: getNumericPort(data["destination_port"]),
- DestinationPortRange: data["destination_port"].(string),
- DestinationIP: data["destination_ip"].(string),
- SourcePort: getNumericPort(data["source_port"]),
- SourcePortRange: data["source_port"].(string),
- SourceIP: data["source_ip"].(string),
+ Port: getNumericPort(d.Get(prefix + ".destination_port")),
+ DestinationPortRange: d.Get(prefix + ".destination_port").(string),
+ DestinationIP: d.Get(prefix + ".destination_ip").(string),
+ SourcePort: getNumericPort(d.Get(prefix + ".source_port")),
+ SourcePortRange: d.Get(prefix + ".source_port").(string),
+ SourceIP: d.Get(prefix + ".source_ip").(string),
EnableLogging: false,
}
firewallRules = append(firewallRules, rule)
From f140c15039ce2e8c6fee11a2bb2b0e011a837db3 Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Tue, 17 Nov 2015 10:44:50 +0000
Subject: [PATCH 063/664] Fixed null pointer panic during firewall rules test
---
builtin/providers/vcd/resource_vcd_firewall_rules_test.go | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/vcd/resource_vcd_firewall_rules_test.go b/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
index ef766a20c9..fe41712768 100644
--- a/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
+++ b/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
@@ -78,7 +78,10 @@ func createFirewallRulesConfigs(existingRules *govcd.EdgeGateway) string {
Href: os.Getenv("VCD_URL"),
VDC: os.Getenv("VCD_VDC"),
}
- conn, _ := config.Client()
+ conn, err := config.Client()
+ if err != nil {
+ return fmt.Sprintf(testAccCheckVcdFirewallRules_add, "", "")
+ }
edgeGateway, _ := conn.OrgVdc.FindEdgeGateway(os.Getenv("VCD_EDGE_GATWEWAY"))
*existingRules = edgeGateway
log.Printf("[DEBUG] Edge gateway: %#v", edgeGateway)
From c8dfecc65ffd76600da42d0e1234ab1223efda93 Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Tue, 17 Nov 2015 11:40:37 +0000
Subject: [PATCH 064/664] Check where nested structs could possibly be nil
before trying to access their data
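For illustration only, a self-contained sketch of the guarded-access pattern this change applies; the struct names are stand-ins for the vCloud network types rather than the real govcd definitions, and the sketch also guards the innermost pointer for completeness:

package main

import "fmt"

// Stand-in shapes: any nested pointer may be nil when the API omits a section.
type ipScope struct{ Gateway string }
type ipScopes struct{ IPScope *ipScope }
type configuration struct {
	FenceMode string
	IPScopes  *ipScopes
}
type orgVDCNetwork struct{ Configuration *configuration }

// gateway checks each level before dereferencing, instead of chaining
// accesses that panic on a nil intermediate struct.
func gateway(n orgVDCNetwork) (string, bool) {
	if c := n.Configuration; c != nil && c.IPScopes != nil && c.IPScopes.IPScope != nil {
		return c.IPScopes.IPScope.Gateway, true
	}
	return "", false
}

func main() {
	fmt.Println(gateway(orgVDCNetwork{})) // "" false
	full := orgVDCNetwork{Configuration: &configuration{
		FenceMode: "natRouted",
		IPScopes:  &ipScopes{IPScope: &ipScope{Gateway: "10.10.0.1"}},
	}}
	fmt.Println(gateway(full)) // 10.10.0.1 true
}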
---
builtin/providers/vcd/resource_vcd_network.go | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/builtin/providers/vcd/resource_vcd_network.go b/builtin/providers/vcd/resource_vcd_network.go
index a44aadb1f9..3cb7b8f707 100644
--- a/builtin/providers/vcd/resource_vcd_network.go
+++ b/builtin/providers/vcd/resource_vcd_network.go
@@ -212,11 +212,15 @@ func resourceVcdNetworkRead(d *schema.ResourceData, meta interface{}) error {
d.Set("name", network.OrgVDCNetwork.Name)
d.Set("href", network.OrgVDCNetwork.HREF)
- d.Set("fence_mode", network.OrgVDCNetwork.Configuration.FenceMode)
- d.Set("gateway", network.OrgVDCNetwork.Configuration.IPScopes.IPScope.Gateway)
- d.Set("netmask", network.OrgVDCNetwork.Configuration.IPScopes.IPScope.Netmask)
- d.Set("dns1", network.OrgVDCNetwork.Configuration.IPScopes.IPScope.DNS1)
- d.Set("dns2", network.OrgVDCNetwork.Configuration.IPScopes.IPScope.DNS2)
+ if c := network.OrgVDCNetwork.Configuration; c != nil {
+ d.Set("fence_mode", c.FenceMode)
+ if c.IPScopes != nil {
+ d.Set("gateway", c.IPScopes.IPScope.Gateway)
+ d.Set("netmask", c.IPScopes.IPScope.Netmask)
+ d.Set("dns1", c.IPScopes.IPScope.DNS1)
+ d.Set("dns2", c.IPScopes.IPScope.DNS2)
+ }
+ }
return nil
}
From 7f9360797cc7106f322eafb079c4f137c1424178 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 16 Nov 2015 18:16:22 -0600
Subject: [PATCH 065/664] provider/aws: wait for ASG capacity on update
It's a bit confusing to have Terraform poll until instances come up on
ASG creation but not on update. This changes update to also poll if
min_size or desired_capacity are changed.
This changes the waiting behavior to wait for precisely the desired
number of instances instead of that number as a "minimum". I believe
this shouldn't have any undue side effects, and the behavior can still
be opted out of by setting `wait_for_capacity_timeout` to 0.
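As a rough, self-contained illustration of the new semantics (the function name here is hypothetical, not one of the provider's helpers): the check switches from a lower bound to an exact match, and the deprecated field is folded into the new one.

package main

import "fmt"

// capacitySatisfied mirrors the comparison this patch moves to: the ASG and
// ELB instance counts must equal the wanted values, not merely meet them.
func capacitySatisfied(haveASG, wantASG, haveELB, wantELB int) bool {
	return haveASG == wantASG && haveELB == wantELB
}

func main() {
	// wait_for_elb_capacity (2) plus the deprecated min_elb_capacity (0),
	// matching how the diff below folds the old field into the new one.
	wantELB := 2 + 0

	// Under the old ">=" check, 3 running instances would have passed for a
	// desired capacity of 2; now Terraform keeps waiting for exactly 2.
	fmt.Println(capacitySatisfied(3, 2, wantELB, wantELB)) // false
	fmt.Println(capacitySatisfied(2, 2, wantELB, wantELB)) // true
}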
---
.../aws/resource_aws_autoscaling_group.go | 30 +++++++++++++++----
.../resource_aws_autoscaling_group_test.go | 2 +-
.../aws/r/autoscaling_group.html.markdown | 22 +++++++++-----
3 files changed, 40 insertions(+), 14 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_autoscaling_group.go b/builtin/providers/aws/resource_aws_autoscaling_group.go
index d5a87e33b5..d9a9d7bab5 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_group.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_group.go
@@ -51,8 +51,9 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
},
"min_elb_capacity": &schema.Schema{
- Type: schema.TypeInt,
- Optional: true,
+ Type: schema.TypeInt,
+ Optional: true,
+ Deprecated: "Please use 'wait_for_elb_capacity' instead.",
},
"min_size": &schema.Schema{
@@ -136,6 +137,11 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
},
},
+ "wait_for_elb_capacity": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+
"tag": autoscalingTagsSchema(),
},
}
@@ -242,6 +248,7 @@ func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) e
func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).autoscalingconn
+ shouldWaitForCapacity := false
opts := autoscaling.UpdateAutoScalingGroupInput{
AutoScalingGroupName: aws.String(d.Id()),
@@ -253,6 +260,7 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{})
if d.HasChange("desired_capacity") {
opts.DesiredCapacity = aws.Int64(int64(d.Get("desired_capacity").(int)))
+ shouldWaitForCapacity = true
}
if d.HasChange("launch_configuration") {
@@ -261,6 +269,7 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{})
if d.HasChange("min_size") {
opts.MinSize = aws.Int64(int64(d.Get("min_size").(int)))
+ shouldWaitForCapacity = true
}
if d.HasChange("max_size") {
@@ -353,6 +362,10 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{})
}
}
+ if shouldWaitForCapacity {
+ waitForASGCapacity(d, meta)
+ }
+
return resourceAwsAutoscalingGroupRead(d, meta)
}
@@ -490,7 +503,7 @@ func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{})
// ASG before continuing. Waits up to `waitForASGCapacityTimeout` for
// "desired_capacity", or "min_size" if desired capacity is not specified.
//
-// If "min_elb_capacity" is specified, will also wait for that number of
+// If "wait_for_elb_capacity" is specified, will also wait for that number of
// instances to show up InService in all attached ELBs. See "Waiting for
// Capacity" in docs for more discussion of the feature.
func waitForASGCapacity(d *schema.ResourceData, meta interface{}) error {
@@ -498,7 +511,10 @@ func waitForASGCapacity(d *schema.ResourceData, meta interface{}) error {
if v := d.Get("desired_capacity").(int); v > 0 {
wantASG = v
}
- wantELB := d.Get("min_elb_capacity").(int)
+ wantELB := d.Get("wait_for_elb_capacity").(int)
+
+ // Covers deprecated field support
+ wantELB += d.Get("min_elb_capacity").(int)
wait, err := time.ParseDuration(d.Get("wait_for_capacity_timeout").(string))
if err != nil {
@@ -561,11 +577,13 @@ func waitForASGCapacity(d *schema.ResourceData, meta interface{}) error {
log.Printf("[DEBUG] %q Capacity: %d/%d ASG, %d/%d ELB",
d.Id(), haveASG, wantASG, haveELB, wantELB)
- if haveASG >= wantASG && haveELB >= wantELB {
+ if haveASG == wantASG && haveELB == wantELB {
return nil
}
- return fmt.Errorf("Still need to wait for more healthy instances. This could mean instances failed to launch. See Scaling History for more information.")
+ return fmt.Errorf(
+ "Still waiting for %q instances. Current/Desired: %d/%d ASG, %d/%d ELB",
+ d.Id(), haveASG, wantASG, haveELB, wantELB)
})
}
diff --git a/builtin/providers/aws/resource_aws_autoscaling_group_test.go b/builtin/providers/aws/resource_aws_autoscaling_group_test.go
index 5f87bc3d08..673bae8678 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_group_test.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_group_test.go
@@ -526,7 +526,7 @@ resource "aws_autoscaling_group" "bar" {
min_size = 2
health_check_grace_period = 300
health_check_type = "ELB"
- min_elb_capacity = 2
+ wait_for_elb_capacity = 2
force_delete = true
launch_configuration = "${aws_launch_configuration.foobar.name}"
diff --git a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown
index 6f2b0e5112..1a0a22e775 100644
--- a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown
+++ b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown
@@ -53,9 +53,6 @@ The following arguments are supported:
* `desired_capacity` - (Optional) The number of Amazon EC2 instances that
should be running in the group. (See also [Waiting for
Capacity](#waiting-for-capacity) below.)
-* `min_elb_capacity` - (Optional) Setting this will cause Terraform to wait
- for this number of healthy instances all attached load balancers.
- (See also [Waiting for Capacity](#waiting-for-capacity) below.)
* `force_delete` - (Optional) Allows deleting the autoscaling group without waiting
for all instances in the pool to terminate. You can force an autoscaling group to delete
even if it's in the process of scaling a resource. Normally, Terraform
@@ -71,6 +68,9 @@ The following arguments are supported:
wait for ASG instances to be healthy before timing out. (See also [Waiting
for Capacity](#waiting-for-capacity) below.) Setting this to "0" causes
Terraform to skip all Capacity Waiting behavior.
+* `wait_for_elb_capacity` - (Optional) Setting this will cause Terraform to wait
+ for this number of healthy instances in all attached load balancers.
+ (See also [Waiting for Capacity](#waiting-for-capacity) below.)
Tags support the following:
@@ -79,6 +79,10 @@ Tags support the following:
* `propagate_at_launch` - (Required) Enables propagation of the tag to
Amazon EC2 instances launched via this ASG
+The following fields are deprecated:
+
+* `min_elb_capacity` - Please use `wait_for_elb_capacity` instead.
+
## Attributes Reference
The following attributes are exported:
@@ -96,7 +100,7 @@ The following attributes are exported:
* `vpc_zone_identifier` - The VPC zone identifier
* `load_balancers` (Optional) The load balancer names associated with the
autoscaling group.
-
+
~> **NOTE:** When using `ELB` as the health_check_type, `health_check_grace_period` is required.
@@ -115,6 +119,10 @@ The first is default behavior. Terraform waits after ASG creation for
`min_size` (or `desired_capacity`, if specified) healthy instances to show up
in the ASG before continuing.
+If `min_size` or `desired_capacity` are changed in a subsequent update,
+Terraform will also wait for the correct number of healthy instances before
+continuing.
+
Terraform considers an instance "healthy" when the ASG reports `HealthStatus:
"Healthy"` and `LifecycleState: "InService"`. See the [AWS AutoScaling
Docs](https://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html)
@@ -130,9 +138,9 @@ Setting `wait_for_capacity_timeout` to `"0"` disables ASG Capacity waiting.
#### Waiting for ELB Capacity
The second mechanism is optional, and affects ASGs with attached Load
-Balancers. If `min_elb_capacity` is set, Terraform will wait for that number of
-Instances to be `"InService"` in all attached `load_balancers`. This can be
-used to ensure that service is being provided before Terraform moves on.
+Balancers. If `wait_for_elb_capacity` is set, Terraform will wait for that
+number of Instances to be `"InService"` in all attached `load_balancers`. This
+can be used to ensure that service is being provided before Terraform moves on.
As with ASG Capacity, Terraform will wait for up to `wait_for_capacity_timeout`
(for `"InService"` instances. If ASG creation takes more than a few minutes,
From 29dfc4322e34d1fc075a1877244ee2608bdf2b46 Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Tue, 17 Nov 2015 17:27:39 +0000
Subject: [PATCH 066/664] Add retry calls to protect against API rate limiting
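A hedged, self-contained sketch of the retry pattern this change applies; the provider's actual retryCall helper may differ in detail, and the "busy" error below is invented for illustration:

package main

import (
	"fmt"
	"time"
)

// retry runs fn up to attempts times, sleeping briefly between failures,
// approximating the retryCall(4, ...) usage introduced in this patch.
func retry(attempts int, delay time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(delay)
	}
	return fmt.Errorf("after %d attempts: %v", attempts, err)
}

func main() {
	calls := 0
	err := retry(4, 10*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return fmt.Errorf("edge gateway is busy")
		}
		return nil // e.g. the undeploy task completed successfully
	})
	fmt.Println(calls, err) // 3 <nil>
}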
---
builtin/providers/vcd/resource_vcd_vapp.go | 35 ++++++++++++----------
1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/builtin/providers/vcd/resource_vcd_vapp.go b/builtin/providers/vcd/resource_vcd_vapp.go
index 346d9d5443..d72b2cb973 100644
--- a/builtin/providers/vcd/resource_vcd_vapp.go
+++ b/builtin/providers/vcd/resource_vcd_vapp.go
@@ -313,26 +313,29 @@ func resourceVcdVAppDelete(d *schema.ResourceData, meta interface{}) error {
vapp, err := vcdClient.OrgVdc.FindVAppByName(d.Id())
if err != nil {
- return fmt.Errorf("error finding vdc: %s", err)
+ return fmt.Errorf("error finding vapp: %s", err)
}
- task, err := vapp.Undeploy()
+ err = retryCall(4, func() error {
+ task, err := vapp.Undeploy()
+ if err != nil {
+ return fmt.Errorf("Error undeploying: %#v", err)
+ }
+
+ return task.WaitTaskCompletion()
+ })
if err != nil {
- return fmt.Errorf("Error Powering Off: %#v", err)
- }
- err = task.WaitTaskCompletion()
- if err != nil {
- return fmt.Errorf("Error completing tasks: %#v", err)
+ return err
}
- task, err = vapp.Delete()
- if err != nil {
- return fmt.Errorf("Error Powering Off: %#v", err)
- }
- err = task.WaitTaskCompletion()
- if err != nil {
- return fmt.Errorf("Error completing tasks: %#v", err)
- }
+ err = retryCall(4, func() error {
+ task, err := vapp.Delete()
+ if err != nil {
+ return fmt.Errorf("Error deleting: %#v", err)
+ }
- return nil
+ return task.WaitTaskCompletion()
+ })
+
+ return err
}
From 45fe850331e3ef47fc1358e309119ec1f5386c2d Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Tue, 17 Nov 2015 21:19:20 +0000
Subject: [PATCH 067/664] trying to remove changes
---
CHANGELOG.md | 1 -
1 file changed, 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ff6d0ab1db..a34eadb58a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -51,7 +51,6 @@ IMPROVEMENTS:
* provider/digitalocean: Make user_data force a new droplet [GH-3740]
* provider/vsphere: Do not add network interfaces by default [GH-3652]
* provider/openstack: Configure Fixed IPs through ports [GH-3772]
- * provider/openstack: Specify a port ID on a Router Interface [GH-3903]
BUG FIXES:
From a5690b751007d06633c4105a0d26015e8adc72f2 Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Tue, 17 Nov 2015 22:00:46 +0000
Subject: [PATCH 068/664] removing debug print statements
---
.../vsphere/resource_vsphere_virtual_machine.go | 15 +--------------
1 file changed, 1 insertion(+), 14 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index 274a2278d2..f4fd79c905 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -381,7 +381,7 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
var mvm mo.VirtualMachine
collector := property.DefaultCollector(client.Client)
- if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore","config.extraConfig"}, &mvm); err != nil {
+ if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore"}, &mvm); err != nil {
return err
}
@@ -436,19 +436,6 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("datacenter", dc)
d.Set("memory", mvm.Summary.Config.MemorySizeMB)
d.Set("cpu", mvm.Summary.Config.NumCpu)
-
- if mvm.Config != nil && mvm.Config.ExtraConfig != nil && len(mvm.Config.ExtraConfig) > 0 {
- //TODO: can only set specific custom value, not everything
- //Would need the config here
- //custom_configs := make(map[string]types.AnyType)
- for _, v := range mvm.Config.ExtraConfig {
- value := v.GetOptionValue()
- //custom_configs[value.Key] = value.Value
- log.Printf("[DEBUG] custom configs %s,%s",value.Key, value.Value)
- }
- //d.Set("custom_configuration_parameters", custom_configs)
- }
-
d.Set("datastore", rootDatastore)
// Initialize the connection info
From b0fdf8a032808a4ed9c795e5ed17e705212528a4 Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Wed, 18 Nov 2015 12:54:18 +0000
Subject: [PATCH 069/664] Fixed failing test
---
builtin/providers/vcd/resource_vcd_firewall_rules.go | 7 -------
builtin/providers/vcd/resource_vcd_vapp_test.go | 4 ++--
2 files changed, 2 insertions(+), 9 deletions(-)
diff --git a/builtin/providers/vcd/resource_vcd_firewall_rules.go b/builtin/providers/vcd/resource_vcd_firewall_rules.go
index 123f9f71ae..ff5d249ba2 100644
--- a/builtin/providers/vcd/resource_vcd_firewall_rules.go
+++ b/builtin/providers/vcd/resource_vcd_firewall_rules.go
@@ -184,13 +184,6 @@ func deleteFirewallRules(d *schema.ResourceData, gateway *types.EdgeGateway) []*
func matchFirewallRule(d *schema.ResourceData, prefix string, rules []*types.FirewallRule) (string, error) {
for _, m := range rules {
- log.Printf("[INFO] %s - %s", d.Get(prefix+".description").(string), m.Description)
- log.Printf("[INFO] %s - %s", d.Get(prefix+".policy").(string), m.Policy)
- log.Printf("[INFO] %s - %s", d.Get(prefix+".protocol").(string), getProtocol(*m.Protocols))
- log.Printf("[INFO] %s - %s", d.Get(prefix+".destination_port").(string), getPortString(m.Port))
- log.Printf("[INFO] %s - %s", strings.ToLower(d.Get(prefix+".destination_ip").(string)), strings.ToLower(m.DestinationIP))
- log.Printf("[INFO] %s - %s", d.Get(prefix+".source_port").(string), getPortString(m.SourcePort))
- log.Printf("[INFO] %s - %s", strings.ToLower(d.Get(prefix+".source_ip").(string)), strings.ToLower(m.SourceIP))
if d.Get(prefix+".description").(string) == m.Description &&
d.Get(prefix+".policy").(string) == m.Policy &&
strings.ToLower(d.Get(prefix+".protocol").(string)) == getProtocol(*m.Protocols) &&
diff --git a/builtin/providers/vcd/resource_vcd_vapp_test.go b/builtin/providers/vcd/resource_vcd_vapp_test.go
index e4e44647a3..1ae4315e2a 100644
--- a/builtin/providers/vcd/resource_vcd_vapp_test.go
+++ b/builtin/providers/vcd/resource_vcd_vapp_test.go
@@ -32,7 +32,7 @@ func TestAccVcdVApp_PowerOff(t *testing.T) {
),
},
resource.TestStep{
- Config: testAccCheckVcdVApp_powerOff,
+ Config: fmt.Sprintf(testAccCheckVcdVApp_powerOff, os.Getenv("VCD_EDGE_GATWEWAY")),
Check: resource.ComposeTestCheckFunc(
testAccCheckVcdVAppExists("vcd_vapp.foobar", &vapp),
testAccCheckVcdVAppAttributes_off(&vapp),
@@ -83,7 +83,7 @@ func testAccCheckVcdVAppDestroy(s *terraform.State) error {
_, err := conn.OrgVdc.FindVAppByName(rs.Primary.ID)
if err == nil {
- return fmt.Errorf("VPCs still exist.")
+ return fmt.Errorf("VPCs still exist")
}
return nil
From f9dd42ddce62453ab5700b92caa9b5ac9d76d000 Mon Sep 17 00:00:00 2001
From: Joe Topjian
Date: Sat, 14 Nov 2015 19:47:37 +0000
Subject: [PATCH 070/664] provider/openstack: Add State Change support to LBaaS
Resources
This commit adds State Change support to the LBaaS resources which should
help with clean terminations.
It also adds an acceptance test that builds out a 2-node load balancing
service.
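A compact, self-contained sketch of the StateChangeConf polling this relies on; the refresh function and states are invented for illustration, while the field layout matches the helper/resource package as used in this diff (Target is a single string in this vintage):

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

func main() {
	created := time.Now()

	// A StateRefreshFunc returns the object, its current state string, and an
	// error; WaitForState polls it until Target is reached or Timeout expires.
	refresh := func() (interface{}, string, error) {
		if time.Since(created) < 150*time.Millisecond {
			return struct{}{}, "PENDING_CREATE", nil
		}
		return struct{}{}, "ACTIVE", nil
	}

	stateConf := &resource.StateChangeConf{
		Pending:    []string{"PENDING_CREATE"},
		Target:     "ACTIVE",
		Refresh:    refresh,
		Timeout:    2 * time.Minute,
		Delay:      50 * time.Millisecond,
		MinTimeout: 50 * time.Millisecond,
	}

	_, err := stateConf.WaitForState()
	fmt.Println(err) // <nil> once the refresh reports ACTIVE
}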
---
.../resource_openstack_lb_monitor_v1.go | 87 ++++++++++++-
.../resource_openstack_lb_pool_v1.go | 81 +++++++++++-
.../resource_openstack_lb_pool_v1_test.go | 120 ++++++++++++++++++
.../openstack/resource_openstack_lb_vip_v1.go | 80 +++++++++++-
4 files changed, 365 insertions(+), 3 deletions(-)
diff --git a/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go b/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go
index 8774dadca0..bca89a2d49 100644
--- a/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go
+++ b/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go
@@ -4,8 +4,12 @@ import (
"fmt"
"log"
"strconv"
+ "time"
+ "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
+
+ "github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors"
)
@@ -108,6 +112,22 @@ func resourceLBMonitorV1Create(d *schema.ResourceData, meta interface{}) error {
}
log.Printf("[INFO] LB Monitor ID: %s", m.ID)
+ log.Printf("[DEBUG] Waiting for OpenStack LB Monitor (%s) to become available.", m.ID)
+
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"PENDING"},
+ Target: "ACTIVE",
+ Refresh: waitForLBMonitorActive(networkingClient, m.ID),
+ Timeout: 2 * time.Minute,
+ Delay: 5 * time.Second,
+ MinTimeout: 3 * time.Second,
+ }
+
+ _, err = stateConf.WaitForState()
+ if err != nil {
+ return err
+ }
+
d.SetId(m.ID)
return resourceLBMonitorV1Read(d, meta)
@@ -184,7 +204,16 @@ func resourceLBMonitorV1Delete(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("Error creating OpenStack networking client: %s", err)
}
- err = monitors.Delete(networkingClient, d.Id()).ExtractErr()
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"ACTIVE", "PENDING"},
+ Target: "DELETED",
+ Refresh: waitForLBMonitorDelete(networkingClient, d.Id()),
+ Timeout: 2 * time.Minute,
+ Delay: 5 * time.Second,
+ MinTimeout: 3 * time.Second,
+ }
+
+ _, err = stateConf.WaitForState()
if err != nil {
return fmt.Errorf("Error deleting OpenStack LB Monitor: %s", err)
}
@@ -192,3 +221,59 @@ func resourceLBMonitorV1Delete(d *schema.ResourceData, meta interface{}) error {
d.SetId("")
return nil
}
+
+func waitForLBMonitorActive(networkingClient *gophercloud.ServiceClient, monitorId string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ m, err := monitors.Get(networkingClient, monitorId).Extract()
+ if err != nil {
+ return nil, "", err
+ }
+
+ // The monitor resource has no Status attribute, so a successful Get is the best we can do
+ log.Printf("[DEBUG] OpenStack LB Monitor: %+v", m)
+ return m, "ACTIVE", nil
+ }
+}
+
+func waitForLBMonitorDelete(networkingClient *gophercloud.ServiceClient, monitorId string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ log.Printf("[DEBUG] Attempting to delete OpenStack LB Monitor %s", monitorId)
+
+ m, err := monitors.Get(networkingClient, monitorId).Extract()
+ if err != nil {
+ errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError)
+ if !ok {
+ return m, "ACTIVE", err
+ }
+ if errCode.Actual == 404 {
+ log.Printf("[DEBUG] Successfully deleted OpenStack LB Monitor %s", monitorId)
+ return m, "DELETED", nil
+ }
+ if errCode.Actual == 409 {
+ log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId)
+ return m, "PENDING", nil
+ }
+ }
+
+ log.Printf("[DEBUG] OpenStack LB Monitor: %+v", m)
+ err = monitors.Delete(networkingClient, monitorId).ExtractErr()
+ if err != nil {
+ errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError)
+ if !ok {
+ return m, "ACTIVE", err
+ }
+ if errCode.Actual == 404 {
+ log.Printf("[DEBUG] Successfully deleted OpenStack LB Monitor %s", monitorId)
+ return m, "DELETED", nil
+ }
+ if errCode.Actual == 409 {
+ log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId)
+ return m, "PENDING", nil
+ }
+ }
+
+ log.Printf("[DEBUG] OpenStack LB Monitor %s still active.", monitorId)
+ return m, "ACTIVE", nil
+ }
+
+}
diff --git a/builtin/providers/openstack/resource_openstack_lb_pool_v1.go b/builtin/providers/openstack/resource_openstack_lb_pool_v1.go
index 64e0436dbc..21177fbf26 100644
--- a/builtin/providers/openstack/resource_openstack_lb_pool_v1.go
+++ b/builtin/providers/openstack/resource_openstack_lb_pool_v1.go
@@ -4,9 +4,13 @@ import (
"bytes"
"fmt"
"log"
+ "time"
"github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
+
+ "github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members"
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools"
"github.com/rackspace/gophercloud/pagination"
@@ -123,6 +127,21 @@ func resourceLBPoolV1Create(d *schema.ResourceData, meta interface{}) error {
}
log.Printf("[INFO] LB Pool ID: %s", p.ID)
+ log.Printf("[DEBUG] Waiting for OpenStack LB pool (%s) to become available.", p.ID)
+
+ stateConf := &resource.StateChangeConf{
+ Target: "ACTIVE",
+ Refresh: waitForLBPoolActive(networkingClient, p.ID),
+ Timeout: 2 * time.Minute,
+ Delay: 5 * time.Second,
+ MinTimeout: 3 * time.Second,
+ }
+
+ _, err = stateConf.WaitForState()
+ if err != nil {
+ return err
+ }
+
d.SetId(p.ID)
if mIDs := resourcePoolMonitorIDsV1(d); mIDs != nil {
@@ -273,7 +292,16 @@ func resourceLBPoolV1Delete(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("Error creating OpenStack networking client: %s", err)
}
- err = pools.Delete(networkingClient, d.Id()).ExtractErr()
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"ACTIVE"},
+ Target: "DELETED",
+ Refresh: waitForLBPoolDelete(networkingClient, d.Id()),
+ Timeout: 2 * time.Minute,
+ Delay: 5 * time.Second,
+ MinTimeout: 3 * time.Second,
+ }
+
+ _, err = stateConf.WaitForState()
if err != nil {
return fmt.Errorf("Error deleting OpenStack LB Pool: %s", err)
}
@@ -326,3 +354,54 @@ func resourceLBMemberV1Hash(v interface{}) int {
return hashcode.String(buf.String())
}
+
+func waitForLBPoolActive(networkingClient *gophercloud.ServiceClient, poolId string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ p, err := pools.Get(networkingClient, poolId).Extract()
+ if err != nil {
+ return nil, "", err
+ }
+
+ log.Printf("[DEBUG] OpenStack LB Pool: %+v", p)
+ if p.Status == "ACTIVE" {
+ return p, "ACTIVE", nil
+ }
+
+ return p, p.Status, nil
+ }
+}
+
+func waitForLBPoolDelete(networkingClient *gophercloud.ServiceClient, poolId string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ log.Printf("[DEBUG] Attempting to delete OpenStack LB Pool %s", poolId)
+
+ p, err := pools.Get(networkingClient, poolId).Extract()
+ if err != nil {
+ errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError)
+ if !ok {
+ return p, "ACTIVE", err
+ }
+ if errCode.Actual == 404 {
+ log.Printf("[DEBUG] Successfully deleted OpenStack LB Pool %s", poolId)
+ return p, "DELETED", nil
+ }
+ }
+
+ log.Printf("[DEBUG] OpenStack LB Pool: %+v", p)
+ err = pools.Delete(networkingClient, poolId).ExtractErr()
+ if err != nil {
+ errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError)
+ if !ok {
+ return p, "ACTIVE", err
+ }
+ if errCode.Actual == 404 {
+ log.Printf("[DEBUG] Successfully deleted OpenStack LB Pool %s", poolId)
+ return p, "DELETED", nil
+ }
+ }
+
+ log.Printf("[DEBUG] OpenStack LB Pool %s still active.", poolId)
+ return p, "ACTIVE", nil
+ }
+
+}
diff --git a/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go b/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go
index 1889c23845..104e359485 100644
--- a/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go
+++ b/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go
@@ -7,7 +7,13 @@ import (
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
+ "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups"
+ "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+ "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors"
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools"
+ "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips"
+ "github.com/rackspace/gophercloud/openstack/networking/v2/networks"
+ "github.com/rackspace/gophercloud/openstack/networking/v2/subnets"
)
func TestAccLBV1Pool_basic(t *testing.T) {
@@ -34,6 +40,37 @@ func TestAccLBV1Pool_basic(t *testing.T) {
})
}
+func TestAccLBV1Pool_fullstack(t *testing.T) {
+ var instance1, instance2 servers.Server
+ var monitor monitors.Monitor
+ var network networks.Network
+ var pool pools.Pool
+ var secgroup secgroups.SecurityGroup
+ var subnet subnets.Subnet
+ var vip vips.VirtualIP
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckLBV1PoolDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccLBV1Pool_fullstack,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckNetworkingV2NetworkExists(t, "openstack_networking_network_v2.network_1", &network),
+ testAccCheckNetworkingV2SubnetExists(t, "openstack_networking_subnet_v2.subnet_1", &subnet),
+ testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.secgroup_1", &secgroup),
+ testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.instance_1", &instance1),
+ testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.instance_2", &instance2),
+ testAccCheckLBV1PoolExists(t, "openstack_lb_pool_v1.pool_1", &pool),
+ testAccCheckLBV1MonitorExists(t, "openstack_lb_monitor_v1.monitor_1", &monitor),
+ testAccCheckLBV1VIPExists(t, "openstack_lb_vip_v1.vip_1", &vip),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckLBV1PoolDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
networkingClient, err := config.networkingV2Client(OS_REGION_NAME)
@@ -132,3 +169,86 @@ var testAccLBV1Pool_update = fmt.Sprintf(`
lb_method = "ROUND_ROBIN"
}`,
OS_REGION_NAME, OS_REGION_NAME, OS_REGION_NAME)
+
+var testAccLBV1Pool_fullstack = fmt.Sprintf(`
+ resource "openstack_networking_network_v2" "network_1" {
+ name = "network_1"
+ admin_state_up = "true"
+ }
+
+ resource "openstack_networking_subnet_v2" "subnet_1" {
+ network_id = "${openstack_networking_network_v2.network_1.id}"
+ cidr = "192.168.199.0/24"
+ ip_version = 4
+ }
+
+ resource "openstack_compute_secgroup_v2" "secgroup_1" {
+ name = "secgroup_1"
+ description = "Rules for secgroup_1"
+
+ rule {
+ from_port = -1
+ to_port = -1
+ ip_protocol = "icmp"
+ cidr = "0.0.0.0/0"
+ }
+
+ rule {
+ from_port = 80
+ to_port = 80
+ ip_protocol = "tcp"
+ cidr = "0.0.0.0/0"
+ }
+ }
+
+ resource "openstack_compute_instance_v2" "instance_1" {
+ name = "instance_1"
+ security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"]
+ network {
+ uuid = "${openstack_networking_network_v2.network_1.id}"
+ }
+ }
+
+ resource "openstack_compute_instance_v2" "instance_2" {
+ name = "instance_2"
+ security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"]
+ network {
+ uuid = "${openstack_networking_network_v2.network_1.id}"
+ }
+ }
+
+ resource "openstack_lb_monitor_v1" "monitor_1" {
+ type = "TCP"
+ delay = 30
+ timeout = 5
+ max_retries = 3
+ admin_state_up = "true"
+ }
+
+ resource "openstack_lb_pool_v1" "pool_1" {
+ name = "pool_1"
+ protocol = "TCP"
+ subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
+ lb_method = "ROUND_ROBIN"
+ monitor_ids = ["${openstack_lb_monitor_v1.monitor_1.id}"]
+
+ member {
+ address = "${openstack_compute_instance_v2.instance_1.access_ip_v4}"
+ port = 80
+ admin_state_up = "true"
+ }
+
+ member {
+ address = "${openstack_compute_instance_v2.instance_2.access_ip_v4}"
+ port = 80
+ admin_state_up = "true"
+ }
+ }
+
+ resource "openstack_lb_vip_v1" "vip_1" {
+ name = "vip_1"
+ subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
+ protocol = "TCP"
+ port = 80
+ pool_id = "${openstack_lb_pool_v1.pool_1.id}"
+ }`)
diff --git a/builtin/providers/openstack/resource_openstack_lb_vip_v1.go b/builtin/providers/openstack/resource_openstack_lb_vip_v1.go
index dd165df772..3955282c96 100644
--- a/builtin/providers/openstack/resource_openstack_lb_vip_v1.go
+++ b/builtin/providers/openstack/resource_openstack_lb_vip_v1.go
@@ -3,7 +3,9 @@ package openstack
import (
"fmt"
"log"
+ "time"
+ "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
@@ -128,6 +130,22 @@ func resourceLBVipV1Create(d *schema.ResourceData, meta interface{}) error {
}
log.Printf("[INFO] LB VIP ID: %s", p.ID)
+ log.Printf("[DEBUG] Waiting for OpenStack LB VIP (%s) to become available.", p.ID)
+
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"PENDING_CREATE"},
+ Target: "ACTIVE",
+ Refresh: waitForLBVIPActive(networkingClient, p.ID),
+ Timeout: 2 * time.Minute,
+ Delay: 5 * time.Second,
+ MinTimeout: 3 * time.Second,
+ }
+
+ _, err = stateConf.WaitForState()
+ if err != nil {
+ return err
+ }
+
floatingIP := d.Get("floating_ip").(string)
if floatingIP != "" {
lbVipV1AssignFloatingIP(floatingIP, p.PortID, networkingClient)
@@ -245,7 +263,16 @@ func resourceLBVipV1Delete(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("Error creating OpenStack networking client: %s", err)
}
- err = vips.Delete(networkingClient, d.Id()).ExtractErr()
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"ACTIVE"},
+ Target: "DELETED",
+ Refresh: waitForLBVIPDelete(networkingClient, d.Id()),
+ Timeout: 2 * time.Minute,
+ Delay: 5 * time.Second,
+ MinTimeout: 3 * time.Second,
+ }
+
+ _, err = stateConf.WaitForState()
if err != nil {
return fmt.Errorf("Error deleting OpenStack LB VIP: %s", err)
}
@@ -298,3 +325,54 @@ func lbVipV1AssignFloatingIP(floatingIP, portID string, networkingClient *gopher
return nil
}
+
+func waitForLBVIPActive(networkingClient *gophercloud.ServiceClient, vipId string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ p, err := vips.Get(networkingClient, vipId).Extract()
+ if err != nil {
+ return nil, "", err
+ }
+
+ log.Printf("[DEBUG] OpenStack LB VIP: %+v", p)
+ if p.Status == "ACTIVE" {
+ return p, "ACTIVE", nil
+ }
+
+ return p, p.Status, nil
+ }
+}
+
+func waitForLBVIPDelete(networkingClient *gophercloud.ServiceClient, vipId string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ log.Printf("[DEBUG] Attempting to delete OpenStack LB VIP %s", vipId)
+
+ p, err := vips.Get(networkingClient, vipId).Extract()
+ if err != nil {
+ errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError)
+ if !ok {
+ return p, "ACTIVE", err
+ }
+ if errCode.Actual == 404 {
+ log.Printf("[DEBUG] Successfully deleted OpenStack LB VIP %s", vipId)
+ return p, "DELETED", nil
+ }
+ }
+
+ log.Printf("[DEBUG] OpenStack LB VIP: %+v", p)
+ err = vips.Delete(networkingClient, vipId).ExtractErr()
+ if err != nil {
+ errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError)
+ if !ok {
+ return p, "ACTIVE", err
+ }
+ if errCode.Actual == 404 {
+ log.Printf("[DEBUG] Successfully deleted OpenStack LB VIP %s", vipId)
+ return p, "DELETED", nil
+ }
+ }
+
+ log.Printf("[DEBUG] OpenStack LB VIP %s still active.", vipId)
+ return p, "ACTIVE", nil
+ }
+
+}
From 7bf02243a1a98e20dfe2c73d79f8480d806d2566 Mon Sep 17 00:00:00 2001
From: Takaaki Furukawa
Date: Sun, 1 Nov 2015 23:07:23 +0900
Subject: [PATCH 071/664] rename vcenter_server config parameter to something
clearer
---
builtin/providers/vsphere/config.go | 4 ++--
builtin/providers/vsphere/provider.go | 8 ++++----
builtin/providers/vsphere/provider_test.go | 4 ++--
website/source/docs/providers/vsphere/index.html.markdown | 6 +++---
4 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/builtin/providers/vsphere/config.go b/builtin/providers/vsphere/config.go
index 1f6af7ffd6..06deedaebb 100644
--- a/builtin/providers/vsphere/config.go
+++ b/builtin/providers/vsphere/config.go
@@ -16,12 +16,12 @@ const (
type Config struct {
User string
Password string
- VCenterServer string
+ VSphereServer string
}
// Client() returns a new client for accessing VMWare vSphere.
func (c *Config) Client() (*govmomi.Client, error) {
- u, err := url.Parse("https://" + c.VCenterServer + "/sdk")
+ u, err := url.Parse("https://" + c.VSphereServer + "/sdk")
if err != nil {
return nil, fmt.Errorf("Error parse url: %s", err)
}
diff --git a/builtin/providers/vsphere/provider.go b/builtin/providers/vsphere/provider.go
index 4dce81a9d6..9a749a127b 100644
--- a/builtin/providers/vsphere/provider.go
+++ b/builtin/providers/vsphere/provider.go
@@ -23,11 +23,11 @@ func Provider() terraform.ResourceProvider {
Description: "The user password for vSphere API operations.",
},
- "vcenter_server": &schema.Schema{
+ "vsphere_server": &schema.Schema{
Type: schema.TypeString,
Required: true,
- DefaultFunc: schema.EnvDefaultFunc("VSPHERE_VCENTER", nil),
- Description: "The vCenter Server name for vSphere API operations.",
+ DefaultFunc: schema.EnvDefaultFunc("VSPHERE_SERVER", nil),
+ Description: "The vSphere Server name for vSphere API operations.",
},
},
@@ -43,7 +43,7 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
config := Config{
User: d.Get("user").(string),
Password: d.Get("password").(string),
- VCenterServer: d.Get("vcenter_server").(string),
+ VSphereServer: d.Get("vsphere_server").(string),
}
return config.Client()
diff --git a/builtin/providers/vsphere/provider_test.go b/builtin/providers/vsphere/provider_test.go
index bb8e4dc55f..ee6995ed87 100644
--- a/builtin/providers/vsphere/provider_test.go
+++ b/builtin/providers/vsphere/provider_test.go
@@ -37,7 +37,7 @@ func testAccPreCheck(t *testing.T) {
t.Fatal("VSPHERE_PASSWORD must be set for acceptance tests")
}
- if v := os.Getenv("VSPHERE_VCENTER"); v == "" {
- t.Fatal("VSPHERE_VCENTER must be set for acceptance tests")
+ if v := os.Getenv("VSPHERE_SERVER"); v == "" {
+ t.Fatal("VSPHERE_SERVER must be set for acceptance tests")
}
}
diff --git a/website/source/docs/providers/vsphere/index.html.markdown b/website/source/docs/providers/vsphere/index.html.markdown
index 17448b024f..7c410ae85f 100644
--- a/website/source/docs/providers/vsphere/index.html.markdown
+++ b/website/source/docs/providers/vsphere/index.html.markdown
@@ -25,7 +25,7 @@ therefore may undergo significant changes as the community improves it.
provider "vsphere" {
user = "${var.vsphere_user}"
password = "${var.vsphere_password}"
- vcenter_server = "${var.vsphere_vcenter_server}"
+ vsphere_server = "${var.vsphere_server}"
}
# Create a virtual machine
@@ -53,7 +53,7 @@ The following arguments are used to configure the vSphere Provider:
be specified with the `VSPHERE_USER` environment variable.
* `password` - (Required) This is the password for vSphere API operations. Can
also be specified with the `VSPHERE_PASSWORD` environment variable.
-* `vcenter_server` - (Required) This is the vCenter server name for vSphere API
- operations. Can also be specified with the `VSPHERE_VCENTER` environment
+* `vsphere_server` - (Required) This is the vCenter server name for vSphere API
+ operations. Can also be specified with the `VSPHERE_SERVER` environment
variable.
From ed3f54cc47810d291e989bada385f486f1138a8c Mon Sep 17 00:00:00 2001
From: Julien Fabre
Date: Fri, 20 Nov 2015 16:48:48 +0100
Subject: [PATCH 072/664] Add AWS Classiclink for AWS VPC resource
---
builtin/providers/aws/resource_aws_vpc.go | 50 +++++++++++++++++++
.../providers/aws/resource_aws_vpc_test.go | 25 ++++++++++
.../docs/providers/aws/r/vpc.html.markdown | 2 +
3 files changed, 77 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_vpc.go b/builtin/providers/aws/resource_aws_vpc.go
index 0de908f0d0..03beb8a78b 100644
--- a/builtin/providers/aws/resource_aws_vpc.go
+++ b/builtin/providers/aws/resource_aws_vpc.go
@@ -55,6 +55,12 @@ func resourceAwsVpc() *schema.Resource {
Computed: true,
},
+ "enable_classiclink": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Computed: true,
+ },
+
"main_route_table_id": &schema.Schema{
Type: schema.TypeString,
Computed: true,
@@ -170,6 +176,22 @@ func resourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error {
}
d.Set("enable_dns_hostnames", *resp.EnableDnsHostnames)
+ DescribeClassiclinkOpts := &ec2.DescribeVpcClassicLinkInput{
+ VpcIds: []*string{ &vpcid },
+ }
+ respClassiclink, err := conn.DescribeVpcClassicLink(DescribeClassiclinkOpts)
+ if err != nil {
+ return err
+ }
+ classiclink_enabled := false
+ for _, v := range respClassiclink.Vpcs {
+ if *v.VpcId == vpcid {
+ classiclink_enabled = *v.ClassicLinkEnabled
+ break
+ }
+ }
+ d.Set("enable_classiclink", classiclink_enabled)
+
// Get the main routing table for this VPC
// Really Ugly need to make this better - rmenn
filter1 := &ec2.Filter{
@@ -241,6 +263,34 @@ func resourceAwsVpcUpdate(d *schema.ResourceData, meta interface{}) error {
d.SetPartial("enable_dns_support")
}
+ if d.HasChange("enable_classiclink") {
+ val := d.Get("enable_classiclink").(bool)
+
+ if val {
+ modifyOpts := &ec2.EnableVpcClassicLinkInput{
+ VpcId: &vpcid,
+ }
+ log.Printf(
+ "[INFO] Modifying enable_classiclink vpc attribute for %s: %#v",
+ d.Id(), modifyOpts)
+ if _, err := conn.EnableVpcClassicLink(modifyOpts); err != nil {
+ return err
+ }
+ } else {
+ modifyOpts := &ec2.DisableVpcClassicLinkInput{
+ VpcId: &vpcid,
+ }
+ log.Printf(
+ "[INFO] Modifying enable_classiclink vpc attribute for %s: %#v",
+ d.Id(), modifyOpts)
+ if _, err := conn.DisableVpcClassicLink(modifyOpts); err != nil {
+ return err
+ }
+ }
+
+ d.SetPartial("enable_classiclink")
+ }
+
if err := setTags(conn, d); err != nil {
return err
} else {
diff --git a/builtin/providers/aws/resource_aws_vpc_test.go b/builtin/providers/aws/resource_aws_vpc_test.go
index e877621151..cd01bbf5d1 100644
--- a/builtin/providers/aws/resource_aws_vpc_test.go
+++ b/builtin/providers/aws/resource_aws_vpc_test.go
@@ -206,6 +206,23 @@ func TestAccAWSVpc_bothDnsOptionsSet(t *testing.T) {
})
}
+func TestAccAWSVpc_classiclinkOptionSet(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckVpcDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccVpcConfig_ClassiclinkOption,
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(
+ "aws_vpc.bar", "enable_classiclink", "true"),
+ ),
+ },
+ },
+ })
+}
+
const testAccVpcConfig = `
resource "aws_vpc" "foo" {
cidr_block = "10.1.0.0/16"
@@ -254,3 +271,11 @@ resource "aws_vpc" "bar" {
enable_dns_support = true
}
`
+
+const testAccVpcConfig_ClassiclinkOption = `
+resource "aws_vpc" "bar" {
+ cidr_block = "172.2.0.0/16"
+
+ enable_classiclink = true
+}
+`
diff --git a/website/source/docs/providers/aws/r/vpc.html.markdown b/website/source/docs/providers/aws/r/vpc.html.markdown
index 48e56d340d..0e88ea3098 100644
--- a/website/source/docs/providers/aws/r/vpc.html.markdown
+++ b/website/source/docs/providers/aws/r/vpc.html.markdown
@@ -41,6 +41,7 @@ The following arguments are supported:
* `instance_tenancy` - (Optional) A tenancy option for instances launched into the VPC
* `enable_dns_support` - (Optional) A boolean flag to enable/disable DNS support in the VPC. Defaults true.
* `enable_dns_hostnames` - (Optional) A boolean flag to enable/disable DNS hostnames in the VPC. Defaults false.
+* `enable_classiclink` - (Optional) A boolean flag to enable/disable ClassicLink for the VPC. Defaults false.
* `tags` - (Optional) A mapping of tags to assign to the resource.
## Attributes Reference
@@ -52,6 +53,7 @@ The following attributes are exported:
* `instance_tenancy` - Tenancy of instances spin up within VPC.
* `enable_dns_support` - Whether or not the VPC has DNS support
* `enable_dns_hostnames` - Whether or not the VPC has DNS hostname support
+* `enable_classiclink` - Whether or not the VPC has Classiclink enabled
* `main_route_table_id` - The ID of the main route table associated with
this VPC. Note that you can change a VPC's main route table by using an
[`aws_main_route_table_association`](/docs/providers/aws/r/main_route_table_assoc.html).
From f1c2be977293fdc578a4084a9f17f51daec24bc7 Mon Sep 17 00:00:00 2001
From: Nicki Watt
Date: Wed, 18 Nov 2015 23:56:17 +0000
Subject: [PATCH 073/664] Make maxRetryTimeout (in seconds) configurable
---
builtin/providers/vcd/config.go | 22 ++++++++++------
builtin/providers/vcd/provider.go | 19 ++++++++++----
builtin/providers/vcd/resource_vcd_dnat.go | 11 ++++----
.../providers/vcd/resource_vcd_dnat_test.go | 4 +--
.../vcd/resource_vcd_firewall_rules.go | 9 +++----
.../vcd/resource_vcd_firewall_rules_test.go | 3 ++-
builtin/providers/vcd/resource_vcd_network.go | 13 +++++-----
.../vcd/resource_vcd_network_test.go | 4 +--
builtin/providers/vcd/resource_vcd_snat.go | 11 ++++----
.../providers/vcd/resource_vcd_snat_test.go | 4 +--
builtin/providers/vcd/resource_vcd_vapp.go | 25 +++++++++----------
.../providers/vcd/resource_vcd_vapp_test.go | 4 +--
builtin/providers/vcd/structure.go | 4 +--
13 files changed, 73 insertions(+), 60 deletions(-)
diff --git a/builtin/providers/vcd/config.go b/builtin/providers/vcd/config.go
index b5da76dba5..c6b5ba509a 100644
--- a/builtin/providers/vcd/config.go
+++ b/builtin/providers/vcd/config.go
@@ -8,20 +8,28 @@ import (
)
type Config struct {
- User string
- Password string
- Org string
- Href string
- VDC string
+ User string
+ Password string
+ Org string
+ Href string
+ VDC string
+ MaxRetryTimeout int
}
-func (c *Config) Client() (*govcd.VCDClient, error) {
+type VCDClient struct {
+ *govcd.VCDClient
+ MaxRetryTimeout int
+}
+
+func (c *Config) Client() (*VCDClient, error) {
u, err := url.ParseRequestURI(c.Href)
if err != nil {
return nil, fmt.Errorf("Something went wrong: %s", err)
}
- vcdclient := govcd.NewVCDClient(*u)
+ vcdclient := &VCDClient{
+ govcd.NewVCDClient(*u),
+ c.MaxRetryTimeout}
org, vcd, err := vcdclient.Authenticate(c.User, c.Password, c.Org, c.VDC)
if err != nil {
return nil, fmt.Errorf("Something went wrong: %s", err)
diff --git a/builtin/providers/vcd/provider.go b/builtin/providers/vcd/provider.go
index c9849be356..0e3d48d6c3 100644
--- a/builtin/providers/vcd/provider.go
+++ b/builtin/providers/vcd/provider.go
@@ -36,12 +36,20 @@ func Provider() terraform.ResourceProvider {
DefaultFunc: schema.EnvDefaultFunc("VCD_URL", nil),
Description: "The vcd url for vcd API operations.",
},
+
"vdc": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("VCD_VDC", ""),
Description: "The name of the VDC to run operations on",
},
+
+ "maxRetryTimeout": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ DefaultFunc: schema.EnvDefaultFunc("VCD_MAX_RETRY_TIMEOUT", 30),
+ Description: "Max num seconds to wait for successful response when operating on resources within vCloud (defaults to 30)",
+ },
},
ResourcesMap: map[string]*schema.Resource{
@@ -58,11 +66,12 @@ func Provider() terraform.ResourceProvider {
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
config := Config{
- User: d.Get("user").(string),
- Password: d.Get("password").(string),
- Org: d.Get("org").(string),
- Href: d.Get("url").(string),
- VDC: d.Get("vdc").(string),
+ User: d.Get("user").(string),
+ Password: d.Get("password").(string),
+ Org: d.Get("org").(string),
+ Href: d.Get("url").(string),
+ VDC: d.Get("vdc").(string),
+ MaxRetryTimeout: d.Get("maxRetryTimeout").(int),
}
return config.Client()
diff --git a/builtin/providers/vcd/resource_vcd_dnat.go b/builtin/providers/vcd/resource_vcd_dnat.go
index 9c38b0b567..5c2e8006c1 100644
--- a/builtin/providers/vcd/resource_vcd_dnat.go
+++ b/builtin/providers/vcd/resource_vcd_dnat.go
@@ -3,7 +3,6 @@ package vcd
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
- "github.com/hmrc/vmware-govcd"
)
func resourceVcdDNAT() *schema.Resource {
@@ -41,7 +40,7 @@ func resourceVcdDNAT() *schema.Resource {
}
func resourceVcdDNATCreate(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
// Multiple VCD components need to run operations on the Edge Gateway, as
// the edge gatway will throw back an error if it is already performing an
// operation we must wait until we can aquire a lock on the client
@@ -60,7 +59,7 @@ func resourceVcdDNATCreate(d *schema.ResourceData, meta interface{}) error {
// constrained by out lock. If the edge gateway reurns with a busy error, wait
// 3 seconds and then try again. Continue until a non-busy error or success
- err = retryCall(4, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
task, err := edgeGateway.AddNATMapping("DNAT", d.Get("external_ip").(string),
d.Get("internal_ip").(string),
portString)
@@ -80,7 +79,7 @@ func resourceVcdDNATCreate(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdDNATRead(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
e, err := vcdClient.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
if err != nil {
@@ -106,7 +105,7 @@ func resourceVcdDNATRead(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdDNATDelete(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
// Multiple VCD components need to run operations on the Edge Gateway, as
// the edge gatway will throw back an error if it is already performing an
// operation we must wait until we can aquire a lock on the client
@@ -119,7 +118,7 @@ func resourceVcdDNATDelete(d *schema.ResourceData, meta interface{}) error {
if err != nil {
return fmt.Errorf("Unable to find edge gateway: %#v", err)
}
- err = retryCall(4, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
task, err := edgeGateway.RemoveNATMapping("DNAT", d.Get("external_ip").(string),
d.Get("internal_ip").(string),
portString)
diff --git a/builtin/providers/vcd/resource_vcd_dnat_test.go b/builtin/providers/vcd/resource_vcd_dnat_test.go
index 6e073905b1..759d9d16b8 100644
--- a/builtin/providers/vcd/resource_vcd_dnat_test.go
+++ b/builtin/providers/vcd/resource_vcd_dnat_test.go
@@ -50,7 +50,7 @@ func testAccCheckVcdDNATExists(n string, gateway *govcd.EdgeGateway) resource.Te
return fmt.Errorf("No DNAT ID is set")
}
- conn := testAccProvider.Meta().(*govcd.VCDClient)
+ conn := testAccProvider.Meta().(*VCDClient)
gatewayName := rs.Primary.Attributes["edge_gateway"]
edgeGateway, err := conn.OrgVdc.FindEdgeGateway(gatewayName)
@@ -79,7 +79,7 @@ func testAccCheckVcdDNATExists(n string, gateway *govcd.EdgeGateway) resource.Te
}
func testAccCheckVcdDNATDestroy(s *terraform.State) error {
- conn := testAccProvider.Meta().(*govcd.VCDClient)
+ conn := testAccProvider.Meta().(*VCDClient)
for _, rs := range s.RootModule().Resources {
if rs.Type != "vcd_dnat" {
continue
diff --git a/builtin/providers/vcd/resource_vcd_firewall_rules.go b/builtin/providers/vcd/resource_vcd_firewall_rules.go
index ff5d249ba2..913bff8be0 100644
--- a/builtin/providers/vcd/resource_vcd_firewall_rules.go
+++ b/builtin/providers/vcd/resource_vcd_firewall_rules.go
@@ -3,7 +3,6 @@ package vcd
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
- "github.com/hmrc/vmware-govcd"
types "github.com/hmrc/vmware-govcd/types/v56"
"log"
"strings"
@@ -82,7 +81,7 @@ func resourceVcdFirewallRules() *schema.Resource {
}
func resourceVcdFirewallRulesCreate(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
vcdClient.Mutex.Lock()
defer vcdClient.Mutex.Unlock()
@@ -91,7 +90,7 @@ func resourceVcdFirewallRulesCreate(d *schema.ResourceData, meta interface{}) er
return fmt.Errorf("Unable to find edge gateway: %s", err)
}
- err = retryCall(5, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
edgeGateway.Refresh()
firewallRules, _ := expandFirewallRules(d, edgeGateway.EdgeGateway)
task, err := edgeGateway.CreateFirewallRules(d.Get("default_action").(string), firewallRules)
@@ -112,7 +111,7 @@ func resourceVcdFirewallRulesCreate(d *schema.ResourceData, meta interface{}) er
}
func resourceFirewallRulesDelete(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
vcdClient.Mutex.Lock()
defer vcdClient.Mutex.Unlock()
@@ -134,7 +133,7 @@ func resourceFirewallRulesDelete(d *schema.ResourceData, meta interface{}) error
}
func resourceFirewallRulesRead(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
edgeGateway, err := vcdClient.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
if err != nil {
diff --git a/builtin/providers/vcd/resource_vcd_firewall_rules_test.go b/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
index fe41712768..2c1fa69e6b 100644
--- a/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
+++ b/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
@@ -44,7 +44,7 @@ func testAccCheckVcdFirewallRulesExists(n string, gateway *govcd.EdgeGateway) re
return fmt.Errorf("No Record ID is set")
}
- conn := testAccProvider.Meta().(*govcd.VCDClient)
+ conn := testAccProvider.Meta().(*VCDClient)
resp, err := conn.OrgVdc.FindEdgeGateway(rs.Primary.ID)
if err != nil {
@@ -77,6 +77,7 @@ func createFirewallRulesConfigs(existingRules *govcd.EdgeGateway) string {
Org: os.Getenv("VCD_ORG"),
Href: os.Getenv("VCD_URL"),
VDC: os.Getenv("VCD_VDC"),
+ MaxRetryTimeout: 240,
}
conn, err := config.Client()
if err != nil {
diff --git a/builtin/providers/vcd/resource_vcd_network.go b/builtin/providers/vcd/resource_vcd_network.go
index 3cb7b8f707..531afd878d 100644
--- a/builtin/providers/vcd/resource_vcd_network.go
+++ b/builtin/providers/vcd/resource_vcd_network.go
@@ -7,7 +7,6 @@ import (
"fmt"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
- "github.com/hmrc/vmware-govcd"
types "github.com/hmrc/vmware-govcd/types/v56"
"strings"
)
@@ -121,7 +120,7 @@ func resourceVcdNetwork() *schema.Resource {
}
func resourceVcdNetworkCreate(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
log.Printf("[TRACE] CLIENT: %#v", vcdClient)
vcdClient.Mutex.Lock()
defer vcdClient.Mutex.Unlock()
@@ -156,7 +155,7 @@ func resourceVcdNetworkCreate(d *schema.ResourceData, meta interface{}) error {
log.Printf("[INFO] NETWORK: %#v", newnetwork)
- err = retryCall(4, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
return vcdClient.OrgVdc.CreateOrgVDCNetwork(newnetwork)
})
if err != nil {
@@ -174,7 +173,7 @@ func resourceVcdNetworkCreate(d *schema.ResourceData, meta interface{}) error {
}
if dhcp, ok := d.GetOk("dhcp_pool"); ok {
- err = retryCall(4, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
task, err := edgeGateway.AddDhcpPool(network.OrgVDCNetwork, dhcp.(*schema.Set).List())
if err != nil {
return fmt.Errorf("Error adding DHCP pool: %#v", err)
@@ -194,7 +193,7 @@ func resourceVcdNetworkCreate(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdNetworkRead(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
log.Printf("[DEBUG] VCD Client configuration: %#v", vcdClient)
log.Printf("[DEBUG] VCD Client configuration: %#v", vcdClient.OrgVdc)
@@ -226,7 +225,7 @@ func resourceVcdNetworkRead(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdNetworkDelete(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
vcdClient.Mutex.Lock()
defer vcdClient.Mutex.Unlock()
err := vcdClient.OrgVdc.Refresh()
@@ -239,7 +238,7 @@ func resourceVcdNetworkDelete(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("Error finding network: %#v", err)
}
- err = retryCall(4, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
task, err := network.Delete()
if err != nil {
return fmt.Errorf("Error Deleting Network: %#v", err)
diff --git a/builtin/providers/vcd/resource_vcd_network_test.go b/builtin/providers/vcd/resource_vcd_network_test.go
index 2d260bc03b..fa59d177b7 100644
--- a/builtin/providers/vcd/resource_vcd_network_test.go
+++ b/builtin/providers/vcd/resource_vcd_network_test.go
@@ -50,7 +50,7 @@ func testAccCheckVcdNetworkExists(n string, network *govcd.OrgVDCNetwork) resour
return fmt.Errorf("No VAPP ID is set")
}
- conn := testAccProvider.Meta().(*govcd.VCDClient)
+ conn := testAccProvider.Meta().(*VCDClient)
resp, err := conn.OrgVdc.FindVDCNetwork(rs.Primary.ID)
if err != nil {
@@ -64,7 +64,7 @@ func testAccCheckVcdNetworkExists(n string, network *govcd.OrgVDCNetwork) resour
}
func testAccCheckVcdNetworkDestroy(s *terraform.State) error {
- conn := testAccProvider.Meta().(*govcd.VCDClient)
+ conn := testAccProvider.Meta().(*VCDClient)
for _, rs := range s.RootModule().Resources {
if rs.Type != "vcd_network" {
diff --git a/builtin/providers/vcd/resource_vcd_snat.go b/builtin/providers/vcd/resource_vcd_snat.go
index 88a7a75a5e..c2ae891210 100644
--- a/builtin/providers/vcd/resource_vcd_snat.go
+++ b/builtin/providers/vcd/resource_vcd_snat.go
@@ -3,7 +3,6 @@ package vcd
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
- "github.com/hmrc/vmware-govcd"
)
func resourceVcdSNAT() *schema.Resource {
@@ -35,7 +34,7 @@ func resourceVcdSNAT() *schema.Resource {
}
func resourceVcdSNATCreate(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
// Multiple VCD components need to run operations on the Edge Gateway, as
// the edge gatway will throw back an error if it is already performing an
// operation we must wait until we can aquire a lock on the client
@@ -51,7 +50,7 @@ func resourceVcdSNATCreate(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("Unable to find edge gateway: %#v", err)
}
- err = retryCall(4, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
task, err := edgeGateway.AddNATMapping("SNAT", d.Get("internal_ip").(string),
d.Get("external_ip").(string),
"any")
@@ -69,7 +68,7 @@ func resourceVcdSNATCreate(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdSNATRead(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
e, err := vcdClient.OrgVdc.FindEdgeGateway(d.Get("edge_gateway").(string))
if err != nil {
@@ -94,7 +93,7 @@ func resourceVcdSNATRead(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdSNATDelete(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
// Multiple VCD components need to run operations on the Edge Gateway, as
// the edge gatway will throw back an error if it is already performing an
// operation we must wait until we can aquire a lock on the client
@@ -106,7 +105,7 @@ func resourceVcdSNATDelete(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("Unable to find edge gateway: %#v", err)
}
- err = retryCall(4, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
task, err := edgeGateway.RemoveNATMapping("SNAT", d.Get("internal_ip").(string),
d.Get("external_ip").(string),
"")
diff --git a/builtin/providers/vcd/resource_vcd_snat_test.go b/builtin/providers/vcd/resource_vcd_snat_test.go
index 66351f2a15..87c2702a31 100644
--- a/builtin/providers/vcd/resource_vcd_snat_test.go
+++ b/builtin/providers/vcd/resource_vcd_snat_test.go
@@ -50,7 +50,7 @@ func testAccCheckVcdSNATExists(n string, gateway *govcd.EdgeGateway) resource.Te
return fmt.Errorf("No SNAT ID is set")
}
- conn := testAccProvider.Meta().(*govcd.VCDClient)
+ conn := testAccProvider.Meta().(*VCDClient)
gatewayName := rs.Primary.Attributes["edge_gateway"]
edgeGateway, err := conn.OrgVdc.FindEdgeGateway(gatewayName)
@@ -79,7 +79,7 @@ func testAccCheckVcdSNATExists(n string, gateway *govcd.EdgeGateway) resource.Te
}
func testAccCheckVcdSNATDestroy(s *terraform.State) error {
- conn := testAccProvider.Meta().(*govcd.VCDClient)
+ conn := testAccProvider.Meta().(*VCDClient)
for _, rs := range s.RootModule().Resources {
if rs.Type != "vcd_snat" {
continue
diff --git a/builtin/providers/vcd/resource_vcd_vapp.go b/builtin/providers/vcd/resource_vcd_vapp.go
index d72b2cb973..73ae6f2b78 100644
--- a/builtin/providers/vcd/resource_vcd_vapp.go
+++ b/builtin/providers/vcd/resource_vcd_vapp.go
@@ -3,7 +3,6 @@ package vcd
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
- "github.com/hmrc/vmware-govcd"
types "github.com/hmrc/vmware-govcd/types/v56"
"log"
)
@@ -80,7 +79,7 @@ func resourceVcdVApp() *schema.Resource {
}
func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
catalog, err := vcdClient.Org.FindCatalog(d.Get("catalog_name").(string))
if err != nil {
@@ -133,7 +132,7 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
},
}
- err = retryCall(4, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
e := vcdClient.OrgVdc.InstantiateVAppTemplate(createvapp)
if e != nil {
@@ -152,7 +151,7 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
vapp, err := vcdClient.OrgVdc.FindVAppByName(d.Get("name").(string))
- err = retryCall(4, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
task, err := vapp.ChangeVMName(d.Get("name").(string))
if err != nil {
return fmt.Errorf("Error with vm name change: %#v", err)
@@ -164,7 +163,7 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("Error changing vmname: %#v", err)
}
- err = retryCall(4, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
task, err := vapp.ChangeNetworkConfig(d.Get("network_name").(string), d.Get("ip").(string))
if err != nil {
return fmt.Errorf("Error with Networking change: %#v", err)
@@ -176,7 +175,7 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
}
if initscript, ok := d.GetOk("initscript"); ok {
- err = retryCall(4, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
task, err := vapp.RunCustomizationScript(d.Get("name").(string), initscript.(string))
if err != nil {
return fmt.Errorf("Error with setting init script: %#v", err)
@@ -194,7 +193,7 @@ func resourceVcdVAppCreate(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdVAppUpdate(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
vapp, err := vcdClient.OrgVdc.FindVAppByName(d.Id())
if err != nil {
@@ -246,7 +245,7 @@ func resourceVcdVAppUpdate(d *schema.ResourceData, meta interface{}) error {
}
if d.HasChange("memory") {
- err = retryCall(4, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
task, err := vapp.ChangeMemorySize(d.Get("memory").(int))
if err != nil {
return fmt.Errorf("Error changing memory size: %#v", err)
@@ -260,7 +259,7 @@ func resourceVcdVAppUpdate(d *schema.ResourceData, meta interface{}) error {
}
if d.HasChange("cpus") {
- err = retryCall(4, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
task, err := vapp.ChangeCPUcount(d.Get("cpus").(int))
if err != nil {
return fmt.Errorf("Error changing cpu count: %#v", err)
@@ -290,7 +289,7 @@ func resourceVcdVAppUpdate(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdVAppRead(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
err := vcdClient.OrgVdc.Refresh()
if err != nil {
@@ -309,14 +308,14 @@ func resourceVcdVAppRead(d *schema.ResourceData, meta interface{}) error {
}
func resourceVcdVAppDelete(d *schema.ResourceData, meta interface{}) error {
- vcdClient := meta.(*govcd.VCDClient)
+ vcdClient := meta.(*VCDClient)
vapp, err := vcdClient.OrgVdc.FindVAppByName(d.Id())
if err != nil {
return fmt.Errorf("error finding vapp: %s", err)
}
- err = retryCall(4, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
task, err := vapp.Undeploy()
if err != nil {
return fmt.Errorf("Error undeploying: %#v", err)
@@ -328,7 +327,7 @@ func resourceVcdVAppDelete(d *schema.ResourceData, meta interface{}) error {
return err
}
- err = retryCall(4, func() error {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
task, err := vapp.Delete()
if err != nil {
return fmt.Errorf("Error deleting: %#v", err)
diff --git a/builtin/providers/vcd/resource_vcd_vapp_test.go b/builtin/providers/vcd/resource_vcd_vapp_test.go
index 1ae4315e2a..38162a64a2 100644
--- a/builtin/providers/vcd/resource_vcd_vapp_test.go
+++ b/builtin/providers/vcd/resource_vcd_vapp_test.go
@@ -59,7 +59,7 @@ func testAccCheckVcdVAppExists(n string, vapp *govcd.VApp) resource.TestCheckFun
return fmt.Errorf("No VAPP ID is set")
}
- conn := testAccProvider.Meta().(*govcd.VCDClient)
+ conn := testAccProvider.Meta().(*VCDClient)
resp, err := conn.OrgVdc.FindVAppByName(rs.Primary.ID)
if err != nil {
@@ -73,7 +73,7 @@ func testAccCheckVcdVAppExists(n string, vapp *govcd.VApp) resource.TestCheckFun
}
func testAccCheckVcdVAppDestroy(s *terraform.State) error {
- conn := testAccProvider.Meta().(*govcd.VCDClient)
+ conn := testAccProvider.Meta().(*VCDClient)
for _, rs := range s.RootModule().Resources {
if rs.Type != "vcd_vapp" {
diff --git a/builtin/providers/vcd/structure.go b/builtin/providers/vcd/structure.go
index d8124687a7..d4ac65eaee 100644
--- a/builtin/providers/vcd/structure.go
+++ b/builtin/providers/vcd/structure.go
@@ -107,6 +107,6 @@ func getPortString(port int) string {
return portstring
}
-func retryCall(min int, f resource.RetryFunc) error {
- return resource.Retry(time.Duration(min)*time.Minute, f)
+func retryCall(seconds int, f resource.RetryFunc) error {
+ return resource.Retry(time.Duration(seconds)*time.Second, f)
}
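One easy-to-miss detail in this final hunk is the unit change. Below is a short annotated restatement of the new helper (same code as the diff, comments added) to make the consequence explicit; treat it as a reading aid, not a separate change.

```
package vcd

import (
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

// Before this patch the first argument was a number of *minutes*:
//
//	retryCall(4, f) // resource.Retry(4*time.Minute, f)
//
// Afterwards it is a number of *seconds*, taken from the provider-level
// maxRetryTimeout setting. That is why every old call site is updated in the
// same patch: a literal retryCall(4, f) left behind would now mean 4 seconds
// rather than 4 minutes.
func retryCall(seconds int, f resource.RetryFunc) error {
	return resource.Retry(time.Duration(seconds)*time.Second, f)
}
```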
From 49195f8b77cf9de0b898047001a4d27cb6c8bf5e Mon Sep 17 00:00:00 2001
From: Nicki Watt
Date: Sat, 21 Nov 2015 12:50:40 +0000
Subject: [PATCH 074/664] Added docs for maxRetryTimeout
---
.../docs/providers/vcd/index.html.markdown | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/website/source/docs/providers/vcd/index.html.markdown b/website/source/docs/providers/vcd/index.html.markdown
index 385dbcd8fc..b47818c8c3 100644
--- a/website/source/docs/providers/vcd/index.html.markdown
+++ b/website/source/docs/providers/vcd/index.html.markdown
@@ -19,11 +19,12 @@ Use the navigation to the left to read about the available resources.
```
# Configure the VMware vCloud Director Provider
provider "vcd" {
- user = "${var.vcd_user}"
- password = "${var.vcd_pass}"
- org = "${var.vcd_org}"
- url = "${var.vcd_url}"
- vdc = "${var.vcd_vdc}"
+ user = "${var.vcd_user}"
+ password = "${var.vcd_pass}"
+ org = "${var.vcd_org}"
+ url = "${var.vcd_url}"
+ vdc = "${var.vcd_vdc}"
+ maxRetryTimeout = "${var.vcd_maxRetryTimeout}"
}
# Create a new network
@@ -49,3 +50,9 @@ The following arguments are used to configure the VMware vCloud Director Provide
API operations against. If not set the plugin will select the first virtual
datacenter available to your Org. Can also be specified with the `VCD_VDC` environment
variable.
+* `maxRetryTimeout` - (Optional) This provides you with the ability to specify the maximum
+ amount of time (in seconds) you are prepared to wait for interactions on resources managed
+ by vCloud Director to be successful. If a resource action fails, the action will be retried
+ (as long as it is still within the `maxRetryTimeout` value) to try and ensure success.
+ Defaults to 30 seconds if not set.
+ Can also be specified with the `VCD_MAX_RETRY_TIMEOUT` environment variable.
From ef4726bd506f89e8e841296b1a44603e39e9591e Mon Sep 17 00:00:00 2001
From: Sander van Harmelen
Date: Wed, 18 Nov 2015 11:24:04 +0100
Subject: [PATCH 075/664] Change Set internals and make (extreme) performance
improvements
Changing the Set internals makes a lot of sense as it saves doing
conversions in multiple places and gives a central place to alter
the key when an item is computed.
This will have no side effects other than that the ordering is now
based on strings instead of integers, so the order will be different.
It will however have no effect on existing configs, as these use the
individual codes/keys and not the ordering to determine whether
there is a diff or not.
Lastly (but I think also most importantly) there is a fix in this PR
that makes diffing sets far more performant. Before, a full diff
required reading the complete Set for every single parameter/attribute
you wanted to diff, while now it only gets that specific parameter.
We have a use case with a Set that has 18 parameters and consists of
about 600 items (don't ask :wink:). When doing a diff it would take
100% CPU on all cores and stay that way for almost an hour before the
diff completed.
Debugging this we learned that retrieving every single parameter made
over 52,000 calls to `func (c *ResourceConfig) get(..)`. In this
function a slice is created and used only for the duration of the
call, so the time needed to allocate all of those slices, together
with the time the garbage collector needed to clean them up again,
crippled the system. On top of that there are some expensive reflect
calls in this function which also claimed a fair amount of CPU time.
After this fix the number of calls needed to get a single parameter
dropped from 52,000+ to only 2! :smiley:
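To make the new keying scheme concrete, here is a minimal, self-contained sketch of the idea (it uses a trivial stand-in hash rather than the real helper/schema types): element codes are plain strings, computed elements get a "~" prefix instead of the old negated-int trick, and listCode sorts the codes lexicographically, which is why the expected orderings in the tests below change.

```
package main

import (
	"fmt"
	"sort"
	"strconv"
)

// set mimics the patched helper/schema Set: string codes instead of ints.
type set struct {
	m map[string]interface{}
}

func (s *set) add(item int, computed bool) string {
	if s.m == nil {
		s.m = map[string]interface{}{}
	}
	code := strconv.Itoa(item) // stand-in for the real schema hash function
	if computed {
		code = "~" + code // computed elements are marked by prefix, not negation
	}
	s.m[code] = item
	return code
}

func (s *set) listCode() []string {
	keys := make([]string, 0, len(s.m))
	for k := range s.m {
		keys = append(keys, k)
	}
	sort.Strings(keys) // string sort, so "25" now sorts before "5"
	return keys
}

func main() {
	s := &set{}
	s.add(1, false)
	s.add(25, false)
	s.add(5, true) // pretend this element is computed
	fmt.Println(s.listCode()) // [1 25 ~5]
}
```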
---
helper/schema/field_reader_config.go | 56 +++++++++++------------
helper/schema/field_reader_config_test.go | 4 +-
helper/schema/field_writer_map.go | 3 +-
helper/schema/resource_data.go | 17 ++-----
helper/schema/resource_data_test.go | 8 ++--
helper/schema/schema.go | 13 ++----
helper/schema/set.go | 33 +++++++------
helper/schema/set_test.go | 4 +-
8 files changed, 62 insertions(+), 76 deletions(-)
diff --git a/helper/schema/field_reader_config.go b/helper/schema/field_reader_config.go
index 76aeed2bd8..3cf4f5fc30 100644
--- a/helper/schema/field_reader_config.go
+++ b/helper/schema/field_reader_config.go
@@ -18,10 +18,12 @@ type ConfigFieldReader struct {
Config *terraform.ResourceConfig
Schema map[string]*Schema
- lock sync.Mutex
+ indexMaps map[string]map[string]int
+ once sync.Once
}
func (r *ConfigFieldReader) ReadField(address []string) (FieldReadResult, error) {
+ r.once.Do(func() { r.indexMaps = make(map[string]map[string]int) })
return r.readField(address, false)
}
@@ -55,20 +57,18 @@ func (r *ConfigFieldReader) readField(
continue
}
- // Get the code
- code, err := strconv.ParseInt(address[i+1], 0, 0)
- if err != nil {
- return FieldReadResult{}, err
+ indexMap, ok := r.indexMaps[strings.Join(address[:i+1], ".")]
+ if !ok {
+ // Get the set so we can get the index map that tells us the
+ // mapping of the hash code to the list index
+ _, err := r.readSet(address[:i+1], v)
+ if err != nil {
+ return FieldReadResult{}, err
+ }
+ indexMap = r.indexMaps[strings.Join(address[:i+1], ".")]
}
- // Get the set so we can get the index map that tells us the
- // mapping of the hash code to the list index
- _, indexMap, err := r.readSet(address[:i+1], v)
- if err != nil {
- return FieldReadResult{}, err
- }
-
- index, ok := indexMap[int(code)]
+ index, ok := indexMap[address[i+1]]
if !ok {
return FieldReadResult{}, nil
}
@@ -87,8 +87,7 @@ func (r *ConfigFieldReader) readField(
case TypeMap:
return r.readMap(k)
case TypeSet:
- result, _, err := r.readSet(address, schema)
- return result, err
+ return r.readSet(address, schema)
case typeObject:
return readObjectField(
&nestedConfigFieldReader{r},
@@ -112,7 +111,7 @@ func (r *ConfigFieldReader) readMap(k string) (FieldReadResult, error) {
switch m := mraw.(type) {
case []interface{}:
for i, innerRaw := range m {
- for ik, _ := range innerRaw.(map[string]interface{}) {
+ for ik := range innerRaw.(map[string]interface{}) {
key := fmt.Sprintf("%s.%d.%s", k, i, ik)
if r.Config.IsComputed(key) {
computed = true
@@ -125,7 +124,7 @@ func (r *ConfigFieldReader) readMap(k string) (FieldReadResult, error) {
}
case []map[string]interface{}:
for i, innerRaw := range m {
- for ik, _ := range innerRaw {
+ for ik := range innerRaw {
key := fmt.Sprintf("%s.%d.%s", k, i, ik)
if r.Config.IsComputed(key) {
computed = true
@@ -137,7 +136,7 @@ func (r *ConfigFieldReader) readMap(k string) (FieldReadResult, error) {
}
}
case map[string]interface{}:
- for ik, _ := range m {
+ for ik := range m {
key := fmt.Sprintf("%s.%s", k, ik)
if r.Config.IsComputed(key) {
computed = true
@@ -198,17 +197,17 @@ func (r *ConfigFieldReader) readPrimitive(
}
func (r *ConfigFieldReader) readSet(
- address []string, schema *Schema) (FieldReadResult, map[int]int, error) {
- indexMap := make(map[int]int)
+ address []string, schema *Schema) (FieldReadResult, error) {
+ indexMap := make(map[string]int)
// Create the set that will be our result
set := schema.ZeroValue().(*Set)
raw, err := readListField(&nestedConfigFieldReader{r}, address, schema)
if err != nil {
- return FieldReadResult{}, indexMap, err
+ return FieldReadResult{}, err
}
if !raw.Exists {
- return FieldReadResult{Value: set}, indexMap, nil
+ return FieldReadResult{Value: set}, nil
}
// If the list is computed, the set is necessarilly computed
@@ -217,7 +216,7 @@ func (r *ConfigFieldReader) readSet(
Value: set,
Exists: true,
Computed: raw.Computed,
- }, indexMap, nil
+ }, nil
}
// Build up the set from the list elements
@@ -226,19 +225,16 @@ func (r *ConfigFieldReader) readSet(
computed := r.hasComputedSubKeys(
fmt.Sprintf("%s.%d", strings.Join(address, "."), i), schema)
- code := set.add(v)
+ code := set.add(v, computed)
indexMap[code] = i
- if computed {
- set.m[-code] = set.m[code]
- delete(set.m, code)
- code = -code
- }
}
+ r.indexMaps[strings.Join(address, ".")] = indexMap
+
return FieldReadResult{
Value: set,
Exists: true,
- }, indexMap, nil
+ }, nil
}
// hasComputedSubKeys walks through a schema and returns whether or not the
diff --git a/helper/schema/field_reader_config_test.go b/helper/schema/field_reader_config_test.go
index be37fcef9f..aac575883f 100644
--- a/helper/schema/field_reader_config_test.go
+++ b/helper/schema/field_reader_config_test.go
@@ -228,8 +228,8 @@ func TestConfigFieldReader_ComputedSet(t *testing.T) {
"set, normal": {
[]string{"strSet"},
FieldReadResult{
- Value: map[int]interface{}{
- 2356372769: "foo",
+ Value: map[string]interface{}{
+ "2356372769": "foo",
},
Exists: true,
Computed: false,
diff --git a/helper/schema/field_writer_map.go b/helper/schema/field_writer_map.go
index 3e9b047192..7ef40b3673 100644
--- a/helper/schema/field_writer_map.go
+++ b/helper/schema/field_writer_map.go
@@ -298,8 +298,7 @@ func (w *MapFieldWriter) setSet(
}
for code, elem := range value.(*Set).m {
- codeStr := strconv.FormatInt(int64(code), 10)
- if err := w.set(append(addrCopy, codeStr), elem); err != nil {
+ if err := w.set(append(addrCopy, code), elem); err != nil {
return err
}
}
diff --git a/helper/schema/resource_data.go b/helper/schema/resource_data.go
index af48481d3a..5c05a155bf 100644
--- a/helper/schema/resource_data.go
+++ b/helper/schema/resource_data.go
@@ -228,7 +228,7 @@ func (d *ResourceData) State() *terraform.InstanceState {
// attribute set as a map[string]interface{}, write it to a MapFieldWriter,
// and then use that map.
rawMap := make(map[string]interface{})
- for k, _ := range d.schema {
+ for k := range d.schema {
source := getSourceSet
if d.partial {
source = getSourceState
@@ -343,13 +343,13 @@ func (d *ResourceData) diffChange(
}
func (d *ResourceData) getChange(
- key string,
+ k string,
oldLevel getSource,
newLevel getSource) (getResult, getResult) {
var parts, parts2 []string
- if key != "" {
- parts = strings.Split(key, ".")
- parts2 = strings.Split(key, ".")
+ if k != "" {
+ parts = strings.Split(k, ".")
+ parts2 = strings.Split(k, ".")
}
o := d.get(parts, oldLevel)
@@ -374,13 +374,6 @@ func (d *ResourceData) get(addr []string, source getSource) getResult {
level = "state"
}
- // Build the address of the key we're looking for and ask the FieldReader
- for i, v := range addr {
- if v[0] == '~' {
- addr[i] = v[1:]
- }
- }
-
var result FieldReadResult
var err error
if exact {
diff --git a/helper/schema/resource_data_test.go b/helper/schema/resource_data_test.go
index dc62a8a190..310f4a4545 100644
--- a/helper/schema/resource_data_test.go
+++ b/helper/schema/resource_data_test.go
@@ -1509,9 +1509,9 @@ func TestResourceDataSet(t *testing.T) {
Key: "ports",
Value: &Set{
- m: map[int]interface{}{
- 1: 1,
- 2: 2,
+ m: map[string]interface{}{
+ "1": 1,
+ "2": 2,
},
},
@@ -1546,7 +1546,7 @@ func TestResourceDataSet(t *testing.T) {
Err: true,
GetKey: "ports",
- GetValue: []interface{}{80, 100},
+ GetValue: []interface{}{100, 80},
},
// #11: Set with nested set
diff --git a/helper/schema/schema.go b/helper/schema/schema.go
index 8ed8135264..450bfdf86c 100644
--- a/helper/schema/schema.go
+++ b/helper/schema/schema.go
@@ -866,23 +866,16 @@ func (m schemaMap) diffSet(
// Build the list of codes that will make up our set. This is the
// removed codes as well as all the codes in the new codes.
- codes := make([][]int, 2)
+ codes := make([][]string, 2)
codes[0] = os.Difference(ns).listCode()
codes[1] = ns.listCode()
for _, list := range codes {
for _, code := range list {
- // If the code is negative (first character is -) then
- // replace it with "~" for our computed set stuff.
- codeStr := strconv.Itoa(code)
- if codeStr[0] == '-' {
- codeStr = string('~') + codeStr[1:]
- }
-
switch t := schema.Elem.(type) {
case *Resource:
// This is a complex resource
for k2, schema := range t.Schema {
- subK := fmt.Sprintf("%s.%s.%s", k, codeStr, k2)
+ subK := fmt.Sprintf("%s.%s.%s", k, code, k2)
err := m.diff(subK, schema, diff, d, true)
if err != nil {
return err
@@ -896,7 +889,7 @@ func (m schemaMap) diffSet(
// This is just a primitive element, so go through each and
// just diff each.
- subK := fmt.Sprintf("%s.%s", k, codeStr)
+ subK := fmt.Sprintf("%s.%s", k, code)
err := m.diff(subK, &t2, diff, d, true)
if err != nil {
return err
diff --git a/helper/schema/set.go b/helper/schema/set.go
index e070a1eb9f..de05f40eed 100644
--- a/helper/schema/set.go
+++ b/helper/schema/set.go
@@ -5,6 +5,7 @@ import (
"fmt"
"reflect"
"sort"
+ "strconv"
"sync"
"github.com/hashicorp/terraform/helper/hashcode"
@@ -43,7 +44,7 @@ func HashSchema(schema *Schema) SchemaSetFunc {
type Set struct {
F SchemaSetFunc
- m map[int]interface{}
+ m map[string]interface{}
once sync.Once
}
@@ -65,7 +66,7 @@ func CopySet(otherSet *Set) *Set {
// Add adds an item to the set if it isn't already in the set.
func (s *Set) Add(item interface{}) {
- s.add(item)
+ s.add(item, false)
}
// Remove removes an item if it's already in the set. Idempotent.
@@ -157,13 +158,17 @@ func (s *Set) GoString() string {
}
func (s *Set) init() {
- s.m = make(map[int]interface{})
+ s.m = make(map[string]interface{})
}
-func (s *Set) add(item interface{}) int {
+func (s *Set) add(item interface{}, computed bool) string {
s.once.Do(s.init)
code := s.hash(item)
+ if computed {
+ code = "~" + code
+ }
+
if _, ok := s.m[code]; !ok {
s.m[code] = item
}
@@ -171,34 +176,34 @@ func (s *Set) add(item interface{}) int {
return code
}
-func (s *Set) hash(item interface{}) int {
+func (s *Set) hash(item interface{}) string {
code := s.F(item)
// Always return a nonnegative hashcode.
if code < 0 {
- return -code
+ code = -code
}
- return code
+ return strconv.Itoa(code)
}
-func (s *Set) remove(item interface{}) int {
+func (s *Set) remove(item interface{}) string {
s.once.Do(s.init)
- code := s.F(item)
+ code := s.hash(item)
delete(s.m, code)
return code
}
func (s *Set) index(item interface{}) int {
- return sort.SearchInts(s.listCode(), s.hash(item))
+ return sort.SearchStrings(s.listCode(), s.hash(item))
}
-func (s *Set) listCode() []int {
+func (s *Set) listCode() []string {
// Sort the hash codes so the order of the list is deterministic
- keys := make([]int, 0, len(s.m))
- for k, _ := range s.m {
+ keys := make([]string, 0, len(s.m))
+ for k := range s.m {
keys = append(keys, k)
}
- sort.Sort(sort.IntSlice(keys))
+ sort.Sort(sort.StringSlice(keys))
return keys
}
diff --git a/helper/schema/set_test.go b/helper/schema/set_test.go
index 8717735550..87a9f72282 100644
--- a/helper/schema/set_test.go
+++ b/helper/schema/set_test.go
@@ -11,7 +11,7 @@ func TestSetAdd(t *testing.T) {
s.Add(5)
s.Add(25)
- expected := []interface{}{1, 5, 25}
+ expected := []interface{}{1, 25, 5}
actual := s.List()
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
@@ -101,7 +101,7 @@ func TestSetUnion(t *testing.T) {
union := s1.Union(s2)
union.Add(2)
- expected := []interface{}{1, 2, 5, 25}
+ expected := []interface{}{1, 2, 25, 5}
actual := union.List()
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
From 3809315af9dcef161348f4a9b9bbcf51ebcdf760 Mon Sep 17 00:00:00 2001
From: Nicki Watt
Date: Mon, 23 Nov 2015 11:45:16 +0000
Subject: [PATCH 076/664] Upped default maxRetryTimeout from 30s -> 60s
---
builtin/providers/vcd/provider.go | 4 ++--
website/source/docs/providers/vcd/index.html.markdown | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/builtin/providers/vcd/provider.go b/builtin/providers/vcd/provider.go
index 0e3d48d6c3..aab15cedd3 100644
--- a/builtin/providers/vcd/provider.go
+++ b/builtin/providers/vcd/provider.go
@@ -47,8 +47,8 @@ func Provider() terraform.ResourceProvider {
"maxRetryTimeout": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
- DefaultFunc: schema.EnvDefaultFunc("VCD_MAX_RETRY_TIMEOUT", 30),
- Description: "Max num seconds to wait for successful response when operating on resources within vCloud (defaults to 30)",
+ DefaultFunc: schema.EnvDefaultFunc("VCD_MAX_RETRY_TIMEOUT", 60),
+ Description: "Max num seconds to wait for successful response when operating on resources within vCloud (defaults to 60)",
},
},
diff --git a/website/source/docs/providers/vcd/index.html.markdown b/website/source/docs/providers/vcd/index.html.markdown
index b47818c8c3..d4d5e9d698 100644
--- a/website/source/docs/providers/vcd/index.html.markdown
+++ b/website/source/docs/providers/vcd/index.html.markdown
@@ -54,5 +54,5 @@ The following arguments are used to configure the VMware vCloud Director Provide
amount of time (in seconds) you are prepared to wait for interactions on resources managed
by vCloud Director to be successful. If a resource action fails, the action will be retried
(as long as it is still within the `maxRetryTimeout` value) to try and ensure success.
- Defaults to 30 seconds if not set.
+ Defaults to 60 seconds if not set.
Can also be specified with the `VCD_MAX_RETRY_TIMEOUT` environment variable.
From a35b65e5d2c319296261695b19ff92985e525059 Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Tue, 24 Nov 2015 21:35:40 -0700
Subject: [PATCH 077/664] working on better test
---
.../resource_vsphere_virtual_machine_test.go | 90 ++++++++++++++++++-
1 file changed, 88 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
index 804e1ae074..4051a29852 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
@@ -10,6 +10,9 @@ import (
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
+ "github.com/vmware/govmomi/property"
+ "github.com/vmware/govmomi/vim25/mo"
+ "github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
)
@@ -216,8 +219,90 @@ func testAccCheckVSphereVirtualMachineDestroy(s *terraform.State) error {
return nil
}
+func testAccCheckVSphereVirtualMachineExistsHasExtraConfig(n string, vm *virtualMachine) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No ID is set")
+ }
+
+ client := testAccProvider.Meta().(*govmomi.Client)
+ finder := find.NewFinder(client.Client, true)
+
+ dc, err := finder.Datacenter(context.TODO(), rs.Primary.Attributes["datacenter"])
+ if err != nil {
+ return fmt.Errorf("error %s", err)
+ }
+
+ dcFolders, err := dc.Folders(context.TODO())
+ if err != nil {
+ return fmt.Errorf("error %s", err)
+ }
+
+ _, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["name"])
+
+ instance, err := finder.VirtualMachine(context.TODO(), rs.Primary.Attributes["name"])
+ if err != nil {
+ return fmt.Errorf("error %s", err)
+ }
+
+ var mvm mo.VirtualMachine
+
+ collector := property.DefaultCollector(client.Client)
+
+ if err := collector.RetrieveOne(context.TODO(), instance.Reference(), []string{"config.extraConfig"}, &mvm); err != nil {
+ return fmt.Errorf("error %s", err)
+ }
+
+ var configMap = make(map[string]types.AnyType)
+ if mvm.Config != nil && mvm.Config.ExtraConfig != nil && len(mvm.Config.ExtraConfig) > 0 {
+ for _, v := range mvm.Config.ExtraConfig {
+ value := v.GetOptionValue()
+ configMap[value.Key] = value.Value
+ }
+ } else {
+ return fmt.Errorf("error no ExtraConfig")
+ }
+
+ if configMap["foo"] == nil {
+ return fmt.Errorf("error no ExtraConfig for 'foo'")
+ }
+
+ if configMap["foo"] != "bar" {
+ return fmt.Errorf("error ExtraConfig 'foo' != bar")
+ }
+
+ if configMap["car"] == nil {
+ return fmt.Errorf("error no ExtraConfig for 'car'")
+ }
+
+ if configMap["car"] != "ferrari" {
+ return fmt.Errorf("error ExtraConfig 'car' != ferrari")
+ }
+
+ if configMap["car"] == nil {
+ return fmt.Errorf("error no ExtraConfig for 'car'")
+ }
+
+ if configMap["car"] != "ferrari" {
+ return fmt.Errorf("error ExtraConfig 'car' != ferrari")
+ }
+ *vm = virtualMachine{
+ name: rs.Primary.ID,
+ }
+
+ return nil
+ }
+}
func testAccCheckVSphereVirtualMachineExists(n string, vm *virtualMachine) resource.TestCheckFunc {
return func(s *terraform.State) error {
+ // todo how do I return this??
+ //test1 := testAccCheckVSphereVirtualMachineExists(n, vm)
+
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
@@ -247,6 +332,7 @@ func testAccCheckVSphereVirtualMachineExists(n string, vm *virtualMachine) resou
}
return nil
+
}
}
@@ -300,8 +386,8 @@ resource "vsphere_virtual_machine" "car" {
}
custom_configuration_parameters {
"foo" = "bar"
- "car" = "ferrai"
- "num" = 42
+ "car" = "ferrai"
+ "num" = 42
}
disk {
%s
From 1d1de992af1e3b678c98ad638fba42d0f7196d31 Mon Sep 17 00:00:00 2001
From: Chris Love
Date: Wed, 25 Nov 2015 05:41:01 +0000
Subject: [PATCH 078/664] adding better acceptance test to check custom config
---
.../resource_vsphere_virtual_machine_test.go | 28 ++++++++++++-------
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
index 4051a29852..f905a2fa62 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
@@ -164,7 +164,7 @@ func TestAccVSphereVirtualMachine_custom_configs(t *testing.T) {
template,
),
Check: resource.ComposeTestCheckFunc(
- testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.car", &vm),
+ testAccCheckVSphereVirtualMachineExistsHasCustomConfig("vsphere_virtual_machine.car", &vm),
resource.TestCheckResourceAttr(
"vsphere_virtual_machine.car", "name", "terraform-test-custom"),
resource.TestCheckResourceAttr(
@@ -180,7 +180,7 @@ func TestAccVSphereVirtualMachine_custom_configs(t *testing.T) {
resource.TestCheckResourceAttr(
"vsphere_virtual_machine.car", "custom_configuration_parameters.foo", "bar"),
resource.TestCheckResourceAttr(
- "vsphere_virtual_machine.car", "custom_configuration_parameters.car", "ferrai"),
+ "vsphere_virtual_machine.car", "custom_configuration_parameters.car", "ferrari"),
resource.TestCheckResourceAttr(
"vsphere_virtual_machine.car", "custom_configuration_parameters.num", "42"),
resource.TestCheckResourceAttr(
@@ -219,8 +219,10 @@ func testAccCheckVSphereVirtualMachineDestroy(s *terraform.State) error {
return nil
}
-func testAccCheckVSphereVirtualMachineExistsHasExtraConfig(n string, vm *virtualMachine) resource.TestCheckFunc {
+func testAccCheckVSphereVirtualMachineExistsHasCustomConfig(n string, vm *virtualMachine) resource.TestCheckFunc {
return func(s *terraform.State) error {
+
+
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
@@ -243,8 +245,13 @@ func testAccCheckVSphereVirtualMachineExistsHasExtraConfig(n string, vm *virtual
return fmt.Errorf("error %s", err)
}
- _, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["name"])
+ _, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["name"])
+ if err != nil {
+ return fmt.Errorf("error %s", err)
+ }
+
+ finder = finder.SetDatacenter(dc)
instance, err := finder.VirtualMachine(context.TODO(), rs.Primary.Attributes["name"])
if err != nil {
return fmt.Errorf("error %s", err)
@@ -284,12 +291,13 @@ func testAccCheckVSphereVirtualMachineExistsHasExtraConfig(n string, vm *virtual
return fmt.Errorf("error ExtraConfig 'car' != ferrari")
}
- if configMap["car"] == nil {
- return fmt.Errorf("error no ExtraConfig for 'car'")
+ if configMap["num"] == nil {
+ return fmt.Errorf("error no ExtraConfig for 'num'")
}
- if configMap["car"] != "ferrari" {
- return fmt.Errorf("error ExtraConfig 'car' != ferrari")
+ // todo this should be an int, getting back a string
+ if configMap["num"] != "42" {
+ return fmt.Errorf("error ExtraConfig 'num' != 42")
}
*vm = virtualMachine{
name: rs.Primary.ID,
@@ -386,8 +394,8 @@ resource "vsphere_virtual_machine" "car" {
}
custom_configuration_parameters {
"foo" = "bar"
- "car" = "ferrai"
- "num" = 42
+ "car" = "ferrari"
+ "num" = 42
}
disk {
%s
From de2c76a61c9c4ba8ec8e1f766c02556c85afdb4d Mon Sep 17 00:00:00 2001
From: chrislovecnm
Date: Wed, 25 Nov 2015 00:48:44 -0700
Subject: [PATCH 079/664] polish
---
.../vsphere/resource_vsphere_virtual_machine_test.go | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
index f905a2fa62..130523a47b 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
@@ -308,8 +308,6 @@ func testAccCheckVSphereVirtualMachineExistsHasCustomConfig(n string, vm *virtua
}
func testAccCheckVSphereVirtualMachineExists(n string, vm *virtualMachine) resource.TestCheckFunc {
return func(s *terraform.State) error {
- // todo how do I return this??
- //test1 := testAccCheckVSphereVirtualMachineExists(n, vm)
rs, ok := s.RootModule().Resources[n]
if !ok {
@@ -393,7 +391,7 @@ resource "vsphere_virtual_machine" "car" {
label = "%s"
}
custom_configuration_parameters {
- "foo" = "bar"
+ "foo" = "bar"
"car" = "ferrari"
"num" = 42
}
From a02667389ed169a4fd432b618f84813e55206409 Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Wed, 25 Nov 2015 12:05:59 +0000
Subject: [PATCH 080/664] Only undeploy a machine if it is switched on
---
builtin/providers/vcd/resource_vcd_vapp.go | 25 ++++++++++++++--------
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/builtin/providers/vcd/resource_vcd_vapp.go b/builtin/providers/vcd/resource_vcd_vapp.go
index 73ae6f2b78..16bdf72e8b 100644
--- a/builtin/providers/vcd/resource_vcd_vapp.go
+++ b/builtin/providers/vcd/resource_vcd_vapp.go
@@ -315,16 +315,23 @@ func resourceVcdVAppDelete(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("error finding vapp: %s", err)
}
- err = retryCall(vcdClient.MaxRetryTimeout, func() error {
- task, err := vapp.Undeploy()
- if err != nil {
- return fmt.Errorf("Error undeploying: %#v", err)
- }
-
- return task.WaitTaskCompletion()
- })
+ status, err := vapp.GetStatus()
if err != nil {
- return err
+ return fmt.Errorf("Error getting VApp status: %#v", err)
+ }
+
+ if status == "POWERED_ON" {
+ err = retryCall(vcdClient.MaxRetryTimeout, func() error {
+ task, err := vapp.Undeploy()
+ if err != nil {
+ return fmt.Errorf("Error undeploying: %#v", err)
+ }
+
+ return task.WaitTaskCompletion()
+ })
+ if err != nil {
+ return err
+ }
}
err = retryCall(vcdClient.MaxRetryTimeout, func() error {
From aec94b1682961142c81bf51211380be2224c4879 Mon Sep 17 00:00:00 2001
From: Brett Mack
Date: Wed, 25 Nov 2015 16:53:00 +0000
Subject: [PATCH 081/664] Don't error if unable to undeploy
---
builtin/providers/vcd/resource_vcd_vapp.go | 18 ++++++------------
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/builtin/providers/vcd/resource_vcd_vapp.go b/builtin/providers/vcd/resource_vcd_vapp.go
index 16bdf72e8b..50fc93563f 100644
--- a/builtin/providers/vcd/resource_vcd_vapp.go
+++ b/builtin/providers/vcd/resource_vcd_vapp.go
@@ -315,24 +315,18 @@ func resourceVcdVAppDelete(d *schema.ResourceData, meta interface{}) error {
return fmt.Errorf("error finding vapp: %s", err)
}
- status, err := vapp.GetStatus()
if err != nil {
return fmt.Errorf("Error getting VApp status: %#v", err)
}
- if status == "POWERED_ON" {
- err = retryCall(vcdClient.MaxRetryTimeout, func() error {
- task, err := vapp.Undeploy()
- if err != nil {
- return fmt.Errorf("Error undeploying: %#v", err)
- }
-
- return task.WaitTaskCompletion()
- })
+ _ = retryCall(vcdClient.MaxRetryTimeout, func() error {
+ task, err := vapp.Undeploy()
if err != nil {
- return err
+ return fmt.Errorf("Error undeploying: %#v", err)
}
- }
+
+ return task.WaitTaskCompletion()
+ })
err = retryCall(vcdClient.MaxRetryTimeout, func() error {
task, err := vapp.Delete()
From 5753efa8afe854d5e9f66e8fb27def6bd19fd208 Mon Sep 17 00:00:00 2001
From: Anthony Stanton
Date: Thu, 26 Nov 2015 15:32:21 +0100
Subject: [PATCH 082/664] Skip SG ID determination logic for Classic ELBs
---
builtin/providers/aws/resource_aws_elb.go | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_elb.go b/builtin/providers/aws/resource_aws_elb.go
index faf0b8addb..1bc6ddc283 100644
--- a/builtin/providers/aws/resource_aws_elb.go
+++ b/builtin/providers/aws/resource_aws_elb.go
@@ -350,12 +350,12 @@ func resourceAwsElbRead(d *schema.ResourceData, meta interface{}) error {
var elbVpc string
if lb.VPCId != nil {
elbVpc = *lb.VPCId
- }
- sgId, err := sourceSGIdByName(meta, *lb.SourceSecurityGroup.GroupName, elbVpc)
- if err != nil {
- return fmt.Errorf("[WARN] Error looking up ELB Security Group ID: %s", err)
- } else {
- d.Set("source_security_group_id", sgId)
+ sgId, err := sourceSGIdByName(meta, *lb.SourceSecurityGroup.GroupName, elbVpc)
+ if err != nil {
+ return fmt.Errorf("[WARN] Error looking up ELB Security Group ID: %s", err)
+ } else {
+ d.Set("source_security_group_id", sgId)
+ }
}
}
d.Set("subnets", lb.Subnets)
From d4ce2b87fb18816061812cc55d2181f5f4abc19d Mon Sep 17 00:00:00 2001
From: Sander van Harmelen
Date: Fri, 27 Nov 2015 14:49:28 +0100
Subject: [PATCH 083/664] Modified executable bit
---
builtin/providers/cloudstack/resource_cloudstack_template_test.go | 0
1 file changed, 0 insertions(+), 0 deletions(-)
mode change 100755 => 100644 builtin/providers/cloudstack/resource_cloudstack_template_test.go
diff --git a/builtin/providers/cloudstack/resource_cloudstack_template_test.go b/builtin/providers/cloudstack/resource_cloudstack_template_test.go
old mode 100755
new mode 100644
From 85627630bd3b06d3f5e003f1cbea0df355b16580 Mon Sep 17 00:00:00 2001
From: Chris Marchesi
Date: Fri, 27 Nov 2015 15:23:45 -0800
Subject: [PATCH 084/664] New resource (AWS provider) -
aws_lambda_event_source_mapping
---
builtin/providers/aws/provider.go | 1 +
...esource_aws_lambda_event_source_mapping.go | 171 +++++++++++
...ce_aws_lambda_event_source_mapping_test.go | 279 ++++++++++++++++++
.../lambda_event_source_mapping.html.markdown | 47 +++
4 files changed, 498 insertions(+)
create mode 100644 builtin/providers/aws/resource_aws_lambda_event_source_mapping.go
create mode 100644 builtin/providers/aws/resource_aws_lambda_event_source_mapping_test.go
create mode 100644 website/source/docs/providers/aws/r/lambda_event_source_mapping.html.markdown
diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index ba627d2ec4..c123cc184c 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -223,6 +223,7 @@ func Provider() terraform.ResourceProvider {
"aws_kinesis_firehose_delivery_stream": resourceAwsKinesisFirehoseDeliveryStream(),
"aws_kinesis_stream": resourceAwsKinesisStream(),
"aws_lambda_function": resourceAwsLambdaFunction(),
+ "aws_lambda_event_source_mapping": resourceAwsLambdaEventSourceMapping(),
"aws_launch_configuration": resourceAwsLaunchConfiguration(),
"aws_lb_cookie_stickiness_policy": resourceAwsLBCookieStickinessPolicy(),
"aws_main_route_table_association": resourceAwsMainRouteTableAssociation(),
diff --git a/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go b/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go
new file mode 100644
index 0000000000..70ca3a01c8
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go
@@ -0,0 +1,171 @@
+package aws
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/lambda"
+
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsLambdaEventSourceMapping() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsLambdaEventSourceMappingCreate,
+ Read: resourceAwsLambdaEventSourceMappingRead,
+ Update: resourceAwsLambdaEventSourceMappingUpdate,
+ Delete: resourceAwsLambdaEventSourceMappingDelete,
+
+ Schema: map[string]*schema.Schema{
+ "event_source_arn": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "function_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "starting_position": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "batch_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 100,
+ },
+ "enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: true,
+ },
+ "function_arn": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "last_modified": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "last_processing_result": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "state": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "state_transition_reason": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "uuid": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ }
+}
+
+// resourceAwsLambdaEventSourceMappingCreate maps to:
+// CreateEventSourceMapping in the API / SDK
+func resourceAwsLambdaEventSourceMappingCreate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).lambdaconn
+
+ functionName := d.Get("function_name").(string)
+ eventSourceArn := d.Get("event_source_arn").(string)
+
+ log.Printf("[DEBUG] Creating Lambda event source mapping: source %s to function %s", eventSourceArn, functionName)
+
+ params := &lambda.CreateEventSourceMappingInput{
+ EventSourceArn: aws.String(eventSourceArn),
+ FunctionName: aws.String(functionName),
+ StartingPosition: aws.String(d.Get("starting_position").(string)),
+ BatchSize: aws.Int64(int64(d.Get("batch_size").(int))),
+ Enabled: aws.Bool(d.Get("enabled").(bool)),
+ }
+
+ eventSourceMappingConfiguration, err := conn.CreateEventSourceMapping(params)
+ if err != nil {
+ return fmt.Errorf("Error creating Lambda event source mapping: %s", err)
+ }
+
+ d.Set("uuid", eventSourceMappingConfiguration.UUID)
+ d.SetId(*eventSourceMappingConfiguration.UUID)
+
+ return resourceAwsLambdaEventSourceMappingRead(d, meta)
+}
+
+// resourceAwsLambdaEventSourceMappingRead maps to:
+// GetEventSourceMapping in the API / SDK
+func resourceAwsLambdaEventSourceMappingRead(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).lambdaconn
+
+ log.Printf("[DEBUG] Fetching Lambda event source mapping: %s", d.Id())
+
+ params := &lambda.GetEventSourceMappingInput{
+ UUID: aws.String(d.Id()),
+ }
+
+ eventSourceMappingConfiguration, err := conn.GetEventSourceMapping(params)
+ if err != nil {
+ return err
+ }
+
+ d.Set("batch_size", eventSourceMappingConfiguration.BatchSize)
+ d.Set("event_source_arn", eventSourceMappingConfiguration.EventSourceArn)
+ d.Set("function_arn", eventSourceMappingConfiguration.FunctionArn)
+ d.Set("last_modified", eventSourceMappingConfiguration.LastModified)
+ d.Set("last_processing_result", eventSourceMappingConfiguration.LastProcessingResult)
+ d.Set("state", eventSourceMappingConfiguration.State)
+ d.Set("state_transition_reason", eventSourceMappingConfiguration.StateTransitionReason)
+ d.Set("uuid", eventSourceMappingConfiguration.UUID)
+
+ return nil
+}
+
+// resourceAwsLambdaEventSourceMappingDelete maps to:
+// DeleteEventSourceMapping in the API / SDK
+func resourceAwsLambdaEventSourceMappingDelete(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).lambdaconn
+
+ log.Printf("[INFO] Deleting Lambda event source mapping: %s", d.Id())
+
+ params := &lambda.DeleteEventSourceMappingInput{
+ UUID: aws.String(d.Id()),
+ }
+
+ _, err := conn.DeleteEventSourceMapping(params)
+ if err != nil {
+ return fmt.Errorf("Error deleting Lambda event source mapping: %s", err)
+ }
+
+ d.SetId("")
+
+ return nil
+}
+
+// resourceAwsLambdaEventSourceMappingUpdate maps to:
+// UpdateEventSourceMapping in the API / SDK
+func resourceAwsLambdaEventSourceMappingUpdate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).lambdaconn
+
+ log.Printf("[DEBUG] Updating Lambda event source mapping: %s", d.Id())
+
+ params := &lambda.UpdateEventSourceMappingInput{
+ UUID: aws.String(d.Id()),
+ BatchSize: aws.Int64(int64(d.Get("batch_size").(int))),
+ FunctionName: aws.String(d.Get("function_name").(string)),
+ Enabled: aws.Bool(d.Get("enabled").(bool)),
+ }
+
+ _, err := conn.UpdateEventSourceMapping(params)
+ if err != nil {
+ return fmt.Errorf("Error updating Lambda event source mapping: %s", err)
+ }
+
+ return resourceAwsLambdaEventSourceMappingRead(d, meta)
+}
diff --git a/builtin/providers/aws/resource_aws_lambda_event_source_mapping_test.go b/builtin/providers/aws/resource_aws_lambda_event_source_mapping_test.go
new file mode 100644
index 0000000000..59fe5b56e6
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_lambda_event_source_mapping_test.go
@@ -0,0 +1,279 @@
+package aws
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/lambda"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSLambdaEventSourceMapping_basic(t *testing.T) {
+ var conf lambda.EventSourceMappingConfiguration
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckLambdaEventSourceMappingDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSLambdaEventSourceMappingConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAwsLambdaEventSourceMappingExists("aws_lambda_event_source_mapping.lambda_event_source_mapping_test", &conf),
+ testAccCheckAWSLambdaEventSourceMappingAttributes(&conf),
+ ),
+ },
+ resource.TestStep{
+ Config: testAccAWSLambdaEventSourceMappingConfigUpdate,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAwsLambdaEventSourceMappingExists("aws_lambda_event_source_mapping.lambda_event_source_mapping_test", &conf),
+ resource.TestCheckResourceAttr("aws_lambda_event_source_mapping.lambda_event_source_mapping_test",
+ "batch_size",
+ strconv.Itoa(200)),
+ resource.TestCheckResourceAttr("aws_lambda_event_source_mapping.lambda_event_source_mapping_test",
+ "enabled",
+ strconv.FormatBool(false)),
+ resource.TestMatchResourceAttr(
+ "aws_lambda_event_source_mapping.lambda_event_source_mapping_test",
+ "function_arn",
+ regexp.MustCompile("example_lambda_name_update$"),
+ ),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckLambdaEventSourceMappingDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).lambdaconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_lambda_event_source_mapping" {
+ continue
+ }
+
+ _, err := conn.GetEventSourceMapping(&lambda.GetEventSourceMappingInput{
+ UUID: aws.String(rs.Primary.ID),
+ })
+
+ if err == nil {
+ return fmt.Errorf("Lambda event source mapping was not deleted")
+ }
+
+ }
+
+ return nil
+
+}
+
+func testAccCheckAwsLambdaEventSourceMappingExists(n string, mapping *lambda.EventSourceMappingConfiguration) resource.TestCheckFunc {
+ // Wait for IAM role
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Lambda event source mapping not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("Lambda event source mapping ID not set")
+ }
+
+ conn := testAccProvider.Meta().(*AWSClient).lambdaconn
+
+ params := &lambda.GetEventSourceMappingInput{
+ UUID: aws.String(rs.Primary.ID),
+ }
+
+ getSourceMappingConfiguration, err := conn.GetEventSourceMapping(params)
+ if err != nil {
+ return err
+ }
+
+ *mapping = *getSourceMappingConfiguration
+
+ return nil
+ }
+}
+
+func testAccCheckAWSLambdaEventSourceMappingAttributes(mapping *lambda.EventSourceMappingConfiguration) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ uuid := *mapping.UUID
+ if uuid == "" {
+ return fmt.Errorf("Could not read Lambda event source mapping's UUID")
+ }
+
+ return nil
+ }
+}
+
+const testAccAWSLambdaEventSourceMappingConfig = `
+resource "aws_iam_role" "iam_for_lambda" {
+ name = "iam_for_lambda"
+ assume_role_policy = <
Date: Mon, 30 Nov 2015 19:43:54 +0100
Subject: [PATCH 085/664] change get -u option to get -update
get -u does not work; the flag's name is -update.
---
website/source/docs/modules/usage.html.markdown | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/docs/modules/usage.html.markdown b/website/source/docs/modules/usage.html.markdown
index 3e7ae2477a..8f98a9d6b3 100644
--- a/website/source/docs/modules/usage.html.markdown
+++ b/website/source/docs/modules/usage.html.markdown
@@ -46,7 +46,7 @@ $ terraform get
This command will download the modules if they haven't been already.
By default, the command will not check for updates, so it is safe (and fast)
-to run multiple times. You can use the `-u` flag to check and download
+to run multiple times. You can use the `-update` flag to check and download
updates.
## Configuration
From 84645bd8b5a71d060563f16855044248b1ea673d Mon Sep 17 00:00:00 2001
From: Sander van Harmelen
Date: Tue, 1 Dec 2015 00:36:33 +0100
Subject: [PATCH 086/664] More tweaks to improve performance
---
builtin/providers/cloudstack/provider.go | 2 +-
.../resource_cloudstack_egress_firewall.go | 20 +-
.../resource_cloudstack_firewall.go | 20 +-
.../resource_cloudstack_network_acl_rule.go | 195 +++++++++++++-----
.../resource_cloudstack_port_forward.go | 70 +++----
builtin/providers/cloudstack/resources.go | 6 +-
6 files changed, 196 insertions(+), 117 deletions(-)
diff --git a/builtin/providers/cloudstack/provider.go b/builtin/providers/cloudstack/provider.go
index c7ce67ff05..ac2f0f5214 100644
--- a/builtin/providers/cloudstack/provider.go
+++ b/builtin/providers/cloudstack/provider.go
@@ -36,7 +36,7 @@ func Provider() terraform.ResourceProvider {
"timeout": &schema.Schema{
Type: schema.TypeInt,
Required: true,
- DefaultFunc: schema.EnvDefaultFunc("CLOUDSTACK_TIMEOUT", 300),
+ DefaultFunc: schema.EnvDefaultFunc("CLOUDSTACK_TIMEOUT", 900),
},
},
diff --git a/builtin/providers/cloudstack/resource_cloudstack_egress_firewall.go b/builtin/providers/cloudstack/resource_cloudstack_egress_firewall.go
index 37979e13f2..a1d73b1676 100644
--- a/builtin/providers/cloudstack/resource_cloudstack_egress_firewall.go
+++ b/builtin/providers/cloudstack/resource_cloudstack_egress_firewall.go
@@ -301,21 +301,17 @@ func resourceCloudStackEgressFirewallRead(d *schema.ResourceData, meta interface
// If this is a managed firewall, add all unknown rules into a single dummy rule
managed := d.Get("managed").(bool)
if managed && len(ruleMap) > 0 {
- // Add all UUIDs to a uuids map
- uuids := make(map[string]interface{}, len(ruleMap))
for uuid := range ruleMap {
- uuids[uuid] = uuid
- }
+ // Make a dummy rule to hold the unknown UUID
+ rule := map[string]interface{}{
+ "source_cidr": uuid,
+ "protocol": uuid,
+ "uuids": map[string]interface{}{uuid: uuid},
+ }
- // Make a dummy rule to hold all unknown UUIDs
- rule := map[string]interface{}{
- "source_cidr": "N/A",
- "protocol": "N/A",
- "uuids": ruleMap,
+ // Add the dummy rule to the rules set
+ rules.Add(rule)
}
-
- // Add the dummy rule to the rules set
- rules.Add(rule)
}
if rules.Len() > 0 {
diff --git a/builtin/providers/cloudstack/resource_cloudstack_firewall.go b/builtin/providers/cloudstack/resource_cloudstack_firewall.go
index 3bcced02e2..c5a8f87638 100644
--- a/builtin/providers/cloudstack/resource_cloudstack_firewall.go
+++ b/builtin/providers/cloudstack/resource_cloudstack_firewall.go
@@ -301,21 +301,17 @@ func resourceCloudStackFirewallRead(d *schema.ResourceData, meta interface{}) er
// If this is a managed firewall, add all unknown rules into a single dummy rule
managed := d.Get("managed").(bool)
if managed && len(ruleMap) > 0 {
- // Add all UUIDs to a uuids map
- uuids := make(map[string]interface{}, len(ruleMap))
for uuid := range ruleMap {
- uuids[uuid] = uuid
- }
+ // Make a dummy rule to hold the unknown UUID
+ rule := map[string]interface{}{
+ "source_cidr": uuid,
+ "protocol": uuid,
+ "uuids": map[string]interface{}{uuid: uuid},
+ }
- // Make a dummy rule to hold all unknown UUIDs
- rule := map[string]interface{}{
- "source_cidr": "N/A",
- "protocol": "N/A",
- "uuids": uuids,
+ // Add the dummy rule to the rules set
+ rules.Add(rule)
}
-
- // Add the dummy rule to the rules set
- rules.Add(rule)
}
if rules.Len() > 0 {
diff --git a/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule.go b/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule.go
index 18446738a1..10c91de696 100644
--- a/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule.go
+++ b/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule.go
@@ -7,7 +7,10 @@ import (
"sort"
"strconv"
"strings"
+ "sync"
+ "time"
+ "github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
"github.com/xanzy/go-cloudstack/cloudstack"
@@ -103,32 +106,72 @@ func resourceCloudStackNetworkACLRuleCreate(d *schema.ResourceData, meta interfa
d.SetId(d.Get("aclid").(string))
// Create all rules that are configured
- if rs := d.Get("rule").(*schema.Set); rs.Len() > 0 {
-
- // Create an empty schema.Set to hold all rules
+ if nrs := d.Get("rule").(*schema.Set); nrs.Len() > 0 {
+ // Create an empty rule set to hold all newly created rules
rules := &schema.Set{
F: resourceCloudStackNetworkACLRuleHash,
}
- for _, rule := range rs.List() {
- // Create a single rule
- err := resourceCloudStackNetworkACLRuleCreateRule(d, meta, rule.(map[string]interface{}))
+ err := resourceCloudStackNetworkACLRuleCreateRules(d, meta, rules, nrs)
- // We need to update this first to preserve the correct state
- rules.Add(rule)
- d.Set("rule", rules)
+ // We need to update this first to preserve the correct state
+ d.Set("rule", rules)
- if err != nil {
- return err
- }
+ if err != nil {
+ return err
}
}
return resourceCloudStackNetworkACLRuleRead(d, meta)
}
+func resourceCloudStackNetworkACLRuleCreateRules(
+ d *schema.ResourceData,
+ meta interface{},
+ rules *schema.Set,
+ nrs *schema.Set) error {
+ var errs *multierror.Error
+
+ var wg sync.WaitGroup
+ wg.Add(nrs.Len())
+
+ sem := make(chan struct{}, 10)
+ for _, rule := range nrs.List() {
+ // Put in a tiny sleep here to avoid DoS'ing the API
+ time.Sleep(500 * time.Millisecond)
+
+ go func(rule map[string]interface{}) {
+ defer wg.Done()
+ sem <- struct{}{}
+
+ // Create a single rule
+ err := resourceCloudStackNetworkACLRuleCreateRule(d, meta, rule)
+
+ // If we have at least one UUID, we need to save the rule
+ if len(rule["uuids"].(map[string]interface{})) > 0 {
+ rules.Add(rule)
+ }
+
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ }
+
+ <-sem
+ }(rule.(map[string]interface{}))
+ }
+
+ wg.Wait()
+
+ // We need to update this first to preserve the correct state
+ d.Set("rule", rules)
+
+ return errs.ErrorOrNil()
+}
+
func resourceCloudStackNetworkACLRuleCreateRule(
- d *schema.ResourceData, meta interface{}, rule map[string]interface{}) error {
+ d *schema.ResourceData,
+ meta interface{},
+ rule map[string]interface{}) error {
cs := meta.(*cloudstack.CloudStackClient)
uuids := rule["uuids"].(map[string]interface{})
@@ -188,8 +231,16 @@ func resourceCloudStackNetworkACLRuleCreateRule(
},
}
+ // Define a regexp for parsing the port
+ re := regexp.MustCompile(`^(\d+)(?:-(\d+))?$`)
+
for _, port := range ps.List() {
- re := regexp.MustCompile(`^(\d+)(?:-(\d+))?$`)
+ if _, ok := uuids[port.(string)]; ok {
+ ports.Add(port)
+ rule["ports"] = ports
+ continue
+ }
+
m := re.FindStringSubmatch(port.(string))
startPort, err := strconv.Atoi(m[1])
@@ -354,20 +405,17 @@ func resourceCloudStackNetworkACLRuleRead(d *schema.ResourceData, meta interface
// If this is a managed firewall, add all unknown rules into a single dummy rule
managed := d.Get("managed").(bool)
if managed && len(ruleMap) > 0 {
- // Add all UUIDs to a uuids map
- uuids := make(map[string]interface{}, len(ruleMap))
for uuid := range ruleMap {
- uuids[uuid] = uuid
- }
+ // Make a dummy rule to hold the unknown UUID
+ rule := map[string]interface{}{
+ "source_cidr": uuid,
+ "protocol": uuid,
+ "uuids": map[string]interface{}{uuid: uuid},
+ }
- rule := map[string]interface{}{
- "source_cidr": "N/A",
- "protocol": "N/A",
- "uuids": uuids,
+ // Add the dummy rule to the rules set
+ rules.Add(rule)
}
-
- // Add the dummy rule to the rules set
- rules.Add(rule)
}
if rules.Len() > 0 {
@@ -391,26 +439,29 @@ func resourceCloudStackNetworkACLRuleUpdate(d *schema.ResourceData, meta interfa
ors := o.(*schema.Set).Difference(n.(*schema.Set))
nrs := n.(*schema.Set).Difference(o.(*schema.Set))
- // Now first loop through all the old rules and delete any obsolete ones
- for _, rule := range ors.List() {
- // Delete the rule as it no longer exists in the config
- err := resourceCloudStackNetworkACLRuleDeleteRule(d, meta, rule.(map[string]interface{}))
+ // We need to start with a rule set containing all the rules we
+ // already have and want to keep. Any rules that are not deleted
+ // correctly and any newly created rules will be added to this
+ // set to make sure we end up in a consistent state
+ rules := o.(*schema.Set).Intersection(n.(*schema.Set))
+
+ // Now first loop through all the old rules and delete them
+ if ors.Len() > 0 {
+ err := resourceCloudStackNetworkACLRuleDeleteRules(d, meta, rules, ors)
+
+ // We need to update this first to preserve the correct state
+ d.Set("rule", rules)
+
if err != nil {
return err
}
}
- // Make sure we save the state of the currently configured rules
- rules := o.(*schema.Set).Intersection(n.(*schema.Set))
- d.Set("rule", rules)
-
- // Then loop through all the currently configured rules and create the new ones
- for _, rule := range nrs.List() {
- // When successfully deleted, re-create it again if it still exists
- err := resourceCloudStackNetworkACLRuleCreateRule(d, meta, rule.(map[string]interface{}))
+ // Then loop through all the new rules and create them
+ if nrs.Len() > 0 {
+ err := resourceCloudStackNetworkACLRuleCreateRules(d, meta, rules, nrs)
// We need to update this first to preserve the correct state
- rules.Add(rule)
d.Set("rule", rules)
if err != nil {
@@ -423,26 +474,71 @@ func resourceCloudStackNetworkACLRuleUpdate(d *schema.ResourceData, meta interfa
}
func resourceCloudStackNetworkACLRuleDelete(d *schema.ResourceData, meta interface{}) error {
+ // Create an empty rule set to hold all rules that were
+ // not deleted correctly
+ rules := &schema.Set{
+ F: resourceCloudStackNetworkACLRuleHash,
+ }
+
// Delete all rules
if rs := d.Get("rule").(*schema.Set); rs.Len() > 0 {
- for _, rule := range rs.List() {
- // Delete a single rule
- err := resourceCloudStackNetworkACLRuleDeleteRule(d, meta, rule.(map[string]interface{}))
+ err := resourceCloudStackNetworkACLRuleDeleteRules(d, meta, rules, rs)
- // We need to update this first to preserve the correct state
- d.Set("rule", rs)
+ // We need to update this first to preserve the correct state
+ d.Set("rule", rules)
- if err != nil {
- return err
- }
+ if err != nil {
+ return err
}
}
return nil
}
+func resourceCloudStackNetworkACLRuleDeleteRules(
+ d *schema.ResourceData,
+ meta interface{},
+ rules *schema.Set,
+ ors *schema.Set) error {
+ var errs *multierror.Error
+
+ var wg sync.WaitGroup
+ wg.Add(ors.Len())
+
+ sem := make(chan struct{}, 10)
+ for _, rule := range ors.List() {
+ // Put a sleep here to avoid DoS'ing the API
+ time.Sleep(500 * time.Millisecond)
+
+ go func(rule map[string]interface{}) {
+ defer wg.Done()
+ sem <- struct{}{}
+
+ // Delete a single rule
+ err := resourceCloudStackNetworkACLRuleDeleteRule(d, meta, rule)
+
+ // If we have at least one UUID, we need to save the rule
+ if len(rule["uuids"].(map[string]interface{})) > 0 {
+ rules.Add(rule)
+ }
+
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ }
+
+ <-sem
+ }(rule.(map[string]interface{}))
+ }
+
+ wg.Wait()
+
+ return errs.ErrorOrNil()
+}
+
func resourceCloudStackNetworkACLRuleDeleteRule(
- d *schema.ResourceData, meta interface{}, rule map[string]interface{}) error {
+ d *schema.ResourceData,
+ meta interface{},
+ rule map[string]interface{}) error {
cs := meta.(*cloudstack.CloudStackClient)
uuids := rule["uuids"].(map[string]interface{})
@@ -463,6 +559,7 @@ func resourceCloudStackNetworkACLRuleDeleteRule(
"Invalid parameter id value=%s due to incorrect long value format, "+
"or entity does not exist", id.(string))) {
delete(uuids, k)
+ rule["uuids"] = uuids
continue
}
@@ -471,11 +568,9 @@ func resourceCloudStackNetworkACLRuleDeleteRule(
// Delete the UUID of this rule
delete(uuids, k)
+ rule["uuids"] = uuids
}
- // Update the UUIDs
- rule["uuids"] = uuids
-
return nil
}
diff --git a/builtin/providers/cloudstack/resource_cloudstack_port_forward.go b/builtin/providers/cloudstack/resource_cloudstack_port_forward.go
index 0bec41af54..e1f8c99fca 100644
--- a/builtin/providers/cloudstack/resource_cloudstack_port_forward.go
+++ b/builtin/providers/cloudstack/resource_cloudstack_port_forward.go
@@ -150,6 +150,22 @@ func resourceCloudStackPortForwardCreateForward(
func resourceCloudStackPortForwardRead(d *schema.ResourceData, meta interface{}) error {
cs := meta.(*cloudstack.CloudStackClient)
+ // Get all the forwards from the running environment
+ p := cs.Firewall.NewListPortForwardingRulesParams()
+ p.SetIpaddressid(d.Id())
+ p.SetListall(true)
+
+ l, err := cs.Firewall.ListPortForwardingRules(p)
+ if err != nil {
+ return err
+ }
+
+ // Make a map of all the forwards so we can easily find a forward
+ forwardMap := make(map[string]*cloudstack.PortForwardingRule, l.Count)
+ for _, f := range l.PortForwardingRules {
+ forwardMap[f.Id] = f
+ }
+
// Create an empty schema.Set to hold all forwards
forwards := &schema.Set{
F: resourceCloudStackPortForwardHash,
@@ -166,36 +182,34 @@ func resourceCloudStackPortForwardRead(d *schema.ResourceData, meta interface{})
}
// Get the forward
- r, count, err := cs.Firewall.GetPortForwardingRuleByID(id.(string))
- // If the count == 0, there is no object found for this ID
- if err != nil {
- if count == 0 {
- forward["uuid"] = ""
- continue
- }
-
- return err
+ f, ok := forwardMap[id.(string)]
+ if !ok {
+ forward["uuid"] = ""
+ continue
}
- privPort, err := strconv.Atoi(r.Privateport)
+ // Delete the known rule so only unknown rules remain in the ruleMap
+ delete(forwardMap, id.(string))
+
+ privPort, err := strconv.Atoi(f.Privateport)
if err != nil {
return err
}
- pubPort, err := strconv.Atoi(r.Publicport)
+ pubPort, err := strconv.Atoi(f.Publicport)
if err != nil {
return err
}
// Update the values
- forward["protocol"] = r.Protocol
+ forward["protocol"] = f.Protocol
forward["private_port"] = privPort
forward["public_port"] = pubPort
if isID(forward["virtual_machine"].(string)) {
- forward["virtual_machine"] = r.Virtualmachineid
+ forward["virtual_machine"] = f.Virtualmachineid
} else {
- forward["virtual_machine"] = r.Virtualmachinename
+ forward["virtual_machine"] = f.Virtualmachinename
}
forwards.Add(forward)
@@ -204,33 +218,11 @@ func resourceCloudStackPortForwardRead(d *schema.ResourceData, meta interface{})
// If this is a managed resource, add all unknown forwards to dummy forwards
managed := d.Get("managed").(bool)
- if managed {
- // Get all the forwards from the running environment
- p := cs.Firewall.NewListPortForwardingRulesParams()
- p.SetIpaddressid(d.Id())
- p.SetListall(true)
-
- r, err := cs.Firewall.ListPortForwardingRules(p)
- if err != nil {
- return err
- }
-
- // Add all UUIDs to the uuids map
- uuids := make(map[string]interface{}, len(r.PortForwardingRules))
- for _, r := range r.PortForwardingRules {
- uuids[r.Id] = r.Id
- }
-
- // Delete all expected UUIDs from the uuids map
- for _, forward := range forwards.List() {
- forward := forward.(map[string]interface{})
- delete(uuids, forward["uuid"].(string))
- }
-
- for uuid := range uuids {
+ if managed && len(forwardMap) > 0 {
+ for uuid := range forwardMap {
// Make a dummy forward to hold the unknown UUID
forward := map[string]interface{}{
- "protocol": "N/A",
+ "protocol": uuid,
"private_port": 0,
"public_port": 0,
"virtual_machine": uuid,
diff --git a/builtin/providers/cloudstack/resources.go b/builtin/providers/cloudstack/resources.go
index f7115e7933..2fe67c6e31 100644
--- a/builtin/providers/cloudstack/resources.go
+++ b/builtin/providers/cloudstack/resources.go
@@ -10,7 +10,7 @@ import (
"github.com/xanzy/go-cloudstack/cloudstack"
)
-// CloudStack uses a "special" ID of -1 to define an unlimited resource
+// UnlimitedResourceID is a "special" ID to define an unlimited resource
const UnlimitedResourceID = "-1"
type retrieveError struct {
@@ -135,8 +135,8 @@ func Retry(n int, f RetryFunc) (interface{}, error) {
for i := 0; i < n; i++ {
r, err := f()
- if err == nil {
- return r, nil
+ if err == nil || err == cloudstack.AsyncTimeoutErr {
+ return r, err
}
lastErr = err
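The CloudStack changes above fan rule creation and deletion out across goroutines: a sync.WaitGroup waits for all of them, a buffered channel acts as a semaphore capping in-flight API calls at 10, a short sleep staggers the requests, and go-multierror collects every failure instead of stopping at the first. A self-contained sketch of that pattern follows; the processRule/processRules names are hypothetical, and the mutex guarding the error list is an addition of this sketch, not something the patch itself does.

    package main

    import (
        "fmt"
        "sync"
        "time"

        "github.com/hashicorp/go-multierror"
    )

    // Stand-in for a single API call such as CreateNetworkACL.
    func processRule(rule map[string]interface{}) error {
        fmt.Println("processing", rule["name"])
        return nil
    }

    func processRules(rules []map[string]interface{}) error {
        var errs *multierror.Error
        var mu sync.Mutex // guards errs when appending from multiple goroutines

        var wg sync.WaitGroup
        wg.Add(len(rules))

        sem := make(chan struct{}, 10) // at most 10 in-flight API calls
        for _, rule := range rules {
            // Stagger the goroutines slightly to avoid hammering the API.
            time.Sleep(500 * time.Millisecond)

            go func(rule map[string]interface{}) {
                defer wg.Done()
                sem <- struct{}{}
                defer func() { <-sem }()

                if err := processRule(rule); err != nil {
                    mu.Lock()
                    errs = multierror.Append(errs, err)
                    mu.Unlock()
                }
            }(rule)
        }

        wg.Wait()
        return errs.ErrorOrNil()
    }

    func main() {
        rules := []map[string]interface{}{{"name": "a"}, {"name": "b"}}
        if err := processRules(rules); err != nil {
            fmt.Println("errors:", err)
        }
    }

The same patch also makes Retry return early when a call ends with cloudstack.AsyncTimeoutErr, so an async job that merely timed out is not blindly retried.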
From 48dd42b8f302cbafe759b0aa23c3bb911540be50 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 1 Dec 2015 08:38:58 -0500
Subject: [PATCH 087/664] Update CHANGELOG.md
---
CHANGELOG.md | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 467c352d26..a8d78fdd22 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,8 +2,9 @@
FEATURES:
- * **New resource: `digitalocean_floating_ip`** [GH-3748]
* **New provider: `statuscake`** [GH-3340]
+ * **New resource: `digitalocean_floating_ip`** [GH-3748]
+ * **New resource: `aws_lambda_event_source_mapping`** [GH-4093]
IMPROVEMENTS:
From 3cbe014e315a320a177329fd9931f943fc7ac288 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 1 Dec 2015 08:47:12 -0500
Subject: [PATCH 088/664] Add missing documentation link for #4093
---
website/source/layouts/aws.erb | 23 +++++++++++------------
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb
index 1a7da9492c..1da203dca2 100644
--- a/website/source/layouts/aws.erb
+++ b/website/source/layouts/aws.erb
@@ -335,18 +335,17 @@
-
- >
- Lambda Resources
-
-
-
+ >
+ Lambda Resources
+
+
>
OpsWorks Resources
From 4a5847f9ea6ed8ca378daf522704711baec51614 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 1 Dec 2015 09:31:20 -0600
Subject: [PATCH 089/664] providers/aws: Vet aws
---
...rce_aws_autoscaling_lifecycle_hook_test.go | 1 -
...ource_aws_autoscaling_notification_test.go | 2 +-
.../resource_aws_autoscaling_policy_test.go | 1 -
.../resource_aws_cloudformation_stack_test.go | 1 -
.../aws/resource_aws_instance_migrate.go | 2 --
.../aws/resource_aws_instance_test.go | 8 ++++----
.../aws/resource_aws_key_pair_migrate.go | 2 --
.../resource_aws_launch_configuration_test.go | 6 +++---
builtin/providers/aws/resource_aws_route.go | 20 ++++++++++---------
...esource_aws_security_group_rule_migrate.go | 2 --
.../aws/resource_aws_vpc_dhcp_options.go | 2 --
11 files changed, 19 insertions(+), 28 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_autoscaling_lifecycle_hook_test.go b/builtin/providers/aws/resource_aws_autoscaling_lifecycle_hook_test.go
index f425570e9c..a32c0b1a1e 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_lifecycle_hook_test.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_lifecycle_hook_test.go
@@ -36,7 +36,6 @@ func testAccCheckLifecycleHookExists(n string, hook *autoscaling.LifecycleHook)
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
- rs = rs
return fmt.Errorf("Not found: %s", n)
}
diff --git a/builtin/providers/aws/resource_aws_autoscaling_notification_test.go b/builtin/providers/aws/resource_aws_autoscaling_notification_test.go
index 81fccfea34..242a9b23c5 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_notification_test.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_notification_test.go
@@ -144,7 +144,7 @@ func testAccCheckASGNDestroy(s *terraform.State) error {
}
if len(resp.NotificationConfigurations) != 0 {
- fmt.Errorf("Error finding notification descriptions")
+ return fmt.Errorf("Error finding notification descriptions")
}
}
diff --git a/builtin/providers/aws/resource_aws_autoscaling_policy_test.go b/builtin/providers/aws/resource_aws_autoscaling_policy_test.go
index 0a7aeff916..6d402de85a 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_policy_test.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_policy_test.go
@@ -34,7 +34,6 @@ func testAccCheckScalingPolicyExists(n string, policy *autoscaling.ScalingPolicy
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
- rs = rs
return fmt.Errorf("Not found: %s", n)
}
diff --git a/builtin/providers/aws/resource_aws_cloudformation_stack_test.go b/builtin/providers/aws/resource_aws_cloudformation_stack_test.go
index 7ad24be344..0c99f8d54d 100644
--- a/builtin/providers/aws/resource_aws_cloudformation_stack_test.go
+++ b/builtin/providers/aws/resource_aws_cloudformation_stack_test.go
@@ -68,7 +68,6 @@ func testAccCheckCloudFormationStackExists(n string, stack *cloudformation.Stack
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
- rs = rs
return fmt.Errorf("Not found: %s", n)
}
diff --git a/builtin/providers/aws/resource_aws_instance_migrate.go b/builtin/providers/aws/resource_aws_instance_migrate.go
index 5d7075f759..c2ae3aaf2e 100644
--- a/builtin/providers/aws/resource_aws_instance_migrate.go
+++ b/builtin/providers/aws/resource_aws_instance_migrate.go
@@ -19,8 +19,6 @@ func resourceAwsInstanceMigrateState(
default:
return is, fmt.Errorf("Unexpected schema version: %d", v)
}
-
- return is, nil
}
func migrateStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
diff --git a/builtin/providers/aws/resource_aws_instance_test.go b/builtin/providers/aws/resource_aws_instance_test.go
index 3224f9b5e1..23c1e7b380 100644
--- a/builtin/providers/aws/resource_aws_instance_test.go
+++ b/builtin/providers/aws/resource_aws_instance_test.go
@@ -112,22 +112,22 @@ func TestAccAWSInstance_blockDevices(t *testing.T) {
// Check if the root block device exists.
if _, ok := blockDevices["/dev/sda1"]; !ok {
- fmt.Errorf("block device doesn't exist: /dev/sda1")
+ return fmt.Errorf("block device doesn't exist: /dev/sda1")
}
// Check if the secondary block device exists.
if _, ok := blockDevices["/dev/sdb"]; !ok {
- fmt.Errorf("block device doesn't exist: /dev/sdb")
+ return fmt.Errorf("block device doesn't exist: /dev/sdb")
}
// Check if the third block device exists.
if _, ok := blockDevices["/dev/sdc"]; !ok {
- fmt.Errorf("block device doesn't exist: /dev/sdc")
+ return fmt.Errorf("block device doesn't exist: /dev/sdc")
}
// Check if the encrypted block device exists
if _, ok := blockDevices["/dev/sdd"]; !ok {
- fmt.Errorf("block device doesn't exist: /dev/sdd")
+ return fmt.Errorf("block device doesn't exist: /dev/sdd")
}
return nil
diff --git a/builtin/providers/aws/resource_aws_key_pair_migrate.go b/builtin/providers/aws/resource_aws_key_pair_migrate.go
index 0d56123aab..c937ac360f 100644
--- a/builtin/providers/aws/resource_aws_key_pair_migrate.go
+++ b/builtin/providers/aws/resource_aws_key_pair_migrate.go
@@ -17,8 +17,6 @@ func resourceAwsKeyPairMigrateState(
default:
return is, fmt.Errorf("Unexpected schema version: %d", v)
}
-
- return is, nil
}
func migrateKeyPairStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
diff --git a/builtin/providers/aws/resource_aws_launch_configuration_test.go b/builtin/providers/aws/resource_aws_launch_configuration_test.go
index c6a0086a14..1e914c86df 100644
--- a/builtin/providers/aws/resource_aws_launch_configuration_test.go
+++ b/builtin/providers/aws/resource_aws_launch_configuration_test.go
@@ -162,17 +162,17 @@ func testAccCheckAWSLaunchConfigurationAttributes(conf *autoscaling.LaunchConfig
// Check if the root block device exists.
if _, ok := blockDevices["/dev/sda1"]; !ok {
- fmt.Errorf("block device doesn't exist: /dev/sda1")
+ return fmt.Errorf("block device doesn't exist: /dev/sda1")
}
// Check if the secondary block device exists.
if _, ok := blockDevices["/dev/sdb"]; !ok {
- fmt.Errorf("block device doesn't exist: /dev/sdb")
+ return fmt.Errorf("block device doesn't exist: /dev/sdb")
}
// Check if the third block device exists.
if _, ok := blockDevices["/dev/sdc"]; !ok {
- fmt.Errorf("block device doesn't exist: /dev/sdc")
+ return fmt.Errorf("block device doesn't exist: /dev/sdc")
}
// Check if the secondary block device exists.
diff --git a/builtin/providers/aws/resource_aws_route.go b/builtin/providers/aws/resource_aws_route.go
index 60c666ecde..3d6f5d25bb 100644
--- a/builtin/providers/aws/resource_aws_route.go
+++ b/builtin/providers/aws/resource_aws_route.go
@@ -1,6 +1,7 @@
package aws
import (
+ "errors"
"fmt"
"log"
@@ -10,6 +11,11 @@ import (
"github.com/hashicorp/terraform/helper/schema"
)
+// routeTargetValidationError is returned when more than one route target is specified
+var routeTargetValidationError = errors.New("Error: more than 1 target specified. Only 1 of gateway_id, " +
+ "instance_id, network_interface_id, route_table_id or " +
+ "vpc_peering_connection_id is allowed.")
+
// AWS Route resource Schema declaration
func resourceAwsRoute() *schema.Resource {
return &schema.Resource{
@@ -94,9 +100,7 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error {
}
if numTargets > 1 {
- fmt.Errorf("Error: more than 1 target specified. Only 1 of gateway_id" +
- "instance_id, network_interface_id, route_table_id or" +
- "vpc_peering_connection_id is allowed.")
+ return routeTargetValidationError
}
createOpts := &ec2.CreateRouteInput{}
@@ -127,7 +131,7 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error {
VpcPeeringConnectionId: aws.String(d.Get("vpc_peering_connection_id").(string)),
}
default:
- fmt.Errorf("Error: invalid target type specified.")
+ return fmt.Errorf("Error: invalid target type specified.")
}
log.Printf("[DEBUG] Route create config: %s", createOpts)
@@ -139,7 +143,7 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error {
route, err := findResourceRoute(conn, d.Get("route_table_id").(string), d.Get("destination_cidr_block").(string))
if err != nil {
- fmt.Errorf("Error: %s", err)
+ return err
}
d.SetId(routeIDHash(d, route))
@@ -187,9 +191,7 @@ func resourceAwsRouteUpdate(d *schema.ResourceData, meta interface{}) error {
}
if numTargets > 1 {
- fmt.Errorf("Error: more than 1 target specified. Only 1 of gateway_id" +
- "instance_id, network_interface_id, route_table_id or" +
- "vpc_peering_connection_id is allowed.")
+ return routeTargetValidationError
}
// Formulate ReplaceRouteInput based on the target type
@@ -221,7 +223,7 @@ func resourceAwsRouteUpdate(d *schema.ResourceData, meta interface{}) error {
VpcPeeringConnectionId: aws.String(d.Get("vpc_peering_connection_id").(string)),
}
default:
- fmt.Errorf("Error: invalid target type specified.")
+ return fmt.Errorf("Error: invalid target type specified.")
}
log.Printf("[DEBUG] Route replace config: %s", replaceOpts)
diff --git a/builtin/providers/aws/resource_aws_security_group_rule_migrate.go b/builtin/providers/aws/resource_aws_security_group_rule_migrate.go
index 0b57f3f171..12788054e3 100644
--- a/builtin/providers/aws/resource_aws_security_group_rule_migrate.go
+++ b/builtin/providers/aws/resource_aws_security_group_rule_migrate.go
@@ -26,8 +26,6 @@ func resourceAwsSecurityGroupRuleMigrateState(
default:
return is, fmt.Errorf("Unexpected schema version: %d", v)
}
-
- return is, nil
}
func migrateSGRuleStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
diff --git a/builtin/providers/aws/resource_aws_vpc_dhcp_options.go b/builtin/providers/aws/resource_aws_vpc_dhcp_options.go
index 36b4b1f810..de7b74108a 100644
--- a/builtin/providers/aws/resource_aws_vpc_dhcp_options.go
+++ b/builtin/providers/aws/resource_aws_vpc_dhcp_options.go
@@ -223,8 +223,6 @@ func resourceAwsVpcDhcpOptionsDelete(d *schema.ResourceData, meta interface{}) e
// Any other error, we want to quit the retry loop immediately
return resource.RetryError{Err: err}
}
-
- return nil
})
}
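The vet fixes above fall into three buckets: self-assignments like `rs = rs`, `return` statements that are unreachable because every branch of the preceding `switch` already returns, and, most importantly, `fmt.Errorf` calls whose result was built and then silently discarded. A small sketch of that last case, with hypothetical names; go vet reports the first form because the constructed error is never used, while the second actually propagates it to the caller.

    package main

    import "fmt"

    func checkBlockDeviceBroken(blockDevices map[string]bool) error {
        if _, ok := blockDevices["/dev/sda1"]; !ok {
            fmt.Errorf("block device doesn't exist: /dev/sda1") // result ignored
        }
        return nil // always nil, even when the device is missing
    }

    func checkBlockDeviceFixed(blockDevices map[string]bool) error {
        if _, ok := blockDevices["/dev/sda1"]; !ok {
            return fmt.Errorf("block device doesn't exist: /dev/sda1")
        }
        return nil
    }

    func main() {
        fmt.Println(checkBlockDeviceBroken(map[string]bool{})) // <nil>
        fmt.Println(checkBlockDeviceFixed(map[string]bool{}))  // the error
    }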
From d90eb2d88e4d1ca74698f5deeff9b4bed33ca25b Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 1 Dec 2015 10:31:05 -0600
Subject: [PATCH 090/664] config: test replicating #4079
Should help guard Terraform against regressions once
https://github.com/hashicorp/hcl/pull/70 lands.
---
config/loader_test.go | 5 +++++
config/test-fixtures/heredoc.tf | 12 ++++++++++++
2 files changed, 17 insertions(+)
diff --git a/config/loader_test.go b/config/loader_test.go
index 19745adaf6..0ab1234607 100644
--- a/config/loader_test.go
+++ b/config/loader_test.go
@@ -685,6 +685,11 @@ aws_iam_policy[policy] (x1)
name
path
policy
+aws_instance[heredocwithnumbers] (x1)
+ ami
+ provisioners
+ local-exec
+ command
aws_instance[test] (x1)
ami
provisioners
diff --git a/config/test-fixtures/heredoc.tf b/config/test-fixtures/heredoc.tf
index 323d1d4e06..c43fd08106 100644
--- a/config/test-fixtures/heredoc.tf
+++ b/config/test-fixtures/heredoc.tf
@@ -37,3 +37,15 @@ EOT
]
}
}
+
+resource "aws_instance" "heredocwithnumbers" {
+ ami = "foo"
+
+ provisioner "local-exec" {
+ command = <
Date: Tue, 1 Dec 2015 12:23:09 -0500
Subject: [PATCH 091/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a8d78fdd22..6f5caa3067 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,7 @@ IMPROVEMENTS:
BUG FIXES:
* core: Fix a bug which prevented HEREDOC syntax being used in lists [GH-4078]
+ * core: Fix a bug which prevented HEREDOC syntax where the anchor ends in a number [GH-4128]
* provider/aws: Fix a bug which could result in a panic when reading EC2 metadata [GH-4024]
* provider/aws: Fix issue recreating security group rule if it has been destroyed [GH-4050]
* provider/aws: Fix issue where SPF records in Route 53 could show differences with no modification to the configuration [GH-4108]
From 5ea25363a19def7f6784b3b9614d9eda6592ef6f Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 1 Dec 2015 13:26:38 -0500
Subject: [PATCH 092/664] Add regression test for #4069
This may be brittle as it makes use of .gitattributes to override the
autocrlf setting in order to have an input file with Windows line
endings across multiple platforms.
---
config/loader_test.go | 35 ++++++++++++++++++++
config/test-fixtures/.gitattributes | 1 +
config/test-fixtures/windows-line-endings.tf | 6 ++++
3 files changed, 42 insertions(+)
create mode 100644 config/test-fixtures/.gitattributes
create mode 100644 config/test-fixtures/windows-line-endings.tf
diff --git a/config/loader_test.go b/config/loader_test.go
index 0ab1234607..4c291f6e98 100644
--- a/config/loader_test.go
+++ b/config/loader_test.go
@@ -45,6 +45,36 @@ func TestLoadFile_badType(t *testing.T) {
}
}
+func TestLoadFileWindowsLineEndings(t *testing.T) {
+ testFile := filepath.Join(fixtureDir, "windows-line-endings.tf")
+
+ contents, err := ioutil.ReadFile(testFile)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if !strings.Contains(string(contents), "\r\n") {
+ t.Fatalf("Windows line endings test file %s contains no windows line endings - this may be an autocrlf related issue.", testFile)
+ }
+
+ c, err := LoadFile(testFile)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if c == nil {
+ t.Fatal("config should not be nil")
+ }
+
+ if c.Dir != "" {
+ t.Fatalf("bad: %#v", c.Dir)
+ }
+
+ actual := resourcesStr(c.Resources)
+ if actual != strings.TrimSpace(windowsHeredocResourcesStr) {
+ t.Fatalf("bad:\n%s", actual)
+ }
+}
+
func TestLoadFileHeredoc(t *testing.T) {
c, err := LoadFile(filepath.Join(fixtureDir, "heredoc.tf"))
if err != nil {
@@ -673,6 +703,11 @@ cloudstack_firewall[test] (x1)
rule
`
+const windowsHeredocResourcesStr = `
+aws_instance[test] (x1)
+ user_data
+`
+
const heredocProvidersStr = `
aws
access_key
diff --git a/config/test-fixtures/.gitattributes b/config/test-fixtures/.gitattributes
new file mode 100644
index 0000000000..23c56cad51
--- /dev/null
+++ b/config/test-fixtures/.gitattributes
@@ -0,0 +1 @@
+windows-line-endings.tf eol=crlf
diff --git a/config/test-fixtures/windows-line-endings.tf b/config/test-fixtures/windows-line-endings.tf
new file mode 100644
index 0000000000..b3fce5e829
--- /dev/null
+++ b/config/test-fixtures/windows-line-endings.tf
@@ -0,0 +1,6 @@
+// This is a comment
+resource "aws_instance" "test" {
+ user_data = <
Date: Tue, 1 Dec 2015 14:58:59 -0600
Subject: [PATCH 093/664] provider/aws: Check for nil on some spot instance
attributes
---
.../providers/aws/resource_aws_spot_instance_request.go | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_spot_instance_request.go b/builtin/providers/aws/resource_aws_spot_instance_request.go
index 1369c972e8..400f7c2b78 100644
--- a/builtin/providers/aws/resource_aws_spot_instance_request.go
+++ b/builtin/providers/aws/resource_aws_spot_instance_request.go
@@ -194,8 +194,13 @@ func resourceAwsSpotInstanceRequestRead(d *schema.ResourceData, meta interface{}
return fmt.Errorf("[ERR] Error reading Spot Instance Data: %s", err)
}
}
- d.Set("spot_request_state", *request.State)
- d.Set("block_duration_minutes", *request.BlockDurationMinutes)
+
+ if request.State != nil {
+ d.Set("spot_request_state", *request.State)
+ }
+ if request.BlockDurationMinutes != nil {
+ d.Set("block_duration_minutes", *request.BlockDurationMinutes)
+ }
d.Set("tags", tagsToMap(request.Tags))
return nil
From 8142b67267dea785228ab03a821f136b049682fa Mon Sep 17 00:00:00 2001
From: Clint
Date: Tue, 1 Dec 2015 15:11:13 -0600
Subject: [PATCH 094/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6f5caa3067..17ee8a4665 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,7 @@ BUG FIXES:
* core: Fix a bug which prevented HEREDOC syntax where the anchor ends in a number [GH-4128]
* provider/aws: Fix a bug which could result in a panic when reading EC2 metadata [GH-4024]
* provider/aws: Fix issue recreating security group rule if it has been destroyed [GH-4050]
+ * provider/aws: Fix issue with some attributes in Spot Instance Requests returning as nil [GH-4132]
* provider/aws: Fix issue where SPF records in Route 53 could show differences with no modification to the configuration [GH-4108]
* provisioner/chef: Fix issue with path separators breaking the Chef provisioner on Windows [GH-4041]
From 50bb0e6b4ab691d3b96f0c85cdf7f1787bd26601 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 1 Dec 2015 16:16:23 -0500
Subject: [PATCH 095/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 17ee8a4665..d707144874 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -18,6 +18,7 @@ BUG FIXES:
* core: Fix a bug which prevented HEREDOC syntax being used in lists [GH-4078]
* core: Fix a bug which prevented HEREDOC syntax where the anchor ends in a number [GH-4128]
+ * core: Fix a bug which prevented HEREDOC syntax being used with Windows line endings [GH-4069]
* provider/aws: Fix a bug which could result in a panic when reading EC2 metadata [GH-4024]
* provider/aws: Fix issue recreating security group rule if it has been destroyed [GH-4050]
* provider/aws: Fix issue with some attributes in Spot Instance Requests returning as nil [GH-4132]
From be07e4c0c92cc18bc7e805d6160cd30c91a7a465 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 1 Dec 2015 15:25:53 -0600
Subject: [PATCH 096/664] cleanup spot instance attribute setting
---
.../providers/aws/resource_aws_spot_instance_request.go | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_spot_instance_request.go b/builtin/providers/aws/resource_aws_spot_instance_request.go
index 400f7c2b78..256730cfdc 100644
--- a/builtin/providers/aws/resource_aws_spot_instance_request.go
+++ b/builtin/providers/aws/resource_aws_spot_instance_request.go
@@ -195,12 +195,8 @@ func resourceAwsSpotInstanceRequestRead(d *schema.ResourceData, meta interface{}
}
}
- if request.State != nil {
- d.Set("spot_request_state", *request.State)
- }
- if request.BlockDurationMinutes != nil {
- d.Set("block_duration_minutes", *request.BlockDurationMinutes)
- }
+ d.Set("spot_request_state", request.State)
+ d.Set("block_duration_minutes", request.BlockDurationMinutes)
d.Set("tags", tagsToMap(request.Tags))
return nil
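Patches 093 and 096 are two takes on the same problem: the AWS SDK returns pointer fields that can be nil, and dereferencing one blindly panics. 093 guards every dereference; 096 simplifies by handing the pointer straight to d.Set, which assumes the helper/schema field writer copes with a nil pointer. A sketch contrasting the two shapes, with a hypothetical setAttr standing in for d.Set and that assumption labelled in the comments:

    package main

    import "fmt"

    type spotRequest struct {
        State                *string
        BlockDurationMinutes *int64
    }

    // Stand-in for schema.ResourceData.Set; the assumption here is that the
    // real Set tolerates a nil pointer and stores the zero value.
    func setAttr(key string, value interface{}) {
        fmt.Printf("set %s = %v\n", key, value)
    }

    // Style of patch 093: guard every dereference explicitly.
    func readGuarded(req *spotRequest) {
        if req.State != nil {
            setAttr("spot_request_state", *req.State)
        }
        if req.BlockDurationMinutes != nil {
            setAttr("block_duration_minutes", *req.BlockDurationMinutes)
        }
    }

    // Style of patch 096: pass the pointer through and let the setter cope.
    func readDirect(req *spotRequest) {
        setAttr("spot_request_state", req.State)
        setAttr("block_duration_minutes", req.BlockDurationMinutes)
    }

    func main() {
        req := &spotRequest{} // both fields nil, as the API sometimes returns
        readGuarded(req)      // sets nothing
        readDirect(req)       // passes nil pointers to the setter
    }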
From f1f214f03df27c1a790980b6f050fe92a7a547bf Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 2 Dec 2015 15:06:55 +0000
Subject: [PATCH 097/664] v0.6.8
---
CHANGELOG.md | 2 +-
deps/v0-6-8.json | 529 +++++++++++++++++++++++++++++++++++++++++++
terraform/version.go | 2 +-
website/config.rb | 2 +-
4 files changed, 532 insertions(+), 3 deletions(-)
create mode 100644 deps/v0-6-8.json
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d707144874..e316bb3036 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,4 @@
-## 0.6.8 (Unreleased)
+## 0.6.8 (December 2, 2015)
FEATURES:
diff --git a/deps/v0-6-8.json b/deps/v0-6-8.json
new file mode 100644
index 0000000000..91f7356683
--- /dev/null
+++ b/deps/v0-6-8.json
@@ -0,0 +1,529 @@
+{
+ "ImportPath": "github.com/hashicorp/terraform",
+ "GoVersion": "go1.5.1",
+ "Packages": [
+ "./..."
+ ],
+ "Deps": [
+ {
+ "ImportPath": "github.com/Azure/azure-sdk-for-go/core/http",
+ "Comment": "v1.2-277-g8484320",
+ "Rev": "84843207ea0c77c8c8aecbe2e16ac77caa8ce9cc"
+ },
+ {
+ "ImportPath": "github.com/Azure/azure-sdk-for-go/core/tls",
+ "Comment": "v1.2-277-g8484320",
+ "Rev": "84843207ea0c77c8c8aecbe2e16ac77caa8ce9cc"
+ },
+ {
+ "ImportPath": "github.com/Azure/azure-sdk-for-go/management",
+ "Comment": "v1.2-277-g8484320",
+ "Rev": "84843207ea0c77c8c8aecbe2e16ac77caa8ce9cc"
+ },
+ {
+ "ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
+ "Comment": "v1.2-277-g8484320",
+ "Rev": "84843207ea0c77c8c8aecbe2e16ac77caa8ce9cc"
+ },
+ {
+ "ImportPath": "github.com/DreamItGetIT/statuscake",
+ "Rev": "8cbe86575f00210a6df2c19cb2f59b00cd181de3"
+ },
+ {
+ "ImportPath": "github.com/apparentlymart/go-cidr/cidr",
+ "Rev": "a3ebdb999b831ecb6ab8a226e31b07b2b9061c47"
+ },
+ {
+ "ImportPath": "github.com/apparentlymart/go-rundeck-api/rundeck",
+ "Comment": "v0.0.1",
+ "Rev": "cddcfbabbe903e9c8df35ff9569dbb8d67789200"
+ },
+ {
+ "ImportPath": "github.com/armon/circbuf",
+ "Rev": "bbbad097214e2918d8543d5201d12bfd7bca254d"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/aws",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/private/endpoints",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/ec2query",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restjson",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/private/waiter",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/autoscaling",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/cloudformation",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/cloudtrail",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatch",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatchlogs",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/codecommit",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/codedeploy",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/directoryservice",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/dynamodb",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/ec2",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/ecs",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/efs",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/elasticache",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/elasticsearchservice",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/elb",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/firehose",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/glacier",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/iam",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/kinesis",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/lambda",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/opsworks",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/rds",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/route53",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/s3",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/sns",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/sqs",
+ "Comment": "v1.0.2-4-geac6a33",
+ "Rev": "eac6a331d353c78ab5815fc6a59c1ffe8e92afba"
+ },
+ {
+ "ImportPath": "github.com/coreos/etcd/client",
+ "Comment": "v2.3.0-alpha.0-125-gdd733ca",
+ "Rev": "dd733ca51d5f4c60def1403739b5701a7a7751c4"
+ },
+ {
+ "ImportPath": "github.com/coreos/etcd/pkg/pathutil",
+ "Comment": "v2.3.0-alpha.0-125-gdd733ca",
+ "Rev": "dd733ca51d5f4c60def1403739b5701a7a7751c4"
+ },
+ {
+ "ImportPath": "github.com/coreos/etcd/pkg/types",
+ "Comment": "v2.3.0-alpha.0-125-gdd733ca",
+ "Rev": "dd733ca51d5f4c60def1403739b5701a7a7751c4"
+ },
+ {
+ "ImportPath": "github.com/cyberdelia/heroku-go/v3",
+ "Rev": "8344c6a3e281a99a693f5b71186249a8620eeb6b"
+ },
+ {
+ "ImportPath": "github.com/digitalocean/godo",
+ "Comment": "v0.9.0-12-gccd7d9b",
+ "Rev": "ccd7d9b6bbf2361014a8334ad3c9280b88299ef9"
+ },
+ {
+ "ImportPath": "github.com/dylanmei/iso8601",
+ "Rev": "2075bf119b58e5576c6ed9f867b8f3d17f2e54d4"
+ },
+ {
+ "ImportPath": "github.com/dylanmei/winrmtest",
+ "Rev": "3e9661c52c45dab9a8528966a23d421922fca9b9"
+ },
+ {
+ "ImportPath": "github.com/fsouza/go-dockerclient",
+ "Rev": "dc4295a98977ab5b1983051bc169b784c4b423df"
+ },
+ {
+ "ImportPath": "github.com/go-ini/ini",
+ "Comment": "v0-56-g03e0e7d",
+ "Rev": "03e0e7d51a13a91c765d8d0161246bc14a38001a"
+ },
+ {
+ "ImportPath": "github.com/google/go-querystring/query",
+ "Rev": "2a60fc2ba6c19de80291203597d752e9ba58e4c0"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/atlas-go/archive",
+ "Comment": "20141209094003-81-g6c9afe8",
+ "Rev": "6c9afe8bb88099b424db07dea18f434371de8199"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/atlas-go/v1",
+ "Comment": "20141209094003-81-g6c9afe8",
+ "Rev": "6c9afe8bb88099b424db07dea18f434371de8199"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/consul/api",
+ "Comment": "v0.6.0-rc2-29-g6db8acc",
+ "Rev": "6db8acc6585e318168e2bf3c886d49a28a75d114"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/errwrap",
+ "Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/go-checkpoint",
+ "Rev": "e4b2dc34c0f698ee04750bf2035d8b9384233e1b"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/go-cleanhttp",
+ "Rev": "5df5ddc69534f1a4697289f1dca2193fbb40213f"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/go-getter",
+ "Rev": "c5e245982bdb4708f89578c8e0054d82b5197401"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/go-multierror",
+ "Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/go-version",
+ "Rev": "2b9865f60ce11e527bd1255ba82036d465570aa3"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/hcl",
+ "Rev": "c40ec20b1285f01e9e75ec39f2bf2cff132891d3"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/logutils",
+ "Rev": "0dc08b1671f34c4250ce212759ebd880f743d883"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/serf/coordinate",
+ "Comment": "v0.6.4-145-ga72c045",
+ "Rev": "a72c0453da2ba628a013e98bf323a76be4aa1443"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/yamux",
+ "Rev": "df949784da9ed028ee76df44652e42d37a09d7e4"
+ },
+ {
+ "ImportPath": "github.com/imdario/mergo",
+ "Comment": "0.2.0-8-gbb554f9",
+ "Rev": "bb554f9fd6ee4cd190eef868de608ced813aeda1"
+ },
+ {
+ "ImportPath": "github.com/jmespath/go-jmespath",
+ "Comment": "0.2.2",
+ "Rev": "3433f3ea46d9f8019119e7dd41274e112a2359a9"
+ },
+ {
+ "ImportPath": "github.com/kardianos/osext",
+ "Rev": "10da29423eb9a6269092eebdc2be32209612d9d2"
+ },
+ {
+ "ImportPath": "github.com/masterzen/simplexml/dom",
+ "Rev": "95ba30457eb1121fa27753627c774c7cd4e90083"
+ },
+ {
+ "ImportPath": "github.com/masterzen/winrm/soap",
+ "Rev": "06208eee5d76e4a422494e25629cefec42b9b3ac"
+ },
+ {
+ "ImportPath": "github.com/masterzen/winrm/winrm",
+ "Rev": "06208eee5d76e4a422494e25629cefec42b9b3ac"
+ },
+ {
+ "ImportPath": "github.com/masterzen/xmlpath",
+ "Rev": "13f4951698adc0fa9c1dda3e275d489a24201161"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/cli",
+ "Rev": "8102d0ed5ea2709ade1243798785888175f6e415"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/colorstring",
+ "Rev": "8631ce90f28644f54aeedcb3e389a85174e067d1"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/copystructure",
+ "Rev": "6fc66267e9da7d155a9d3bd489e00dad02666dc6"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/go-homedir",
+ "Rev": "d682a8f0cf139663a984ff12528da460ca963de9"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/go-linereader",
+ "Rev": "07bab5fdd9580500aea6ada0e09df4aa28e68abd"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/mapstructure",
+ "Rev": "281073eb9eb092240d33ef253c404f1cca550309"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/packer/common/uuid",
+ "Comment": "v0.8.6-230-g400d1e5",
+ "Rev": "400d1e560009fac403a776532549841e40f3a4b8"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/panicwrap",
+ "Rev": "89dc8accc8fec9dfa9b8e1ffdd6793265253de16"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/prefixedio",
+ "Rev": "89d9b535996bf0a185f85b59578f2e245f9e1724"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/reflectwalk",
+ "Rev": "eecf4c70c626c7cfbb95c90195bc34d386c74ac6"
+ },
+ {
+ "ImportPath": "github.com/nesv/go-dynect/dynect",
+ "Comment": "v0.2.0-8-g841842b",
+ "Rev": "841842b16b39cf2b5007278956976d7d909bd98b"
+ },
+ {
+ "ImportPath": "github.com/nu7hatch/gouuid",
+ "Rev": "179d4d0c4d8d407a32af483c2354df1d2c91e6c3"
+ },
+ {
+ "ImportPath": "github.com/packer-community/winrmcp/winrmcp",
+ "Rev": "3d184cea22ee1c41ec1697e0d830ff0c78f7ea97"
+ },
+ {
+ "ImportPath": "github.com/packethost/packngo",
+ "Rev": "f03d7dc788a8b57b62d301ccb98c950c325756f8"
+ },
+ {
+ "ImportPath": "github.com/pborman/uuid",
+ "Rev": "cccd189d45f7ac3368a0d127efb7f4d08ae0b655"
+ },
+ {
+ "ImportPath": "github.com/pearkes/cloudflare",
+ "Rev": "3d4cd12a4c3a7fc29b338b774f7f8b7e3d5afc2e"
+ },
+ {
+ "ImportPath": "github.com/pearkes/dnsimple",
+ "Rev": "78996265f576c7580ff75d0cb2c606a61883ceb8"
+ },
+ {
+ "ImportPath": "github.com/pearkes/mailgun",
+ "Rev": "b88605989c4141d22a6d874f78800399e5bb7ac2"
+ },
+ {
+ "ImportPath": "github.com/rackspace/gophercloud",
+ "Comment": "v1.0.0-757-g761cff8",
+ "Rev": "761cff8afb6a8e7f42c5554a90dae72f341bb481"
+ },
+ {
+ "ImportPath": "github.com/satori/go.uuid",
+ "Rev": "d41af8bb6a7704f00bc3b7cba9355ae6a5a80048"
+ },
+ {
+ "ImportPath": "github.com/soniah/dnsmadeeasy",
+ "Comment": "v1.1-2-g5578a8c",
+ "Rev": "5578a8c15e33958c61cf7db720b6181af65f4a9e"
+ },
+ {
+ "ImportPath": "github.com/tent/http-link-go",
+ "Rev": "ac974c61c2f990f4115b119354b5e0b47550e888"
+ },
+ {
+ "ImportPath": "github.com/ugorji/go/codec",
+ "Rev": "357a44b2b13e2711a45e30016508134101477610"
+ },
+ {
+ "ImportPath": "github.com/vmware/govmomi",
+ "Comment": "v0.2.0-109-g699ac63",
+ "Rev": "699ac6397b74781d2d6519ad2ae408298075e205"
+ },
+ {
+ "ImportPath": "github.com/xanzy/go-cloudstack/cloudstack",
+ "Comment": "v1.2.0-50-g104168f",
+ "Rev": "104168fa792713f5e04b76e2862779dc2ad85bcc"
+ },
+ {
+ "ImportPath": "golang.org/x/crypto/curve25519",
+ "Rev": "7b85b097bf7527677d54d3220065e966a0e3b613"
+ },
+ {
+ "ImportPath": "golang.org/x/crypto/pkcs12",
+ "Rev": "7b85b097bf7527677d54d3220065e966a0e3b613"
+ },
+ {
+ "ImportPath": "golang.org/x/crypto/ssh",
+ "Rev": "7b85b097bf7527677d54d3220065e966a0e3b613"
+ },
+ {
+ "ImportPath": "golang.org/x/net/context",
+ "Rev": "195180cfebf7362bd243a52477697895128c8777"
+ },
+ {
+ "ImportPath": "golang.org/x/oauth2",
+ "Rev": "442624c9ec9243441e83b374a9e22ac549b5c51d"
+ },
+ {
+ "ImportPath": "google.golang.org/api/compute/v1",
+ "Rev": "ece7143efeb53ec1839b960a0849db4e57d3cfa2"
+ },
+ {
+ "ImportPath": "google.golang.org/api/container/v1",
+ "Rev": "ece7143efeb53ec1839b960a0849db4e57d3cfa2"
+ },
+ {
+ "ImportPath": "google.golang.org/api/dns/v1",
+ "Rev": "ece7143efeb53ec1839b960a0849db4e57d3cfa2"
+ },
+ {
+ "ImportPath": "google.golang.org/api/gensupport",
+ "Rev": "ece7143efeb53ec1839b960a0849db4e57d3cfa2"
+ },
+ {
+ "ImportPath": "google.golang.org/api/googleapi",
+ "Rev": "ece7143efeb53ec1839b960a0849db4e57d3cfa2"
+ },
+ {
+ "ImportPath": "google.golang.org/api/sqladmin/v1beta4",
+ "Rev": "ece7143efeb53ec1839b960a0849db4e57d3cfa2"
+ },
+ {
+ "ImportPath": "google.golang.org/api/storage/v1",
+ "Rev": "ece7143efeb53ec1839b960a0849db4e57d3cfa2"
+ },
+ {
+ "ImportPath": "google.golang.org/cloud/compute/metadata",
+ "Rev": "2375e186ca77be721a7c9c7b13a659738a8511d2"
+ },
+ {
+ "ImportPath": "google.golang.org/cloud/internal",
+ "Rev": "2375e186ca77be721a7c9c7b13a659738a8511d2"
+ }
+ ]
+}
diff --git a/terraform/version.go b/terraform/version.go
index 043fdcf052..c2c835cecc 100644
--- a/terraform/version.go
+++ b/terraform/version.go
@@ -6,4 +6,4 @@ const Version = "0.6.8"
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
// such as "dev" (in development), "beta", "rc1", etc.
-const VersionPrerelease = "dev"
+const VersionPrerelease = ""
diff --git a/website/config.rb b/website/config.rb
index 236bbceb88..fe69229126 100644
--- a/website/config.rb
+++ b/website/config.rb
@@ -2,6 +2,6 @@ set :base_url, "https://www.terraform.io/"
activate :hashicorp do |h|
h.name = "terraform"
- h.version = "0.6.7"
+ h.version = "0.6.8"
h.github_slug = "hashicorp/terraform"
end
From f80655b1cefb03ae7d7bb304be2f28911ecbab3b Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 2 Dec 2015 15:32:07 +0000
Subject: [PATCH 098/664] release: clean up after v0.6.8
---
CHANGELOG.md | 2 ++
terraform/version.go | 4 ++--
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e316bb3036..bec5a0403c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,5 @@
+## 0.6.9 (Unreleased)
+
## 0.6.8 (December 2, 2015)
FEATURES:
diff --git a/terraform/version.go b/terraform/version.go
index c2c835cecc..00c34dccba 100644
--- a/terraform/version.go
+++ b/terraform/version.go
@@ -1,9 +1,9 @@
package terraform
// The main version number that is being run at the moment.
-const Version = "0.6.8"
+const Version = "0.6.9"
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
// such as "dev" (in development), "beta", "rc1", etc.
-const VersionPrerelease = ""
+const VersionPrerelease = "dev"
From 35b18ceb297fef423941ee29dfc34c12bc4a4dd8 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Wed, 2 Dec 2015 10:28:17 -0600
Subject: [PATCH 099/664] providers/aws: Update Security Group docs
---
.../source/docs/providers/aws/r/security_group.html.markdown | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/website/source/docs/providers/aws/r/security_group.html.markdown b/website/source/docs/providers/aws/r/security_group.html.markdown
index ebd21bc732..b045b01f8a 100644
--- a/website/source/docs/providers/aws/r/security_group.html.markdown
+++ b/website/source/docs/providers/aws/r/security_group.html.markdown
@@ -66,7 +66,8 @@ resource "aws_security_group" "allow_all" {
The following arguments are supported:
-* `name` - (Required) The name of the security group
+* `name` - (Optional) The name of the security group. If omitted, Terraform will
+  assign a random, unique name.
* `description` - (Optional) The security group description. Defaults to "Managed by Terraform". Cannot be "".
* `ingress` - (Optional) Can be specified multiple times for each
ingress rule. Each ingress block supports fields documented below.
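For illustration, a minimal configuration relying on the documented behaviour might look like the sketch below (the resource name and rule values are placeholders, not taken from the patch): with `name` omitted, Terraform assigns a random, unique name to the group.

```
resource "aws_security_group" "web" {
  # "name" omitted on purpose: Terraform assigns a random, unique name
  description = "Managed by Terraform"

  ingress {
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
```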
From 69272f3113ef5bd2ab676501efc501c6f1131600 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 1 Dec 2015 15:22:02 -0500
Subject: [PATCH 100/664] provider/aws: error with empty list item on sg
This addresses the case where `compact` has not been used on a list
passed into a security group rule as `cidr_blocks`. Compact is still
the correct answer there, but we should prefer returning an error to
a panic. Fixes #3786.
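As a hedged illustration of the workaround the message refers to (the variable, rule values, and referenced security group are hypothetical), wrapping the list in `compact` strips empty elements before they reach `cidr_blocks`:

```
variable "allowed_cidrs" {
  default = "10.0.0.0/16,,192.168.1.0/24"
}

resource "aws_security_group_rule" "ingress" {
  type              = "ingress"
  from_port         = 443
  to_port           = 443
  protocol          = "tcp"
  security_group_id = "${aws_security_group.example.id}"

  # compact() drops the empty element that would otherwise trigger the
  # error (previously a panic) in expandIPPerm
  cidr_blocks = ["${compact(split(",", var.allowed_cidrs))}"]
}
```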
---
.../aws/resource_aws_security_group_rule.go | 25 ++++++++++++++-----
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_security_group_rule.go b/builtin/providers/aws/resource_aws_security_group_rule.go
index 2a35303c39..d1759dcafa 100644
--- a/builtin/providers/aws/resource_aws_security_group_rule.go
+++ b/builtin/providers/aws/resource_aws_security_group_rule.go
@@ -93,7 +93,10 @@ func resourceAwsSecurityGroupRuleCreate(d *schema.ResourceData, meta interface{}
return err
}
- perm := expandIPPerm(d, sg)
+ perm, err := expandIPPerm(d, sg)
+ if err != nil {
+ return err
+ }
ruleType := d.Get("type").(string)
@@ -171,7 +174,10 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{})
rules = sg.IpPermissionsEgress
}
- p := expandIPPerm(d, sg)
+ p, err := expandIPPerm(d, sg)
+ if err != nil {
+ return err
+ }
if len(rules) == 0 {
log.Printf("[WARN] No %s rules were found for Security Group (%s) looking for Security Group Rule (%s)",
@@ -262,7 +268,10 @@ func resourceAwsSecurityGroupRuleDelete(d *schema.ResourceData, meta interface{}
return err
}
- perm := expandIPPerm(d, sg)
+ perm, err := expandIPPerm(d, sg)
+ if err != nil {
+ return err
+ }
ruleType := d.Get("type").(string)
switch ruleType {
case "ingress":
@@ -383,7 +392,7 @@ func ipPermissionIDHash(sg_id, ruleType string, ip *ec2.IpPermission) string {
return fmt.Sprintf("sgrule-%d", hashcode.String(buf.String()))
}
-func expandIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup) *ec2.IpPermission {
+func expandIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup) (*ec2.IpPermission, error) {
var perm ec2.IpPermission
perm.FromPort = aws.Int64(int64(d.Get("from_port").(int)))
@@ -435,9 +444,13 @@ func expandIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup) *ec2.IpPermissi
list := raw.([]interface{})
perm.IpRanges = make([]*ec2.IpRange, len(list))
for i, v := range list {
- perm.IpRanges[i] = &ec2.IpRange{CidrIp: aws.String(v.(string))}
+ cidrIP, ok := v.(string)
+ if !ok {
+ return nil, fmt.Errorf("empty element found in cidr_blocks - consider using the compact function")
+ }
+ perm.IpRanges[i] = &ec2.IpRange{CidrIp: aws.String(cidrIP)}
}
}
- return &perm
+ return &perm, nil
}
From 9e07b22a7e6bd1220064c99ff4a9108083205b08 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Wed, 2 Dec 2015 12:26:48 -0500
Subject: [PATCH 101/664] Update CHANGELOG.md
---
CHANGELOG.md | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bec5a0403c..e84c8d426a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,17 @@
## 0.6.9 (Unreleased)
+FEATURES:
+
+IMPROVEMENTS:
+
+ * provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
+ * provider/docker: Add support for setting the restart policy on `docker_container` resources [GH-3761]
+ * provider/docker: Add support for setting memory, swap and CPU shares on `docker_container` resources [GH-3761]
+ * provider/docker: Add support for setting labels on `docker_container` resources [GH-3761]
+ * provider/docker: Add support for setting log driver and options on `docker_container` resources [GH-3761]
+
+BUG FIXES:
+
## 0.6.8 (December 2, 2015)
FEATURES:
From 6f7ef2faddb3f8597d163d9d0c2d00b14b22edc0 Mon Sep 17 00:00:00 2001
From: pat
Date: Wed, 28 Oct 2015 10:55:50 -0700
Subject: [PATCH 102/664] golang pubsub SDK has been released. moved
topics/subscriptions to use that
Conflicts:
builtin/providers/google/provider.go
builtin/providers/google/resource_subscription.go
builtin/providers/google/resource_subscription_test.go
file renames and add documentation files
remove typo'd merge and type file move
add to index page as well
only need to define that once
remove topic_computed schema value
I think this was used at one point but is no longer
clean up typo
add a couple more config values:
- ackDeadlineSeconds: number of seconds to wait for an ack
- pushAttributes: attributes of a push subscription
- pushEndpoint: target for a push subscription
rearrange to better match current conventions
respond to all of the comments
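A hedged usage sketch of the two new resources, using the schema fields added in this patch (names and the push endpoint URL are placeholders):

```
resource "google_pubsub_topic" "events" {
  name = "events"
}

resource "google_pubsub_subscription" "events_push" {
  name                 = "events-push"
  topic                = "${google_pubsub_topic.events.name}"
  ack_deadline_seconds = 20

  push_config {
    push_endpoint = "https://example.com/_pubsub/push"

    attributes {
      "x-goog-version" = "v1"
    }
  }
}
```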
---
builtin/providers/google/config.go | 9 ++
builtin/providers/google/provider.go | 2 +
.../google/resource_pubsub_subscription.go | 134 ++++++++++++++++++
.../resource_pubsub_subscription_test.go | 74 ++++++++++
.../providers/google/resource_pubsub_topic.go | 68 +++++++++
.../google/resource_pubsub_topic_test.go | 68 +++++++++
.../r/pubsub_subscription.html.markdown | 39 +++++
.../google/r/pubsub_topic.html.markdown | 35 +++++
website/source/layouts/google.erb | 13 ++
9 files changed, 442 insertions(+)
create mode 100644 builtin/providers/google/resource_pubsub_subscription.go
create mode 100644 builtin/providers/google/resource_pubsub_subscription_test.go
create mode 100644 builtin/providers/google/resource_pubsub_topic.go
create mode 100644 builtin/providers/google/resource_pubsub_topic_test.go
create mode 100644 website/source/docs/providers/google/r/pubsub_subscription.html.markdown
create mode 100644 website/source/docs/providers/google/r/pubsub_topic.html.markdown
diff --git a/builtin/providers/google/config.go b/builtin/providers/google/config.go
index 218fda06f9..5467c6483b 100644
--- a/builtin/providers/google/config.go
+++ b/builtin/providers/google/config.go
@@ -18,6 +18,7 @@ import (
"google.golang.org/api/dns/v1"
"google.golang.org/api/sqladmin/v1beta4"
"google.golang.org/api/storage/v1"
+ "google.golang.org/api/pubsub/v1"
)
// Config is the configuration structure used to instantiate the Google
@@ -32,6 +33,7 @@ type Config struct {
clientDns *dns.Service
clientStorage *storage.Service
clientSqlAdmin *sqladmin.Service
+ clientPubsub *pubsub.Service
}
func (c *Config) loadAndValidate() error {
@@ -128,6 +130,13 @@ func (c *Config) loadAndValidate() error {
}
c.clientSqlAdmin.UserAgent = userAgent
+ log.Printf("[INFO] Instatiating Google Pubsub Client...")
+ c.clientPubsub, err = pubsub.New(client)
+ if err != nil {
+ return err
+ }
+ c.clientPubsub.UserAgent = userAgent
+
return nil
}
diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go
index b2d083bc25..3fa46c7d56 100644
--- a/builtin/providers/google/provider.go
+++ b/builtin/providers/google/provider.go
@@ -70,6 +70,8 @@ func Provider() terraform.ResourceProvider {
"google_dns_record_set": resourceDnsRecordSet(),
"google_sql_database": resourceSqlDatabase(),
"google_sql_database_instance": resourceSqlDatabaseInstance(),
+ "google_pubsub_topic": resourcePubsubTopic(),
+ "google_pubsub_subscription": resourcePubsubSubscription(),
"google_storage_bucket": resourceStorageBucket(),
"google_storage_bucket_acl": resourceStorageBucketAcl(),
"google_storage_bucket_object": resourceStorageBucketObject(),
diff --git a/builtin/providers/google/resource_pubsub_subscription.go b/builtin/providers/google/resource_pubsub_subscription.go
new file mode 100644
index 0000000000..6a1f19da73
--- /dev/null
+++ b/builtin/providers/google/resource_pubsub_subscription.go
@@ -0,0 +1,134 @@
+package google
+
+import (
+ "fmt"
+ "google.golang.org/api/pubsub/v1"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourcePubsubSubscription() *schema.Resource {
+ return &schema.Resource{
+ Create: resourcePubsubSubscriptionCreate,
+ Read: resourcePubsubSubscriptionRead,
+ Delete: resourcePubsubSubscriptionDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "ack_deadline_seconds": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "push_config": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "attributes": &schema.Schema{
+ Type: schema.TypeMap,
+ Optional: true,
+ ForceNew: true,
+ Elem: schema.TypeString,
+ },
+
+ "push_endpoint": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ },
+ },
+
+ "topic": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ },
+ }
+}
+
+func cleanAdditionalArgs(args map[string]interface{}) map[string]string {
+ cleaned_args := make(map[string]string)
+ for k,v := range args {
+ cleaned_args[k] = v.(string)
+ }
+ return cleaned_args
+}
+
+func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+
+ name := fmt.Sprintf("projects/%s/subscriptions/%s", config.Project, d.Get("name").(string))
+ computed_topic_name := fmt.Sprintf("projects/%s/topics/%s", config.Project, d.Get("topic").(string))
+
+ // process optional parameters
+ var ackDeadlineSeconds int64
+ ackDeadlineSeconds = 10
+ if v, ok := d.GetOk("ack_deadline_seconds"); ok {
+ ackDeadlineSeconds = int64(v.(int))
+ }
+
+ var subscription *pubsub.Subscription
+ if v, ok := d.GetOk("push_config"); ok {
+ push_configs := v.([]interface{})
+
+ if len(push_configs) > 1 {
+ return fmt.Errorf("At most one PushConfig is allowed per subscription!")
+ }
+
+ push_config := push_configs[0].(map[string]interface{})
+ attributes := push_config["attributes"].(map[string]interface{})
+ attributesClean := cleanAdditionalArgs(attributes)
+ pushConfig := &pubsub.PushConfig{Attributes: attributesClean, PushEndpoint: push_config["push_endpoint"].(string)}
+ subscription = &pubsub.Subscription{AckDeadlineSeconds: ackDeadlineSeconds, Topic: computed_topic_name, PushConfig: pushConfig}
+ } else {
+ subscription = &pubsub.Subscription{AckDeadlineSeconds: ackDeadlineSeconds, Topic: computed_topic_name}
+ }
+
+ call := config.clientPubsub.Projects.Subscriptions.Create(name, subscription)
+ res, err := call.Do()
+ if err != nil {
+ return err
+ }
+
+ d.SetId(res.Name)
+
+ return nil
+}
+
+func resourcePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+
+ name := d.Id()
+ call := config.clientPubsub.Projects.Subscriptions.Get(name)
+ _, err := call.Do()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+
+func resourcePubsubSubscriptionDelete(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+
+ name := d.Id()
+ call := config.clientPubsub.Projects.Subscriptions.Delete(name)
+ _, err := call.Do()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/builtin/providers/google/resource_pubsub_subscription_test.go b/builtin/providers/google/resource_pubsub_subscription_test.go
new file mode 100644
index 0000000000..b0eb2a25ba
--- /dev/null
+++ b/builtin/providers/google/resource_pubsub_subscription_test.go
@@ -0,0 +1,74 @@
+package google
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccPubsubSubscriptionCreate(t *testing.T) {
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckPubsubSubscriptionDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccPubsubSubscription,
+ Check: resource.ComposeTestCheckFunc(
+ testAccPubsubSubscriptionExists(
+ "google_pubsub_subscription.foobar_sub"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckPubsubSubscriptionDestroy(s *terraform.State) error {
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "google_pubsub_subscription" {
+ continue
+ }
+
+ config := testAccProvider.Meta().(*Config)
+ _, err := config.clientPubsub.Projects.Subscriptions.Get(rs.Primary.ID).Do()
+ if err == nil {
+ return fmt.Errorf("Subscription still present")
+ }
+ }
+
+ return nil
+}
+
+func testAccPubsubSubscriptionExists(n string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No ID is set")
+ }
+ config := testAccProvider.Meta().(*Config)
+ _, err := config.clientPubsub.Projects.Subscriptions.Get(rs.Primary.ID).Do()
+ if err != nil {
+ return fmt.Errorf("Subscription does not exist")
+ }
+
+ return nil
+ }
+}
+
+const testAccPubsubSubscription = `
+resource "google_pubsub_topic" "foobar_sub" {
+ name = "foobar_sub"
+}
+
+resource "google_pubsub_subscription" "foobar_sub" {
+ name = "foobar_sub"
+ topic = "${google_pubsub_topic.foobar_sub.name}"
+}`
+
diff --git a/builtin/providers/google/resource_pubsub_topic.go b/builtin/providers/google/resource_pubsub_topic.go
new file mode 100644
index 0000000000..c6ec7cf0fe
--- /dev/null
+++ b/builtin/providers/google/resource_pubsub_topic.go
@@ -0,0 +1,68 @@
+package google
+
+import (
+ "fmt"
+ "google.golang.org/api/pubsub/v1"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourcePubsubTopic() *schema.Resource {
+ return &schema.Resource{
+ Create: resourcePubsubTopicCreate,
+ Read: resourcePubsubTopicRead,
+ Delete: resourcePubsubTopicDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ },
+ }
+}
+
+func resourcePubsubTopicCreate(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+
+ name := fmt.Sprintf("projects/%s/topics/%s", config.Project, d.Get("name").(string))
+ topic := &pubsub.Topic{}
+
+ call := config.clientPubsub.Projects.Topics.Create(name, topic)
+ res, err := call.Do()
+ if err != nil {
+ return err
+ }
+
+ d.SetId(res.Name)
+
+ return nil
+}
+
+func resourcePubsubTopicRead(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+
+ name := d.Id()
+ call := config.clientPubsub.Projects.Topics.Get(name)
+ _, err := call.Do()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+
+func resourcePubsubTopicDelete(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+
+ name := d.Id()
+ call := config.clientPubsub.Projects.Topics.Delete(name)
+ _, err := call.Do()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/builtin/providers/google/resource_pubsub_topic_test.go b/builtin/providers/google/resource_pubsub_topic_test.go
new file mode 100644
index 0000000000..3d6c655c7d
--- /dev/null
+++ b/builtin/providers/google/resource_pubsub_topic_test.go
@@ -0,0 +1,68 @@
+package google
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccPubsubTopicCreate(t *testing.T) {
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckPubsubTopicDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccPubsubTopic,
+ Check: resource.ComposeTestCheckFunc(
+ testAccPubsubTopicExists(
+ "google_pubsub_topic.foobar"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckPubsubTopicDestroy(s *terraform.State) error {
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "google_pubsub_topic" {
+ continue
+ }
+
+ config := testAccProvider.Meta().(*Config)
+ _, err := config.clientPubsub.Projects.Topics.Get(rs.Primary.ID).Do()
+ if err == nil {
+ return fmt.Errorf("Topic still present")
+ }
+ }
+
+ return nil
+}
+
+func testAccPubsubTopicExists(n string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No ID is set")
+ }
+ config := testAccProvider.Meta().(*Config)
+ _, err := config.clientPubsub.Projects.Topics.Get(rs.Primary.ID).Do()
+ if err != nil {
+ return fmt.Errorf("Topic does not exist")
+ }
+
+ return nil
+ }
+}
+
+const testAccPubsubTopic = `
+resource "google_pubsub_topic" "foobar" {
+ name = "foobar"
+}`
diff --git a/website/source/docs/providers/google/r/pubsub_subscription.html.markdown b/website/source/docs/providers/google/r/pubsub_subscription.html.markdown
new file mode 100644
index 0000000000..d1f43ef415
--- /dev/null
+++ b/website/source/docs/providers/google/r/pubsub_subscription.html.markdown
@@ -0,0 +1,39 @@
+---
+layout: "google"
+page_title: "Google: google_pubsub_subscription"
+sidebar_current: "docs-google-pubsub-subscription"
+description: |-
+ Creates a subscription in Google's pubsub queueing system
+---
+
+# google\_pubsub\_subscription
+
+Creates a subscription in Google's pubsub queueing system. For more information see
+[the official documentation](https://cloud.google.com/pubsub/docs) and
+[API](https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions).
+
+
+## Example Usage
+
+```
+resource "google_pubsub_subscription" "default" {
+ name = "default-subscription"
+ topic = "default-topic"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) A unique name for the resource, required by pubsub.
+ Changing this forces a new resource to be created.
+* `topic` - (Required) A topic to bind this subscription to, required by pubsub.
+ Changing this forces a new resource to be created.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `name` - The name of the resource.
+* `topic` - The topic to bind this resource to.
diff --git a/website/source/docs/providers/google/r/pubsub_topic.html.markdown b/website/source/docs/providers/google/r/pubsub_topic.html.markdown
new file mode 100644
index 0000000000..e371ddef19
--- /dev/null
+++ b/website/source/docs/providers/google/r/pubsub_topic.html.markdown
@@ -0,0 +1,35 @@
+---
+layout: "google"
+page_title: "Google: google_pubsub_topic"
+sidebar_current: "docs-google-pubsub-topic"
+description: |-
+ Creates a topic in Google's pubsub queueing system
+---
+
+# google\_pubsub\_topic
+
+Creates a topic in Google's pubsub queueing system. For more information see
+[the official documentation](https://cloud.google.com/pubsub/docs) and
+[API](https://cloud.google.com/pubsub/reference/rest/v1/projects.topics).
+
+
+## Example Usage
+
+```
+resource "google_pubsub_topic" "default" {
+ name = "default-topic"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) A unique name for the resource, required by pubsub.
+ Changing this forces a new resource to be created.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `name` - The name of the resource.
diff --git a/website/source/layouts/google.erb b/website/source/layouts/google.erb
index a8b9b3f2aa..2ffae19583 100644
--- a/website/source/layouts/google.erb
+++ b/website/source/layouts/google.erb
@@ -129,6 +129,19 @@
+ <li<%= sidebar_current(/^docs-google-pubsub/) %>>
+ <a href="#">Google PubSub Resources</a>
+ <ul class="nav nav-visible">
+ <li<%= sidebar_current("docs-google-pubsub-subscription") %>>
+ <a href="/docs/providers/google/r/pubsub_subscription.html">google_pubsub_subscription</a>
+ </li>
+
+ <li<%= sidebar_current("docs-google-pubsub-topic") %>>
+ <a href="/docs/providers/google/r/pubsub_topic.html">google_pubsub_topic</a>
+ </li>
+ </ul>
+ </li>
+
 <li<%= sidebar_current("docs-google-sql") %>>
 <a href="#">Google SQL Resources</a>
From 523436584d0a3549046b5097419f3656eab541f0 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 2 Dec 2015 14:01:20 -0600
Subject: [PATCH 103/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e84c8d426a..f7b83a16d4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,7 @@ IMPROVEMENTS:
* provider/docker: Add support for setting memory, swap and CPU shares on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting labels on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting log driver and options on `docker_container` resources [GH-3761]
+ * provider/vsphere: Add support for custom vm params on `vsphere_virtual_machine` [GH-3867]
BUG FIXES:
From 41d1bd3c48cae5175b987c233bed55db4b8e59ad Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 2 Dec 2015 14:05:07 -0600
Subject: [PATCH 104/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f7b83a16d4..5d7c06fbb0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,7 @@ IMPROVEMENTS:
* provider/docker: Add support for setting labels on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting log driver and options on `docker_container` resources [GH-3761]
* provider/vsphere: Add support for custom vm params on `vsphere_virtual_machine` [GH-3867]
+ * provider/vsphere: rename vcenter_server config parameter to something clearer [GH-3718]
BUG FIXES:
From d0b07c95a25cd2f47babd70f6cf8e6e99464f349 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 2 Dec 2015 14:05:14 -0600
Subject: [PATCH 105/664] Update CHANGELOG.md
---
CHANGELOG.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5d7c06fbb0..24a0692191 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,7 +10,7 @@ IMPROVEMENTS:
* provider/docker: Add support for setting labels on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting log driver and options on `docker_container` resources [GH-3761]
* provider/vsphere: Add support for custom vm params on `vsphere_virtual_machine` [GH-3867]
- * provider/vsphere: rename vcenter_server config parameter to something clearer [GH-3718]
+ * provider/vsphere: Rename vcenter_server config parameter to something clearer [GH-3718]
BUG FIXES:
From 4ce6455e8a4a0ef2ef440f46fc3b8fa736ed55de Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Wed, 2 Dec 2015 15:03:29 -0500
Subject: [PATCH 106/664] provider/docker: Refer to a tag instead of latest
This should make tests more stable going forward. Also switch out the
image used from Ubuntu to Alpine Linux to reduce required download size
during test runs.
---
builtin/providers/docker/resource_docker_image_test.go | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/builtin/providers/docker/resource_docker_image_test.go b/builtin/providers/docker/resource_docker_image_test.go
index b902749d7c..3a1c5b1388 100644
--- a/builtin/providers/docker/resource_docker_image_test.go
+++ b/builtin/providers/docker/resource_docker_image_test.go
@@ -17,7 +17,7 @@ func TestAccDockerImage_basic(t *testing.T) {
resource.TestCheckResourceAttr(
"docker_image.foo",
"latest",
- "d52aff8195301dba95e8e3d14f0c3738a874237afd54233d250a2fc4489bfa83"),
+ "8dd8107abd2e22bfd3b45b05733f3d2677d4078b09b5edce56ee3d8677d3c648"),
),
},
},
@@ -44,8 +44,8 @@ func TestAddDockerImage_private(t *testing.T) {
const testAccDockerImageConfig = `
resource "docker_image" "foo" {
- name = "ubuntu:trusty-20150320"
- keep_updated = true
+ name = "alpine:3.1"
+ keep_updated = false
}
`
From 2882d0190445d1a56de91420ff2f98c7c06cdda4 Mon Sep 17 00:00:00 2001
From: stack72
Date: Fri, 6 Nov 2015 17:36:26 +0000
Subject: [PATCH 107/664] Work to allow reducing the Number of Cache Nodes in
an ElastiCache Cluster
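A hedged sketch of a configuration exercising the new behaviour (identifiers are placeholders). Given the `%04d` node-ID convention used in the change below, shrinking a cluster from 3 nodes to 1 asks ElastiCache to remove nodes "0003" and "0002":

```
resource "aws_elasticache_cluster" "example" {
  cluster_id           = "tf-example"
  engine               = "memcached"
  node_type            = "cache.m1.small"
  num_cache_nodes      = 1   # previously 3; nodes "0003" and "0002" are removed
  port                 = 11211
  parameter_group_name = "default.memcached1.4"
  apply_immediately    = true
}
```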
---
.../aws/resource_aws_elasticache_cluster.go | 20 ++++
.../resource_aws_elasticache_cluster_test.go | 100 ++++++++++++++++++
2 files changed, 120 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go
index cffcdab2de..c7686f5f3f 100644
--- a/builtin/providers/aws/resource_aws_elasticache_cluster.go
+++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go
@@ -395,8 +395,18 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{
}
if d.HasChange("num_cache_nodes") {
+ oraw, nraw := d.GetChange("num_cache_nodes")
+ o := oraw.(int)
+ n := nraw.(int)
+ if n < o {
+ log.Printf("[INFO] Cluster %s is marked for Decreasing cache nodes from %d to %d", d.Id(), o, n)
+ nodesToRemove := getCacheNodesToRemove(d, o, o-n)
+ req.CacheNodeIdsToRemove = nodesToRemove
+ }
+
req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int)))
requestUpdate = true
+
}
if requestUpdate {
@@ -426,6 +436,16 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{
return resourceAwsElasticacheClusterRead(d, meta)
}
+func getCacheNodesToRemove(d *schema.ResourceData, oldNumberOfNodes int, cacheNodesToRemove int) []*string {
+ nodesIdsToRemove := []*string{}
+ for i := oldNumberOfNodes; i > oldNumberOfNodes-cacheNodesToRemove && i > 0; i-- {
+ s := fmt.Sprintf("%04d", i)
+ nodesIdsToRemove = append(nodesIdsToRemove, &s)
+ }
+
+ return nodesIdsToRemove
+}
+
func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error {
sortedCacheNodes := make([]*elasticache.CacheNode, len(c.CacheNodes))
copy(sortedCacheNodes, c.CacheNodes)
diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go
index a17c5d9b1e..88504e0f21 100644
--- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go
+++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go
@@ -72,6 +72,41 @@ func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) {
})
}
+func TestAccAWSElasticacheCluster_decreasingCacheNodes(t *testing.T) {
+ var ec elasticache.CacheCluster
+
+ ri := genRandInt()
+ preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfigDecreasingNodes, ri, ri, ri)
+ postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfigDecreasingNodes_update, ri, ri, ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: preConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
+ testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
+ resource.TestCheckResourceAttr(
+ "aws_elasticache_cluster.bar", "num_cache_nodes", "3"),
+ ),
+ },
+
+ resource.TestStep{
+ Config: postConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
+ testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
+ resource.TestCheckResourceAttr(
+ "aws_elasticache_cluster.bar", "num_cache_nodes", "1"),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAWSElasticacheCluster_vpc(t *testing.T) {
var csg elasticache.CacheSubnetGroup
var ec elasticache.CacheCluster
@@ -260,6 +295,71 @@ resource "aws_elasticache_cluster" "bar" {
}
`
+var testAccAWSElasticacheClusterConfigDecreasingNodes = `
+provider "aws" {
+ region = "us-east-1"
+}
+resource "aws_security_group" "bar" {
+ name = "tf-test-security-group-%03d"
+ description = "tf-test-security-group-descr"
+ ingress {
+ from_port = -1
+ to_port = -1
+ protocol = "icmp"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+}
+
+resource "aws_elasticache_security_group" "bar" {
+ name = "tf-test-security-group-%03d"
+ description = "tf-test-security-group-descr"
+ security_group_names = ["${aws_security_group.bar.name}"]
+}
+
+resource "aws_elasticache_cluster" "bar" {
+ cluster_id = "tf-test-%03d"
+ engine = "memcached"
+ node_type = "cache.m1.small"
+ num_cache_nodes = 3
+ port = 11211
+ parameter_group_name = "default.memcached1.4"
+ security_group_names = ["${aws_elasticache_security_group.bar.name}"]
+}
+`
+
+var testAccAWSElasticacheClusterConfigDecreasingNodes_update = `
+provider "aws" {
+ region = "us-east-1"
+}
+resource "aws_security_group" "bar" {
+ name = "tf-test-security-group-%03d"
+ description = "tf-test-security-group-descr"
+ ingress {
+ from_port = -1
+ to_port = -1
+ protocol = "icmp"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+}
+
+resource "aws_elasticache_security_group" "bar" {
+ name = "tf-test-security-group-%03d"
+ description = "tf-test-security-group-descr"
+ security_group_names = ["${aws_security_group.bar.name}"]
+}
+
+resource "aws_elasticache_cluster" "bar" {
+ cluster_id = "tf-test-%03d"
+ engine = "memcached"
+ node_type = "cache.m1.small"
+ num_cache_nodes = 1
+ port = 11211
+ parameter_group_name = "default.memcached1.4"
+ security_group_names = ["${aws_elasticache_security_group.bar.name}"]
+ apply_immediately = true
+}
+`
+
var testAccAWSElasticacheClusterInVPCConfig = fmt.Sprintf(`
resource "aws_vpc" "foo" {
cidr_block = "192.168.0.0/16"
From d1bba3095b8bac5e0d32b53024fb8e6a656de078 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Wed, 2 Dec 2015 14:19:19 -0600
Subject: [PATCH 108/664] providers/aws: Validate IOPs for EBS Volumes
---
.../providers/aws/resource_aws_ebs_volume.go | 29 ++++++++++++++-----
.../docs/providers/aws/r/ebs_volume.html.md | 2 +-
2 files changed, 23 insertions(+), 8 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_ebs_volume.go b/builtin/providers/aws/resource_aws_ebs_volume.go
index 1680b4f533..0e016ecaac 100644
--- a/builtin/providers/aws/resource_aws_ebs_volume.go
+++ b/builtin/providers/aws/resource_aws_ebs_volume.go
@@ -37,6 +37,14 @@ func resourceAwsEbsVolume() *schema.Resource {
Optional: true,
Computed: true,
ForceNew: true,
+ ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+ value := v.(int)
+ if value < 100 {
+ es = append(es, fmt.Errorf(
+ "%q must be an integer, minimum value 100", k))
+ }
+ return
+ },
},
"kms_key_id": &schema.Schema{
Type: schema.TypeString,
@@ -76,9 +84,6 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error
if value, ok := d.GetOk("encrypted"); ok {
request.Encrypted = aws.Bool(value.(bool))
}
- if value, ok := d.GetOk("iops"); ok {
- request.Iops = aws.Int64(int64(value.(int)))
- }
if value, ok := d.GetOk("kms_key_id"); ok {
request.KmsKeyId = aws.String(value.(string))
}
@@ -88,18 +93,28 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error
if value, ok := d.GetOk("snapshot_id"); ok {
request.SnapshotId = aws.String(value.(string))
}
+ var t string
if value, ok := d.GetOk("type"); ok {
- request.VolumeType = aws.String(value.(string))
+ t = value.(string)
+ request.VolumeType = aws.String(t)
+ }
+ if value, ok := d.GetOk("iops"); ok {
+ if t == "io1" {
+ request.Iops = aws.Int64(int64(value.(int)))
+ } else {
+ return fmt.Errorf("iops is only valid for EBS Volume of type io1")
+ }
}
+ log.Printf(
+ "[DEBUG] EBS Volume create opts: %s", request)
result, err := conn.CreateVolume(request)
if err != nil {
return fmt.Errorf("Error creating EC2 volume: %s", err)
}
- log.Printf(
- "[DEBUG] Waiting for Volume (%s) to become available",
- d.Id())
+ log.Println(
+ "[DEBUG] Waiting for Volume to become available")
stateConf := &resource.StateChangeConf{
Pending: []string{"creating"},
diff --git a/website/source/docs/providers/aws/r/ebs_volume.html.md b/website/source/docs/providers/aws/r/ebs_volume.html.md
index 00bb639a6a..8a41ea26b5 100644
--- a/website/source/docs/providers/aws/r/ebs_volume.html.md
+++ b/website/source/docs/providers/aws/r/ebs_volume.html.md
@@ -14,7 +14,7 @@ Manages a single EBS volume.
```
resource "aws_ebs_volume" "example" {
- availability_zone = "us-west-1a"
+ availability_zone = "us-west-2a"
size = 40
tags {
Name = "HelloWorld"
From 78929df2a15e469c126b9126341782347a20ac0f Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Wed, 2 Dec 2015 16:05:45 -0600
Subject: [PATCH 109/664] providers/aws: Update Dynamo DB docs for GSI hash_key
---
.../source/docs/providers/aws/r/dynamodb_table.html.markdown | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/website/source/docs/providers/aws/r/dynamodb_table.html.markdown b/website/source/docs/providers/aws/r/dynamodb_table.html.markdown
index 5bab941974..15b25119ac 100644
--- a/website/source/docs/providers/aws/r/dynamodb_table.html.markdown
+++ b/website/source/docs/providers/aws/r/dynamodb_table.html.markdown
@@ -72,8 +72,9 @@ For both `local_secondary_index` and `global_secondary_index` objects,
the following properties are supported:
* `name` - (Required) The name of the LSI or GSI
-* `hash_key` - (Required) The name of the hash key in the index; must be
- defined as an attribute in the resource
+* `hash_key` - (Required for GSI) The name of the hash key in the index; must be
+  defined as an attribute in the resource. Only applies to
+  `global_secondary_index`
* `range_key` - (Required) The name of the range key; must be defined
* `projection_type` - (Required) One of "ALL", "INCLUDE" or "KEYS_ONLY"
where *ALL* projects every attribute into the index, *KEYS_ONLY*
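As a hedged illustration of the clarified requirement (table and attribute names are hypothetical), a `global_secondary_index` declares its own `hash_key`, which must also be listed as an `attribute` on the table:

```
resource "aws_dynamodb_table" "example" {
  name           = "GameScores"
  read_capacity  = 5
  write_capacity = 5
  hash_key       = "UserId"

  attribute {
    name = "UserId"
    type = "S"
  }

  attribute {
    name = "GameTitle"
    type = "S"
  }

  global_secondary_index {
    name            = "GameTitleIndex"
    hash_key        = "GameTitle"
    range_key       = "UserId"
    read_capacity   = 5
    write_capacity  = 5
    projection_type = "KEYS_ONLY"
  }
}
```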
From 597fafbb68f4e76fee3fd2a15ee21562ef2848f2 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Wed, 2 Dec 2015 17:27:24 -0500
Subject: [PATCH 110/664] provider/docker: locate container via ID not name
This reapplies the patch mentioned in #3364 - for an unknown reason the
diff there was incorrect.
---
.../docker/resource_docker_container_funcs.go | 21 ++++---------------
1 file changed, 4 insertions(+), 17 deletions(-)
diff --git a/builtin/providers/docker/resource_docker_container_funcs.go b/builtin/providers/docker/resource_docker_container_funcs.go
index b0c262dfcd..814941bba3 100644
--- a/builtin/providers/docker/resource_docker_container_funcs.go
+++ b/builtin/providers/docker/resource_docker_container_funcs.go
@@ -4,7 +4,6 @@ import (
"errors"
"fmt"
"strconv"
- "strings"
"time"
dc "github.com/fsouza/go-dockerclient"
@@ -160,7 +159,7 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
func resourceDockerContainerRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*dc.Client)
- apiContainer, err := fetchDockerContainer(d.Get("name").(string), client)
+ apiContainer, err := fetchDockerContainer(d.Id(), client)
if err != nil {
return err
}
@@ -268,7 +267,7 @@ func mapTypeMapValsToString(typeMap map[string]interface{}) map[string]string {
return mapped
}
-func fetchDockerContainer(name string, client *dc.Client) (*dc.APIContainers, error) {
+func fetchDockerContainer(ID string, client *dc.Client) (*dc.APIContainers, error) {
apiContainers, err := client.ListContainers(dc.ListContainersOptions{All: true})
if err != nil {
@@ -276,20 +275,8 @@ func fetchDockerContainer(name string, client *dc.Client) (*dc.APIContainers, er
}
for _, apiContainer := range apiContainers {
- // Sometimes the Docker API prefixes container names with /
- // like it does in these commands. But if there's no
- // set name, it just uses the ID without a /...ugh.
- switch len(apiContainer.Names) {
- case 0:
- if apiContainer.ID == name {
- return &apiContainer, nil
- }
- default:
- for _, containerName := range apiContainer.Names {
- if strings.TrimLeft(containerName, "/") == name {
- return &apiContainer, nil
- }
- }
+ if apiContainer.ID == ID {
+ return &apiContainer, nil
}
}
From 81f7fb1e6f9eb0f5f0b8c24edd8dda7b63ca7a35 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Wed, 2 Dec 2015 18:24:46 -0500
Subject: [PATCH 111/664] Update CHANGELOG.md
---
CHANGELOG.md | 2 ++
1 file changed, 2 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 24a0692191..451f1bebec 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,8 @@ IMPROVEMENTS:
BUG FIXES:
+ * provider/docker: Fix an issue running with Docker Swarm by looking up containers by ID instead of name [GH-4148]
+
## 0.6.8 (December 2, 2015)
FEATURES:
From 3f5997a6c13bf60ef32e85b78e5ca0c589545780 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 2 Dec 2015 17:39:24 -0600
Subject: [PATCH 112/664] examples: modernize aws-two-tier
* Set up a VPC instead of assuming EC2 classic
* Set up a keypair instead of requiring one be created beforehand;
this fixes #1567
* Use SSH Agent for authentication instead of explicit private key.
---
examples/aws-two-tier/main.tf | 119 ++++++++++++++++++++---------
examples/aws-two-tier/variables.tf | 12 +--
2 files changed, 92 insertions(+), 39 deletions(-)
diff --git a/examples/aws-two-tier/main.tf b/examples/aws-two-tier/main.tf
index 254cb7fdd4..8b98d979c7 100644
--- a/examples/aws-two-tier/main.tf
+++ b/examples/aws-two-tier/main.tf
@@ -3,33 +3,81 @@ provider "aws" {
region = "${var.aws_region}"
}
-# Our default security group to access
-# the instances over SSH and HTTP
-resource "aws_security_group" "default" {
- name = "terraform_example"
- description = "Used in the terraform"
+# Create a VPC to launch our instances into
+resource "aws_vpc" "default" {
+ cidr_block = "10.0.0.0/16"
+}
- # SSH access from anywhere
- ingress {
- from_port = 22
- to_port = 22
- protocol = "tcp"
- cidr_blocks = ["0.0.0.0/0"]
- }
+# Create an internet gateway to give our subnet access to the outside world
+resource "aws_internet_gateway" "default" {
+ vpc_id = "${aws_vpc.default.id}"
+}
+
+# Grant the VPC internet access on its main route table
+resource "aws_route" "internet_access" {
+ route_table_id = "${aws_vpc.default.main_route_table_id}"
+ destination_cidr_block = "0.0.0.0/0"
+ gateway_id = "${aws_internet_gateway.default.id}"
+}
+
+# Create a subnet to launch our instances into
+resource "aws_subnet" "default" {
+ vpc_id = "${aws_vpc.default.id}"
+ cidr_block = "10.0.1.0/24"
+ map_public_ip_on_launch = true
+}
+
+# A security group for the ELB so it is accessible via the web
+resource "aws_security_group" "elb" {
+ name = "terraform_example_elb"
+ description = "Used in the terraform"
+ vpc_id = "${aws_vpc.default.id}"
# HTTP access from anywhere
ingress {
- from_port = 80
- to_port = 80
- protocol = "tcp"
+ from_port = 80
+ to_port = 80
+ protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# outbound internet access
egress {
- from_port = 0
- to_port = 0
- protocol = "-1"
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+}
+
+# Our default security group to access
+# the instances over SSH and HTTP
+resource "aws_security_group" "default" {
+ name = "terraform_example"
+ description = "Used in the terraform"
+ vpc_id = "${aws_vpc.default.id}"
+
+ # SSH access from anywhere
+ ingress {
+ from_port = 22
+ to_port = 22
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ # HTTP access from the VPC
+ ingress {
+ from_port = 80
+ to_port = 80
+ protocol = "tcp"
+ cidr_blocks = ["10.0.0.0/16"]
+ }
+
+ # outbound internet access
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
@@ -38,20 +86,23 @@ resource "aws_security_group" "default" {
resource "aws_elb" "web" {
name = "terraform-example-elb"
- # The same availability zone as our instance
- availability_zones = ["${aws_instance.web.availability_zone}"]
+ subnets = ["${aws_subnet.default.id}"]
+ security_groups = ["${aws_security_group.elb.id}"]
+ instances = ["${aws_instance.web.id}"]
listener {
- instance_port = 80
+ instance_port = 80
instance_protocol = "http"
- lb_port = 80
- lb_protocol = "http"
+ lb_port = 80
+ lb_protocol = "http"
}
- # The instance is registered automatically
- instances = ["${aws_instance.web.id}"]
}
+resource "aws_key_pair" "auth" {
+ key_name = "tf-aws-two-tier-example"
+ public_key = "${file(var.public_key_path)}"
+}
resource "aws_instance" "web" {
# The connection block tells our provisioner how to
@@ -60,8 +111,7 @@ resource "aws_instance" "web" {
# The default username for our AMI
user = "ubuntu"
- # The path to your keyfile
- key_file = "${var.key_path}"
+ # The connection will use the local SSH agent for authentication.
}
instance_type = "m1.small"
@@ -70,15 +120,16 @@ resource "aws_instance" "web" {
# we specified
ami = "${lookup(var.aws_amis, var.aws_region)}"
- # The name of our SSH keypair you've created and downloaded
- # from the AWS console.
- #
- # https://console.aws.amazon.com/ec2/v2/home?region=us-west-2#KeyPairs:
- #
- key_name = "${var.key_name}"
+ # The name of our SSH keypair we created above.
+ key_name = "${aws_key_pair.auth.id}"
# Our Security group to allow HTTP and SSH access
- security_groups = ["${aws_security_group.default.name}"]
+ vpc_security_group_ids = ["${aws_security_group.default.id}"]
+
+ # We're going to launch into the same subnet as our ELB. In a production
+ # environment it's more common to have a separate private subnet for
+ # backend instances.
+ subnet_id = "${aws_subnet.default.id}"
# We run a remote provisioner on the instance after creating it.
# In this case, we just install nginx and start it. By default,
diff --git a/examples/aws-two-tier/variables.tf b/examples/aws-two-tier/variables.tf
index ee80ee2928..1321fcf1b4 100644
--- a/examples/aws-two-tier/variables.tf
+++ b/examples/aws-two-tier/variables.tf
@@ -1,9 +1,11 @@
-variable "key_name" {
- description = "Name of the SSH keypair to use in AWS."
-}
+variable "public_key_path" {
+ description = <
Date: Wed, 2 Dec 2015 20:26:44 -0700
Subject: [PATCH 113/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 451f1bebec..80a0f4561b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@ IMPROVEMENTS:
BUG FIXES:
* provider/docker: Fix an issue running with Docker Swarm by looking up containers by ID instead of name [GH-4148]
+ * provider/openstack: Better handling of load balancing resource state changes [GH-3926]
## 0.6.8 (December 2, 2015)
From 7a24764c150fad36c7b048dd77d94e6431e182f6 Mon Sep 17 00:00:00 2001
From: Paul Bellamy
Date: Fri, 9 Oct 2015 14:05:43 +0100
Subject: [PATCH 114/664] provider/docker: Add hosts parameter for containers
---
.../docker/resource_docker_container.go | 37 +++++++++++++++++++
.../docker/resource_docker_container_funcs.go | 22 ++++++++++-
.../docker/r/container.html.markdown | 11 ++++++
3 files changed, 69 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/docker/resource_docker_container.go b/builtin/providers/docker/resource_docker_container.go
index 242462e1a7..6ad5176ace 100644
--- a/builtin/providers/docker/resource_docker_container.go
+++ b/builtin/providers/docker/resource_docker_container.go
@@ -130,6 +130,28 @@ func resourceDockerContainer() *schema.Resource {
Set: resourceDockerPortsHash,
},
+ "hosts": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "ip": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "host": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ },
+ Set: resourceDockerHostsHash,
+ },
+
"env": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
@@ -323,6 +345,21 @@ func resourceDockerPortsHash(v interface{}) int {
return hashcode.String(buf.String())
}
+func resourceDockerHostsHash(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+
+ if v, ok := m["ip"]; ok {
+ buf.WriteString(fmt.Sprintf("%v-", v.(string)))
+ }
+
+ if v, ok := m["host"]; ok {
+ buf.WriteString(fmt.Sprintf("%v-", v.(string)))
+ }
+
+ return hashcode.String(buf.String())
+}
+
func resourceDockerVolumesHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
diff --git a/builtin/providers/docker/resource_docker_container_funcs.go b/builtin/providers/docker/resource_docker_container_funcs.go
index 814941bba3..bf2bc9e2f0 100644
--- a/builtin/providers/docker/resource_docker_container_funcs.go
+++ b/builtin/providers/docker/resource_docker_container_funcs.go
@@ -67,6 +67,11 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
createOpts.Config.ExposedPorts = exposedPorts
}
+ extraHosts := []string{}
+ if v, ok := d.GetOk("extra_hosts"); ok {
+ extraHosts = extraHostsSetToDockerExtraHosts(v.(*schema.Set))
+ }
+
volumes := map[string]struct{}{}
binds := []string{}
volumesFrom := []string{}
@@ -100,7 +105,9 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
if len(portBindings) != 0 {
hostConfig.PortBindings = portBindings
}
-
+ if len(extraHosts) != 0 {
+ hostConfig.ExtraHosts = extraHosts
+ }
if len(binds) != 0 {
hostConfig.Binds = binds
}
@@ -312,6 +319,19 @@ func portSetToDockerPorts(ports *schema.Set) (map[dc.Port]struct{}, map[dc.Port]
return retExposedPorts, retPortBindings
}
+func extraHostsSetToDockerExtraHosts(extraHosts *schema.Set) []string {
+ retExtraHosts := []string{}
+
+ for _, hostInt := range extraHosts.List() {
+ host := hostInt.(map[string]interface{})
+ ip := host["ip"].(string)
+ hostname := host["host"].(string)
+ retExtraHosts = append(retExtraHosts, hostname+":"+ip)
+ }
+
+ return retExtraHosts
+}
+
func volumeSetToDockerVolumes(volumes *schema.Set) (map[string]struct{}, []string, []string, error) {
retVolumeMap := map[string]struct{}{}
retHostConfigBinds := []string{}
diff --git a/website/source/docs/providers/docker/r/container.html.markdown b/website/source/docs/providers/docker/r/container.html.markdown
index 920288eb25..2f23d3db37 100644
--- a/website/source/docs/providers/docker/r/container.html.markdown
+++ b/website/source/docs/providers/docker/r/container.html.markdown
@@ -57,6 +57,7 @@ The following arguments are supported:
kept running. If false, then as long as the container exists, Terraform
assumes it is successful.
* `ports` - (Optional) See [Ports](#ports) below for details.
+* `extra_hosts` - (Optional) See [Extra Hosts](#extra_hosts) below for details.
* `privileged` - (Optional, bool) Run container in privileged mode.
* `publish_all_ports` - (Optional, bool) Publish all ports of the container.
* `volumes` - (Optional) See [Volumes](#volumes) below for details.
@@ -82,6 +83,16 @@ the following:
* `protocol` - (Optional, string) Protocol that can be used over this port,
defaults to TCP.
+
+## Extra Hosts
+
+`extra_hosts` is a block within the configuration that can be repeated to specify
+the extra host mappings for the container. Each `extra_hosts` block supports
+the following:
+
+* `host` - (Required, string) Hostname to add.
+* `ip` - (Required, string) IP address this hostname should resolve to.
+
## Volumes
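A hedged usage sketch of the documented block (image, hostnames, and addresses are placeholders), adding two static host entries to a container:

```
resource "docker_container" "app" {
  name  = "app"
  image = "alpine:3.1"

  extra_hosts {
    host = "db.internal"
    ip   = "10.0.0.5"
  }

  extra_hosts {
    host = "cache.internal"
    ip   = "10.0.0.6"
  }
}
```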
From 52d178b7ac7f378cddc64d1395f1c0c86cf2ebd0 Mon Sep 17 00:00:00 2001
From: Paul Bellamy
Date: Thu, 3 Dec 2015 10:51:59 +0000
Subject: [PATCH 115/664] provider/docker: Inline ports and volumes schemas for
consistency
---
.../docker/resource_docker_container.go | 122 ++++++++----------
1 file changed, 57 insertions(+), 65 deletions(-)
diff --git a/builtin/providers/docker/resource_docker_container.go b/builtin/providers/docker/resource_docker_container.go
index 6ad5176ace..76b3e153d4 100644
--- a/builtin/providers/docker/resource_docker_container.go
+++ b/builtin/providers/docker/resource_docker_container.go
@@ -118,16 +118,69 @@ func resourceDockerContainer() *schema.Resource {
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
- Elem: getVolumesElem(),
- Set: resourceDockerVolumesHash,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "from_container": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "container_path": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "host_path": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "read_only": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ },
+ Set: resourceDockerVolumesHash,
},
"ports": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
- Elem: getPortsElem(),
- Set: resourceDockerPortsHash,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "internal": &schema.Schema{
+ Type: schema.TypeInt,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "external": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "ip": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "protocol": &schema.Schema{
+ Type: schema.TypeString,
+ Default: "tcp",
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ },
+ Set: resourceDockerPortsHash,
},
"hosts": &schema.Schema{
@@ -263,67 +316,6 @@ func resourceDockerContainer() *schema.Resource {
}
}
-func getVolumesElem() *schema.Resource {
- return &schema.Resource{
- Schema: map[string]*schema.Schema{
- "from_container": &schema.Schema{
- Type: schema.TypeString,
- Optional: true,
- ForceNew: true,
- },
-
- "container_path": &schema.Schema{
- Type: schema.TypeString,
- Optional: true,
- ForceNew: true,
- },
-
- "host_path": &schema.Schema{
- Type: schema.TypeString,
- Optional: true,
- ForceNew: true,
- },
-
- "read_only": &schema.Schema{
- Type: schema.TypeBool,
- Optional: true,
- ForceNew: true,
- },
- },
- }
-}
-
-func getPortsElem() *schema.Resource {
- return &schema.Resource{
- Schema: map[string]*schema.Schema{
- "internal": &schema.Schema{
- Type: schema.TypeInt,
- Required: true,
- ForceNew: true,
- },
-
- "external": &schema.Schema{
- Type: schema.TypeInt,
- Optional: true,
- ForceNew: true,
- },
-
- "ip": &schema.Schema{
- Type: schema.TypeString,
- Optional: true,
- ForceNew: true,
- },
-
- "protocol": &schema.Schema{
- Type: schema.TypeString,
- Default: "tcp",
- Optional: true,
- ForceNew: true,
- },
- },
- }
-}
-
func resourceDockerPortsHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
From b8f3417e79c216e6cc5ff5baa50c1c6047f3f2f0 Mon Sep 17 00:00:00 2001
From: Sander van Harmelen
Date: Thu, 3 Dec 2015 11:10:42 +0100
Subject: [PATCH 116/664] Change all firewall related resources to take a
cidr_list
Also includes some additional tweaks to improve performance and adds a
little concurrency to speed things up.
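As a hedged sketch of the new `cidr_list` field (addresses are placeholders, and argument names other than `cidr_list` follow the provider docs of this era rather than the excerpt below), a single rule can now cover several CIDRs instead of one `source_cidr`:

```
resource "cloudstack_firewall" "web" {
  ipaddress = "192.168.2.100"

  rule {
    cidr_list = ["10.0.0.0/8", "192.168.0.0/16"]
    protocol  = "tcp"
    ports     = ["80", "443"]
  }
}
```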
---
builtin/providers/cloudstack/provider_test.go | 12 +-
.../resource_cloudstack_egress_firewall.go | 275 ++++++++++-------
...esource_cloudstack_egress_firewall_test.go | 118 ++++----
.../resource_cloudstack_firewall.go | 276 +++++++++++-------
.../resource_cloudstack_firewall_test.go | 70 +++--
.../resource_cloudstack_instance_test.go | 6 +-
.../resource_cloudstack_network_acl_rule.go | 161 ++++------
...source_cloudstack_network_acl_rule_test.go | 98 +++++--
.../resource_cloudstack_port_forward.go | 183 ++++++++----
.../resource_cloudstack_port_forward_test.go | 32 +-
...rce_cloudstack_secondary_ipaddress_test.go | 6 +-
builtin/providers/cloudstack/resources.go | 34 +++
.../r/egress_firewall.html.markdown | 7 +-
.../cloudstack/r/firewall.html.markdown | 7 +-
.../r/network_acl_rule.html.markdown | 7 +-
15 files changed, 783 insertions(+), 509 deletions(-)
diff --git a/builtin/providers/cloudstack/provider_test.go b/builtin/providers/cloudstack/provider_test.go
index b1b8442a55..2585c5fa0d 100644
--- a/builtin/providers/cloudstack/provider_test.go
+++ b/builtin/providers/cloudstack/provider_test.go
@@ -84,8 +84,11 @@ func testAccPreCheck(t *testing.T) {
if v := os.Getenv("CLOUDSTACK_NETWORK_1"); v == "" {
t.Fatal("CLOUDSTACK_NETWORK_1 must be set for acceptance tests")
}
- if v := os.Getenv("CLOUDSTACK_NETWORK_1_IPADDRESS"); v == "" {
- t.Fatal("CLOUDSTACK_NETWORK_1_IPADDRESS must be set for acceptance tests")
+ if v := os.Getenv("CLOUDSTACK_NETWORK_1_IPADDRESS1"); v == "" {
+ t.Fatal("CLOUDSTACK_NETWORK_1_IPADDRESS1 must be set for acceptance tests")
+ }
+ if v := os.Getenv("CLOUDSTACK_NETWORK_1_IPADDRESS2"); v == "" {
+ t.Fatal("CLOUDSTACK_NETWORK_1_IPADDRESS2 must be set for acceptance tests")
}
if v := os.Getenv("CLOUDSTACK_NETWORK_2"); v == "" {
t.Fatal("CLOUDSTACK_NETWORK_2 must be set for acceptance tests")
@@ -159,7 +162,10 @@ var CLOUDSTACK_SERVICE_OFFERING_2 = os.Getenv("CLOUDSTACK_SERVICE_OFFERING_2")
var CLOUDSTACK_NETWORK_1 = os.Getenv("CLOUDSTACK_NETWORK_1")
// A valid IP address in CLOUDSTACK_NETWORK_1
-var CLOUDSTACK_NETWORK_1_IPADDRESS = os.Getenv("CLOUDSTACK_NETWORK_1_IPADDRESS")
+var CLOUDSTACK_NETWORK_1_IPADDRESS1 = os.Getenv("CLOUDSTACK_NETWORK_1_IPADDRESS1")
+
+// A second valid IP address in CLOUDSTACK_NETWORK_1
+var CLOUDSTACK_NETWORK_1_IPADDRESS2 = os.Getenv("CLOUDSTACK_NETWORK_1_IPADDRESS2")
// Name for a network that will be created
var CLOUDSTACK_NETWORK_2 = os.Getenv("CLOUDSTACK_NETWORK_2")
diff --git a/builtin/providers/cloudstack/resource_cloudstack_egress_firewall.go b/builtin/providers/cloudstack/resource_cloudstack_egress_firewall.go
index a1d73b1676..41909f90ed 100644
--- a/builtin/providers/cloudstack/resource_cloudstack_egress_firewall.go
+++ b/builtin/providers/cloudstack/resource_cloudstack_egress_firewall.go
@@ -1,14 +1,14 @@
package cloudstack
import (
- "bytes"
"fmt"
"regexp"
- "sort"
"strconv"
"strings"
+ "sync"
+ "time"
- "github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/helper/schema"
"github.com/xanzy/go-cloudstack/cloudstack"
)
@@ -38,9 +38,17 @@ func resourceCloudStackEgressFirewall() *schema.Resource {
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
+ "cidr_list": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ },
+
"source_cidr": &schema.Schema{
- Type: schema.TypeString,
- Required: true,
+ Type: schema.TypeString,
+ Optional: true,
+ Deprecated: "Please use the `cidr_list` field instead",
},
"protocol": &schema.Schema{
@@ -64,9 +72,7 @@ func resourceCloudStackEgressFirewall() *schema.Resource {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
- Set: func(v interface{}) int {
- return hashcode.String(v.(string))
- },
+ Set: schema.HashString,
},
"uuids": &schema.Schema{
@@ -75,7 +81,6 @@ func resourceCloudStackEgressFirewall() *schema.Resource {
},
},
},
- Set: resourceCloudStackEgressFirewallRuleHash,
},
},
}
@@ -99,32 +104,66 @@ func resourceCloudStackEgressFirewallCreate(d *schema.ResourceData, meta interfa
d.SetId(networkid)
// Create all rules that are configured
- if rs := d.Get("rule").(*schema.Set); rs.Len() > 0 {
-
+ if nrs := d.Get("rule").(*schema.Set); nrs.Len() > 0 {
// Create an empty schema.Set to hold all rules
- rules := &schema.Set{
- F: resourceCloudStackEgressFirewallRuleHash,
- }
+ rules := resourceCloudStackEgressFirewall().Schema["rule"].ZeroValue().(*schema.Set)
- for _, rule := range rs.List() {
- // Create a single rule
- err := resourceCloudStackEgressFirewallCreateRule(d, meta, rule.(map[string]interface{}))
+ err := createEgressFirewallRules(d, meta, rules, nrs)
- // We need to update this first to preserve the correct state
- rules.Add(rule)
- d.Set("rule", rules)
+ // We need to update this first to preserve the correct state
+ d.Set("rule", rules)
- if err != nil {
- return err
- }
+ if err != nil {
+ return err
}
}
return resourceCloudStackEgressFirewallRead(d, meta)
}
-func resourceCloudStackEgressFirewallCreateRule(
- d *schema.ResourceData, meta interface{}, rule map[string]interface{}) error {
+func createEgressFirewallRules(
+ d *schema.ResourceData,
+ meta interface{},
+ rules *schema.Set,
+ nrs *schema.Set) error {
+ var errs *multierror.Error
+
+ var wg sync.WaitGroup
+ wg.Add(nrs.Len())
+
+ sem := make(chan struct{}, 10)
+ for _, rule := range nrs.List() {
+ // Put in a tiny sleep here to avoid DoS'ing the API
+ time.Sleep(500 * time.Millisecond)
+
+ go func(rule map[string]interface{}) {
+ defer wg.Done()
+ sem <- struct{}{}
+
+ // Create a single rule
+ err := createEgressFirewallRule(d, meta, rule)
+
+ // If we have at least one UUID, we need to save the rule
+ if len(rule["uuids"].(map[string]interface{})) > 0 {
+ rules.Add(rule)
+ }
+
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ }
+
+ <-sem
+ }(rule.(map[string]interface{}))
+ }
+
+ wg.Wait()
+
+ return errs.ErrorOrNil()
+}
+func createEgressFirewallRule(
+ d *schema.ResourceData,
+ meta interface{},
+ rule map[string]interface{}) error {
cs := meta.(*cloudstack.CloudStackClient)
uuids := rule["uuids"].(map[string]interface{})
@@ -137,7 +176,7 @@ func resourceCloudStackEgressFirewallCreateRule(
p := cs.Firewall.NewCreateEgressFirewallRuleParams(d.Id(), rule["protocol"].(string))
// Set the CIDR list
- p.SetCidrlist([]string{rule["source_cidr"].(string)})
+ p.SetCidrlist(retrieveCidrList(rule))
// If the protocol is ICMP set the needed ICMP parameters
if rule["protocol"].(string) == "icmp" {
@@ -157,14 +196,18 @@ func resourceCloudStackEgressFirewallCreateRule(
if ps := rule["ports"].(*schema.Set); ps.Len() > 0 {
// Create an empty schema.Set to hold all processed ports
- ports := &schema.Set{
- F: func(v interface{}) int {
- return hashcode.String(v.(string))
- },
- }
+ ports := &schema.Set{F: schema.HashString}
+
+ // Define a regexp for parsing the port
+ re := regexp.MustCompile(`^(\d+)(?:-(\d+))?$`)
for _, port := range ps.List() {
- re := regexp.MustCompile(`^(\d+)(?:-(\d+))?$`)
+ if _, ok := uuids[port.(string)]; ok {
+ ports.Add(port)
+ rule["ports"] = ports
+ continue
+ }
+
m := re.FindStringSubmatch(port.(string))
startPort, err := strconv.Atoi(m[1])
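The regexp above accepts either a single port ("80") or a range ("1000-2000"); the capture groups feed the Atoi calls that follow. A small standalone sketch of that parsing step, with illustrative names:

package cloudstack

import (
	"fmt"
	"regexp"
	"strconv"
)

var portRe = regexp.MustCompile(`^(\d+)(?:-(\d+))?$`)

// parsePortRange turns "80" into (80, 80) and "1000-2000" into (1000, 2000).
func parsePortRange(port string) (start, end int, err error) {
	m := portRe.FindStringSubmatch(port)
	if m == nil {
		return 0, 0, fmt.Errorf("%q is not a valid port or port range", port)
	}

	start, err = strconv.Atoi(m[1])
	if err != nil {
		return 0, 0, err
	}

	end = start
	if m[2] != "" {
		end, err = strconv.Atoi(m[2])
		if err != nil {
			return 0, 0, err
		}
	}

	return start, end, nil
}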
@@ -220,9 +263,7 @@ func resourceCloudStackEgressFirewallRead(d *schema.ResourceData, meta interface
}
// Create an empty schema.Set to hold all rules
- rules := &schema.Set{
- F: resourceCloudStackEgressFirewallRuleHash,
- }
+ rules := resourceCloudStackEgressFirewall().Schema["rule"].ZeroValue().(*schema.Set)
// Read all rules that are configured
if rs := d.Get("rule").(*schema.Set); rs.Len() > 0 {
@@ -247,10 +288,10 @@ func resourceCloudStackEgressFirewallRead(d *schema.ResourceData, meta interface
delete(ruleMap, id.(string))
// Update the values
- rule["source_cidr"] = r.Cidrlist
rule["protocol"] = r.Protocol
rule["icmp_type"] = r.Icmptype
rule["icmp_code"] = r.Icmpcode
+ setCidrList(rule, r.Cidrlist)
rules.Add(rule)
}
@@ -259,11 +300,7 @@ func resourceCloudStackEgressFirewallRead(d *schema.ResourceData, meta interface
if ps := rule["ports"].(*schema.Set); ps.Len() > 0 {
// Create an empty schema.Set to hold all ports
- ports := &schema.Set{
- F: func(v interface{}) int {
- return hashcode.String(v.(string))
- },
- }
+ ports := &schema.Set{F: schema.HashString}
// Loop through all ports and retrieve their info
for _, port := range ps.List() {
@@ -283,8 +320,8 @@ func resourceCloudStackEgressFirewallRead(d *schema.ResourceData, meta interface
delete(ruleMap, id.(string))
// Update the values
- rule["source_cidr"] = r.Cidrlist
rule["protocol"] = r.Protocol
+ setCidrList(rule, r.Cidrlist)
ports.Add(port)
}
@@ -302,11 +339,16 @@ func resourceCloudStackEgressFirewallRead(d *schema.ResourceData, meta interface
managed := d.Get("managed").(bool)
if managed && len(ruleMap) > 0 {
for uuid := range ruleMap {
+ // We need to create and add a dummy value to a schema.Set as the
+ // cidr_list is a required field and thus needs a value
+ cidrs := &schema.Set{F: schema.HashString}
+ cidrs.Add(uuid)
+
// Make a dummy rule to hold the unknown UUID
rule := map[string]interface{}{
- "source_cidr": uuid,
- "protocol": uuid,
- "uuids": map[string]interface{}{uuid: uuid},
+ "cidr_list": cidrs,
+ "protocol": uuid,
+ "uuids": map[string]interface{}{uuid: uuid},
}
// Add the dummy rule to the rules set
@@ -335,27 +377,29 @@ func resourceCloudStackEgressFirewallUpdate(d *schema.ResourceData, meta interfa
ors := o.(*schema.Set).Difference(n.(*schema.Set))
nrs := n.(*schema.Set).Difference(o.(*schema.Set))
- // Now first loop through all the old rules and delete any obsolete ones
- for _, rule := range ors.List() {
- // Delete the rule as it no longer exists in the config
- err := resourceCloudStackEgressFirewallDeleteRule(d, meta, rule.(map[string]interface{}))
+ // We need to start with a rule set containing all the rules we
+ // already have and want to keep. Any rules that are not deleted
+ // correctly and any newly created rules will be added to this
+ // set to make sure we end up in a consistent state
+ rules := o.(*schema.Set).Intersection(n.(*schema.Set))
+
+ // First loop through all the old rules and delete them
+ if ors.Len() > 0 {
+ err := deleteEgressFirewallRules(d, meta, rules, ors)
+
+ // We need to update this first to preserve the correct state
+ d.Set("rule", rules)
+
if err != nil {
return err
}
}
- // Make sure we save the state of the currently configured rules
- rules := o.(*schema.Set).Intersection(n.(*schema.Set))
- d.Set("rule", rules)
-
- // Then loop through all the currently configured rules and create the new ones
- for _, rule := range nrs.List() {
- // When successfully deleted, re-create it again if it still exists
- err := resourceCloudStackEgressFirewallCreateRule(
- d, meta, rule.(map[string]interface{}))
+ // Then loop through all the new rules and create them
+ if nrs.Len() > 0 {
+ err := createEgressFirewallRules(d, meta, rules, nrs)
// We need to update this first to preserve the correct state
- rules.Add(rule)
d.Set("rule", rules)
if err != nil {
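The Update path above leans on schema.Set arithmetic: the intersection of the old and new rule sets is kept as-is and written back to state first, while the two set differences yield the rules to delete and to create. A minimal sketch of that bookkeeping, with generic names:

package cloudstack

import "github.com/hashicorp/terraform/helper/schema"

// planRuleChanges splits the old and new rule sets into the three groups the
// Update functions work with.
func planRuleChanges(o, n *schema.Set) (keep, toDelete, toCreate *schema.Set) {
	// Rules present in both configurations are kept untouched; this set is
	// also what gets written back to state before any API calls are made.
	keep = o.Intersection(n)

	// Rules only in the old configuration must be deleted ...
	toDelete = o.Difference(n)

	// ... and rules only in the new configuration must be created.
	toCreate = n.Difference(o)

	return keep, toDelete, toCreate
}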
@@ -368,26 +412,69 @@ func resourceCloudStackEgressFirewallUpdate(d *schema.ResourceData, meta interfa
}
func resourceCloudStackEgressFirewallDelete(d *schema.ResourceData, meta interface{}) error {
+ // Create an empty rule set to hold all rules that were
+ // not deleted correctly
+ rules := resourceCloudStackEgressFirewall().Schema["rule"].ZeroValue().(*schema.Set)
+
// Delete all rules
- if rs := d.Get("rule").(*schema.Set); rs.Len() > 0 {
- for _, rule := range rs.List() {
- // Delete a single rule
- err := resourceCloudStackEgressFirewallDeleteRule(d, meta, rule.(map[string]interface{}))
+ if ors := d.Get("rule").(*schema.Set); ors.Len() > 0 {
+ err := deleteEgressFirewallRules(d, meta, rules, ors)
- // We need to update this first to preserve the correct state
- d.Set("rule", rs)
+ // We need to update this first to preserve the correct state
+ d.Set("rule", rules)
- if err != nil {
- return err
- }
+ if err != nil {
+ return err
}
}
return nil
}
-func resourceCloudStackEgressFirewallDeleteRule(
- d *schema.ResourceData, meta interface{}, rule map[string]interface{}) error {
+func deleteEgressFirewallRules(
+ d *schema.ResourceData,
+ meta interface{},
+ rules *schema.Set,
+ ors *schema.Set) error {
+ var errs *multierror.Error
+
+ var wg sync.WaitGroup
+ wg.Add(ors.Len())
+
+ sem := make(chan struct{}, 10)
+ for _, rule := range ors.List() {
+ // Put a sleep here to avoid DoS'ing the API
+ time.Sleep(500 * time.Millisecond)
+
+ go func(rule map[string]interface{}) {
+ defer wg.Done()
+ sem <- struct{}{}
+
+ // Delete a single rule
+ err := deleteEgressFirewallRule(d, meta, rule)
+
+ // If we have at least one UUID, we need to save the rule
+ if len(rule["uuids"].(map[string]interface{})) > 0 {
+ rules.Add(rule)
+ }
+
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ }
+
+ <-sem
+ }(rule.(map[string]interface{}))
+ }
+
+ wg.Wait()
+
+ return errs.ErrorOrNil()
+}
+
+func deleteEgressFirewallRule(
+ d *schema.ResourceData,
+ meta interface{},
+ rule map[string]interface{}) error {
cs := meta.(*cloudstack.CloudStackClient)
uuids := rule["uuids"].(map[string]interface{})
@@ -416,47 +503,12 @@ func resourceCloudStackEgressFirewallDeleteRule(
// Delete the UUID of this rule
delete(uuids, k)
+ rule["uuids"] = uuids
}
- // Update the UUIDs
- rule["uuids"] = uuids
-
return nil
}
-func resourceCloudStackEgressFirewallRuleHash(v interface{}) int {
- var buf bytes.Buffer
- m := v.(map[string]interface{})
- buf.WriteString(fmt.Sprintf(
- "%s-%s-", m["source_cidr"].(string), m["protocol"].(string)))
-
- if v, ok := m["icmp_type"]; ok {
- buf.WriteString(fmt.Sprintf("%d-", v.(int)))
- }
-
- if v, ok := m["icmp_code"]; ok {
- buf.WriteString(fmt.Sprintf("%d-", v.(int)))
- }
-
- // We need to make sure to sort the strings below so that we always
- // generate the same hash code no matter what is in the set.
- if v, ok := m["ports"]; ok {
- vs := v.(*schema.Set).List()
- s := make([]string, len(vs))
-
- for i, raw := range vs {
- s[i] = raw.(string)
- }
- sort.Strings(s)
-
- for _, v := range s {
- buf.WriteString(fmt.Sprintf("%s-", v))
- }
- }
-
- return hashcode.String(buf.String())
-}
-
func verifyEgressFirewallParams(d *schema.ResourceData) error {
managed := d.Get("managed").(bool)
_, rules := d.GetOk("rule")
@@ -470,6 +522,17 @@ func verifyEgressFirewallParams(d *schema.ResourceData) error {
}
func verifyEgressFirewallRuleParams(d *schema.ResourceData, rule map[string]interface{}) error {
+ cidrList := rule["cidr_list"].(*schema.Set)
+ sourceCidr := rule["source_cidr"].(string)
+ if cidrList.Len() == 0 && sourceCidr == "" {
+ return fmt.Errorf(
+ "Parameter cidr_list is a required parameter")
+ }
+ if cidrList.Len() > 0 && sourceCidr != "" {
+ return fmt.Errorf(
+ "Parameter source_cidr is deprecated and cannot be used together with cidr_list")
+ }
+
protocol := rule["protocol"].(string)
if protocol != "tcp" && protocol != "udp" && protocol != "icmp" {
return fmt.Errorf(
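The helpers retrieveCidrList and setCidrList called throughout this file are added to builtin/providers/cloudstack/resources.go (see the diffstat), but their bodies are not part of this excerpt. The sketch below is only a guess at their shape, inferred from the call sites; it is an assumption, not the code from that file.

package cloudstack

import (
	"strings"

	"github.com/hashicorp/terraform/helper/schema"
)

// retrieveCidrList returns the CIDRs to send to the API, falling back to the
// deprecated source_cidr field when cidr_list is empty (assumed behavior).
func retrieveCidrList(rule map[string]interface{}) []string {
	cidrList := rule["cidr_list"].(*schema.Set).List()

	if len(cidrList) == 0 {
		return []string{rule["source_cidr"].(string)}
	}

	cidrs := make([]string, len(cidrList))
	for i, cidr := range cidrList {
		cidrs[i] = cidr.(string)
	}

	return cidrs
}

// setCidrList writes the comma-separated CIDR list returned by CloudStack
// back onto the rule, using whichever field the configuration used
// (assumed behavior).
func setCidrList(rule map[string]interface{}, cidrList string) {
	if sc, ok := rule["source_cidr"].(string); ok && sc != "" {
		rule["source_cidr"] = cidrList
		return
	}

	cidrs := &schema.Set{F: schema.HashString}
	for _, cidr := range strings.Split(cidrList, ",") {
		cidrs.Add(strings.TrimSpace(cidr))
	}
	rule["cidr_list"] = cidrs
}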
diff --git a/builtin/providers/cloudstack/resource_cloudstack_egress_firewall_test.go b/builtin/providers/cloudstack/resource_cloudstack_egress_firewall_test.go
index dbca8c32b4..07f4e0d8a2 100644
--- a/builtin/providers/cloudstack/resource_cloudstack_egress_firewall_test.go
+++ b/builtin/providers/cloudstack/resource_cloudstack_egress_firewall_test.go
@@ -2,19 +2,15 @@ package cloudstack
import (
"fmt"
- "strconv"
"strings"
"testing"
"github.com/hashicorp/terraform/helper/resource"
- "github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
"github.com/xanzy/go-cloudstack/cloudstack"
)
func TestAccCloudStackEgressFirewall_basic(t *testing.T) {
- hash := makeTestCloudStackEgressFirewallRuleHash([]interface{}{"1000-2000", "80"})
-
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@@ -26,18 +22,26 @@ func TestAccCloudStackEgressFirewall_basic(t *testing.T) {
testAccCheckCloudStackEgressFirewallRulesExist("cloudstack_egress_firewall.foo"),
resource.TestCheckResourceAttr(
"cloudstack_egress_firewall.foo", "network", CLOUDSTACK_NETWORK_1),
+ resource.TestCheckResourceAttr(
+ "cloudstack_egress_firewall.foo", "rule.#", "2"),
resource.TestCheckResourceAttr(
"cloudstack_egress_firewall.foo",
- "rule."+hash+".source_cidr",
- CLOUDSTACK_NETWORK_1_IPADDRESS+"/32"),
+ "rule.1081385056.cidr_list.3378711023",
+ CLOUDSTACK_NETWORK_1_IPADDRESS1+"/32"),
resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule."+hash+".protocol", "tcp"),
+ "cloudstack_egress_firewall.foo", "rule.1081385056.protocol", "tcp"),
resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule."+hash+".ports.#", "2"),
+ "cloudstack_egress_firewall.foo", "rule.1081385056.ports.32925333", "8080"),
resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule."+hash+".ports.1209010669", "1000-2000"),
+ "cloudstack_egress_firewall.foo",
+ "rule.1129999216.source_cidr",
+ CLOUDSTACK_NETWORK_1_IPADDRESS1+"/32"),
resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule."+hash+".ports.1889509032", "80"),
+ "cloudstack_egress_firewall.foo", "rule.1129999216.protocol", "tcp"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_egress_firewall.foo", "rule.1129999216.ports.1209010669", "1000-2000"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_egress_firewall.foo", "rule.1129999216.ports.1889509032", "80"),
),
},
},
@@ -45,9 +49,6 @@ func TestAccCloudStackEgressFirewall_basic(t *testing.T) {
}
func TestAccCloudStackEgressFirewall_update(t *testing.T) {
- hash1 := makeTestCloudStackEgressFirewallRuleHash([]interface{}{"1000-2000", "80"})
- hash2 := makeTestCloudStackEgressFirewallRuleHash([]interface{}{"443"})
-
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@@ -60,19 +61,25 @@ func TestAccCloudStackEgressFirewall_update(t *testing.T) {
resource.TestCheckResourceAttr(
"cloudstack_egress_firewall.foo", "network", CLOUDSTACK_NETWORK_1),
resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule.#", "1"),
+ "cloudstack_egress_firewall.foo", "rule.#", "2"),
resource.TestCheckResourceAttr(
"cloudstack_egress_firewall.foo",
- "rule."+hash1+".source_cidr",
- CLOUDSTACK_NETWORK_1_IPADDRESS+"/32"),
+ "rule.1081385056.cidr_list.3378711023",
+ CLOUDSTACK_NETWORK_1_IPADDRESS1+"/32"),
resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule."+hash1+".protocol", "tcp"),
+ "cloudstack_egress_firewall.foo", "rule.1081385056.protocol", "tcp"),
resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule."+hash1+".ports.#", "2"),
+ "cloudstack_egress_firewall.foo", "rule.1081385056.ports.32925333", "8080"),
resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule."+hash1+".ports.1209010669", "1000-2000"),
+ "cloudstack_egress_firewall.foo",
+ "rule.1129999216.source_cidr",
+ CLOUDSTACK_NETWORK_1_IPADDRESS1+"/32"),
resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule."+hash1+".ports.1889509032", "80"),
+ "cloudstack_egress_firewall.foo", "rule.1129999216.protocol", "tcp"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_egress_firewall.foo", "rule.1129999216.ports.1209010669", "1000-2000"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_egress_firewall.foo", "rule.1129999216.ports.1889509032", "80"),
),
},
@@ -83,29 +90,37 @@ func TestAccCloudStackEgressFirewall_update(t *testing.T) {
resource.TestCheckResourceAttr(
"cloudstack_egress_firewall.foo", "network", CLOUDSTACK_NETWORK_1),
resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule.#", "2"),
+ "cloudstack_egress_firewall.foo", "rule.#", "3"),
resource.TestCheckResourceAttr(
"cloudstack_egress_firewall.foo",
- "rule."+hash1+".source_cidr",
- CLOUDSTACK_NETWORK_1_IPADDRESS+"/32"),
- resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule."+hash1+".protocol", "tcp"),
- resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule."+hash1+".ports.#", "2"),
- resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule."+hash1+".ports.1209010669", "1000-2000"),
- resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule."+hash1+".ports.1889509032", "80"),
+ "rule.59731059.cidr_list.1910468234",
+ CLOUDSTACK_NETWORK_1_IPADDRESS2+"/32"),
resource.TestCheckResourceAttr(
"cloudstack_egress_firewall.foo",
- "rule."+hash2+".source_cidr",
- CLOUDSTACK_NETWORK_1_IPADDRESS+"/32"),
+ "rule.59731059.cidr_list.3378711023",
+ CLOUDSTACK_NETWORK_1_IPADDRESS1+"/32"),
resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule."+hash2+".protocol", "tcp"),
+ "cloudstack_egress_firewall.foo", "rule.59731059.protocol", "tcp"),
resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule."+hash2+".ports.#", "1"),
+ "cloudstack_egress_firewall.foo", "rule.59731059.ports.32925333", "8080"),
resource.TestCheckResourceAttr(
- "cloudstack_egress_firewall.foo", "rule."+hash2+".ports.3638101695", "443"),
+ "cloudstack_egress_firewall.foo",
+ "rule.1052669680.source_cidr",
+ CLOUDSTACK_NETWORK_1_IPADDRESS1+"/32"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_egress_firewall.foo", "rule.1052669680.protocol", "tcp"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_egress_firewall.foo", "rule.1052669680.ports.3638101695", "443"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_egress_firewall.foo",
+ "rule.1129999216.source_cidr",
+ CLOUDSTACK_NETWORK_1_IPADDRESS1+"/32"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_egress_firewall.foo", "rule.1129999216.protocol", "tcp"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_egress_firewall.foo", "rule.1129999216.ports.1209010669", "1000-2000"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_egress_firewall.foo", "rule.1129999216.ports.1889509032", "80"),
),
},
},
@@ -171,20 +186,16 @@ func testAccCheckCloudStackEgressFirewallDestroy(s *terraform.State) error {
return nil
}
-func makeTestCloudStackEgressFirewallRuleHash(ports []interface{}) string {
- return strconv.Itoa(resourceCloudStackEgressFirewallRuleHash(map[string]interface{}{
- "source_cidr": CLOUDSTACK_NETWORK_1_IPADDRESS + "/32",
- "protocol": "tcp",
- "ports": schema.NewSet(schema.HashString, ports),
- "icmp_type": 0,
- "icmp_code": 0,
- }))
-}
-
var testAccCloudStackEgressFirewall_basic = fmt.Sprintf(`
resource "cloudstack_egress_firewall" "foo" {
network = "%s"
+ rule {
+ cidr_list = ["%s/32"]
+ protocol = "tcp"
+ ports = ["8080"]
+ }
+
rule {
source_cidr = "%s/32"
protocol = "tcp"
@@ -192,12 +203,19 @@ resource "cloudstack_egress_firewall" "foo" {
}
}`,
CLOUDSTACK_NETWORK_1,
- CLOUDSTACK_NETWORK_1_IPADDRESS)
+ CLOUDSTACK_NETWORK_1_IPADDRESS1,
+ CLOUDSTACK_NETWORK_1_IPADDRESS1)
var testAccCloudStackEgressFirewall_update = fmt.Sprintf(`
resource "cloudstack_egress_firewall" "foo" {
network = "%s"
+ rule {
+ cidr_list = ["%s/32", "%s/32"]
+ protocol = "tcp"
+ ports = ["8080"]
+ }
+
rule {
source_cidr = "%s/32"
protocol = "tcp"
@@ -211,5 +229,7 @@ resource "cloudstack_egress_firewall" "foo" {
}
}`,
CLOUDSTACK_NETWORK_1,
- CLOUDSTACK_NETWORK_1_IPADDRESS,
- CLOUDSTACK_NETWORK_1_IPADDRESS)
+ CLOUDSTACK_NETWORK_1_IPADDRESS1,
+ CLOUDSTACK_NETWORK_1_IPADDRESS2,
+ CLOUDSTACK_NETWORK_1_IPADDRESS1,
+ CLOUDSTACK_NETWORK_1_IPADDRESS1)
diff --git a/builtin/providers/cloudstack/resource_cloudstack_firewall.go b/builtin/providers/cloudstack/resource_cloudstack_firewall.go
index c5a8f87638..b8f92b553a 100644
--- a/builtin/providers/cloudstack/resource_cloudstack_firewall.go
+++ b/builtin/providers/cloudstack/resource_cloudstack_firewall.go
@@ -1,14 +1,14 @@
package cloudstack
import (
- "bytes"
"fmt"
"regexp"
- "sort"
"strconv"
"strings"
+ "sync"
+ "time"
- "github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/helper/schema"
"github.com/xanzy/go-cloudstack/cloudstack"
)
@@ -38,9 +38,17 @@ func resourceCloudStackFirewall() *schema.Resource {
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
+ "cidr_list": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ },
+
"source_cidr": &schema.Schema{
- Type: schema.TypeString,
- Required: true,
+ Type: schema.TypeString,
+ Optional: true,
+ Deprecated: "Please use the `cidr_list` field instead",
},
"protocol": &schema.Schema{
@@ -64,9 +72,7 @@ func resourceCloudStackFirewall() *schema.Resource {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
- Set: func(v interface{}) int {
- return hashcode.String(v.(string))
- },
+ Set: schema.HashString,
},
"uuids": &schema.Schema{
@@ -75,7 +81,6 @@ func resourceCloudStackFirewall() *schema.Resource {
},
},
},
- Set: resourceCloudStackFirewallRuleHash,
},
},
}
@@ -99,32 +104,66 @@ func resourceCloudStackFirewallCreate(d *schema.ResourceData, meta interface{})
d.SetId(ipaddressid)
// Create all rules that are configured
- if rs := d.Get("rule").(*schema.Set); rs.Len() > 0 {
-
+ if nrs := d.Get("rule").(*schema.Set); nrs.Len() > 0 {
// Create an empty schema.Set to hold all rules
- rules := &schema.Set{
- F: resourceCloudStackFirewallRuleHash,
- }
+ rules := resourceCloudStackFirewall().Schema["rule"].ZeroValue().(*schema.Set)
- for _, rule := range rs.List() {
- // Create a single rule
- err := resourceCloudStackFirewallCreateRule(d, meta, rule.(map[string]interface{}))
+ err := createFirewallRules(d, meta, rules, nrs)
- // We need to update this first to preserve the correct state
- rules.Add(rule)
- d.Set("rule", rules)
+ // We need to update this first to preserve the correct state
+ d.Set("rule", rules)
- if err != nil {
- return err
- }
+ if err != nil {
+ return err
}
}
return resourceCloudStackFirewallRead(d, meta)
}
+func createFirewallRules(
+ d *schema.ResourceData,
+ meta interface{},
+ rules *schema.Set,
+ nrs *schema.Set) error {
+ var errs *multierror.Error
-func resourceCloudStackFirewallCreateRule(
- d *schema.ResourceData, meta interface{}, rule map[string]interface{}) error {
+ var wg sync.WaitGroup
+ wg.Add(nrs.Len())
+
+ sem := make(chan struct{}, 10)
+ for _, rule := range nrs.List() {
+ // Put in a tiny sleep here to avoid DoS'ing the API
+ time.Sleep(500 * time.Millisecond)
+
+ go func(rule map[string]interface{}) {
+ defer wg.Done()
+ sem <- struct{}{}
+
+ // Create a single rule
+ err := createFirewallRule(d, meta, rule)
+
+ // If we have at least one UUID, we need to save the rule
+ if len(rule["uuids"].(map[string]interface{})) > 0 {
+ rules.Add(rule)
+ }
+
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ }
+
+ <-sem
+ }(rule.(map[string]interface{}))
+ }
+
+ wg.Wait()
+
+ return errs.ErrorOrNil()
+}
+
+func createFirewallRule(
+ d *schema.ResourceData,
+ meta interface{},
+ rule map[string]interface{}) error {
cs := meta.(*cloudstack.CloudStackClient)
uuids := rule["uuids"].(map[string]interface{})
@@ -137,7 +176,7 @@ func resourceCloudStackFirewallCreateRule(
p := cs.Firewall.NewCreateFirewallRuleParams(d.Id(), rule["protocol"].(string))
// Set the CIDR list
- p.SetCidrlist([]string{rule["source_cidr"].(string)})
+ p.SetCidrlist(retrieveCidrList(rule))
// If the protocol is ICMP set the needed ICMP parameters
if rule["protocol"].(string) == "icmp" {
@@ -148,6 +187,7 @@ func resourceCloudStackFirewallCreateRule(
if err != nil {
return err
}
+
uuids["icmp"] = r.Id
rule["uuids"] = uuids
}
@@ -157,14 +197,18 @@ func resourceCloudStackFirewallCreateRule(
if ps := rule["ports"].(*schema.Set); ps.Len() > 0 {
// Create an empty schema.Set to hold all processed ports
- ports := &schema.Set{
- F: func(v interface{}) int {
- return hashcode.String(v.(string))
- },
- }
+ ports := &schema.Set{F: schema.HashString}
+
+ // Define a regexp for parsing the port
+ re := regexp.MustCompile(`^(\d+)(?:-(\d+))?$`)
for _, port := range ps.List() {
- re := regexp.MustCompile(`^(\d+)(?:-(\d+))?$`)
+ if _, ok := uuids[port.(string)]; ok {
+ ports.Add(port)
+ rule["ports"] = ports
+ continue
+ }
+
m := re.FindStringSubmatch(port.(string))
startPort, err := strconv.Atoi(m[1])
@@ -220,9 +264,7 @@ func resourceCloudStackFirewallRead(d *schema.ResourceData, meta interface{}) er
}
// Create an empty schema.Set to hold all rules
- rules := &schema.Set{
- F: resourceCloudStackFirewallRuleHash,
- }
+ rules := resourceCloudStackFirewall().Schema["rule"].ZeroValue().(*schema.Set)
// Read all rules that are configured
if rs := d.Get("rule").(*schema.Set); rs.Len() > 0 {
@@ -247,10 +289,10 @@ func resourceCloudStackFirewallRead(d *schema.ResourceData, meta interface{}) er
delete(ruleMap, id.(string))
// Update the values
- rule["source_cidr"] = r.Cidrlist
rule["protocol"] = r.Protocol
rule["icmp_type"] = r.Icmptype
rule["icmp_code"] = r.Icmpcode
+ setCidrList(rule, r.Cidrlist)
rules.Add(rule)
}
@@ -259,11 +301,7 @@ func resourceCloudStackFirewallRead(d *schema.ResourceData, meta interface{}) er
if ps := rule["ports"].(*schema.Set); ps.Len() > 0 {
// Create an empty schema.Set to hold all ports
- ports := &schema.Set{
- F: func(v interface{}) int {
- return hashcode.String(v.(string))
- },
- }
+ ports := &schema.Set{F: schema.HashString}
// Loop through all ports and retrieve their info
for _, port := range ps.List() {
@@ -283,8 +321,8 @@ func resourceCloudStackFirewallRead(d *schema.ResourceData, meta interface{}) er
delete(ruleMap, id.(string))
// Update the values
- rule["source_cidr"] = r.Cidrlist
rule["protocol"] = r.Protocol
+ setCidrList(rule, r.Cidrlist)
ports.Add(port)
}
@@ -302,11 +340,16 @@ func resourceCloudStackFirewallRead(d *schema.ResourceData, meta interface{}) er
managed := d.Get("managed").(bool)
if managed && len(ruleMap) > 0 {
for uuid := range ruleMap {
+ // We need to create and add a dummy value to a schema.Set as the
+ // cidr_list is a required field and thus needs a value
+ cidrs := &schema.Set{F: schema.HashString}
+ cidrs.Add(uuid)
+
// Make a dummy rule to hold the unknown UUID
rule := map[string]interface{}{
- "source_cidr": uuid,
- "protocol": uuid,
- "uuids": map[string]interface{}{uuid: uuid},
+ "cidr_list": cidrs,
+ "protocol": uuid,
+ "uuids": map[string]interface{}{uuid: uuid},
}
// Add the dummy rule to the rules set
@@ -335,27 +378,29 @@ func resourceCloudStackFirewallUpdate(d *schema.ResourceData, meta interface{})
ors := o.(*schema.Set).Difference(n.(*schema.Set))
nrs := n.(*schema.Set).Difference(o.(*schema.Set))
- // Now first loop through all the old rules and delete any obsolete ones
- for _, rule := range ors.List() {
- // Delete the rule as it no longer exists in the config
- err := resourceCloudStackFirewallDeleteRule(d, meta, rule.(map[string]interface{}))
+ // We need to start with a rule set containing all the rules we
+ // already have and want to keep. Any rules that are not deleted
+ // correctly and any newly created rules will be added to this
+ // set to make sure we end up in a consistent state
+ rules := o.(*schema.Set).Intersection(n.(*schema.Set))
+
+ // First loop through all the old rules and delete them
+ if ors.Len() > 0 {
+ err := deleteFirewallRules(d, meta, rules, ors)
+
+ // We need to update this first to preserve the correct state
+ d.Set("rule", rules)
+
if err != nil {
return err
}
}
- // Make sure we save the state of the currently configured rules
- rules := o.(*schema.Set).Intersection(n.(*schema.Set))
- d.Set("rule", rules)
-
- // Then loop through all the currently configured rules and create the new ones
- for _, rule := range nrs.List() {
- // When successfully deleted, re-create it again if it still exists
- err := resourceCloudStackFirewallCreateRule(
- d, meta, rule.(map[string]interface{}))
+ // Then loop through all the new rules and create them
+ if nrs.Len() > 0 {
+ err := createFirewallRules(d, meta, rules, nrs)
// We need to update this first to preserve the correct state
- rules.Add(rule)
d.Set("rule", rules)
if err != nil {
@@ -368,26 +413,69 @@ func resourceCloudStackFirewallUpdate(d *schema.ResourceData, meta interface{})
}
func resourceCloudStackFirewallDelete(d *schema.ResourceData, meta interface{}) error {
+ // Create an empty rule set to hold all rules that were
+ // not deleted correctly
+ rules := resourceCloudStackFirewall().Schema["rule"].ZeroValue().(*schema.Set)
+
// Delete all rules
- if rs := d.Get("rule").(*schema.Set); rs.Len() > 0 {
- for _, rule := range rs.List() {
- // Delete a single rule
- err := resourceCloudStackFirewallDeleteRule(d, meta, rule.(map[string]interface{}))
+ if ors := d.Get("rule").(*schema.Set); ors.Len() > 0 {
+ err := deleteFirewallRules(d, meta, rules, ors)
- // We need to update this first to preserve the correct state
- d.Set("rule", rs)
+ // We need to update this first to preserve the correct state
+ d.Set("rule", rules)
- if err != nil {
- return err
- }
+ if err != nil {
+ return err
}
}
return nil
}
-func resourceCloudStackFirewallDeleteRule(
- d *schema.ResourceData, meta interface{}, rule map[string]interface{}) error {
+func deleteFirewallRules(
+ d *schema.ResourceData,
+ meta interface{},
+ rules *schema.Set,
+ ors *schema.Set) error {
+ var errs *multierror.Error
+
+ var wg sync.WaitGroup
+ wg.Add(ors.Len())
+
+ sem := make(chan struct{}, 10)
+ for _, rule := range ors.List() {
+ // Put a sleep here to avoid DoS'ing the API
+ time.Sleep(500 * time.Millisecond)
+
+ go func(rule map[string]interface{}) {
+ defer wg.Done()
+ sem <- struct{}{}
+
+ // Delete a single rule
+ err := deleteFirewallRule(d, meta, rule)
+
+ // If we have at least one UUID, we need to save the rule
+ if len(rule["uuids"].(map[string]interface{})) > 0 {
+ rules.Add(rule)
+ }
+
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ }
+
+ <-sem
+ }(rule.(map[string]interface{}))
+ }
+
+ wg.Wait()
+
+ return errs.ErrorOrNil()
+}
+
+func deleteFirewallRule(
+ d *schema.ResourceData,
+ meta interface{},
+ rule map[string]interface{}) error {
cs := meta.(*cloudstack.CloudStackClient)
uuids := rule["uuids"].(map[string]interface{})
@@ -416,47 +504,12 @@ func resourceCloudStackFirewallDeleteRule(
// Delete the UUID of this rule
delete(uuids, k)
+ rule["uuids"] = uuids
}
- // Update the UUIDs
- rule["uuids"] = uuids
-
return nil
}
-func resourceCloudStackFirewallRuleHash(v interface{}) int {
- var buf bytes.Buffer
- m := v.(map[string]interface{})
- buf.WriteString(fmt.Sprintf(
- "%s-%s-", m["source_cidr"].(string), m["protocol"].(string)))
-
- if v, ok := m["icmp_type"]; ok {
- buf.WriteString(fmt.Sprintf("%d-", v.(int)))
- }
-
- if v, ok := m["icmp_code"]; ok {
- buf.WriteString(fmt.Sprintf("%d-", v.(int)))
- }
-
- // We need to make sure to sort the strings below so that we always
- // generate the same hash code no matter what is in the set.
- if v, ok := m["ports"]; ok {
- vs := v.(*schema.Set).List()
- s := make([]string, len(vs))
-
- for i, raw := range vs {
- s[i] = raw.(string)
- }
- sort.Strings(s)
-
- for _, v := range s {
- buf.WriteString(fmt.Sprintf("%s-", v))
- }
- }
-
- return hashcode.String(buf.String())
-}
-
func verifyFirewallParams(d *schema.ResourceData) error {
managed := d.Get("managed").(bool)
_, rules := d.GetOk("rule")
@@ -470,6 +523,17 @@ func verifyFirewallParams(d *schema.ResourceData) error {
}
func verifyFirewallRuleParams(d *schema.ResourceData, rule map[string]interface{}) error {
+ cidrList := rule["cidr_list"].(*schema.Set)
+ sourceCidr := rule["source_cidr"].(string)
+ if cidrList.Len() == 0 && sourceCidr == "" {
+ return fmt.Errorf(
+ "Parameter cidr_list is a required parameter")
+ }
+ if cidrList.Len() > 0 && sourceCidr != "" {
+ return fmt.Errorf(
+ "Parameter source_cidr is deprecated and cannot be used together with cidr_list")
+ }
+
protocol := rule["protocol"].(string)
if protocol != "tcp" && protocol != "udp" && protocol != "icmp" {
return fmt.Errorf(
diff --git a/builtin/providers/cloudstack/resource_cloudstack_firewall_test.go b/builtin/providers/cloudstack/resource_cloudstack_firewall_test.go
index a86cdc3b2b..d93a2c73eb 100644
--- a/builtin/providers/cloudstack/resource_cloudstack_firewall_test.go
+++ b/builtin/providers/cloudstack/resource_cloudstack_firewall_test.go
@@ -23,15 +23,21 @@ func TestAccCloudStackFirewall_basic(t *testing.T) {
resource.TestCheckResourceAttr(
"cloudstack_firewall.foo", "ipaddress", CLOUDSTACK_PUBLIC_IPADDRESS),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.1702320581.source_cidr", "10.0.0.0/24"),
+ "cloudstack_firewall.foo", "rule.#", "2"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.1702320581.protocol", "tcp"),
+ "cloudstack_firewall.foo", "rule.60926170.cidr_list.3482919157", "10.0.0.0/24"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.1702320581.ports.#", "2"),
+ "cloudstack_firewall.foo", "rule.60926170.protocol", "tcp"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.1702320581.ports.1209010669", "1000-2000"),
+ "cloudstack_firewall.foo", "rule.60926170.ports.32925333", "8080"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.1702320581.ports.1889509032", "80"),
+ "cloudstack_firewall.foo", "rule.716592205.source_cidr", "10.0.0.0/24"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_firewall.foo", "rule.716592205.protocol", "tcp"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_firewall.foo", "rule.716592205.ports.1209010669", "1000-2000"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_firewall.foo", "rule.716592205.ports.1889509032", "80"),
),
},
},
@@ -51,17 +57,21 @@ func TestAccCloudStackFirewall_update(t *testing.T) {
resource.TestCheckResourceAttr(
"cloudstack_firewall.foo", "ipaddress", CLOUDSTACK_PUBLIC_IPADDRESS),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.#", "1"),
+ "cloudstack_firewall.foo", "rule.#", "2"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.1702320581.source_cidr", "10.0.0.0/24"),
+ "cloudstack_firewall.foo", "rule.60926170.cidr_list.3482919157", "10.0.0.0/24"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.1702320581.protocol", "tcp"),
+ "cloudstack_firewall.foo", "rule.60926170.protocol", "tcp"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.1702320581.ports.#", "2"),
+ "cloudstack_firewall.foo", "rule.60926170.ports.32925333", "8080"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.1702320581.ports.1209010669", "1000-2000"),
+ "cloudstack_firewall.foo", "rule.716592205.source_cidr", "10.0.0.0/24"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.1702320581.ports.1889509032", "80"),
+ "cloudstack_firewall.foo", "rule.716592205.protocol", "tcp"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_firewall.foo", "rule.716592205.ports.1209010669", "1000-2000"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_firewall.foo", "rule.716592205.ports.1889509032", "80"),
),
},
@@ -72,27 +82,31 @@ func TestAccCloudStackFirewall_update(t *testing.T) {
resource.TestCheckResourceAttr(
"cloudstack_firewall.foo", "ipaddress", CLOUDSTACK_PUBLIC_IPADDRESS),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.#", "2"),
+ "cloudstack_firewall.foo", "rule.#", "3"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.1702320581.source_cidr", "10.0.0.0/24"),
+ "cloudstack_firewall.foo", "rule.2207610982.cidr_list.80081744", "10.0.1.0/24"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.1702320581.protocol", "tcp"),
+ "cloudstack_firewall.foo", "rule.2207610982.cidr_list.3482919157", "10.0.0.0/24"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.1702320581.ports.#", "2"),
+ "cloudstack_firewall.foo", "rule.2207610982.protocol", "tcp"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.1702320581.ports.1209010669", "1000-2000"),
+ "cloudstack_firewall.foo", "rule.2207610982.ports.32925333", "8080"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.1702320581.ports.1889509032", "80"),
+ "cloudstack_firewall.foo", "rule.716592205.source_cidr", "10.0.0.0/24"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.3779782959.source_cidr", "172.16.100.0/24"),
+ "cloudstack_firewall.foo", "rule.716592205.protocol", "tcp"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.3779782959.protocol", "tcp"),
+ "cloudstack_firewall.foo", "rule.716592205.ports.1209010669", "1000-2000"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.3779782959.ports.#", "2"),
+ "cloudstack_firewall.foo", "rule.716592205.ports.1889509032", "80"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.3779782959.ports.1889509032", "80"),
+ "cloudstack_firewall.foo", "rule.4449157.source_cidr", "172.16.100.0/24"),
resource.TestCheckResourceAttr(
- "cloudstack_firewall.foo", "rule.3779782959.ports.3638101695", "443"),
+ "cloudstack_firewall.foo", "rule.4449157.protocol", "tcp"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_firewall.foo", "rule.4449157.ports.1889509032", "80"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_firewall.foo", "rule.4449157.ports.3638101695", "443"),
),
},
},
@@ -162,6 +176,12 @@ var testAccCloudStackFirewall_basic = fmt.Sprintf(`
resource "cloudstack_firewall" "foo" {
ipaddress = "%s"
+ rule {
+ cidr_list = ["10.0.0.0/24"]
+ protocol = "tcp"
+ ports = ["8080"]
+ }
+
rule {
source_cidr = "10.0.0.0/24"
protocol = "tcp"
@@ -173,6 +193,12 @@ var testAccCloudStackFirewall_update = fmt.Sprintf(`
resource "cloudstack_firewall" "foo" {
ipaddress = "%s"
+ rule {
+ cidr_list = ["10.0.0.0/24", "10.0.1.0/24"]
+ protocol = "tcp"
+ ports = ["8080"]
+ }
+
rule {
source_cidr = "10.0.0.0/24"
protocol = "tcp"
diff --git a/builtin/providers/cloudstack/resource_cloudstack_instance_test.go b/builtin/providers/cloudstack/resource_cloudstack_instance_test.go
index b0f241a678..ced4514be5 100644
--- a/builtin/providers/cloudstack/resource_cloudstack_instance_test.go
+++ b/builtin/providers/cloudstack/resource_cloudstack_instance_test.go
@@ -80,7 +80,7 @@ func TestAccCloudStackInstance_fixedIP(t *testing.T) {
testAccCheckCloudStackInstanceExists(
"cloudstack_instance.foobar", &instance),
resource.TestCheckResourceAttr(
- "cloudstack_instance.foobar", "ipaddress", CLOUDSTACK_NETWORK_1_IPADDRESS),
+ "cloudstack_instance.foobar", "ipaddress", CLOUDSTACK_NETWORK_1_IPADDRESS1),
),
},
},
@@ -268,7 +268,7 @@ resource "cloudstack_instance" "foobar" {
}`,
CLOUDSTACK_SERVICE_OFFERING_1,
CLOUDSTACK_NETWORK_1,
- CLOUDSTACK_NETWORK_1_IPADDRESS,
+ CLOUDSTACK_NETWORK_1_IPADDRESS1,
CLOUDSTACK_TEMPLATE,
CLOUDSTACK_ZONE)
@@ -290,7 +290,7 @@ resource "cloudstack_instance" "foobar" {
}`,
CLOUDSTACK_SERVICE_OFFERING_1,
CLOUDSTACK_NETWORK_1,
- CLOUDSTACK_NETWORK_1_IPADDRESS,
+ CLOUDSTACK_NETWORK_1_IPADDRESS1,
CLOUDSTACK_TEMPLATE,
CLOUDSTACK_ZONE)
diff --git a/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule.go b/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule.go
index 10c91de696..2a55427552 100644
--- a/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule.go
+++ b/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule.go
@@ -1,17 +1,14 @@
package cloudstack
import (
- "bytes"
"fmt"
"regexp"
- "sort"
"strconv"
"strings"
"sync"
"time"
"github.com/hashicorp/go-multierror"
- "github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
"github.com/xanzy/go-cloudstack/cloudstack"
)
@@ -47,9 +44,17 @@ func resourceCloudStackNetworkACLRule() *schema.Resource {
Default: "allow",
},
+ "cidr_list": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ },
+
"source_cidr": &schema.Schema{
- Type: schema.TypeString,
- Required: true,
+ Type: schema.TypeString,
+ Optional: true,
+ Deprecated: "Please use the `cidr_list` field instead",
},
"protocol": &schema.Schema{
@@ -73,9 +78,7 @@ func resourceCloudStackNetworkACLRule() *schema.Resource {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
- Set: func(v interface{}) int {
- return hashcode.String(v.(string))
- },
+ Set: schema.HashString,
},
"traffic_type": &schema.Schema{
@@ -90,7 +93,6 @@ func resourceCloudStackNetworkACLRule() *schema.Resource {
},
},
},
- Set: resourceCloudStackNetworkACLRuleHash,
},
},
}
@@ -108,11 +110,9 @@ func resourceCloudStackNetworkACLRuleCreate(d *schema.ResourceData, meta interfa
// Create all rules that are configured
if nrs := d.Get("rule").(*schema.Set); nrs.Len() > 0 {
// Create an empty rule set to hold all newly created rules
- rules := &schema.Set{
- F: resourceCloudStackNetworkACLRuleHash,
- }
+ rules := resourceCloudStackNetworkACLRule().Schema["rule"].ZeroValue().(*schema.Set)
- err := resourceCloudStackNetworkACLRuleCreateRules(d, meta, rules, nrs)
+ err := createNetworkACLRules(d, meta, rules, nrs)
// We need to update this first to preserve the correct state
d.Set("rule", rules)
@@ -125,7 +125,7 @@ func resourceCloudStackNetworkACLRuleCreate(d *schema.ResourceData, meta interfa
return resourceCloudStackNetworkACLRuleRead(d, meta)
}
-func resourceCloudStackNetworkACLRuleCreateRules(
+func createNetworkACLRules(
d *schema.ResourceData,
meta interface{},
rules *schema.Set,
@@ -145,7 +145,7 @@ func resourceCloudStackNetworkACLRuleCreateRules(
sem <- struct{}{}
// Create a single rule
- err := resourceCloudStackNetworkACLRuleCreateRule(d, meta, rule)
+ err := createNetworkACLRule(d, meta, rule)
// If we have at least one UUID, we need to save the rule
if len(rule["uuids"].(map[string]interface{})) > 0 {
@@ -162,13 +162,10 @@ func resourceCloudStackNetworkACLRuleCreateRules(
wg.Wait()
- // We need to update this first to preserve the correct state
- d.Set("rule", rules)
-
return errs.ErrorOrNil()
}
-func resourceCloudStackNetworkACLRuleCreateRule(
+func createNetworkACLRule(
d *schema.ResourceData,
meta interface{},
rule map[string]interface{}) error {
@@ -190,7 +187,7 @@ func resourceCloudStackNetworkACLRuleCreateRule(
p.SetAction(rule["action"].(string))
// Set the CIDR list
- p.SetCidrlist([]string{rule["source_cidr"].(string)})
+ p.SetCidrlist(retrieveCidrList(rule))
// Set the traffic type
p.SetTraffictype(rule["traffic_type"].(string))
@@ -225,11 +222,7 @@ func resourceCloudStackNetworkACLRuleCreateRule(
if ps := rule["ports"].(*schema.Set); ps.Len() > 0 {
// Create an empty schema.Set to hold all processed ports
- ports := &schema.Set{
- F: func(v interface{}) int {
- return hashcode.String(v.(string))
- },
- }
+ ports := &schema.Set{F: schema.HashString}
// Define a regexp for parsing the port
re := regexp.MustCompile(`^(\d+)(?:-(\d+))?$`)
@@ -296,9 +289,7 @@ func resourceCloudStackNetworkACLRuleRead(d *schema.ResourceData, meta interface
}
// Create an empty schema.Set to hold all rules
- rules := &schema.Set{
- F: resourceCloudStackNetworkACLRuleHash,
- }
+ rules := resourceCloudStackNetworkACLRule().Schema["rule"].ZeroValue().(*schema.Set)
// Read all rules that are configured
if rs := d.Get("rule").(*schema.Set); rs.Len() > 0 {
@@ -324,11 +315,11 @@ func resourceCloudStackNetworkACLRuleRead(d *schema.ResourceData, meta interface
// Update the values
rule["action"] = strings.ToLower(r.Action)
- rule["source_cidr"] = r.Cidrlist
rule["protocol"] = r.Protocol
rule["icmp_type"] = r.Icmptype
rule["icmp_code"] = r.Icmpcode
rule["traffic_type"] = strings.ToLower(r.Traffictype)
+ setCidrList(rule, r.Cidrlist)
rules.Add(rule)
}
@@ -350,9 +341,9 @@ func resourceCloudStackNetworkACLRuleRead(d *schema.ResourceData, meta interface
// Update the values
rule["action"] = strings.ToLower(r.Action)
- rule["source_cidr"] = r.Cidrlist
rule["protocol"] = r.Protocol
rule["traffic_type"] = strings.ToLower(r.Traffictype)
+ setCidrList(rule, r.Cidrlist)
rules.Add(rule)
}
@@ -361,11 +352,7 @@ func resourceCloudStackNetworkACLRuleRead(d *schema.ResourceData, meta interface
if ps := rule["ports"].(*schema.Set); ps.Len() > 0 {
// Create an empty schema.Set to hold all ports
- ports := &schema.Set{
- F: func(v interface{}) int {
- return hashcode.String(v.(string))
- },
- }
+ ports := &schema.Set{F: schema.HashString}
// Loop through all ports and retrieve their info
for _, port := range ps.List() {
@@ -386,9 +373,9 @@ func resourceCloudStackNetworkACLRuleRead(d *schema.ResourceData, meta interface
// Update the values
rule["action"] = strings.ToLower(r.Action)
- rule["source_cidr"] = r.Cidrlist
rule["protocol"] = r.Protocol
rule["traffic_type"] = strings.ToLower(r.Traffictype)
+ setCidrList(rule, r.Cidrlist)
ports.Add(port)
}
@@ -402,15 +389,20 @@ func resourceCloudStackNetworkACLRuleRead(d *schema.ResourceData, meta interface
}
}
- // If this is a managed firewall, add all unknown rules into a single dummy rule
+ // If this is a managed firewall, add all unknown rules into dummy rules
managed := d.Get("managed").(bool)
if managed && len(ruleMap) > 0 {
for uuid := range ruleMap {
+ // We need to create and add a dummy value to a schema.Set as the
+ // cidr_list is a required field and thus needs a value
+ cidrs := &schema.Set{F: schema.HashString}
+ cidrs.Add(uuid)
+
// Make a dummy rule to hold the unknown UUID
rule := map[string]interface{}{
- "source_cidr": uuid,
- "protocol": uuid,
- "uuids": map[string]interface{}{uuid: uuid},
+ "cidr_list": cidrs,
+ "protocol": uuid,
+ "uuids": map[string]interface{}{uuid: uuid},
}
// Add the dummy rule to the rules set
@@ -445,9 +437,9 @@ func resourceCloudStackNetworkACLRuleUpdate(d *schema.ResourceData, meta interfa
// set to make sure we end up in a consistent state
rules := o.(*schema.Set).Intersection(n.(*schema.Set))
- // Now first loop through all the old rules and delete them
- if ors.Len() > 0 {
- err := resourceCloudStackNetworkACLRuleDeleteRules(d, meta, rules, ors)
+ // First loop through all the new rules and create (before destroy) them
+ if nrs.Len() > 0 {
+ err := createNetworkACLRules(d, meta, rules, nrs)
// We need to update this first to preserve the correct state
d.Set("rule", rules)
@@ -457,9 +449,9 @@ func resourceCloudStackNetworkACLRuleUpdate(d *schema.ResourceData, meta interfa
}
}
- // Then loop through all the new rules and create them
- if nrs.Len() > 0 {
- err := resourceCloudStackNetworkACLRuleCreateRules(d, meta, rules, nrs)
+ // Then loop through all the old rules and delete them
+ if ors.Len() > 0 {
+ err := deleteNetworkACLRules(d, meta, rules, ors)
// We need to update this first to preserve the correct state
d.Set("rule", rules)
@@ -476,13 +468,11 @@ func resourceCloudStackNetworkACLRuleUpdate(d *schema.ResourceData, meta interfa
func resourceCloudStackNetworkACLRuleDelete(d *schema.ResourceData, meta interface{}) error {
// Create an empty rule set to hold all rules that where
// not deleted correctly
- rules := &schema.Set{
- F: resourceCloudStackNetworkACLRuleHash,
- }
+ rules := resourceCloudStackNetworkACLRule().Schema["rule"].ZeroValue().(*schema.Set)
// Delete all rules
- if rs := d.Get("rule").(*schema.Set); rs.Len() > 0 {
- err := resourceCloudStackNetworkACLRuleDeleteRules(d, meta, rules, rs)
+ if ors := d.Get("rule").(*schema.Set); ors.Len() > 0 {
+ err := deleteNetworkACLRules(d, meta, rules, ors)
// We need to update this first to preserve the correct state
d.Set("rule", rules)
@@ -495,7 +485,7 @@ func resourceCloudStackNetworkACLRuleDelete(d *schema.ResourceData, meta interfa
return nil
}
-func resourceCloudStackNetworkACLRuleDeleteRules(
+func deleteNetworkACLRules(
d *schema.ResourceData,
meta interface{},
rules *schema.Set,
@@ -515,7 +505,7 @@ func resourceCloudStackNetworkACLRuleDeleteRules(
sem <- struct{}{}
// Delete a single rule
- err := resourceCloudStackNetworkACLRuleDeleteRule(d, meta, rule)
+ err := deleteNetworkACLRule(d, meta, rule)
// If we have at least one UUID, we need to save the rule
if len(rule["uuids"].(map[string]interface{})) > 0 {
@@ -535,7 +525,7 @@ func resourceCloudStackNetworkACLRuleDeleteRules(
return errs.ErrorOrNil()
}
-func resourceCloudStackNetworkACLRuleDeleteRule(
+func deleteNetworkACLRule(
d *schema.ResourceData,
meta interface{},
rule map[string]interface{}) error {
@@ -574,58 +564,6 @@ func resourceCloudStackNetworkACLRuleDeleteRule(
return nil
}
-func resourceCloudStackNetworkACLRuleHash(v interface{}) int {
- var buf bytes.Buffer
- m := v.(map[string]interface{})
-
- // This is a little ugly, but it's needed because these arguments have
- // a default value that needs to be part of the string to hash
- var action, trafficType string
- if a, ok := m["action"]; ok {
- action = a.(string)
- } else {
- action = "allow"
- }
- if t, ok := m["traffic_type"]; ok {
- trafficType = t.(string)
- } else {
- trafficType = "ingress"
- }
-
- buf.WriteString(fmt.Sprintf(
- "%s-%s-%s-%s-",
- action,
- m["source_cidr"].(string),
- m["protocol"].(string),
- trafficType))
-
- if v, ok := m["icmp_type"]; ok {
- buf.WriteString(fmt.Sprintf("%d-", v.(int)))
- }
-
- if v, ok := m["icmp_code"]; ok {
- buf.WriteString(fmt.Sprintf("%d-", v.(int)))
- }
-
- // We need to make sure to sort the strings below so that we always
- // generate the same hash code no matter what is in the set.
- if v, ok := m["ports"]; ok {
- vs := v.(*schema.Set).List()
- s := make([]string, len(vs))
-
- for i, raw := range vs {
- s[i] = raw.(string)
- }
- sort.Strings(s)
-
- for _, v := range s {
- buf.WriteString(fmt.Sprintf("%s-", v))
- }
- }
-
- return hashcode.String(buf.String())
-}
-
func verifyNetworkACLParams(d *schema.ResourceData) error {
managed := d.Get("managed").(bool)
_, rules := d.GetOk("rule")
@@ -644,6 +582,17 @@ func verifyNetworkACLRuleParams(d *schema.ResourceData, rule map[string]interfac
return fmt.Errorf("Parameter action only accepts 'allow' or 'deny' as values")
}
+ cidrList := rule["cidr_list"].(*schema.Set)
+ sourceCidr := rule["source_cidr"].(string)
+ if cidrList.Len() == 0 && sourceCidr == "" {
+ return fmt.Errorf(
+ "Parameter cidr_list is a required parameter")
+ }
+ if cidrList.Len() > 0 && sourceCidr != "" {
+ return fmt.Errorf(
+ "Parameter source_cidr is deprecated and cannot be used together with cidr_list")
+ }
+
protocol := rule["protocol"].(string)
switch protocol {
case "icmp":
diff --git a/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule_test.go b/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule_test.go
index 6f2370f5b6..862418f704 100644
--- a/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule_test.go
+++ b/builtin/providers/cloudstack/resource_cloudstack_network_acl_rule_test.go
@@ -23,19 +23,31 @@ func TestAccCloudStackNetworkACLRule_basic(t *testing.T) {
resource.TestCheckResourceAttr(
"cloudstack_network_acl_rule.foo", "rule.#", "3"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.action", "allow"),
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.action", "allow"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.source_cidr", "172.16.100.0/24"),
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.source_cidr", "172.16.100.0/24"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.protocol", "tcp"),
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.protocol", "tcp"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.ports.#", "2"),
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.ports.#", "2"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.ports.1889509032", "80"),
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.ports.1889509032", "80"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.ports.3638101695", "443"),
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.ports.3638101695", "443"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.traffic_type", "ingress"),
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.traffic_type", "ingress"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.4029966697.action", "allow"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.4029966697.cidr_list.#", "1"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.4029966697.cidr_list.3056857544", "172.18.100.0/24"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.4029966697.icmp_code", "-1"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.4029966697.icmp_type", "-1"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.4029966697.traffic_type", "ingress"),
),
},
},
@@ -55,19 +67,31 @@ func TestAccCloudStackNetworkACLRule_update(t *testing.T) {
resource.TestCheckResourceAttr(
"cloudstack_network_acl_rule.foo", "rule.#", "3"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.action", "allow"),
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.action", "allow"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.source_cidr", "172.16.100.0/24"),
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.source_cidr", "172.16.100.0/24"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.protocol", "tcp"),
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.protocol", "tcp"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.ports.#", "2"),
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.ports.#", "2"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.ports.1889509032", "80"),
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.ports.1889509032", "80"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.ports.3638101695", "443"),
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.ports.3638101695", "443"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.traffic_type", "ingress"),
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.traffic_type", "ingress"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.4029966697.action", "allow"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.4029966697.cidr_list.#", "1"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.4029966697.cidr_list.3056857544", "172.18.100.0/24"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.4029966697.icmp_code", "-1"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.4029966697.icmp_type", "-1"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.4029966697.traffic_type", "ingress"),
),
},
@@ -78,33 +102,47 @@ func TestAccCloudStackNetworkACLRule_update(t *testing.T) {
resource.TestCheckResourceAttr(
"cloudstack_network_acl_rule.foo", "rule.#", "4"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.action", "allow"),
+ "cloudstack_network_acl_rule.foo", "rule.2254982534.action", "deny"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.source_cidr", "172.16.100.0/24"),
+ "cloudstack_network_acl_rule.foo", "rule.2254982534.source_cidr", "10.0.0.0/24"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.protocol", "tcp"),
+ "cloudstack_network_acl_rule.foo", "rule.2254982534.protocol", "tcp"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.ports.#", "2"),
+ "cloudstack_network_acl_rule.foo", "rule.2254982534.ports.#", "2"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.ports.1889509032", "80"),
+ "cloudstack_network_acl_rule.foo", "rule.2254982534.ports.1209010669", "1000-2000"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.ports.3638101695", "443"),
+ "cloudstack_network_acl_rule.foo", "rule.2254982534.ports.1889509032", "80"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.3247834462.traffic_type", "ingress"),
+ "cloudstack_network_acl_rule.foo", "rule.2254982534.traffic_type", "egress"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.4267872693.action", "deny"),
+ "cloudstack_network_acl_rule.foo", "rule.2704020556.action", "deny"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.4267872693.source_cidr", "10.0.0.0/24"),
+ "cloudstack_network_acl_rule.foo", "rule.2704020556.cidr_list.#", "2"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.4267872693.protocol", "tcp"),
+ "cloudstack_network_acl_rule.foo", "rule.2704020556.cidr_list.2104435309", "172.18.101.0/24"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.4267872693.ports.#", "2"),
+ "cloudstack_network_acl_rule.foo", "rule.2704020556.cidr_list.3056857544", "172.18.100.0/24"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.4267872693.ports.1209010669", "1000-2000"),
+ "cloudstack_network_acl_rule.foo", "rule.2704020556.icmp_code", "-1"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.4267872693.ports.1889509032", "80"),
+ "cloudstack_network_acl_rule.foo", "rule.2704020556.icmp_type", "-1"),
resource.TestCheckResourceAttr(
- "cloudstack_network_acl_rule.foo", "rule.4267872693.traffic_type", "egress"),
+ "cloudstack_network_acl_rule.foo", "rule.2704020556.traffic_type", "ingress"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.action", "allow"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.source_cidr", "172.16.100.0/24"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.protocol", "tcp"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.ports.#", "2"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.ports.1889509032", "80"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.ports.3638101695", "443"),
+ resource.TestCheckResourceAttr(
+ "cloudstack_network_acl_rule.foo", "rule.2792403380.traffic_type", "ingress"),
),
},
},
@@ -196,7 +234,7 @@ resource "cloudstack_network_acl_rule" "foo" {
rule {
action = "allow"
- source_cidr = "172.18.100.0/24"
+ cidr_list = ["172.18.100.0/24"]
protocol = "icmp"
icmp_type = "-1"
icmp_code = "-1"
@@ -240,7 +278,7 @@ resource "cloudstack_network_acl_rule" "foo" {
rule {
action = "deny"
- source_cidr = "172.18.100.0/24"
+ cidr_list = ["172.18.100.0/24", "172.18.101.0/24"]
protocol = "icmp"
icmp_type = "-1"
icmp_code = "-1"
diff --git a/builtin/providers/cloudstack/resource_cloudstack_port_forward.go b/builtin/providers/cloudstack/resource_cloudstack_port_forward.go
index e1f8c99fca..044482bcb6 100644
--- a/builtin/providers/cloudstack/resource_cloudstack_port_forward.go
+++ b/builtin/providers/cloudstack/resource_cloudstack_port_forward.go
@@ -1,13 +1,14 @@
package cloudstack
import (
- "bytes"
"fmt"
+ "sync"
+ "time"
"strconv"
"strings"
- "github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/helper/schema"
"github.com/xanzy/go-cloudstack/cloudstack"
)
@@ -63,7 +64,6 @@ func resourceCloudStackPortForward() *schema.Resource {
},
},
},
- Set: resourceCloudStackPortForwardHash,
},
},
}
@@ -82,32 +82,66 @@ func resourceCloudStackPortForwardCreate(d *schema.ResourceData, meta interface{
d.SetId(ipaddressid)
// Create all forwards that are configured
- if rs := d.Get("forward").(*schema.Set); rs.Len() > 0 {
-
+ if nrs := d.Get("forward").(*schema.Set); nrs.Len() > 0 {
// Create an empty schema.Set to hold all forwards
- forwards := &schema.Set{
- F: resourceCloudStackPortForwardHash,
- }
+ forwards := resourceCloudStackPortForward().Schema["forward"].ZeroValue().(*schema.Set)
- for _, forward := range rs.List() {
- // Create a single forward
- err := resourceCloudStackPortForwardCreateForward(d, meta, forward.(map[string]interface{}))
+ err := createPortForwards(d, meta, forwards, nrs)
- // We need to update this first to preserve the correct state
- forwards.Add(forward)
- d.Set("forward", forwards)
+ // We need to update this first to preserve the correct state
+ d.Set("forward", forwards)
- if err != nil {
- return err
- }
+ if err != nil {
+ return err
}
}
return resourceCloudStackPortForwardRead(d, meta)
}
-func resourceCloudStackPortForwardCreateForward(
- d *schema.ResourceData, meta interface{}, forward map[string]interface{}) error {
+func createPortForwards(
+ d *schema.ResourceData,
+ meta interface{},
+ forwards *schema.Set,
+ nrs *schema.Set) error {
+ var errs *multierror.Error
+
+ var wg sync.WaitGroup
+ wg.Add(nrs.Len())
+
+ sem := make(chan struct{}, 10)
+ for _, forward := range nrs.List() {
+ // Put in a tiny sleep here to avoid DoS'ing the API
+ time.Sleep(500 * time.Millisecond)
+
+ go func(forward map[string]interface{}) {
+ defer wg.Done()
+ sem <- struct{}{}
+
+ // Create a single forward
+ err := createPortForward(d, meta, forward)
+
+ // If we have a UUID, we need to save the forward
+ if forward["uuid"].(string) != "" {
+ forwards.Add(forward)
+ }
+
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ }
+
+ <-sem
+ }(forward.(map[string]interface{}))
+ }
+
+ wg.Wait()
+
+ return errs.ErrorOrNil()
+}
+func createPortForward(
+ d *schema.ResourceData,
+ meta interface{},
+ forward map[string]interface{}) error {
cs := meta.(*cloudstack.CloudStackClient)
// Make sure all required parameters are there
@@ -167,9 +201,7 @@ func resourceCloudStackPortForwardRead(d *schema.ResourceData, meta interface{})
}
// Create an empty schema.Set to hold all forwards
- forwards := &schema.Set{
- F: resourceCloudStackPortForwardHash,
- }
+ forwards := resourceCloudStackPortForward().Schema["forward"].ZeroValue().(*schema.Set)
// Read all forwards that are configured
if rs := d.Get("forward").(*schema.Set); rs.Len() > 0 {
@@ -250,26 +282,29 @@ func resourceCloudStackPortForwardUpdate(d *schema.ResourceData, meta interface{
ors := o.(*schema.Set).Difference(n.(*schema.Set))
nrs := n.(*schema.Set).Difference(o.(*schema.Set))
- // Now first loop through all the old forwards and delete any obsolete ones
- for _, forward := range ors.List() {
- // Delete the forward as it no longer exists in the config
- err := resourceCloudStackPortForwardDeleteForward(d, meta, forward.(map[string]interface{}))
+ // We need to start with a rule set containing all the rules we
+ // already have and want to keep. Any rules that are not deleted
+ // correctly and any newly created rules will be added to this
+ // set to make sure we end up in a consistent state.
+ forwards := o.(*schema.Set).Intersection(n.(*schema.Set))
+
+ // First loop through all the new forwards and create (before destroy) them
+ if nrs.Len() > 0 {
+ err := createPortForwards(d, meta, forwards, nrs)
+
+ // We need to update this first to preserve the correct state
+ d.Set("forward", forwards)
+
if err != nil {
return err
}
}
- // Make sure we save the state of the currently configured forwards
- forwards := o.(*schema.Set).Intersection(n.(*schema.Set))
- d.Set("forward", forwards)
-
- // Then loop through all the currently configured forwards and create the new ones
- for _, forward := range nrs.List() {
- err := resourceCloudStackPortForwardCreateForward(
- d, meta, forward.(map[string]interface{}))
+ // Then loop through all the old forwards and delete them
+ if ors.Len() > 0 {
+ err := deletePortForwards(d, meta, forwards, ors)
// We need to update this first to preserve the correct state
- forwards.Add(forward)
d.Set("forward", forwards)
if err != nil {
@@ -282,26 +317,69 @@ func resourceCloudStackPortForwardUpdate(d *schema.ResourceData, meta interface{
}
func resourceCloudStackPortForwardDelete(d *schema.ResourceData, meta interface{}) error {
+ // Create an empty rule set to hold all rules that were
+ // not deleted correctly
+ forwards := resourceCloudStackPortForward().Schema["forward"].ZeroValue().(*schema.Set)
+
// Delete all forwards
- if rs := d.Get("forward").(*schema.Set); rs.Len() > 0 {
- for _, forward := range rs.List() {
- // Delete a single forward
- err := resourceCloudStackPortForwardDeleteForward(d, meta, forward.(map[string]interface{}))
+ if ors := d.Get("forward").(*schema.Set); ors.Len() > 0 {
+ err := deletePortForwards(d, meta, forwards, ors)
- // We need to update this first to preserve the correct state
- d.Set("forward", rs)
+ // We need to update this first to preserve the correct state
+ d.Set("forward", forwards)
- if err != nil {
- return err
- }
+ if err != nil {
+ return err
}
}
return nil
}
-func resourceCloudStackPortForwardDeleteForward(
- d *schema.ResourceData, meta interface{}, forward map[string]interface{}) error {
+func deletePortForwards(
+ d *schema.ResourceData,
+ meta interface{},
+ forwards *schema.Set,
+ ors *schema.Set) error {
+ var errs *multierror.Error
+
+ var wg sync.WaitGroup
+ wg.Add(ors.Len())
+
+ sem := make(chan struct{}, 10)
+ for _, forward := range ors.List() {
+ // Put a sleep here to avoid DoS'ing the API
+ time.Sleep(500 * time.Millisecond)
+
+ go func(forward map[string]interface{}) {
+ defer wg.Done()
+ sem <- struct{}{}
+
+ // Delete a single forward
+ err := deletePortForward(d, meta, forward)
+
+ // If we have a UUID, we need to save the forward
+ if forward["uuid"].(string) != "" {
+ forwards.Add(forward)
+ }
+
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ }
+
+ <-sem
+ }(forward.(map[string]interface{}))
+ }
+
+ wg.Wait()
+
+ return errs.ErrorOrNil()
+}
+
+func deletePortForward(
+ d *schema.ResourceData,
+ meta interface{},
+ forward map[string]interface{}) error {
cs := meta.(*cloudstack.CloudStackClient)
// Create the parameter struct
@@ -323,19 +401,6 @@ func resourceCloudStackPortForwardDeleteForward(
return nil
}
-func resourceCloudStackPortForwardHash(v interface{}) int {
- var buf bytes.Buffer
- m := v.(map[string]interface{})
- buf.WriteString(fmt.Sprintf(
- "%s-%d-%d-%s",
- m["protocol"].(string),
- m["private_port"].(int),
- m["public_port"].(int),
- m["virtual_machine"].(string)))
-
- return hashcode.String(buf.String())
-}
-
func verifyPortForwardParams(d *schema.ResourceData, forward map[string]interface{}) error {
protocol := forward["protocol"].(string)
if protocol != "tcp" && protocol != "udp" {
diff --git a/builtin/providers/cloudstack/resource_cloudstack_port_forward_test.go b/builtin/providers/cloudstack/resource_cloudstack_port_forward_test.go
index b0851753f8..63dcdb001b 100644
--- a/builtin/providers/cloudstack/resource_cloudstack_port_forward_test.go
+++ b/builtin/providers/cloudstack/resource_cloudstack_port_forward_test.go
@@ -23,13 +23,13 @@ func TestAccCloudStackPortForward_basic(t *testing.T) {
resource.TestCheckResourceAttr(
"cloudstack_port_forward.foo", "ipaddress", CLOUDSTACK_PUBLIC_IPADDRESS),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.1537694805.protocol", "tcp"),
+ "cloudstack_port_forward.foo", "forward.952396423.protocol", "tcp"),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.1537694805.private_port", "443"),
+ "cloudstack_port_forward.foo", "forward.952396423.private_port", "443"),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.1537694805.public_port", "8443"),
+ "cloudstack_port_forward.foo", "forward.952396423.public_port", "8443"),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.1537694805.virtual_machine", "terraform-test"),
+ "cloudstack_port_forward.foo", "forward.952396423.virtual_machine", "terraform-test"),
),
},
},
@@ -51,13 +51,13 @@ func TestAccCloudStackPortForward_update(t *testing.T) {
resource.TestCheckResourceAttr(
"cloudstack_port_forward.foo", "forward.#", "1"),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.1537694805.protocol", "tcp"),
+ "cloudstack_port_forward.foo", "forward.952396423.protocol", "tcp"),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.1537694805.private_port", "443"),
+ "cloudstack_port_forward.foo", "forward.952396423.private_port", "443"),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.1537694805.public_port", "8443"),
+ "cloudstack_port_forward.foo", "forward.952396423.public_port", "8443"),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.1537694805.virtual_machine", "terraform-test"),
+ "cloudstack_port_forward.foo", "forward.952396423.virtual_machine", "terraform-test"),
),
},
@@ -70,21 +70,21 @@ func TestAccCloudStackPortForward_update(t *testing.T) {
resource.TestCheckResourceAttr(
"cloudstack_port_forward.foo", "forward.#", "2"),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.8416686.protocol", "tcp"),
+ "cloudstack_port_forward.foo", "forward.260687715.protocol", "tcp"),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.8416686.private_port", "80"),
+ "cloudstack_port_forward.foo", "forward.260687715.private_port", "80"),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.8416686.public_port", "8080"),
+ "cloudstack_port_forward.foo", "forward.260687715.public_port", "8080"),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.8416686.virtual_machine", "terraform-test"),
+ "cloudstack_port_forward.foo", "forward.260687715.virtual_machine", "terraform-test"),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.1537694805.protocol", "tcp"),
+ "cloudstack_port_forward.foo", "forward.952396423.protocol", "tcp"),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.1537694805.private_port", "443"),
+ "cloudstack_port_forward.foo", "forward.952396423.private_port", "443"),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.1537694805.public_port", "8443"),
+ "cloudstack_port_forward.foo", "forward.952396423.public_port", "8443"),
resource.TestCheckResourceAttr(
- "cloudstack_port_forward.foo", "forward.1537694805.virtual_machine", "terraform-test"),
+ "cloudstack_port_forward.foo", "forward.952396423.virtual_machine", "terraform-test"),
),
},
},
diff --git a/builtin/providers/cloudstack/resource_cloudstack_secondary_ipaddress_test.go b/builtin/providers/cloudstack/resource_cloudstack_secondary_ipaddress_test.go
index beedcd2cb2..dd59ca3f49 100644
--- a/builtin/providers/cloudstack/resource_cloudstack_secondary_ipaddress_test.go
+++ b/builtin/providers/cloudstack/resource_cloudstack_secondary_ipaddress_test.go
@@ -43,7 +43,7 @@ func TestAccCloudStackSecondaryIPAddress_fixedIP(t *testing.T) {
"cloudstack_secondary_ipaddress.foo", &ip),
testAccCheckCloudStackSecondaryIPAddressAttributes(&ip),
resource.TestCheckResourceAttr(
- "cloudstack_secondary_ipaddress.foo", "ipaddress", CLOUDSTACK_NETWORK_1_IPADDRESS),
+ "cloudstack_secondary_ipaddress.foo", "ipaddress", CLOUDSTACK_NETWORK_1_IPADDRESS1),
),
},
},
@@ -117,7 +117,7 @@ func testAccCheckCloudStackSecondaryIPAddressAttributes(
ip *cloudstack.AddIpToNicResponse) resource.TestCheckFunc {
return func(s *terraform.State) error {
- if ip.Ipaddress != CLOUDSTACK_NETWORK_1_IPADDRESS {
+ if ip.Ipaddress != CLOUDSTACK_NETWORK_1_IPADDRESS1 {
return fmt.Errorf("Bad IP address: %s", ip.Ipaddress)
}
return nil
@@ -222,4 +222,4 @@ resource "cloudstack_secondary_ipaddress" "foo" {
CLOUDSTACK_NETWORK_1,
CLOUDSTACK_TEMPLATE,
CLOUDSTACK_ZONE,
- CLOUDSTACK_NETWORK_1_IPADDRESS)
+ CLOUDSTACK_NETWORK_1_IPADDRESS1)
diff --git a/builtin/providers/cloudstack/resources.go b/builtin/providers/cloudstack/resources.go
index 2fe67c6e31..4182b947f2 100644
--- a/builtin/providers/cloudstack/resources.go
+++ b/builtin/providers/cloudstack/resources.go
@@ -4,6 +4,7 @@ import (
"fmt"
"log"
"regexp"
+ "strings"
"time"
"github.com/hashicorp/terraform/helper/schema"
@@ -145,3 +146,36 @@ func Retry(n int, f RetryFunc) (interface{}, error) {
return nil, lastErr
}
+
+// This is a temporary helper function to support both the new
+// cidr_list and the deprecated source_cidr parameter
+func retrieveCidrList(rule map[string]interface{}) []string {
+ sourceCidr := rule["source_cidr"].(string)
+ if sourceCidr != "" {
+ return []string{sourceCidr}
+ }
+
+ var cidrList []string
+ for _, cidr := range rule["cidr_list"].(*schema.Set).List() {
+ cidrList = append(cidrList, cidr.(string))
+ }
+
+ return cidrList
+}
+
+// This is a temporary helper function to support both the new
+// cidr_list and the deprecated source_cidr parameter
+func setCidrList(rule map[string]interface{}, cidrList string) {
+ sourceCidr := rule["source_cidr"].(string)
+ if sourceCidr != "" {
+ rule["source_cidr"] = cidrList
+ return
+ }
+
+ cidrs := &schema.Set{F: schema.HashString}
+ for _, cidr := range strings.Split(cidrList, ",") {
+ cidrs.Add(cidr)
+ }
+
+ rule["cidr_list"] = cidrs
+}
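
These helpers give the deprecated attribute priority so existing configurations keep their behaviour unchanged. A minimal sketch of the same precedence with plain Go types in place of `*schema.Set` (hypothetical `cidrsForRule` name):

```
package main

import (
	"fmt"
	"strings"
)

// cidrsForRule mirrors retrieveCidrList above: a non-empty source_cidr
// takes priority over the new cidr_list attribute.
func cidrsForRule(sourceCidr string, cidrList []string) []string {
	if sourceCidr != "" {
		return []string{sourceCidr}
	}
	return cidrList
}

func main() {
	// Legacy rule still using the deprecated attribute
	fmt.Println(cidrsForRule("10.0.0.0/8", nil)) // [10.0.0.0/8]

	// New-style rule
	fmt.Println(cidrsForRule("", []string{"172.18.100.0/24", "172.18.101.0/24"}))

	// setCidrList receives the API's comma-separated string and splits it
	fmt.Println(strings.Split("172.18.100.0/24,172.18.101.0/24", ","))
}
```
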
diff --git a/website/source/docs/providers/cloudstack/r/egress_firewall.html.markdown b/website/source/docs/providers/cloudstack/r/egress_firewall.html.markdown
index b905bc0e99..dfb42281a9 100644
--- a/website/source/docs/providers/cloudstack/r/egress_firewall.html.markdown
+++ b/website/source/docs/providers/cloudstack/r/egress_firewall.html.markdown
@@ -17,7 +17,7 @@ resource "cloudstack_egress_firewall" "default" {
network = "test-network"
rule {
- source_cidr = "10.0.0.0/8"
+ cidr_list = ["10.0.0.0/8"]
protocol = "tcp"
ports = ["80", "1000-2000"]
}
@@ -40,7 +40,10 @@ The following arguments are supported:
The `rule` block supports:
-* `source_cidr` - (Required) The source CIDR to allow access to the given ports.
+* `cidr_list` - (Required) A CIDR list to allow access to the given ports.
+
+* `source_cidr` - (Optional, Deprecated) The source CIDR to allow access to the
+ given ports. This attribute is deprecated; please use `cidr_list` instead.
* `protocol` - (Required) The name of the protocol to allow. Valid options are:
`tcp`, `udp` and `icmp`.
diff --git a/website/source/docs/providers/cloudstack/r/firewall.html.markdown b/website/source/docs/providers/cloudstack/r/firewall.html.markdown
index 455449049d..70478fa104 100644
--- a/website/source/docs/providers/cloudstack/r/firewall.html.markdown
+++ b/website/source/docs/providers/cloudstack/r/firewall.html.markdown
@@ -17,7 +17,7 @@ resource "cloudstack_firewall" "default" {
ipaddress = "192.168.0.1"
rule {
- source_cidr = "10.0.0.0/8"
+ cidr_list = ["10.0.0.0/8"]
protocol = "tcp"
ports = ["80", "1000-2000"]
}
@@ -40,7 +40,10 @@ The following arguments are supported:
The `rule` block supports:
-* `source_cidr` - (Required) The source CIDR to allow access to the given ports.
+* `cidr_list` - (Required) A CIDR list to allow access to the given ports.
+
+* `source_cidr` - (Optional, Deprecated) The source CIDR to allow access to the
+ given ports. This attribute is deprecated; please use `cidr_list` instead.
* `protocol` - (Required) The name of the protocol to allow. Valid options are:
`tcp`, `udp` and `icmp`.
diff --git a/website/source/docs/providers/cloudstack/r/network_acl_rule.html.markdown b/website/source/docs/providers/cloudstack/r/network_acl_rule.html.markdown
index f82b8f4464..59b6f1b9a1 100644
--- a/website/source/docs/providers/cloudstack/r/network_acl_rule.html.markdown
+++ b/website/source/docs/providers/cloudstack/r/network_acl_rule.html.markdown
@@ -18,7 +18,7 @@ resource "cloudstack_network_acl_rule" "default" {
rule {
action = "allow"
- source_cidr = "10.0.0.0/8"
+ cidr_list = ["10.0.0.0/8"]
protocol = "tcp"
ports = ["80", "1000-2000"]
traffic_type = "ingress"
@@ -45,7 +45,10 @@ The `rule` block supports:
* `action` - (Optional) The action for the rule. Valid options are: `allow` and
`deny` (defaults allow).
-* `source_cidr` - (Required) The source CIDR to allow access to the given ports.
+* `cidr_list` - (Required) A CIDR list to allow access to the given ports.
+
+* `source_cidr` - (Optional, Deprecated) The source CIDR to allow access to the
+ given ports. This attribute is deprecated; please use `cidr_list` instead.
* `protocol` - (Required) The name of the protocol to allow. Valid options are:
`tcp`, `udp`, `icmp`, `all` or a valid protocol number.
From 55af736adecf542c6210e43632dc260887ffbfa1 Mon Sep 17 00:00:00 2001
From: Clint
Date: Thu, 3 Dec 2015 14:23:52 -0600
Subject: [PATCH 117/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 80a0f4561b..d1fde0181c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@ BUG FIXES:
* provider/docker: Fix an issue running with Docker Swarm by looking up containers by ID instead of name [GH-4148]
* provider/openstack: Better handling of load balancing resource state changes [GH-3926]
+ * provider/aws: Skip `source_security_group_id` determination logic for Classic ELBs [GH-4075]
## 0.6.8 (December 2, 2015)
From 16016b8866c111109e6b01215d8db4b931984e47 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Thu, 3 Dec 2015 14:24:35 -0600
Subject: [PATCH 118/664] fix typo
---
website/source/docs/providers/aws/r/elb.html.markdown | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/docs/providers/aws/r/elb.html.markdown b/website/source/docs/providers/aws/r/elb.html.markdown
index dde90e54d7..997d7274d1 100644
--- a/website/source/docs/providers/aws/r/elb.html.markdown
+++ b/website/source/docs/providers/aws/r/elb.html.markdown
@@ -120,5 +120,5 @@ The following attributes are exported:
instances. Use this for Classic or Default VPC only.
* `source_security_group_id` - The ID of the security group that you can use as
part of your inbound rules for your load balancer's back-end application
- instances. Only available on ELBs launch in a VPC.
+ instances. Only available on ELBs launched in a VPC.
* `zone_id` - The canonical hosted zone ID of the ELB (to be used in a Route 53 Alias record)
From 895dd29090daacd8f3d9d357a3d6cd8bf5a87bec Mon Sep 17 00:00:00 2001
From: Dave Cunningham
Date: Thu, 3 Dec 2015 15:33:35 -0500
Subject: [PATCH 119/664] Update CHANGELOG.md
(Forgot to do this yesterday)
---
CHANGELOG.md | 2 ++
1 file changed, 2 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d1fde0181c..e1941e7d12 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,8 @@
## 0.6.9 (Unreleased)
FEATURES:
+ * **New resource: `google_pubsub_topic`** [GH-3671]
+ * **New resource: `google_pubsub_subscription`** [GH-3671]
IMPROVEMENTS:
From 738ad14ebbb763af123bf0057cd99523bbec57fe Mon Sep 17 00:00:00 2001
From: Lars Wander
Date: Thu, 3 Dec 2015 09:48:45 -0500
Subject: [PATCH 120/664] provider/google: Added missing pubsub documentation
---
.../r/pubsub_subscription.html.markdown | 29 +++++++++++++++----
1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/website/source/docs/providers/google/r/pubsub_subscription.html.markdown b/website/source/docs/providers/google/r/pubsub_subscription.html.markdown
index d1f43ef415..7917205364 100644
--- a/website/source/docs/providers/google/r/pubsub_subscription.html.markdown
+++ b/website/source/docs/providers/google/r/pubsub_subscription.html.markdown
@@ -17,8 +17,15 @@ Creates a subscription in Google's pubsub queueing system. For more information
```
resource "google_pubsub_subscription" "default" {
- name = "default-subscription"
- topic = "default-topic"
+ name = "default-subscription"
+ topic = "default-topic"
+ ack_deadline_seconds = 20
+ push_config {
+ endpoint = "https://example.com/push"
+ attributes {
+ x-goog-version = "v1"
+ }
+ }
}
```
@@ -28,12 +35,22 @@ The following arguments are supported:
* `name` - (Required) A unique name for the resource, required by pubsub.
Changing this forces a new resource to be created.
+
* `topic` - (Required) A topic to bind this subscription to, required by pubsub.
Changing this forces a new resource to be created.
-## Attributes Reference
+* `ack_deadline_seconds` - (Optional) The maximum number of seconds a
+ subscriber has to acknowledge a received message; otherwise the message is
+ redelivered. Changing this forces a new resource to be created.
-The following attributes are exported:
+The optional `push_config` block supports:
-* `name` - The name of the resource.
-* `topic` - The topic to bind this resource to.
+* `push_endpoint` - (Optional) The URL of the endpoint to which messages should
+ be pushed. Changing this forces a new resource to be created.
+
+* `attributes` - (Optional) Key-value pairs of API supported attributes used
+ to control aspects of the message delivery. Currently, only
+ `x-goog-version` is supported, which controls the format of the data
+ delivery. For more information, read [the API docs
+ here](https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions#PushConfig.FIELDS.attributes).
+ Changing this forces a new resource to be created.
From 9203bfedf1c8f3796fe7432b14f78ec2ac62b788 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Thu, 3 Dec 2015 15:52:28 -0600
Subject: [PATCH 121/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e1941e7d12..30da5fcedc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,7 @@
## 0.6.9 (Unreleased)
FEATURES:
+ * **New provider: `vcd` - VMware vCloud Director** [GH-3785]
* **New resource: `google_pubsub_topic`** [GH-3671]
* **New resource: `google_pubsub_subscription`** [GH-3671]
From 725a7eef67d512f5a39d003c44d57adb5f7521c7 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Thu, 3 Dec 2015 18:05:40 -0600
Subject: [PATCH 122/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 30da5fcedc..0f00ecd6e1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@ FEATURES:
IMPROVEMENTS:
+ * provider/aws: Add placement_group as an option for `aws_autoscaling_group` [GH-3704]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting the restart policy on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting memory, swap and CPU shares on `docker_container` resources [GH-3761]
From 8e40b6b855e84d3d12960197ab995a249e27f729 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Thu, 3 Dec 2015 18:17:54 -0600
Subject: [PATCH 123/664] provider/aws: fixes to ASG placement_group tests
Based on @stack72's recommendations in #3704, thanks @stack72!
---
.../resource_aws_autoscaling_group_test.go | 56 ++++++++++++++++++-
1 file changed, 53 insertions(+), 3 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_autoscaling_group_test.go b/builtin/providers/aws/resource_aws_autoscaling_group_test.go
index 0d7f2c734f..d9268f6925 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_group_test.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_group_test.go
@@ -49,8 +49,6 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) {
"aws_autoscaling_group.bar", "termination_policies.0", "OldestInstance"),
resource.TestCheckResourceAttr(
"aws_autoscaling_group.bar", "termination_policies.1", "ClosestToNextInstanceHour"),
- resource.TestCheckResourceAttr(
- "aws_autoscaling_group.bar", "placement_group", "test"),
),
},
@@ -173,6 +171,26 @@ func TestAccAWSAutoScalingGroup_WithLoadBalancer(t *testing.T) {
})
}
+func TestAccAWSAutoScalingGroup_withPlacementGroup(t *testing.T) {
+ var group autoscaling.Group
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSAutoScalingGroupConfig_withPlacementGroup,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
+ resource.TestCheckResourceAttr(
+ "aws_autoscaling_group.bar", "placement_group", "test"),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckAWSAutoScalingGroupDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).autoscalingconn
@@ -418,7 +436,6 @@ resource "aws_autoscaling_group" "bar" {
desired_capacity = 4
force_delete = true
termination_policies = ["OldestInstance","ClosestToNextInstanceHour"]
- placement_group = "${aws_placement_group.test.name}"
launch_configuration = "${aws_launch_configuration.foobar.name}"
@@ -636,3 +653,36 @@ resource "aws_autoscaling_group" "bar" {
launch_configuration = "${aws_launch_configuration.foobar.name}"
}
`
+
+const testAccAWSAutoScalingGroupConfig_withPlacementGroup = `
+resource "aws_launch_configuration" "foobar" {
+ image_id = "ami-21f78e11"
+ instance_type = "c3.large"
+}
+
+resource "aws_placement_group" "test" {
+ name = "test"
+ strategy = "cluster"
+}
+
+resource "aws_autoscaling_group" "bar" {
+ availability_zones = ["us-west-2a"]
+ name = "foobar3-terraform-test"
+ max_size = 1
+ min_size = 1
+ health_check_grace_period = 300
+ health_check_type = "ELB"
+ desired_capacity = 1
+ force_delete = true
+ termination_policies = ["OldestInstance","ClosestToNextInstanceHour"]
+ placement_group = "${aws_placement_group.test.name}"
+
+ launch_configuration = "${aws_launch_configuration.foobar.name}"
+
+ tag {
+ key = "Foo"
+ value = "foo-bar"
+ propagate_at_launch = true
+ }
+}
+`
From ef0b02ede3d9728346fdceb085ff7f8e76204ec2 Mon Sep 17 00:00:00 2001
From: Timothy
Date: Thu, 3 Dec 2015 23:21:35 -0500
Subject: [PATCH 124/664] Update downloads.html.erb
Its a PGP key, not a GPG key.
---
website/source/downloads.html.erb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/downloads.html.erb b/website/source/downloads.html.erb
index f5e0945a00..411d84b03e 100644
--- a/website/source/downloads.html.erb
+++ b/website/source/downloads.html.erb
@@ -25,7 +25,7 @@ description: |-
verify the checksums signature file
- which has been signed using HashiCorp's GPG key .
+ which has been signed using HashiCorp's PGP key .
You can also download older versions of Terraform from the releases service.
From e1eef15646fd8da87fa88fa41dca3c1ace37a86b Mon Sep 17 00:00:00 2001
From: Adrian Chelaru
Date: Tue, 27 Oct 2015 12:04:19 +0200
Subject: [PATCH 125/664] postgresql provider with "database" and "role"
resources
---
builtin/bins/provider-postgresql/main.go | 12 ++
builtin/bins/provider-postgresql/main_test.go | 1 +
builtin/providers/postgresql/config.go | 43 +++++
builtin/providers/postgresql/provider.go | 63 ++++++
builtin/providers/postgresql/provider_test.go | 41 ++++
.../resource_postgresql_database.go | 160 ++++++++++++++++
.../resource_postgresql_database_test.go | 144 ++++++++++++++
.../postgresql/resource_postgresql_role.go | 179 ++++++++++++++++++
.../resource_postgresql_role_test.go | 132 +++++++++++++
website/source/assets/stylesheets/_docs.scss | 1 +
.../providers/postgresql/index.html.markdown | 63 ++++++
.../r/postgresql_database.html.markdown | 30 +++
.../r/postgresql_role.html.markdown | 37 ++++
website/source/layouts/docs.erb | 4 +
website/source/layouts/postgresql.erb | 29 +++
15 files changed, 939 insertions(+)
create mode 100644 builtin/bins/provider-postgresql/main.go
create mode 100644 builtin/bins/provider-postgresql/main_test.go
create mode 100644 builtin/providers/postgresql/config.go
create mode 100644 builtin/providers/postgresql/provider.go
create mode 100644 builtin/providers/postgresql/provider_test.go
create mode 100644 builtin/providers/postgresql/resource_postgresql_database.go
create mode 100644 builtin/providers/postgresql/resource_postgresql_database_test.go
create mode 100644 builtin/providers/postgresql/resource_postgresql_role.go
create mode 100644 builtin/providers/postgresql/resource_postgresql_role_test.go
create mode 100644 website/source/docs/providers/postgresql/index.html.markdown
create mode 100644 website/source/docs/providers/postgresql/r/postgresql_database.html.markdown
create mode 100644 website/source/docs/providers/postgresql/r/postgresql_role.html.markdown
create mode 100644 website/source/layouts/postgresql.erb
diff --git a/builtin/bins/provider-postgresql/main.go b/builtin/bins/provider-postgresql/main.go
new file mode 100644
index 0000000000..860ae37f48
--- /dev/null
+++ b/builtin/bins/provider-postgresql/main.go
@@ -0,0 +1,12 @@
+package main
+
+import (
+ "github.com/hashicorp/terraform/builtin/providers/postgresql"
+ "github.com/hashicorp/terraform/plugin"
+)
+
+func main() {
+ plugin.Serve(&plugin.ServeOpts{
+ ProviderFunc: postgresql.Provider,
+ })
+}
diff --git a/builtin/bins/provider-postgresql/main_test.go b/builtin/bins/provider-postgresql/main_test.go
new file mode 100644
index 0000000000..06ab7d0f9a
--- /dev/null
+++ b/builtin/bins/provider-postgresql/main_test.go
@@ -0,0 +1 @@
+package main
diff --git a/builtin/providers/postgresql/config.go b/builtin/providers/postgresql/config.go
new file mode 100644
index 0000000000..454c016141
--- /dev/null
+++ b/builtin/providers/postgresql/config.go
@@ -0,0 +1,43 @@
+package postgresql
+
+import (
+ "database/sql"
+ "fmt"
+ _ "github.com/lib/pq" //PostgreSQL db
+)
+
+// Config - provider config
+type Config struct {
+ Host string
+ Port int
+ Username string
+ Password string
+}
+
+// Client struct holding connection string
+type Client struct {
+ username string
+ connStr string
+}
+
+//NewClient returns a new client config
+func (c *Config) NewClient() (*Client, error) {
+ connStr := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=postgres", c.Host, c.Port, c.Username, c.Password)
+
+ client := Client{
+ connStr: connStr,
+ username: c.Username,
+ }
+
+ return &client, nil
+}
+
+//Connect will manually connect/disconnect to prevent a large number of db connections being made
+func (c *Client) Connect() (*sql.DB, error) {
+ db, err := sql.Open("postgres", c.connStr)
+ if err != nil {
+ return nil, fmt.Errorf("Error connecting to postgresql server: %s", err)
+ }
+
+ return db, nil
+}
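
The Client deliberately stores only the connection string and opens a fresh `database/sql` connection per operation, leaving each resource function responsible for closing it again. A minimal sketch of that flow, assuming a local server and example credentials:

```
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // registers the "postgres" driver
)

func main() {
	// Build the DSN once, then open/close around each operation.
	connStr := "host=127.0.0.1 port=5432 user=postgres password=secret dbname=postgres"

	db, err := sql.Open("postgres", connStr)
	if err != nil {
		log.Fatalf("Error connecting to postgresql server: %s", err)
	}
	defer db.Close()

	var version string
	if err := db.QueryRow("SELECT version()").Scan(&version); err != nil {
		log.Fatalf("query failed: %s", err)
	}
	fmt.Println(version)
}
```
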
diff --git a/builtin/providers/postgresql/provider.go b/builtin/providers/postgresql/provider.go
new file mode 100644
index 0000000000..c048ec3ece
--- /dev/null
+++ b/builtin/providers/postgresql/provider.go
@@ -0,0 +1,63 @@
+package postgresql
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Provider returns a terraform.ResourceProvider.
+func Provider() terraform.ResourceProvider {
+ return &schema.Provider{
+ Schema: map[string]*schema.Schema{
+ "host": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: schema.EnvDefaultFunc("POSTGRESQL_HOST", nil),
+ Description: "The postgresql server address",
+ },
+ "port": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 5432,
+ Description: "The postgresql server port",
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: schema.EnvDefaultFunc("POSTGRESQL_USERNAME", nil),
+ Description: "Username for postgresql server connection",
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: schema.EnvDefaultFunc("POSTGRESQL_PASSWORD", nil),
+ Description: "Password for postgresql server connection",
+ },
+ },
+
+ ResourcesMap: map[string]*schema.Resource{
+ "postgresql_database": resourcePostgresqlDatabase(),
+ "postgresql_role": resourcePostgresqlRole(),
+ },
+
+ ConfigureFunc: providerConfigure,
+ }
+}
+
+func providerConfigure(d *schema.ResourceData) (interface{}, error) {
+ config := Config{
+ Host: d.Get("host").(string),
+ Port: d.Get("port").(int),
+ Username: d.Get("username").(string),
+ Password: d.Get("password").(string),
+ }
+
+ client, err := config.NewClient()
+ if err != nil {
+ return nil, fmt.Errorf("Error initializing Postgresql client: %s", err)
+ }
+
+ return client, nil
+}
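
The `host`, `username` and `password` arguments are required but fall back to environment variables through `schema.EnvDefaultFunc`, which is only consulted when the attribute is absent from the configuration. A small sketch of how that default resolves:

```
package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/terraform/helper/schema"
)

func main() {
	// EnvDefaultFunc returns the environment value if set, otherwise the
	// supplied default (nil here, which keeps the attribute required).
	os.Setenv("POSTGRESQL_HOST", "10.1.2.3")

	hostDefault := schema.EnvDefaultFunc("POSTGRESQL_HOST", nil)
	v, err := hostDefault()
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 10.1.2.3
}
```
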
diff --git a/builtin/providers/postgresql/provider_test.go b/builtin/providers/postgresql/provider_test.go
new file mode 100644
index 0000000000..19c65cb38b
--- /dev/null
+++ b/builtin/providers/postgresql/provider_test.go
@@ -0,0 +1,41 @@
+package postgresql
+
+import (
+ "os"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+var testAccProviders map[string]terraform.ResourceProvider
+var testAccProvider *schema.Provider
+
+func init() {
+ testAccProvider = Provider().(*schema.Provider)
+ testAccProviders = map[string]terraform.ResourceProvider{
+ "postgresql": testAccProvider,
+ }
+}
+
+func TestProvider(t *testing.T) {
+ if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestProvider_impl(t *testing.T) {
+ var _ terraform.ResourceProvider = Provider()
+}
+
+func testAccPreCheck(t *testing.T) {
+ if v := os.Getenv("POSTGRESQL_HOST"); v == "" {
+ t.Fatal("POSTGRESQL_HOST must be set for acceptance tests")
+ }
+ if v := os.Getenv("POSTGRESQL_USERNAME"); v == "" {
+ t.Fatal("POSTGRESQL_USERNAME must be set for acceptance tests")
+ }
+ if v := os.Getenv("POSTGRESQL_PASSWORD"); v == "" {
+ t.Fatal("POSTGRESQL_PASSWORD must be set for acceptance tests")
+ }
+}
diff --git a/builtin/providers/postgresql/resource_postgresql_database.go b/builtin/providers/postgresql/resource_postgresql_database.go
new file mode 100644
index 0000000000..bf01ae42ea
--- /dev/null
+++ b/builtin/providers/postgresql/resource_postgresql_database.go
@@ -0,0 +1,160 @@
+package postgresql
+
+import (
+ "database/sql"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/lib/pq"
+)
+
+func resourcePostgresqlDatabase() *schema.Resource {
+ return &schema.Resource{
+ Create: resourcePostgresqlDatabaseCreate,
+ Read: resourcePostgresqlDatabaseRead,
+ Update: resourcePostgresqlDatabaseUpdate,
+ Delete: resourcePostgresqlDatabaseDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "owner": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: false,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func resourcePostgresqlDatabaseCreate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*Client)
+ conn, err := client.Connect()
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ dbName := d.Get("name").(string)
+ dbOwner := d.Get("owner").(string)
+ connUsername := client.username
+
+ var dbOwnerCfg string
+ if dbOwner != "" {
+ dbOwnerCfg = fmt.Sprintf("WITH OWNER=%s", pq.QuoteIdentifier(dbOwner))
+ } else {
+ dbOwnerCfg = ""
+ }
+
+ //needed in order to set the owner of the db if the connection user is not a superuser
+ err = grantRoleMembership(conn, dbOwner, connUsername)
+ if err != nil {
+ return err
+ }
+
+ query := fmt.Sprintf("CREATE DATABASE %s %s", pq.QuoteIdentifier(dbName), dbOwnerCfg)
+ _, err = conn.Query(query)
+ if err != nil {
+ return fmt.Errorf("Error creating postgresql database %s: %s", dbName, err)
+ }
+
+ d.SetId(dbName)
+
+ return nil
+}
+
+func resourcePostgresqlDatabaseDelete(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*Client)
+ conn, err := client.Connect()
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ dbName := d.Get("name").(string)
+ connUsername := client.username
+ dbOwner := d.Get("owner").(string)
+ //needed in order to set the owner of the db if the connection user is not a superuser
+ err = grantRoleMembership(conn, dbOwner, connUsername)
+ if err != nil {
+ return err
+ }
+
+ query := fmt.Sprintf("DROP DATABASE %s", pq.QuoteIdentifier(dbName))
+ _, err = conn.Query(query)
+ if err != nil {
+ return err
+ }
+
+ d.SetId("")
+
+ return nil
+}
+
+func resourcePostgresqlDatabaseRead(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*Client)
+ conn, err := client.Connect()
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ dbName := d.Get("name").(string)
+
+ var owner string
+ err = conn.QueryRow("SELECT pg_catalog.pg_get_userbyid(d.datdba) from pg_database d WHERE datname=$1", dbName).Scan(&owner)
+ switch {
+ case err == sql.ErrNoRows:
+ d.SetId("")
+ return nil
+ case err != nil:
+ return fmt.Errorf("Error reading info about database: %s", err)
+ default:
+ d.Set("owner", owner)
+ return nil
+ }
+}
+
+func resourcePostgresqlDatabaseUpdate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*Client)
+ conn, err := client.Connect()
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ dbName := d.Get("name").(string)
+
+ if d.HasChange("owner") {
+ owner := d.Get("owner").(string)
+ if owner != "" {
+ query := fmt.Sprintf("ALTER DATABASE %s OWNER TO %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner))
+ _, err := conn.Query(query)
+ if err != nil {
+ return fmt.Errorf("Error updating owner for database: %s", err)
+ }
+ }
+ }
+
+ return resourcePostgresqlDatabaseRead(d, meta)
+}
+
+func grantRoleMembership(conn *sql.DB, dbOwner string, connUsername string) error {
+ if dbOwner != "" && dbOwner != connUsername {
+ query := fmt.Sprintf("GRANT %s TO %s", pq.QuoteIdentifier(dbOwner), pq.QuoteIdentifier(connUsername))
+ _, err := conn.Query(query)
+ if err != nil {
+ //the user is already a member of the role
+ if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
+ return nil
+ }
+ return fmt.Errorf("Error granting membership: %s", err)
+ }
+ }
+ return nil
+}
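
The read function relies on the usual `database/sql` convention: `sql.ErrNoRows` means the database was dropped outside of Terraform (so the ID is cleared), while any other error is surfaced. A standalone sketch of that lookup, assuming a reachable local server:

```
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

// lookupOwner returns the owner of dbName, reporting found=false when the
// database no longer exists and an error for any other failure.
func lookupOwner(db *sql.DB, dbName string) (owner string, found bool, err error) {
	err = db.QueryRow(
		"SELECT pg_catalog.pg_get_userbyid(d.datdba) FROM pg_database d WHERE datname=$1",
		dbName).Scan(&owner)
	switch {
	case err == sql.ErrNoRows:
		return "", false, nil
	case err != nil:
		return "", false, fmt.Errorf("Error reading info about database: %s", err)
	default:
		return owner, true, nil
	}
}

func main() {
	db, err := sql.Open("postgres", "host=127.0.0.1 user=postgres dbname=postgres sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	owner, found, err := lookupOwner(db, "mydb")
	fmt.Println(owner, found, err)
}
```
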
diff --git a/builtin/providers/postgresql/resource_postgresql_database_test.go b/builtin/providers/postgresql/resource_postgresql_database_test.go
new file mode 100644
index 0000000000..35d2b271c9
--- /dev/null
+++ b/builtin/providers/postgresql/resource_postgresql_database_test.go
@@ -0,0 +1,144 @@
+package postgresql
+
+import (
+ "database/sql"
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccPostgresqlDatabase_Basic(t *testing.T) {
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckPostgresqlDatabaseDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccPostgresqlDatabaseConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckPostgresqlDatabaseExists("postgresql_database.mydb", "myrole"),
+ resource.TestCheckResourceAttr(
+ "postgresql_database.mydb", "name", "mydb"),
+ resource.TestCheckResourceAttr(
+ "postgresql_database.mydb", "owner", "myrole"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccPostgresqlDatabase_DefaultOwner(t *testing.T) {
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckPostgresqlDatabaseDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccPostgresqlDatabaseConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckPostgresqlDatabaseExists("postgresql_database.mydb_default_owner", ""),
+ resource.TestCheckResourceAttr(
+ "postgresql_database.mydb_default_owner", "name", "mydb_default_owner"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckPostgresqlDatabaseDestroy(s *terraform.State) error {
+ client := testAccProvider.Meta().(*Client)
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "postgresql_database" {
+ continue
+ }
+
+ exists, err := checkDatabaseExists(client, rs.Primary.ID)
+
+ if err != nil {
+ return fmt.Errorf("Error checking db %s", err)
+ }
+
+ if exists {
+ return fmt.Errorf("Db still exists after destroy")
+ }
+ }
+
+ return nil
+}
+
+func testAccCheckPostgresqlDatabaseExists(n string, owner string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Resource not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No ID is set")
+ }
+
+ actualOwner := rs.Primary.Attributes["owner"]
+ if actualOwner != owner {
+ return fmt.Errorf("Wrong owner for db expected %s got %s", owner, actualOwner)
+ }
+
+ client := testAccProvider.Meta().(*Client)
+ exists, err := checkDatabaseExists(client, rs.Primary.ID)
+
+ if err != nil {
+ return fmt.Errorf("Error checking db %s", err)
+ }
+
+ if !exists {
+ return fmt.Errorf("Db not found")
+ }
+
+ return nil
+ }
+}
+
+func checkDatabaseExists(client *Client, dbName string) (bool, error) {
+ conn, err := client.Connect()
+ if err != nil {
+ return false, err
+ }
+ defer conn.Close()
+
+ var _rez int
+ err = conn.QueryRow("SELECT 1 from pg_database d WHERE datname=$1", dbName).Scan(&_rez)
+ switch {
+ case err == sql.ErrNoRows:
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("Error reading info about database: %s", err)
+ default:
+ return true, nil
+ }
+}
+
+var testAccPostgresqlDatabaseConfig = `
+resource "postgresql_role" "myrole" {
+ name = "myrole"
+ login = true
+}
+
+resource "postgresql_database" "mydb" {
+ name = "mydb"
+ owner = "${postgresql_role.myrole.name}"
+}
+
+resource "postgresql_database" "mydb2" {
+ name = "mydb2"
+ owner = "${postgresql_role.myrole.name}"
+}
+
+resource "postgresql_database" "mydb_default_owner" {
+ name = "mydb_default_owner"
+}
+
+`
diff --git a/builtin/providers/postgresql/resource_postgresql_role.go b/builtin/providers/postgresql/resource_postgresql_role.go
new file mode 100644
index 0000000000..104b5c9d01
--- /dev/null
+++ b/builtin/providers/postgresql/resource_postgresql_role.go
@@ -0,0 +1,179 @@
+package postgresql
+
+import (
+ "database/sql"
+ "fmt"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/lib/pq"
+)
+
+func resourcePostgresqlRole() *schema.Resource {
+ return &schema.Resource{
+ Create: resourcePostgresqlRoleCreate,
+ Read: resourcePostgresqlRoleRead,
+ Update: resourcePostgresqlRoleUpdate,
+ Delete: resourcePostgresqlRoleDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "login": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: false,
+ Default: false,
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: false,
+ },
+ "encrypted": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: false,
+ Default: false,
+ },
+ },
+ }
+}
+
+func resourcePostgresqlRoleCreate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*Client)
+ conn, err := client.Connect()
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ roleName := d.Get("name").(string)
+ loginAttr := getLoginStr(d.Get("login").(bool))
+ password := d.Get("password").(string)
+
+ encryptedCfg := getEncryptedStr(d.Get("encrypted").(bool))
+
+ query := fmt.Sprintf("CREATE ROLE %s %s %s PASSWORD '%s'", pq.QuoteIdentifier(roleName), loginAttr, encryptedCfg, password)
+ _, err = conn.Query(query)
+ if err != nil {
+ return fmt.Errorf("Error creating role: %s", err)
+ }
+
+ d.SetId(roleName)
+
+ return nil
+}
+
+func resourcePostgresqlRoleDelete(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*Client)
+ conn, err := client.Connect()
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ roleName := d.Get("name").(string)
+
+ query := fmt.Sprintf("DROP ROLE %s", pq.QuoteIdentifier(roleName))
+ _, err = conn.Query(query)
+ if err != nil {
+ return err
+ }
+
+ d.SetId("")
+
+ return nil
+}
+
+func resourcePostgresqlRoleRead(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*Client)
+ conn, err := client.Connect()
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ roleName := d.Get("name").(string)
+
+ var canLogin bool
+ err = conn.QueryRow("select rolcanlogin from pg_roles where rolname=$1", roleName).Scan(&canLogin)
+ switch {
+ case err == sql.ErrNoRows:
+ d.SetId("")
+ return nil
+ case err != nil:
+ return fmt.Errorf("Error reading info about role: %s", err)
+ default:
+ d.Set("login", canLogin)
+ return nil
+ }
+}
+
+func resourcePostgresqlRoleUpdate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*Client)
+ conn, err := client.Connect()
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ d.Partial(true)
+
+ roleName := d.Get("name").(string)
+
+ if d.HasChange("login") {
+ loginAttr := getLoginStr(d.Get("login").(bool))
+ query := fmt.Sprintf("ALTER ROLE %s %s", pq.QuoteIdentifier(roleName), pq.QuoteIdentifier(loginAttr))
+ _, err := conn.Query(query)
+ if err != nil {
+ return fmt.Errorf("Error updating login attribute for role: %s", err)
+ }
+
+ d.SetPartial("login")
+ }
+
+ password := d.Get("password").(string)
+ if d.HasChange("password") {
+ encryptedCfg := getEncryptedStr(d.Get("encrypted").(bool))
+
+ query := fmt.Sprintf("ALTER ROLE %s %s PASSWORD '%s'", pq.QuoteIdentifier(roleName), encryptedCfg, password)
+ _, err := conn.Query(query)
+ if err != nil {
+ return fmt.Errorf("Error updating password attribute for role: %s", err)
+ }
+
+ d.SetPartial("password")
+ }
+
+ if d.HasChange("encrypted") {
+ encryptedCfg := getEncryptedStr(d.Get("encrypted").(bool))
+
+ query := fmt.Sprintf("ALTER ROLE %s %s PASSWORD '%s'", pq.QuoteIdentifier(roleName), encryptedCfg, password)
+ _, err := conn.Query(query)
+ if err != nil {
+ return fmt.Errorf("Error updating encrypted attribute for role: %s", err)
+ }
+
+ d.SetPartial("encrypted")
+ }
+
+ d.Partial(false)
+ return resourcePostgresqlRoleRead(d, meta)
+}
+
+func getLoginStr(canLogin bool) string {
+ if canLogin {
+ return "login"
+ }
+ return "nologin"
+}
+
+func getEncryptedStr(isEncrypted bool) string {
+ if isEncrypted {
+ return "encrypted"
+ }
+ return "unencrypted"
+}
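
Role and database names are interpolated through `pq.QuoteIdentifier` so that names containing upper-case letters, spaces or quotes survive intact (passwords, by contrast, are embedded as plain literals). A tiny sketch of what the quoting produces:

```
package main

import (
	"fmt"

	"github.com/lib/pq"
)

func main() {
	fmt.Println(pq.QuoteIdentifier("my-role"))    // "my-role"
	fmt.Println(pq.QuoteIdentifier(`weird"name`)) // "weird""name"
	fmt.Printf("DROP ROLE %s\n", pq.QuoteIdentifier("My Role"))
}
```
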
diff --git a/builtin/providers/postgresql/resource_postgresql_role_test.go b/builtin/providers/postgresql/resource_postgresql_role_test.go
new file mode 100644
index 0000000000..0839b2ef6c
--- /dev/null
+++ b/builtin/providers/postgresql/resource_postgresql_role_test.go
@@ -0,0 +1,132 @@
+package postgresql
+
+import (
+ "database/sql"
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccPostgresqlRole_Basic(t *testing.T) {
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckPostgresqlRoleDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccPostgresqlRoleConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckPostgresqlRoleExists("postgresql_role.myrole2", "true"),
+ resource.TestCheckResourceAttr(
+ "postgresql_role.myrole2", "name", "myrole2"),
+ resource.TestCheckResourceAttr(
+ "postgresql_role.myrole2", "login", "true"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckPostgresqlRoleDestroy(s *terraform.State) error {
+ client := testAccProvider.Meta().(*Client)
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "postgresql_role" {
+ continue
+ }
+
+ exists, err := checkRoleExists(client, rs.Primary.ID)
+
+ if err != nil {
+ return fmt.Errorf("Error checking role %s", err)
+ }
+
+ if exists {
+ return fmt.Errorf("Role still exists after destroy")
+ }
+ }
+
+ return nil
+}
+
+func testAccCheckPostgresqlRoleExists(n string, canLogin string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Resource not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No ID is set")
+ }
+
+ actualCanLogin := rs.Primary.Attributes["login"]
+ if actualCanLogin != canLogin {
+ return fmt.Errorf("Wrong value for login expected %s got %s", canLogin, actualCanLogin)
+ }
+
+ client := testAccProvider.Meta().(*Client)
+ exists, err := checkRoleExists(client, rs.Primary.ID)
+
+ if err != nil {
+ return fmt.Errorf("Error checking role %s", err)
+ }
+
+ if !exists {
+ return fmt.Errorf("Role not found")
+ }
+
+ return nil
+ }
+}
+
+func checkRoleExists(client *Client, roleName string) (bool, error) {
+ conn, err := client.Connect()
+ if err != nil {
+ return false, err
+ }
+ defer conn.Close()
+
+ var _rez int
+ err = conn.QueryRow("SELECT 1 from pg_roles d WHERE rolname=$1", roleName).Scan(&_rez)
+ switch {
+ case err == sql.ErrNoRows:
+ return false, nil
+ case err != nil:
+ return false, fmt.Errorf("Error reading info about role: %s", err)
+ default:
+ return true, nil
+ }
+}
+
+var testAccPostgresqlRoleConfig = `
+resource "postgresql_role" "myrole2" {
+ name = "myrole2"
+ login = true
+}
+
+resource "postgresql_role" "role_with_pwd" {
+ name = "role_with_pwd"
+ login = true
+ password = "mypass"
+}
+
+resource "postgresql_role" "role_with_pwd_encr" {
+ name = "role_with_pwd_encr"
+ login = true
+ password = "mypass"
+ encrypted = true
+}
+
+resource "postgresql_role" "role_with_pwd_no_login" {
+ name = "role_with_pwd_no_login"
+ password = "mypass"
+}
+
+resource "postgresql_role" "role_simple" {
+ name = "role_simple"
+}
+`
diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss
index 410a2efb1a..01ef3d1944 100755
--- a/website/source/assets/stylesheets/_docs.scss
+++ b/website/source/assets/stylesheets/_docs.scss
@@ -21,6 +21,7 @@ body.layout-heroku,
body.layout-mailgun,
body.layout-openstack,
body.layout-packet,
+body.layout-postgresql,
body.layout-rundeck,
body.layout-statuscake,
body.layout-template,
diff --git a/website/source/docs/providers/postgresql/index.html.markdown b/website/source/docs/providers/postgresql/index.html.markdown
new file mode 100644
index 0000000000..36761b626a
--- /dev/null
+++ b/website/source/docs/providers/postgresql/index.html.markdown
@@ -0,0 +1,63 @@
+---
+layout: "postgresql"
+page_title: "Provider: PostgreSQL"
+sidebar_current: "docs-postgresql-index"
+description: |-
+ A provider for PostgreSQL Server.
+---
+
+# PostgreSQL Provider
+
+The PostgreSQL provider allows you to deploy and configure resources on a PostgreSQL server.
+
+Use the navigation to the left to read about the available resources.
+
+## Usage
+
+```
+provider "postgresql" {
+ host = "postgres_server_ip"
+ port = 5432
+ username = "postgres_user"
+ password = "postgres_password"
+}
+
+```
+
+Multiple servers can be configured by specifying the `alias` option.
+
+```
+provider "postgresql" {
+ alias = "pg1"
+ host = "postgres_server_ip1"
+ username = "postgres_user1"
+ password = "postgres_password1"
+}
+
+provider "postgresql" {
+ alias = "pg2"
+ host = "postgres_server_ip2"
+ username = "postgres_user2"
+ password = "postgres_password2"
+}
+
+resource "postgresql_database" "my_db1" {
+ provider = "postgresql.pg1"
+ name = "my_db1"
+}
+resource "postgresql_database" "my_db2" {
+ provider = "postgresql.pg2"
+ name = "my_db2"
+}
+
+
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `host` - (Required) The address for the PostgreSQL server connection.
+* `port` - (Optional) The port for the PostgreSQL server connection. (Default 5432)
+* `username` - (Required) Username for the server connection.
+* `password` - (Optional) Password for the server connection.
\ No newline at end of file
diff --git a/website/source/docs/providers/postgresql/r/postgresql_database.html.markdown b/website/source/docs/providers/postgresql/r/postgresql_database.html.markdown
new file mode 100644
index 0000000000..0c23a7d129
--- /dev/null
+++ b/website/source/docs/providers/postgresql/r/postgresql_database.html.markdown
@@ -0,0 +1,30 @@
+---
+layout: "postgresql"
+page_title: "PostgreSQL: postgresql_database"
+sidebar_current: "docs-postgresql-resource-postgresql_database"
+description: |-
+ Creates and manages a database on a PostgreSQL server.
+---
+
+# postgresql\_database
+
+The ``postgresql_database`` resource creates and manages a database on a PostgreSQL
+server.
+
+
+## Usage
+
+```
+resource "postgresql_database" "my_db" {
+ name = "my_db"
+   owner = "my_role"
+}
+
+```
+
+## Argument Reference
+
+* `name` - (Required) The name of the database. Must be unique on the PostgreSQL server instance
+ where it is configured.
+
+* `owner` - (Optional) The owner role of the database. If not specified the default is the user executing the command. To create a database owned by another role, you must be a direct or indirect member of that role, or be a superuser.
diff --git a/website/source/docs/providers/postgresql/r/postgresql_role.html.markdown b/website/source/docs/providers/postgresql/r/postgresql_role.html.markdown
new file mode 100644
index 0000000000..a5d5c17d87
--- /dev/null
+++ b/website/source/docs/providers/postgresql/r/postgresql_role.html.markdown
@@ -0,0 +1,37 @@
+---
+layout: "postgresql"
+page_title: "PostgreSQL: postgresql_role"
+sidebar_current: "docs-postgresql-resource-postgresql_role"
+description: |-
+  Creates and manages a role on a PostgreSQL server.
+---
+
+# postgresql\_role
+
+The ``postgresql_role`` resource creates and manages a role on a PostgreSQL
+server.
+
+
+## Usage
+
+```
+resource "postgresql_role" "my_role" {
+ name = "my_role"
+ login = true
+ password = "mypass"
+ encrypted = true
+}
+
+```
+
+## Argument Reference
+
+* `name` - (Required) The name of the role. Must be unique on the PostgreSQL server instance
+ where it is configured.
+
+* `login` - (Optional) Configures whether a role is allowed to log in; that is, whether the role can be given as the initial session authorization name during client connection. Corresponds to the LOGIN/NOLOGIN
+clauses in 'CREATE ROLE'. Default value is false.
+
+* `password` - (Optional) Sets the role's password. (A password is only of use for roles having the LOGIN attribute, but you can nonetheless define one for roles without it.) If you do not plan to use password authentication you can omit this option. If no password is specified, the password will be set to null and password authentication will always fail for that user.
+
+* `encrypted` - (Optional) Corresponds to ENCRYPTED, UNENCRYPTED in PostgreSQL. This controls whether the password is stored encrypted in the system catalogs. Default is false.
\ No newline at end of file
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb
index 14ab87377e..f784ad2d25 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -181,6 +181,10 @@
Packet
+ >
+ PostgreSQL
+
+
>
Rundeck
diff --git a/website/source/layouts/postgresql.erb b/website/source/layouts/postgresql.erb
new file mode 100644
index 0000000000..7375784ce6
--- /dev/null
+++ b/website/source/layouts/postgresql.erb
@@ -0,0 +1,29 @@
+<% wrap_layout :inner do %>
+ <% content_for :sidebar do %>
+
+ <% end %>
+
+ <%= yield %>
+ <% end %>
\ No newline at end of file
From 1a19f43ee1a897821ba67c79770cb04bf7b6730c Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Fri, 4 Dec 2015 07:15:18 -0600
Subject: [PATCH 126/664] core: support HTTP basic auth in consul remote state
Closes #1663
---
state/remote/consul.go | 12 ++++++++++++
.../source/docs/commands/remote-config.html.markdown | 4 ++++
2 files changed, 16 insertions(+)
diff --git a/state/remote/consul.go b/state/remote/consul.go
index 791f4dca37..6a3894b686 100644
--- a/state/remote/consul.go
+++ b/state/remote/consul.go
@@ -3,6 +3,7 @@ package remote
import (
"crypto/md5"
"fmt"
+ "strings"
consulapi "github.com/hashicorp/consul/api"
)
@@ -23,6 +24,17 @@ func consulFactory(conf map[string]string) (Client, error) {
if scheme, ok := conf["scheme"]; ok && scheme != "" {
config.Scheme = scheme
}
+ if auth, ok := conf["http_auth"]; ok && auth != "" {
+ var username, password string
+ if strings.Contains(auth, ":") {
+ split := strings.SplitN(auth, ":", 2)
+ username = split[0]
+ password = split[1]
+ } else {
+ username = auth
+ }
+ config.HttpAuth = &consulapi.HttpBasicAuth{username, password}
+ }
client, err := consulapi.NewClient(config)
if err != nil {
diff --git a/website/source/docs/commands/remote-config.html.markdown b/website/source/docs/commands/remote-config.html.markdown
index ad31021134..818642929b 100644
--- a/website/source/docs/commands/remote-config.html.markdown
+++ b/website/source/docs/commands/remote-config.html.markdown
@@ -57,6 +57,10 @@ The following backends are supported:
* `scheme` - Specifies what protocol to use when talking to the given
`address`, either `http` or `https`. SSL support can also be triggered
by setting the environment variable `CONSUL_HTTP_SSL` to `true`.
+ * `http_auth` - HTTP Basic Authentication credentials to be used when
+ communicating with Consul, in the format of either `user` or `user:pass`.
+ This may also be specified using the `CONSUL_HTTP_AUTH` environment
+ variable.
* Etcd - Stores the state in etcd at a given path.
Requires the `path` and `endpoints` variables. The `username` and `password`
From 6817e0d1449d92ee3c21669c4570f74fd2a832b6 Mon Sep 17 00:00:00 2001
From: stack72
Date: Fri, 4 Dec 2015 09:21:08 -0500
Subject: [PATCH 127/664] Adding the ability to generate a security group
name-prefix
---
.../aws/resource_aws_security_group.go | 17 +++++++
.../aws/resource_aws_security_group_test.go | 49 +++++++++++++++++++
.../aws/r/security_group.html.markdown | 2 +
3 files changed, 68 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_security_group.go b/builtin/providers/aws/resource_aws_security_group.go
index 5bfdf3612d..b0cabec2bc 100644
--- a/builtin/providers/aws/resource_aws_security_group.go
+++ b/builtin/providers/aws/resource_aws_security_group.go
@@ -28,6 +28,7 @@ func resourceAwsSecurityGroup() *schema.Resource {
Optional: true,
Computed: true,
ForceNew: true,
+ ConflictsWith: []string{"name_prefix"},
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if len(value) > 255 {
@@ -38,6 +39,20 @@ func resourceAwsSecurityGroup() *schema.Resource {
},
},
+ "name_prefix": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if len(value) > 100 {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be longer than 100 characters, name is limited to 255", k))
+ }
+ return
+ },
+ },
+
"description": &schema.Schema{
Type: schema.TypeString,
Optional: true,
@@ -178,6 +193,8 @@ func resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) er
var groupName string
if v, ok := d.GetOk("name"); ok {
groupName = v.(string)
+ } else if v, ok := d.GetOk("name_prefix"); ok {
+ groupName = resource.PrefixedUniqueId(v.(string))
} else {
groupName = resource.UniqueId()
}
diff --git a/builtin/providers/aws/resource_aws_security_group_test.go b/builtin/providers/aws/resource_aws_security_group_test.go
index e6b520d957..d5142c68ea 100644
--- a/builtin/providers/aws/resource_aws_security_group_test.go
+++ b/builtin/providers/aws/resource_aws_security_group_test.go
@@ -46,6 +46,26 @@ func TestAccAWSSecurityGroup_basic(t *testing.T) {
})
}
+func TestAccAWSSecurityGroup_namePrefix(t *testing.T) {
+ var group ec2.SecurityGroup
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSSecurityGroupPrefixNameConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSSecurityGroupExists("aws_security_group.baz", &group),
+ testAccCheckAWSSecurityGroupGeneratedNamePrefix(
+ "aws_security_group.baz", "baz-"),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAWSSecurityGroup_self(t *testing.T) {
var group ec2.SecurityGroup
@@ -324,6 +344,24 @@ func testAccCheckAWSSecurityGroupDestroy(s *terraform.State) error {
return nil
}
+func testAccCheckAWSSecurityGroupGeneratedNamePrefix(
+resource, prefix string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ r, ok := s.RootModule().Resources[resource]
+ if !ok {
+ return fmt.Errorf("Resource not found")
+ }
+ name, ok := r.Primary.Attributes["name"]
+ if !ok {
+ return fmt.Errorf("Name attr not found: %#v", r.Primary.Attributes)
+ }
+ if !strings.HasPrefix(name, prefix) {
+ return fmt.Errorf("Name: %q, does not have prefix: %q", name, prefix)
+ }
+ return nil
+ }
+}
+
func testAccCheckAWSSecurityGroupExists(n string, group *ec2.SecurityGroup) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
@@ -809,3 +847,14 @@ resource "aws_security_group" "web" {
description = "Used in the terraform acceptance tests"
}
`
+
+const testAccAWSSecurityGroupPrefixNameConfig = `
+provider "aws" {
+ region = "us-east-1"
+}
+
+resource "aws_security_group" "baz" {
+ name_prefix = "baz-"
+ description = "Used in the terraform acceptance tests"
+}
+`
diff --git a/website/source/docs/providers/aws/r/security_group.html.markdown b/website/source/docs/providers/aws/r/security_group.html.markdown
index b045b01f8a..860d6a4b9c 100644
--- a/website/source/docs/providers/aws/r/security_group.html.markdown
+++ b/website/source/docs/providers/aws/r/security_group.html.markdown
@@ -68,6 +68,8 @@ The following arguments are supported:
* `name` - (Optional) The name of the security group. If omitted, Terraform will
assign a random, unique name
+* `name_prefix` - (Optional) Creates a unique name beginning with the specified
+ prefix. Conflicts with `name`.
* `description` - (Optional) The security group description. Defaults to "Managed by Terraform". Cannot be "".
* `ingress` - (Optional) Can be specified multiple times for each
ingress rule. Each ingress block supports fields documented below.
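Note: a minimal configuration exercising the new `name_prefix` argument, mirroring the acceptance test config added above (the resource label and prefix are placeholders):

```
resource "aws_security_group" "example" {
  name_prefix = "example-"
  description = "Security group with a Terraform-generated name"
}
```

Terraform appends a unique suffix via `resource.PrefixedUniqueId`, which is why the prefix itself is validated against a 100-character limit while the full generated name may be up to 255 characters.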
From 3a08cc9334dc5feebebfc4145da49c4e7273dd51 Mon Sep 17 00:00:00 2001
From: Takaaki Furukawa
Date: Sun, 15 Nov 2015 12:24:28 +0900
Subject: [PATCH 128/664] provider/vsphere: Add allow_unverified_ssl flag for
unverified SSL requests
---
builtin/providers/vsphere/config.go | 7 ++-----
builtin/providers/vsphere/provider.go | 8 ++++++++
website/source/docs/providers/vsphere/index.html.markdown | 5 +++++
3 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/builtin/providers/vsphere/config.go b/builtin/providers/vsphere/config.go
index 06deedaebb..07ec95d002 100644
--- a/builtin/providers/vsphere/config.go
+++ b/builtin/providers/vsphere/config.go
@@ -9,14 +9,11 @@ import (
"golang.org/x/net/context"
)
-const (
- defaultInsecureFlag = true
-)
-
type Config struct {
User string
Password string
VSphereServer string
+ InsecureFlag bool
}
// Client() returns a new client for accessing VMWare vSphere.
@@ -28,7 +25,7 @@ func (c *Config) Client() (*govmomi.Client, error) {
u.User = url.UserPassword(c.User, c.Password)
- client, err := govmomi.NewClient(context.TODO(), u, defaultInsecureFlag)
+ client, err := govmomi.NewClient(context.TODO(), u, c.InsecureFlag)
if err != nil {
return nil, fmt.Errorf("Error setting up client: %s", err)
}
diff --git a/builtin/providers/vsphere/provider.go b/builtin/providers/vsphere/provider.go
index 9a749a127b..4ed1488857 100644
--- a/builtin/providers/vsphere/provider.go
+++ b/builtin/providers/vsphere/provider.go
@@ -29,6 +29,13 @@ func Provider() terraform.ResourceProvider {
DefaultFunc: schema.EnvDefaultFunc("VSPHERE_SERVER", nil),
Description: "The vSphere Server name for vSphere API operations.",
},
+
+ "allow_unverified_ssl": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ DefaultFunc: schema.EnvDefaultFunc("VSPHERE_ALLOW_UNVERIFIED_SSL", false),
+ Description: "If set, VMware vSphere client will permit unverifiable SSL certificates.",
+ },
},
ResourcesMap: map[string]*schema.Resource{
@@ -44,6 +51,7 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
User: d.Get("user").(string),
Password: d.Get("password").(string),
VSphereServer: d.Get("vsphere_server").(string),
+ InsecureFlag: d.Get("allow_unverified_ssl").(bool),
}
return config.Client()
diff --git a/website/source/docs/providers/vsphere/index.html.markdown b/website/source/docs/providers/vsphere/index.html.markdown
index 8cacfd36b9..db0edc1924 100644
--- a/website/source/docs/providers/vsphere/index.html.markdown
+++ b/website/source/docs/providers/vsphere/index.html.markdown
@@ -58,6 +58,11 @@ The following arguments are used to configure the VMware vSphere Provider:
* `vsphere_server` - (Required) This is the vCenter server name for vSphere API
operations. Can also be specified with the `VSPHERE_SERVER` environment
variable.
+* `allow_unverified_ssl` - (Optional) Boolean that can be set to true to
+ disable SSL certificate verification. This should be used with care as it
+ could allow an attacker to intercept your auth token. If omitted, default
+ value is `false`. Can also be specified with the `VSPHERE_ALLOW_UNVERIFIED_SSL`
+ environment variable.
## Acceptance Tests
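Note: a sketch of opting into the new flag alongside the existing provider arguments (server address and credentials are placeholders):

```
provider "vsphere" {
  user                 = "administrator@example.com"
  password             = "password"
  vsphere_server       = "vcenter.example.com"
  allow_unverified_ssl = true
}
```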
From eed83275e32ec09f0bff9436bd9eba5a31066eb3 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Fri, 4 Dec 2015 08:51:49 -0600
Subject: [PATCH 129/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3d4929ca73..bb92078e4c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@ IMPROVEMENTS:
* provider/docker: Add support for setting log driver and options on `docker_container` resources [GH-3761]
* provider/vsphere: Add support for custom vm params on `vsphere_virtual_machine` [GH-3867]
* provider/vsphere: Rename vcenter_server config parameter to something clearer [GH-3718]
+ * provider/vsphere: Make allow_unverified_ssl configurable on the provider [GH-3933]
BUG FIXES:
From a9ee6a48838add6530f5b56ad759528844fa9e65 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Fri, 4 Dec 2015 09:04:16 -0600
Subject: [PATCH 130/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bb92078e4c..0691458ca6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,7 @@ IMPROVEMENTS:
* provider/vsphere: Add support for custom vm params on `vsphere_virtual_machine` [GH-3867]
* provider/vsphere: Rename vcenter_server config parameter to something clearer [GH-3718]
* provider/vsphere: Make allow_unverified_ssl configurable on the provider [GH-3933]
+ * core: change set internals and make (extreme) performance improvements [GH-3992]
BUG FIXES:
From 652229bdcba4c321bb03e10c88aa1d5615238847 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Fri, 4 Dec 2015 09:08:02 -0600
Subject: [PATCH 131/664] Update CHANGELOG.md
---
CHANGELOG.md | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0691458ca6..f18706b0c5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,7 +8,9 @@ FEATURES:
IMPROVEMENTS:
+ * core: Change set internals for performance improvements [GH-3992]
* provider/aws: Add placement_group as an option for `aws_autoscaling_group` [GH-3704]
+ * provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting the restart policy on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting memory, swap and CPU shares on `docker_container` resources [GH-3761]
@@ -17,7 +19,6 @@ IMPROVEMENTS:
* provider/vsphere: Add support for custom vm params on `vsphere_virtual_machine` [GH-3867]
* provider/vsphere: Rename vcenter_server config parameter to something clearer [GH-3718]
* provider/vsphere: Make allow_unverified_ssl configurable on the provider [GH-3933]
- * core: change set internals and make (extreme) performance improvements [GH-3992]
BUG FIXES:
From 3d28b8dec227716eda962716cb0e315074d2b05b Mon Sep 17 00:00:00 2001
From: Piotr Komborski
Date: Fri, 4 Dec 2015 15:10:26 +0000
Subject: [PATCH 132/664] S3 bucket force_destroy error: MalformedXML
The AWS provider was not checking whether DeleteMarkers were left in the S3
bucket, causing s3.DeleteObjectsInput to send empty XML, which resulted in a
400 error with a MalformedXML message.
---
.../providers/aws/resource_aws_s3_bucket.go | 44 +++++++++++++------
1 file changed, 30 insertions(+), 14 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_s3_bucket.go b/builtin/providers/aws/resource_aws_s3_bucket.go
index 069cb837ab..ec57452022 100644
--- a/builtin/providers/aws/resource_aws_s3_bucket.go
+++ b/builtin/providers/aws/resource_aws_s3_bucket.go
@@ -406,30 +406,46 @@ func resourceAwsS3BucketDelete(d *schema.ResourceData, meta interface{}) error {
log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %+v", err)
bucket := d.Get("bucket").(string)
- resp, err := s3conn.ListObjects(
- &s3.ListObjectsInput{
+ resp, err := s3conn.ListObjectVersions(
+ &s3.ListObjectVersionsInput{
Bucket: aws.String(bucket),
},
)
if err != nil {
- return fmt.Errorf("Error S3 Bucket list Objects err: %s", err)
+ return fmt.Errorf("Error S3 Bucket list Object Versions err: %s", err)
}
- objectsToDelete := make([]*s3.ObjectIdentifier, len(resp.Contents))
- for i, v := range resp.Contents {
- objectsToDelete[i] = &s3.ObjectIdentifier{
- Key: v.Key,
+ objectsToDelete := make([]*s3.ObjectIdentifier, 0)
+
+ if len(resp.DeleteMarkers) != 0 {
+
+ for _, v := range resp.DeleteMarkers {
+ objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
+ Key: v.Key,
+ VersionId: v.VersionId,
+ })
}
}
- _, err = s3conn.DeleteObjects(
- &s3.DeleteObjectsInput{
- Bucket: aws.String(bucket),
- Delete: &s3.Delete{
- Objects: objectsToDelete,
- },
+
+ if len(resp.Versions) != 0 {
+ for _, v := range resp.Versions {
+ objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{
+ Key: v.Key,
+ VersionId: v.VersionId,
+ })
+ }
+ }
+
+ params := &s3.DeleteObjectsInput{
+ Bucket: aws.String(bucket),
+ Delete: &s3.Delete{
+ Objects: objectsToDelete,
},
- )
+ }
+
+ _, err = s3conn.DeleteObjects(params)
+
if err != nil {
return fmt.Errorf("Error S3 Bucket force_destroy error deleting: %s", err)
}
From e06b76ac438ae63dbc4b43d9d9823d04c61d6622 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Fri, 4 Dec 2015 11:40:28 -0600
Subject: [PATCH 133/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f18706b0c5..583a29567a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,7 @@ FEATURES:
IMPROVEMENTS:
* core: Change set internals for performance improvements [GH-3992]
+ * core: Support HTTP basic auth in consul remote state [GH-4166]
* provider/aws: Add placement_group as an option for `aws_autoscaling_group` [GH-3704]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
From b7acbd7887fabb370ae0e2c766bd5e1f00af59ba Mon Sep 17 00:00:00 2001
From: Corey Farwell
Date: Sat, 5 Dec 2015 14:20:24 -0500
Subject: [PATCH 134/664] =?UTF-8?q?Fix=20typo=20'storaege'=20=E2=86=92=20'?=
=?UTF-8?q?storage'?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
website/source/layouts/google.erb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/layouts/google.erb b/website/source/layouts/google.erb
index 2ffae19583..a10277203f 100644
--- a/website/source/layouts/google.erb
+++ b/website/source/layouts/google.erb
@@ -156,7 +156,7 @@
>
- Google Storaege Resources
+ Google Storage Resources
>
google_storage_bucket
From 82e502359c3d9118d2c982f146c3aefe027b285f Mon Sep 17 00:00:00 2001
From: Corey Farwell
Date: Sat, 5 Dec 2015 16:51:05 -0500
Subject: [PATCH 135/664] Fix minor spacing inconsistency
---
.../docs/providers/google/r/container_cluster.html.markdown | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/docs/providers/google/r/container_cluster.html.markdown b/website/source/docs/providers/google/r/container_cluster.html.markdown
index 5a66ec9aaf..b2db4bedfa 100644
--- a/website/source/docs/providers/google/r/container_cluster.html.markdown
+++ b/website/source/docs/providers/google/r/container_cluster.html.markdown
@@ -41,7 +41,7 @@ resource "google_container_cluster" "primary" {
* `monitoring_service` - (Optional) The monitoring service that the cluster should write metrics to.
Available options include `monitoring.googleapis.com` and `none`. Defaults to `monitoring.googleapis.com`
* `network` - (Optional) The name of the Google Compute Engine network to which the cluster is connected
-* `node_config` - (Optional)The machine type and image to use for all nodes in this cluster
+* `node_config` - (Optional) The machine type and image to use for all nodes in this cluster
**Master Auth** supports the following arguments:
From 53f02fc4a2a53cfdacc565bf136c39686e812b4a Mon Sep 17 00:00:00 2001
From: Alex Berghage
Date: Sun, 6 Dec 2015 06:52:35 -0500
Subject: [PATCH 136/664] Analogous to #3768 -- just for DNS and keys.
The error string for 404s on DNS domains has (apparently)
changed, causing things to be a little sad when you modify
DNS domains from the DO console instead of terraform. This
is just the same fix as was applied to droplets around this
time last month.
While I was at it I just fixed this everywhere I saw it in the
DO provider source tree.
---
.../digitalocean/resource_digitalocean_domain.go | 5 ++---
.../digitalocean/resource_digitalocean_record.go | 12 ++++++------
.../digitalocean/resource_digitalocean_ssh_key.go | 5 ++---
3 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/builtin/providers/digitalocean/resource_digitalocean_domain.go b/builtin/providers/digitalocean/resource_digitalocean_domain.go
index d7c4edca13..657acb21df 100644
--- a/builtin/providers/digitalocean/resource_digitalocean_domain.go
+++ b/builtin/providers/digitalocean/resource_digitalocean_domain.go
@@ -3,7 +3,6 @@ package digitalocean
import (
"fmt"
"log"
- "strings"
"github.com/digitalocean/godo"
"github.com/hashicorp/terraform/helper/schema"
@@ -56,11 +55,11 @@ func resourceDigitalOceanDomainCreate(d *schema.ResourceData, meta interface{})
func resourceDigitalOceanDomainRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*godo.Client)
- domain, _, err := client.Domains.Get(d.Id())
+ domain, resp, err := client.Domains.Get(d.Id())
if err != nil {
// If the domain is somehow already destroyed, mark as
// successfully gone
- if strings.Contains(err.Error(), "404 Not Found") {
+ if resp.StatusCode == 404 {
d.SetId("")
return nil
}
diff --git a/builtin/providers/digitalocean/resource_digitalocean_record.go b/builtin/providers/digitalocean/resource_digitalocean_record.go
index ebcb2e0f8f..1db6084bd3 100644
--- a/builtin/providers/digitalocean/resource_digitalocean_record.go
+++ b/builtin/providers/digitalocean/resource_digitalocean_record.go
@@ -115,11 +115,11 @@ func resourceDigitalOceanRecordRead(d *schema.ResourceData, meta interface{}) er
return fmt.Errorf("invalid record ID: %v", err)
}
- rec, _, err := client.Domains.Record(domain, id)
+ rec, resp, err := client.Domains.Record(domain, id)
if err != nil {
// If the record is somehow already destroyed, mark as
// successfully gone
- if strings.Contains(err.Error(), "404 Not Found") {
+ if resp.StatusCode == 404 {
d.SetId("")
return nil
}
@@ -183,15 +183,15 @@ func resourceDigitalOceanRecordDelete(d *schema.ResourceData, meta interface{})
log.Printf("[INFO] Deleting record: %s, %d", domain, id)
- _, err = client.Domains.DeleteRecord(domain, id)
- if err != nil {
+ resp, delErr := client.Domains.DeleteRecord(domain, id)
+ if delErr != nil {
// If the record is somehow already destroyed, mark as
// successfully gone
- if strings.Contains(err.Error(), "404 Not Found") {
+ if resp.StatusCode == 404 {
return nil
}
- return fmt.Errorf("Error deleting record: %s", err)
+ return fmt.Errorf("Error deleting record: %s", delErr)
}
return nil
diff --git a/builtin/providers/digitalocean/resource_digitalocean_ssh_key.go b/builtin/providers/digitalocean/resource_digitalocean_ssh_key.go
index d6eb96f09f..79614f5999 100644
--- a/builtin/providers/digitalocean/resource_digitalocean_ssh_key.go
+++ b/builtin/providers/digitalocean/resource_digitalocean_ssh_key.go
@@ -4,7 +4,6 @@ import (
"fmt"
"log"
"strconv"
- "strings"
"github.com/digitalocean/godo"
"github.com/hashicorp/terraform/helper/schema"
@@ -71,11 +70,11 @@ func resourceDigitalOceanSSHKeyRead(d *schema.ResourceData, meta interface{}) er
return fmt.Errorf("invalid SSH key id: %v", err)
}
- key, _, err := client.Keys.GetByID(id)
+ key, resp, err := client.Keys.GetByID(id)
if err != nil {
// If the key is somehow already destroyed, mark as
// successfully gone
- if strings.Contains(err.Error(), "404 Not Found") {
+ if resp.StatusCode == 404 {
d.SetId("")
return nil
}
From fae25a9ed8485aba839ffd1dc6e187bb1fe83bc7 Mon Sep 17 00:00:00 2001
From: Takaaki Furukawa
Date: Mon, 7 Dec 2015 10:49:04 +0900
Subject: [PATCH 137/664] Update a default example in VMware vSphere provider
---
website/source/docs/providers/vsphere/index.html.markdown | 3 +--
.../docs/providers/vsphere/r/virtual_machine.html.markdown | 3 +--
2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/website/source/docs/providers/vsphere/index.html.markdown b/website/source/docs/providers/vsphere/index.html.markdown
index db0edc1924..2fa79bd7e6 100644
--- a/website/source/docs/providers/vsphere/index.html.markdown
+++ b/website/source/docs/providers/vsphere/index.html.markdown
@@ -41,8 +41,7 @@ resource "vsphere_virtual_machine" "web" {
}
disk {
- size = 1
- iops = 500
+ template = "centos-7"
}
}
```
diff --git a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
index 003edaf463..605778503b 100644
--- a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
+++ b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
@@ -24,8 +24,7 @@ resource "vsphere_virtual_machine" "web" {
}
disk {
- size = 1
- iops = 500
+ template = "centos-7"
}
}
```
From 6e7c7b6a0bb227e808e1a94b86b598634c7b914b Mon Sep 17 00:00:00 2001
From: Andrew Teixeira
Date: Mon, 7 Dec 2015 11:40:41 -0500
Subject: [PATCH 138/664] Move pricing_plan and replication_type in
documentation to "settings" sublist as that is where the code looks for those
options
---
.../google/r/sql_database_instance.html.markdown | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/website/source/docs/providers/google/r/sql_database_instance.html.markdown b/website/source/docs/providers/google/r/sql_database_instance.html.markdown
index 7889f1448e..ca47db3b12 100644
--- a/website/source/docs/providers/google/r/sql_database_instance.html.markdown
+++ b/website/source/docs/providers/google/r/sql_database_instance.html.markdown
@@ -41,12 +41,6 @@ The following arguments are supported:
* `database_version` - (Optional, Default: `MYSQL_5_5`) The MySQL version to
use. Can be either `MYSQL_5_5` or `MYSQL_5_6`.
-* `pricing_plan` - (Optional) Pricing plan for this instance, can be one of
- `PER_USE` or `PACKAGE`.
-
-* `replication_type` - (Optional) Replication type for this instance, can be one of
- `ASYNCHRONOUS` or `SYNCHRONOUS`.
-
The required `settings` block supports:
* `tier` - (Required) The machine tier to use. See
@@ -62,6 +56,12 @@ The required `settings` block supports:
* `crash_safe_replication` - (Optional) Specific to read instances, indicates
when crash-safe replication flags are enabled.
+* `pricing_plan` - (Optional) Pricing plan for this instance, can be one of
+ `PER_USE` or `PACKAGE`.
+
+* `replication_type` - (Optional) Replication type for this instance, can be one of
+ `ASYNCHRONOUS` or `SYNCHRONOUS`.
+
The optional `settings.database_flags` sublist supports:
* `name` - (Optional) Name of the flag.
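Note: a hedged sketch of where these options now belong, inside the `settings` block (the instance name and tier are placeholders):

```
resource "google_sql_database_instance" "example" {
  name = "example-instance"

  settings {
    tier             = "D0"
    pricing_plan     = "PER_USE"
    replication_type = "SYNCHRONOUS"
  }
}
```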
From 5e54bcc6ffa0cc1ff684ded0698dbf1cd639f505 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Mon, 7 Dec 2015 11:16:29 -0600
Subject: [PATCH 139/664] Add test for iops with gp2, remove strict validation
---
.../providers/aws/resource_aws_ebs_volume.go | 8 ------
.../aws/resource_aws_ebs_volume_test.go | 28 +++++++++++++++++++
2 files changed, 28 insertions(+), 8 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_ebs_volume.go b/builtin/providers/aws/resource_aws_ebs_volume.go
index 0e016ecaac..856c94871e 100644
--- a/builtin/providers/aws/resource_aws_ebs_volume.go
+++ b/builtin/providers/aws/resource_aws_ebs_volume.go
@@ -37,14 +37,6 @@ func resourceAwsEbsVolume() *schema.Resource {
Optional: true,
Computed: true,
ForceNew: true,
- ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
- value := v.(int)
- if value < 100 {
- es = append(es, fmt.Errorf(
- "%q must be an integer, minimum value 100", k))
- }
- return
- },
},
"kms_key_id": &schema.Schema{
Type: schema.TypeString,
diff --git a/builtin/providers/aws/resource_aws_ebs_volume_test.go b/builtin/providers/aws/resource_aws_ebs_volume_test.go
index aab92eb011..fabcdb1a11 100644
--- a/builtin/providers/aws/resource_aws_ebs_volume_test.go
+++ b/builtin/providers/aws/resource_aws_ebs_volume_test.go
@@ -26,6 +26,22 @@ func TestAccAWSEBSVolume_basic(t *testing.T) {
})
}
+func TestAccAWSEBSVolume_Iops(t *testing.T) {
+ var v ec2.Volume
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAwsEbsVolumeConfigWithIops,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVolumeExists("aws_ebs_volume.iops_test", &v),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAWSEBSVolume_withTags(t *testing.T) {
var v ec2.Volume
resource.Test(t, resource.TestCase{
@@ -86,3 +102,15 @@ resource "aws_ebs_volume" "tags_test" {
}
}
`
+
+const testAccAwsEbsVolumeConfigWithIops = `
+resource "aws_ebs_volume" "iops_test" {
+ availability_zone = "us-west-2a"
+ size = 10
+ type = "gp2"
+ iops = 0
+ tags {
+ Name = "TerraformTest"
+ }
+}
+`
From 99244c559745b85164ee190854099a5fd8827edc Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 7 Dec 2015 11:27:04 -0600
Subject: [PATCH 140/664] helper/schema: skip provider input for deprecated
fields
There's no reason that a field that's been deprecated should ever
prompt.
fixes #4033
---
helper/schema/schema.go | 5 +++++
helper/schema/schema_test.go | 34 ++++++++++++++++++++++++++++++++++
2 files changed, 39 insertions(+)
diff --git a/helper/schema/schema.go b/helper/schema/schema.go
index 7ac0a65a2e..2bfaebaece 100644
--- a/helper/schema/schema.go
+++ b/helper/schema/schema.go
@@ -398,6 +398,11 @@ func (m schemaMap) Input(
continue
}
+ // Deprecated fields should never prompt
+ if v.Deprecated != "" {
+ continue
+ }
+
// Skip things that have a value of some sort already
if _, ok := c.Raw[k]; ok {
continue
diff --git a/helper/schema/schema_test.go b/helper/schema/schema_test.go
index c70827a5fd..0b0288da79 100644
--- a/helper/schema/schema_test.go
+++ b/helper/schema/schema_test.go
@@ -2567,6 +2567,40 @@ func TestSchemaMap_InputDefault(t *testing.T) {
}
}
+func TestSchemaMap_InputDeprecated(t *testing.T) {
+ emptyConfig := make(map[string]interface{})
+ c, err := config.NewRawConfig(emptyConfig)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ rc := terraform.NewResourceConfig(c)
+ rc.Config = make(map[string]interface{})
+
+ input := new(terraform.MockUIInput)
+ input.InputFn = func(opts *terraform.InputOpts) (string, error) {
+ t.Fatalf("InputFn should not be called on: %#v", opts)
+ return "", nil
+ }
+
+ schema := map[string]*Schema{
+ "availability_zone": &Schema{
+ Type: TypeString,
+ Deprecated: "long gone",
+ Optional: true,
+ },
+ }
+ actual, err := schemaMap(schema).Input(input, rc)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := map[string]interface{}{}
+
+ if !reflect.DeepEqual(expected, actual.Config) {
+ t.Fatalf("got: %#v\nexpected: %#v", actual.Config, expected)
+ }
+}
+
func TestSchemaMap_InternalValidate(t *testing.T) {
cases := map[string]struct {
In map[string]*Schema
From d8022645bacdf0388550e9187413eb63e1563a73 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 7 Dec 2015 11:45:53 -0600
Subject: [PATCH 141/664] travis: remove go tip builds
I don't really see a good reason for running a build against Go tip.
This probably made more sense in the earlier days of Golang, but now
IMHO it's just wasting cycles for not much benefit.
/cc @jen20 @mitchellh
---
.travis.yml | 1 -
1 file changed, 1 deletion(-)
diff --git a/.travis.yml b/.travis.yml
index c36571ca10..e600013027 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,7 +4,6 @@ language: go
go:
- 1.5
- - tip
install: make updatedeps
From e60d6915bc45b7b73e2bb7da986eaaaebbfd06f9 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 7 Dec 2015 11:55:56 -0600
Subject: [PATCH 142/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index dd554a3524..770b6067d1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ IMPROVEMENTS:
BUG FIXES:
+ * core: skip provider input for deprecated fields [GH-4193]
* provider/docker: Fix an issue running with Docker Swarm by looking up containers by ID instead of name [GH-4148]
* provider/openstack: Better handling of load balancing resource state changes [GH-3926]
* provider/aws: Skip `source_security_group_id` determination logic for Classic ELBs [GH-4075]
From 7bf404619c0ffaa756f22e58769b1f62327802ed Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Mon, 7 Dec 2015 14:49:44 -0600
Subject: [PATCH 143/664] adjust the ebs validation to not error, only log, and
only set iops for io1
---
.../providers/aws/resource_aws_ebs_volume.go | 33 ++++++++++++++-----
.../aws/resource_aws_ebs_volume_test.go | 6 ++--
2 files changed, 27 insertions(+), 12 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_ebs_volume.go b/builtin/providers/aws/resource_aws_ebs_volume.go
index 856c94871e..3046ac46c6 100644
--- a/builtin/providers/aws/resource_aws_ebs_volume.go
+++ b/builtin/providers/aws/resource_aws_ebs_volume.go
@@ -85,17 +85,24 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error
if value, ok := d.GetOk("snapshot_id"); ok {
request.SnapshotId = aws.String(value.(string))
}
+
+ // IOPs are only valid, and required for, storage type io1. The current minimum
+ // is 100. Instead of a hard validation we only apply the IOPs to the
+ // request if the type is io1, and log a warning otherwise. This allows users
+ // to "disable" iops. See https://github.com/hashicorp/terraform/pull/4146
var t string
if value, ok := d.GetOk("type"); ok {
t = value.(string)
request.VolumeType = aws.String(t)
}
- if value, ok := d.GetOk("iops"); ok {
- if t == "io1" {
- request.Iops = aws.Int64(int64(value.(int)))
- } else {
- return fmt.Errorf("iops is only valid for EBS Volume of type io1")
- }
+
+ iops := d.Get("iops").(int)
+ if t != "io1" && iops > 0 {
+ log.Printf("[WARN] IOPs is only valid for storate type io1 for EBS Volumes")
+ } else if t == "io1" {
+ // We add the iops value without validating its size, to allow AWS to
+ // enforce a size requirement (currently 100)
+ request.Iops = aws.Int64(int64(iops))
}
log.Printf(
@@ -206,9 +213,6 @@ func readVolume(d *schema.ResourceData, volume *ec2.Volume) error {
if volume.Encrypted != nil {
d.Set("encrypted", *volume.Encrypted)
}
- if volume.Iops != nil {
- d.Set("iops", *volume.Iops)
- }
if volume.KmsKeyId != nil {
d.Set("kms_key_id", *volume.KmsKeyId)
}
@@ -221,6 +225,17 @@ func readVolume(d *schema.ResourceData, volume *ec2.Volume) error {
if volume.VolumeType != nil {
d.Set("type", *volume.VolumeType)
}
+
+ if volume.VolumeType != nil && *volume.VolumeType == "io1" {
+ // Only set the iops attribute if the volume type is io1. Setting otherwise
+ // can trigger a refresh/plan loop based on the computed value that is given
+ // from AWS, and prevent us from specifying 0 as a valid iops.
+ // See https://github.com/hashicorp/terraform/pull/4146
+ if volume.Iops != nil {
+ d.Set("iops", *volume.Iops)
+ }
+ }
+
if volume.Tags != nil {
d.Set("tags", tagsToMap(volume.Tags))
}
diff --git a/builtin/providers/aws/resource_aws_ebs_volume_test.go b/builtin/providers/aws/resource_aws_ebs_volume_test.go
index fabcdb1a11..940c8157ca 100644
--- a/builtin/providers/aws/resource_aws_ebs_volume_test.go
+++ b/builtin/providers/aws/resource_aws_ebs_volume_test.go
@@ -26,14 +26,14 @@ func TestAccAWSEBSVolume_basic(t *testing.T) {
})
}
-func TestAccAWSEBSVolume_Iops(t *testing.T) {
+func TestAccAWSEBSVolume_NoIops(t *testing.T) {
var v ec2.Volume
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccAwsEbsVolumeConfigWithIops,
+ Config: testAccAwsEbsVolumeConfigWithNoIops,
Check: resource.ComposeTestCheckFunc(
testAccCheckVolumeExists("aws_ebs_volume.iops_test", &v),
),
@@ -103,7 +103,7 @@ resource "aws_ebs_volume" "tags_test" {
}
`
-const testAccAwsEbsVolumeConfigWithIops = `
+const testAccAwsEbsVolumeConfigWithNoIops = `
resource "aws_ebs_volume" "iops_test" {
availability_zone = "us-west-2a"
size = 10
From aa97300a3b15b615723ccb8d9683f229a39fa2ef Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Kristinn=20=C3=96rn=20Sigur=C3=B0sson?=
Date: Thu, 3 Dec 2015 14:59:16 +0100
Subject: [PATCH 144/664] Create and attach additional disks before bootup.
Additional disks weren't created and attached to cloned vms until after bootup.
I also fixed some typos.
---
.../resource_vsphere_virtual_machine.go | 23 ++++++++++---------
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index 4fbd66b891..5d14561d80 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -271,7 +271,7 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
if custom_configs, ok := vL.(map[string]interface{}); ok {
custom := make(map[string]types.AnyType)
- for k,v := range custom_configs {
+ for k, v := range custom_configs {
custom[k] = v
}
vm.customConfigurations = custom
@@ -763,7 +763,7 @@ func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.D
return datastore, nil
}
-// createVirtualMchine creates a new VirtualMachine.
+// createVirtualMachine creates a new VirtualMachine.
func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
dc, err := getDatacenter(c, vm.datacenter)
if err != nil {
@@ -831,7 +831,7 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
Key: key,
Value: &value,
}
- log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k,v)
+ log.Printf("[DEBUG] virtual machine Extra Config spec: %s,%s", k, v)
ov = append(ov, &o)
}
configSpec.ExtraConfig = ov
@@ -914,7 +914,7 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
return nil
}
-// deployVirtualMchine deploys a new VirtualMachine.
+// deployVirtualMachine deploys a new VirtualMachine.
func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
dc, err := getDatacenter(c, vm.datacenter)
if err != nil {
@@ -1135,6 +1135,14 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
}
log.Printf("[DEBUG]VM customization finished")
+ for i := 1; i < len(vm.hardDisks); i++ {
+ err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, "eager_zeroed")
+ if err != nil {
+ return err
+ }
+ }
+ log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
+
newVM.PowerOn(context.TODO())
ip, err := newVM.WaitForIP(context.TODO())
@@ -1143,12 +1151,5 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
}
log.Printf("[DEBUG] ip address: %v", ip)
- for i := 1; i < len(vm.hardDisks); i++ {
- err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, "eager_zeroed")
- if err != nil {
- return err
- }
- }
- log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)
return nil
}
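Note: a hedged sketch of a cloned VM with an additional disk; the `name`, `vcpu`, and `memory` arguments are assumed from the provider's usual examples and the values are placeholders. With this change, the second disk is created and attached before the VM is powered on:

```
resource "vsphere_virtual_machine" "web" {
  name   = "terraform-web"
  vcpu   = 2
  memory = 4096

  disk {
    template = "centos-7"
  }

  disk {
    size = 1
    iops = 500
  }
}
```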
From d69abbaeda737b8585b4309e7ef9569f554ba7fb Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Mon, 7 Dec 2015 15:24:34 -0600
Subject: [PATCH 145/664] Run go fmt on VMWare vCloud Director provider
---
builtin/providers/vcd/config.go | 14 +++++++-------
builtin/providers/vcd/provider.go | 10 +++++-----
.../vcd/resource_vcd_firewall_rules_test.go | 10 +++++-----
3 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/builtin/providers/vcd/config.go b/builtin/providers/vcd/config.go
index c6b5ba509a..44403146e4 100644
--- a/builtin/providers/vcd/config.go
+++ b/builtin/providers/vcd/config.go
@@ -8,11 +8,11 @@ import (
)
type Config struct {
- User string
- Password string
- Org string
- Href string
- VDC string
+ User string
+ Password string
+ Org string
+ Href string
+ VDC string
MaxRetryTimeout int
}
@@ -28,8 +28,8 @@ func (c *Config) Client() (*VCDClient, error) {
}
vcdclient := &VCDClient{
- govcd.NewVCDClient(*u),
- c.MaxRetryTimeout}
+ govcd.NewVCDClient(*u),
+ c.MaxRetryTimeout}
org, vcd, err := vcdclient.Authenticate(c.User, c.Password, c.Org, c.VDC)
if err != nil {
return nil, fmt.Errorf("Something went wrong: %s", err)
diff --git a/builtin/providers/vcd/provider.go b/builtin/providers/vcd/provider.go
index aab15cedd3..6ba1a07a6e 100644
--- a/builtin/providers/vcd/provider.go
+++ b/builtin/providers/vcd/provider.go
@@ -66,11 +66,11 @@ func Provider() terraform.ResourceProvider {
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
config := Config{
- User: d.Get("user").(string),
- Password: d.Get("password").(string),
- Org: d.Get("org").(string),
- Href: d.Get("url").(string),
- VDC: d.Get("vdc").(string),
+ User: d.Get("user").(string),
+ Password: d.Get("password").(string),
+ Org: d.Get("org").(string),
+ Href: d.Get("url").(string),
+ VDC: d.Get("vdc").(string),
MaxRetryTimeout: d.Get("maxRetryTimeout").(int),
}
diff --git a/builtin/providers/vcd/resource_vcd_firewall_rules_test.go b/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
index 2c1fa69e6b..1cb2d1e3ad 100644
--- a/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
+++ b/builtin/providers/vcd/resource_vcd_firewall_rules_test.go
@@ -72,11 +72,11 @@ func testAccCheckVcdFirewallRulesAttributes(newRules, existingRules *govcd.EdgeG
func createFirewallRulesConfigs(existingRules *govcd.EdgeGateway) string {
config := Config{
- User: os.Getenv("VCD_USER"),
- Password: os.Getenv("VCD_PASSWORD"),
- Org: os.Getenv("VCD_ORG"),
- Href: os.Getenv("VCD_URL"),
- VDC: os.Getenv("VCD_VDC"),
+ User: os.Getenv("VCD_USER"),
+ Password: os.Getenv("VCD_PASSWORD"),
+ Org: os.Getenv("VCD_ORG"),
+ Href: os.Getenv("VCD_URL"),
+ VDC: os.Getenv("VCD_VDC"),
MaxRetryTimeout: 240,
}
conn, err := config.Client()
From cf87642bc81490ec430279c85c1a66a4824c991c Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Mon, 7 Dec 2015 16:33:37 -0600
Subject: [PATCH 146/664] provider/aws: Fix issue destroy Route 53 zone/record
if it no longer exists
---
builtin/providers/aws/resource_aws_route53_record.go | 10 ++++++++++
builtin/providers/aws/resource_aws_route53_zone.go | 5 +++++
2 files changed, 15 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_route53_record.go b/builtin/providers/aws/resource_aws_route53_record.go
index cf99b9b9b3..a6c88ade40 100644
--- a/builtin/providers/aws/resource_aws_route53_record.go
+++ b/builtin/providers/aws/resource_aws_route53_record.go
@@ -245,6 +245,11 @@ func resourceAwsRoute53RecordRead(d *schema.ResourceData, meta interface{}) erro
// get expanded name
zoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)})
if err != nil {
+ if r53err, ok := err.(awserr.Error); ok && r53err.Code() == "NoSuchHostedZone" {
+ log.Printf("[DEBUG] No matching Route 53 Record found for: %s, removing from state file", d.Id())
+ d.SetId("")
+ return nil
+ }
return err
}
en := expandRecordName(d.Get("name").(string), *zoneRecord.HostedZone.Name)
@@ -312,6 +317,11 @@ func resourceAwsRoute53RecordDelete(d *schema.ResourceData, meta interface{}) er
var err error
zoneRecord, err := conn.GetHostedZone(&route53.GetHostedZoneInput{Id: aws.String(zone)})
if err != nil {
+ if r53err, ok := err.(awserr.Error); ok && r53err.Code() == "NoSuchHostedZone" {
+ log.Printf("[DEBUG] No matching Route 53 Record found for: %s, removing from state file", d.Id())
+ d.SetId("")
+ return nil
+ }
return err
}
// Get the records
diff --git a/builtin/providers/aws/resource_aws_route53_zone.go b/builtin/providers/aws/resource_aws_route53_zone.go
index 50478bfdb8..b846737ad6 100644
--- a/builtin/providers/aws/resource_aws_route53_zone.go
+++ b/builtin/providers/aws/resource_aws_route53_zone.go
@@ -213,6 +213,11 @@ func resourceAwsRoute53ZoneDelete(d *schema.ResourceData, meta interface{}) erro
d.Get("name").(string), d.Id())
_, err := r53.DeleteHostedZone(&route53.DeleteHostedZoneInput{Id: aws.String(d.Id())})
if err != nil {
+ if r53err, ok := err.(awserr.Error); ok && r53err.Code() == "NoSuchHostedZone" {
+ log.Printf("[DEBUG] No matching Route 53 Zone found for: %s, removing from state file", d.Id())
+ d.SetId("")
+ return nil
+ }
return err
}
From c3f6bbcece0582b5056163b56ee78b97522572d8 Mon Sep 17 00:00:00 2001
From: Joe Topjian
Date: Tue, 8 Dec 2015 05:47:34 +0000
Subject: [PATCH 147/664] provider/openstack: Handle volumes in "deleting"
state
This commit prevents Terraform from erroring when an attempt is made
to delete a volume already in a "deleting" state. This can happen when
the volume is the root disk of an instance and the instance was
terminated.
---
.../resource_openstack_blockstorage_volume_v1.go | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go
index f8fde11eff..bbd65c7310 100644
--- a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go
+++ b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go
@@ -259,9 +259,13 @@ func resourceBlockStorageVolumeV1Delete(d *schema.ResourceData, meta interface{}
}
}
- err = volumes.Delete(blockStorageClient, d.Id()).ExtractErr()
- if err != nil {
- return fmt.Errorf("Error deleting OpenStack volume: %s", err)
+ // It's possible that this volume was used as a boot device and is currently
+ // in a "deleting" state from when the instance was terminated.
+ // If this is true, just move on. It'll eventually delete.
+ if v.Status != "deleting" {
+ if err := volumes.Delete(blockStorageClient, d.Id()).ExtractErr(); err != nil {
+ return CheckDeleted(d, err, "volume")
+ }
}
// Wait for the volume to delete before moving on.
From 1cd2fea1f911522dd167d6d296d2338744ec6999 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philipp=20Pre=C3=9F?=
Date: Tue, 8 Dec 2015 14:55:17 +0100
Subject: [PATCH 148/664] Fix AWS RDS cross-region read replica always being
placed in default VPC
Fixes #4192 by honouring the `db_subnet_group_name` parameter when creating a cross-region read replica.
---
builtin/providers/aws/resource_aws_db_instance.go | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go
index bd566b8a54..f0649d582f 100644
--- a/builtin/providers/aws/resource_aws_db_instance.go
+++ b/builtin/providers/aws/resource_aws_db_instance.go
@@ -271,6 +271,7 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)),
DBInstanceClass: aws.String(d.Get("instance_class").(string)),
DBInstanceIdentifier: aws.String(d.Get("identifier").(string)),
+ DBSubnetGroupName: aws.String(d.Get("db_subnet_group_name").(string)),
Tags: tags,
}
if attr, ok := d.GetOk("iops"); ok {
@@ -288,6 +289,10 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
if attr, ok := d.GetOk("publicly_accessible"); ok {
opts.PubliclyAccessible = aws.Bool(attr.(bool))
}
+
+ if attr, ok := d.GetOk("db_subnet_group_name"); ok {
+ opts.DBSubnetGroupName = aws.String(attr.(string))
+ }
_, err := conn.CreateDBInstanceReadReplica(&opts)
if err != nil {
return fmt.Errorf("Error creating DB Instance: %s", err)
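Note: a hedged sketch of the configuration this fix targets — a cross-region read replica pinned to an explicit subnet group (the identifier, source ARN, and subnet group name are placeholders):

```
resource "aws_db_instance" "replica" {
  identifier           = "replica-example"
  instance_class       = "db.t2.micro"
  replicate_source_db  = "arn:aws:rds:us-east-1:123456789012:db:source-example"
  db_subnet_group_name = "private-subnet-group"
}
```

Previously the `db_subnet_group_name` value was ignored for read replicas, so the replica was always placed in the default VPC.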
From d46348c23305e16ed5dcc2520a922f3a188f6b9d Mon Sep 17 00:00:00 2001
From: stack72
Date: Tue, 8 Dec 2015 14:07:11 +0000
Subject: [PATCH 149/664] Adding support for AWS DynamoDB Table for
StreamSpecifications
---
.../aws/resource_aws_dynamodb_table.go | 49 +++++++++++++++
.../aws/resource_aws_dynamodb_table_test.go | 61 +++++++++++++++++++
.../aws/r/dynamodb_table.html.markdown | 2 +
3 files changed, 112 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go
index 88146662b5..d408101e46 100644
--- a/builtin/providers/aws/resource_aws_dynamodb_table.go
+++ b/builtin/providers/aws/resource_aws_dynamodb_table.go
@@ -12,6 +12,7 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/hashicorp/terraform/helper/hashcode"
+ "strings"
)
// Number of times to retry if a throttling-related exception occurs
@@ -158,6 +159,20 @@ func resourceAwsDynamoDbTable() *schema.Resource {
return hashcode.String(buf.String())
},
},
+ "stream_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Computed: true,
+ },
+ "stream_view_type": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ StateFunc: func(v interface{}) string {
+ value := v.(string)
+ return strings.ToUpper(value)
+ },
+ },
},
}
}
@@ -263,6 +278,16 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er
req.GlobalSecondaryIndexes = globalSecondaryIndexes
}
+ if _, ok := d.GetOk("stream_enabled"); ok {
+
+ req.StreamSpecification = &dynamodb.StreamSpecification{
+ StreamEnabled: aws.Bool(d.Get("stream_enabled").(bool)),
+ StreamViewType: aws.String(d.Get("stream_view_type").(string)),
+ }
+
+ fmt.Printf("[DEBUG] Adding StreamSpecifications to the table")
+ }
+
attemptCount := 1
for attemptCount <= DYNAMODB_MAX_THROTTLE_RETRIES {
output, err := dynamodbconn.CreateTable(req)
@@ -340,6 +365,25 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
waitForTableToBeActive(d.Id(), meta)
}
+ if d.HasChange("stream_enabled") || d.HasChange("stream_view_type") {
+ req := &dynamodb.UpdateTableInput{
+ TableName: aws.String(d.Id()),
+ }
+
+ req.StreamSpecification = &dynamodb.StreamSpecification{
+ StreamEnabled: aws.Bool(d.Get("stream_enabled").(bool)),
+ StreamViewType: aws.String(d.Get("stream_view_type").(string)),
+ }
+
+ _, err := dynamodbconn.UpdateTable(req)
+
+ if err != nil {
+ return err
+ }
+
+ waitForTableToBeActive(d.Id(), meta)
+ }
+
if d.HasChange("global_secondary_index") {
log.Printf("[DEBUG] Changed GSI data")
req := &dynamodb.UpdateTableInput{
@@ -587,6 +631,11 @@ func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) erro
log.Printf("[DEBUG] Added GSI: %s - Read: %d / Write: %d", gsi["name"], gsi["read_capacity"], gsi["write_capacity"])
}
+ if table.StreamSpecification != nil {
+ d.Set("stream_view_type", table.StreamSpecification.StreamViewType)
+ d.Set("stream_enabled", table.StreamSpecification.StreamEnabled)
+ }
+
err = d.Set("global_secondary_index", gsiList)
if err != nil {
return err
diff --git a/builtin/providers/aws/resource_aws_dynamodb_table_test.go b/builtin/providers/aws/resource_aws_dynamodb_table_test.go
index adf457f0a6..890243cf37 100644
--- a/builtin/providers/aws/resource_aws_dynamodb_table_test.go
+++ b/builtin/providers/aws/resource_aws_dynamodb_table_test.go
@@ -33,6 +33,26 @@ func TestAccAWSDynamoDbTable(t *testing.T) {
})
}
+func TestAccAWSDynamoDbTable_streamSpecification(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSDynamoDbTableDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSDynamoDbConfigStreamSpecification,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table"),
+ resource.TestCheckResourceAttr(
+ "aws_dynamodb_table.basic-dynamodb-table", "stream_enabled", "true"),
+ resource.TestCheckResourceAttr(
+ "aws_dynamodb_table.basic-dynamodb-table", "stream_view_type", "KEYS_ONLY"),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckAWSDynamoDbTableDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).dynamodbconn
@@ -295,3 +315,44 @@ resource "aws_dynamodb_table" "basic-dynamodb-table" {
}
}
`
+
+const testAccAWSDynamoDbConfigStreamSpecification = `
+resource "aws_dynamodb_table" "basic-dynamodb-table" {
+ name = "TerraformTestStreamTable"
+ read_capacity = 10
+ write_capacity = 20
+ hash_key = "TestTableHashKey"
+ range_key = "TestTableRangeKey"
+ attribute {
+ name = "TestTableHashKey"
+ type = "S"
+ }
+ attribute {
+ name = "TestTableRangeKey"
+ type = "S"
+ }
+ attribute {
+ name = "TestLSIRangeKey"
+ type = "N"
+ }
+ attribute {
+ name = "TestGSIRangeKey"
+ type = "S"
+ }
+ local_secondary_index {
+ name = "TestTableLSI"
+ range_key = "TestLSIRangeKey"
+ projection_type = "ALL"
+ }
+ global_secondary_index {
+ name = "InitialTestTableGSI"
+ hash_key = "TestTableHashKey"
+ range_key = "TestGSIRangeKey"
+ write_capacity = 10
+ read_capacity = 10
+ projection_type = "KEYS_ONLY"
+ }
+ stream_enabled = true
+ stream_view_type = "KEYS_ONLY"
+}
+`
diff --git a/website/source/docs/providers/aws/r/dynamodb_table.html.markdown b/website/source/docs/providers/aws/r/dynamodb_table.html.markdown
index 15b25119ac..b2b5f8507f 100644
--- a/website/source/docs/providers/aws/r/dynamodb_table.html.markdown
+++ b/website/source/docs/providers/aws/r/dynamodb_table.html.markdown
@@ -84,6 +84,8 @@ parameter.
* `non_key_attributes` - (Optional) Only required with *INCLUDE* as a
projection type; a list of attributes to project into the index. These
do not need to be defined as attributes on the table.
+* `stream_enabled` - (Optional) Indicates whether Streams is to be enabled (true) or disabled (false).
+* `stream_view_type` - (Optional) When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values are KEYS_ONLY, NEW_IMAGE, OLD_IMAGE, NEW_AND_OLD_IMAGES.
For `global_secondary_index` objects only, you need to specify
`write_capacity` and `read_capacity` in the same way you would for the
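Note: a minimal sketch of enabling a stream on a table, mirroring the acceptance test config added above (table and key names are placeholders):

```
resource "aws_dynamodb_table" "example" {
  name           = "ExampleTable"
  read_capacity  = 10
  write_capacity = 10
  hash_key       = "Id"

  attribute {
    name = "Id"
    type = "S"
  }

  stream_enabled   = true
  stream_view_type = "NEW_AND_OLD_IMAGES"
}
```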
From a4c62673ee88d5e99458cbf7c1ca69fada50b389 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philipp=20Pre=C3=9F?=
Date: Tue, 8 Dec 2015 15:09:04 +0100
Subject: [PATCH 150/664] Remove redundant -build-toolchain for gox
Running `make release` on the provided Vagrant machine fails with an error.
Removing the redundant `-build-toolchain` flag fixes it.
---
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index fea0478cdc..80a0b836c8 100644
--- a/Makefile
+++ b/Makefile
@@ -28,7 +28,7 @@ plugin-dev: generate
mv $(GOPATH)/bin/$(PLUGIN) $(GOPATH)/bin/terraform-$(PLUGIN)
release: updatedeps
- gox -build-toolchain
+ gox
@$(MAKE) bin
# test runs the unit tests and vets the code
From a0a89ecde80fb3c0c217f0ac17a44c6f79e4e3f0 Mon Sep 17 00:00:00 2001
From: stack72
Date: Tue, 8 Dec 2015 14:13:26 +0000
Subject: [PATCH 151/664] Documenting the version upgrade flags on db_instance
---
website/source/docs/providers/aws/r/db_instance.html.markdown | 2 ++
1 file changed, 2 insertions(+)
diff --git a/website/source/docs/providers/aws/r/db_instance.html.markdown b/website/source/docs/providers/aws/r/db_instance.html.markdown
index 55d13e250f..2b7f553696 100644
--- a/website/source/docs/providers/aws/r/db_instance.html.markdown
+++ b/website/source/docs/providers/aws/r/db_instance.html.markdown
@@ -82,6 +82,8 @@ database, and to use this value as the source database. This correlates to the
more information on using Replication.
* `snapshot_identifier` - (Optional) Specifies whether or not to create this database from a snapshot. This correlates to the snapshot ID you'd find in the RDS console, e.g: rds:production-2015-06-26-06-05.
* `license_model` - (Optional, but required for some DB engines, i.e. Oracle SE1) License model information for this DB instance.
+* `auto_minor_version_upgrade` - (Optional) Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window. Defaults to true.
+* `auto_major_version_upgrade` - (Optional) Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.
~> **NOTE:** Removing the `replicate_source_db` attribute from an existing RDS
Replicate database managed by Terraform will promote the database to a fully
From c041f221920d3d6a0123a2bd51a34a55c5066db9 Mon Sep 17 00:00:00 2001
From: Clint
Date: Tue, 8 Dec 2015 08:44:46 -0600
Subject: [PATCH 152/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 770b6067d1..0ff594a2b6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -28,6 +28,7 @@ BUG FIXES:
* provider/docker: Fix an issue running with Docker Swarm by looking up containers by ID instead of name [GH-4148]
* provider/openstack: Better handling of load balancing resource state changes [GH-3926]
* provider/aws: Skip `source_security_group_id` determination logic for Classic ELBs [GH-4075]
+ * provider/aws: Fix issue destroy Route 53 zone/record if it no longer exists [GH-4198]
## 0.6.8 (December 2, 2015)
From 4b5cb0747559aa070d2c6ec7975e6c6f5259c2ea Mon Sep 17 00:00:00 2001
From: stack72
Date: Tue, 8 Dec 2015 16:29:13 +0000
Subject: [PATCH 153/664] Fixing the digitalocean floatingip resource for a
panic when droplet wasn't available
---
.../digitalocean/resource_digitalocean_floating_ip.go | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/digitalocean/resource_digitalocean_floating_ip.go b/builtin/providers/digitalocean/resource_digitalocean_floating_ip.go
index 03e4b07467..bb8bee00ba 100644
--- a/builtin/providers/digitalocean/resource_digitalocean_floating_ip.go
+++ b/builtin/providers/digitalocean/resource_digitalocean_floating_ip.go
@@ -83,8 +83,10 @@ func resourceDigitalOceanFloatingIpRead(d *schema.ResourceData, meta interface{}
}
if _, ok := d.GetOk("droplet_id"); ok {
- log.Printf("[INFO] The region of the Droplet is %s", floatingIp.Droplet.Region)
- d.Set("region", floatingIp.Droplet.Region.Slug)
+ if floatingIp.Droplet != nil {
+ log.Printf("[INFO] The region of the Droplet is %s", floatingIp.Droplet.Region)
+ d.Set("region", floatingIp.Droplet.Region.Slug)
+ }
} else {
d.Set("region", floatingIp.Region.Slug)
}
From 6460f235931ed9adb082207bc99e0e9a83e0a78b Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 8 Dec 2015 11:39:52 -0500
Subject: [PATCH 154/664] Update CHANGELOG.md
---
CHANGELOG.md | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0ff594a2b6..571ab0b303 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,7 +11,7 @@ IMPROVEMENTS:
* core: Change set internals for performance improvements [GH-3992]
* core: Support HTTP basic auth in consul remote state [GH-4166]
- * provider/aws: Add placement_group as an option for `aws_autoscaling_group` [GH-3704]
+ * provider/aws: Add `placement_group` as an option for `aws_autoscaling_group` [GH-3704]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting the restart policy on `docker_container` resources [GH-3761]
@@ -29,6 +29,7 @@ BUG FIXES:
* provider/openstack: Better handling of load balancing resource state changes [GH-3926]
* provider/aws: Skip `source_security_group_id` determination logic for Classic ELBs [GH-4075]
* provider/aws: Fix issue destroy Route 53 zone/record if it no longer exists [GH-4198]
+ * provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
## 0.6.8 (December 2, 2015)
From 0619898f6a6692b2cd5b0da96987da9cd67c681a Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 8 Dec 2015 10:52:17 -0600
Subject: [PATCH 155/664] provider/aws: Add arn to DB Instance output
---
builtin/providers/aws/resource_aws_db_instance.go | 6 ++++++
.../source/docs/providers/aws/r/db_instance.html.markdown | 1 +
2 files changed, 7 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go
index f0649d582f..517fc738dd 100644
--- a/builtin/providers/aws/resource_aws_db_instance.go
+++ b/builtin/providers/aws/resource_aws_db_instance.go
@@ -31,6 +31,11 @@ func resourceAwsDbInstance() *schema.Resource {
ForceNew: true,
},
+ "arn": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
"username": &schema.Schema{
Type: schema.TypeString,
Required: true,
@@ -553,6 +558,7 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error {
}
log.Printf("[DEBUG] Error building ARN for DB Instance, not setting Tags for DB %s", name)
} else {
+ d.Set("arn", arn)
resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
ResourceName: aws.String(arn),
})
diff --git a/website/source/docs/providers/aws/r/db_instance.html.markdown b/website/source/docs/providers/aws/r/db_instance.html.markdown
index 55d13e250f..1c75f977a8 100644
--- a/website/source/docs/providers/aws/r/db_instance.html.markdown
+++ b/website/source/docs/providers/aws/r/db_instance.html.markdown
@@ -93,6 +93,7 @@ The following attributes are exported:
* `id` - The RDS instance ID.
* `address` - The address of the RDS instance.
+* `arn` - The ARN of the RDS instance.
* `allocated_storage` - The amount of allocated storage
* `availability_zone` - The availability zone of the instance
* `backup_retention_period` - The backup retention period
From e52af33db7b2d03a34adb560eb4e37535a9a91d3 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 8 Dec 2015 10:55:57 -0600
Subject: [PATCH 156/664] remove extra DBSubnetGroupName
---
builtin/providers/aws/resource_aws_db_instance.go | 1 -
1 file changed, 1 deletion(-)
diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go
index 517fc738dd..3e9cb9d809 100644
--- a/builtin/providers/aws/resource_aws_db_instance.go
+++ b/builtin/providers/aws/resource_aws_db_instance.go
@@ -276,7 +276,6 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)),
DBInstanceClass: aws.String(d.Get("instance_class").(string)),
DBInstanceIdentifier: aws.String(d.Get("identifier").(string)),
- DBSubnetGroupName: aws.String(d.Get("db_subnet_group_name").(string)),
Tags: tags,
}
if attr, ok := d.GetOk("iops"); ok {
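The assignment removed above set `DBSubnetGroupName` unconditionally in the create input, so an empty string was sent whenever the argument was not configured (the subject suggests the field is already populated elsewhere). A minimal sketch of the guarded pattern used for optional arguments such as `iops` just below; the helper name here is hypothetical:

```go
// Illustrative sketch, not part of the patch: optional RDS arguments are copied
// into the API input only when the user actually set them, via a d.GetOk guard.
package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
	"github.com/hashicorp/terraform/helper/schema"
)

// setOptionalSubnetGroup is a hypothetical helper showing the guard pattern.
func setOptionalSubnetGroup(d *schema.ResourceData, opts *rds.CreateDBInstanceInput) {
	if attr, ok := d.GetOk("db_subnet_group_name"); ok {
		opts.DBSubnetGroupName = aws.String(attr.(string))
	}
}
```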
From 22388139a3768492db346fabf24e937686d09ed1 Mon Sep 17 00:00:00 2001
From: stack72
Date: Tue, 8 Dec 2015 16:52:41 +0000
Subject: [PATCH 157/664] Changing the DigitalOcean FloatingIP Read func to
check for Droplet Region and then fall back to FloatingIP Region
---
.../digitalocean/resource_digitalocean_floating_ip.go | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/builtin/providers/digitalocean/resource_digitalocean_floating_ip.go b/builtin/providers/digitalocean/resource_digitalocean_floating_ip.go
index bb8bee00ba..06a3760f8d 100644
--- a/builtin/providers/digitalocean/resource_digitalocean_floating_ip.go
+++ b/builtin/providers/digitalocean/resource_digitalocean_floating_ip.go
@@ -82,11 +82,10 @@ func resourceDigitalOceanFloatingIpRead(d *schema.ResourceData, meta interface{}
return fmt.Errorf("Error retrieving FloatingIP: %s", err)
}
- if _, ok := d.GetOk("droplet_id"); ok {
- if floatingIp.Droplet != nil {
- log.Printf("[INFO] The region of the Droplet is %s", floatingIp.Droplet.Region)
- d.Set("region", floatingIp.Droplet.Region.Slug)
- }
+ if floatingIp.Droplet != nil {
+ log.Printf("[INFO] A droplet was detected on the FloatingIP so setting the Region based on the Droplet")
+ log.Printf("[INFO] The region of the Droplet is %s", floatingIp.Droplet.Region.Slug)
+ d.Set("region", floatingIp.Droplet.Region.Slug)
} else {
d.Set("region", floatingIp.Region.Slug)
}
From 54ce59ebdd3d9cdfef20916faa2fd061d6bd006e Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 8 Dec 2015 12:44:43 -0500
Subject: [PATCH 158/664] Remove release target and document cross-compiling
Also document the `plugin-dev` and `core-dev` targets.
---
Makefile | 4 ----
README.md | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 50 insertions(+), 4 deletions(-)
diff --git a/Makefile b/Makefile
index 80a0b836c8..5ecb1ce010 100644
--- a/Makefile
+++ b/Makefile
@@ -27,10 +27,6 @@ plugin-dev: generate
go install github.com/hashicorp/terraform/builtin/bins/$(PLUGIN)
mv $(GOPATH)/bin/$(PLUGIN) $(GOPATH)/bin/terraform-$(PLUGIN)
-release: updatedeps
- gox
- @$(MAKE) bin
-
# test runs the unit tests and vets the code
test: generate
TF_ACC= go test $(TEST) $(TESTARGS) -timeout=30s -parallel=4
diff --git a/README.md b/README.md
index 7c811d7c78..16bc83d108 100644
--- a/README.md
+++ b/README.md
@@ -61,6 +61,18 @@ $ make test TEST=./terraform
...
```
+If you're working on a specific provider and only wish to rebuild that provider, you can use the `plugin-dev` target. For example, to build only the Azure provider:
+
+```sh
+$ make plugin-dev PLUGIN=provider-azure
+```
+
+If you're working on the core of Terraform, and only wish to rebuild that without rebuilding providers, you can use the `core-dev` target. It is important to note that some types of changes may require both core and providers to be rebuilt - for example work on the RPC interface. To build just the core of Terraform:
+
+```sh
+$ make core-dev
+```
+
### Acceptance Tests
Terraform also has a comprehensive [acceptance test](http://en.wikipedia.org/wiki/Acceptance_testing) suite covering most of the major features of the built-in providers.
@@ -85,3 +97,41 @@ TF_ACC=1 go test ./builtin/providers/aws -v -run=Vpc -timeout 90m
The `TEST` variable is required, and you should specify the folder where the provider is. The `TESTARGS` variable is recommended to filter down to a specific resource to test, since testing all of them at once can take a very long time.
Acceptance tests typically require other environment variables to be set for things such as access keys. The provider itself should error early and tell you what to set, so it is not documented here.
+
+### Cross Compilation and Building for Distribution
+
+If you wish to cross-compile Terraform for another architecture, you can set the `XC_OS` and `XC_ARCH` environment variables to values representing the target operating system and architecture before calling `make`. The output is placed in the `pkg` subdirectory tree both expanded in a directory representing the OS/architecture combination and as a ZIP archive.
+
+For example, to compile 64-bit Linux binaries on Mac OS X, you can run:
+
+```sh
+$ XC_OS=linux XC_ARCH=amd64 make bin
+...
+$ file pkg/linux_amd64/terraform
+terraform: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), statically linked, not stripped
+```
+
+`XC_OS` and `XC_ARCH` can be space separated lists representing different combinations of operating system and architecture. For example, to compile for both Linux and Mac OS X, targeting both 32- and 64-bit architectures, you can run:
+
+```sh
+$ XC_OS="linux darwin" XC_ARCH="386 amd64" make bin
+...
+$ tree ./pkg/ -P "terraform|*.zip"
+./pkg/
+├── darwin_386
+│ └── terraform
+├── darwin_386.zip
+├── darwin_amd64
+│ └── terraform
+├── darwin_amd64.zip
+├── linux_386
+│ └── terraform
+├── linux_386.zip
+├── linux_amd64
+│ └── terraform
+└── linux_amd64.zip
+
+4 directories, 8 files
+```
+
+_Note: Cross-compilation uses [gox](https://github.com/mitchellh/gox), which requires toolchains to be built with versions of Go prior to 1.5. In order to successfully cross-compile with older versions of Go, you will need to run `gox -build-toolchain` before running the commands detailed above._
From 9fc6c27de14d1ae4c27d19bcd57910d636385e16 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 8 Dec 2015 14:22:51 -0600
Subject: [PATCH 159/664] provider/aws: Check for empty instances in AWS
Instance RunInstances response
Fixes #4206
---
builtin/providers/aws/resource_aws_instance.go | 3 +++
1 file changed, 3 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_instance.go b/builtin/providers/aws/resource_aws_instance.go
index d096a45d6f..f9f2a29d72 100644
--- a/builtin/providers/aws/resource_aws_instance.go
+++ b/builtin/providers/aws/resource_aws_instance.go
@@ -370,6 +370,9 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
if err != nil {
return fmt.Errorf("Error launching source instance: %s", err)
}
+ if runResp == nil || len(runResp.Instances) == 0 {
+ return fmt.Errorf("Error launching source instance: no instances returned in response")
+ }
instance := runResp.Instances[0]
log.Printf("[INFO] Instance ID: %s", *instance.InstanceId)
From 6e32320e41308a9b0f5260065c1aa0a8664b390d Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 8 Dec 2015 14:40:24 -0600
Subject: [PATCH 160/664] provider/aws: switch test to use more accessible ami
Switch to an AMI that doesn't require marketplace terms acceptance.
---
builtin/providers/aws/resource_aws_ami_copy_test.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_ami_copy_test.go b/builtin/providers/aws/resource_aws_ami_copy_test.go
index 0a469a8e0e..029e9a5abd 100644
--- a/builtin/providers/aws/resource_aws_ami_copy_test.go
+++ b/builtin/providers/aws/resource_aws_ami_copy_test.go
@@ -169,9 +169,9 @@ resource "aws_subnet" "foo" {
resource "aws_instance" "test" {
// This AMI has one block device mapping, so we expect to have
// one snapshot in our created AMI.
- // This is an Amazon Linux HVM AMI. A public HVM AMI is required
+ // This is an Ubuntu Linux HVM AMI. A public HVM AMI is required
// because paravirtual images cannot be copied between accounts.
- ami = "ami-5449393e"
+ ami = "ami-0f8bce65"
instance_type = "t2.micro"
tags {
Name = "terraform-acc-ami-copy-victim"
From a6ca034968daa1570a051d863b90b5bdc1db090b Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 8 Dec 2015 14:46:08 -0600
Subject: [PATCH 161/664] provider/aws: run codecommit tests in us-east-1
It's only available there.
---
.../aws/resource_aws_codecommit_repository_test.go | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_codecommit_repository_test.go b/builtin/providers/aws/resource_aws_codecommit_repository_test.go
index 14fcdf3211..332e2b04a6 100644
--- a/builtin/providers/aws/resource_aws_codecommit_repository_test.go
+++ b/builtin/providers/aws/resource_aws_codecommit_repository_test.go
@@ -95,6 +95,9 @@ func testAccCheckCodeCommitRepositoryDestroy(s *terraform.State) error {
}
const testAccCodeCommitRepository_basic = `
+provider "aws" {
+ region = "us-east-1"
+}
resource "aws_codecommit_repository" "test" {
repository_name = "my_test_repository"
description = "This is a test description"
@@ -102,6 +105,9 @@ resource "aws_codecommit_repository" "test" {
`
const testAccCodeCommitRepository_withChanges = `
+provider "aws" {
+ region = "us-east-1"
+}
resource "aws_codecommit_repository" "test" {
repository_name = "my_test_repository"
description = "This is a test description - with changes"
From 8b79881deaf69f9ecdac9a44478c39b6b664a485 Mon Sep 17 00:00:00 2001
From: stack72
Date: Tue, 8 Dec 2015 20:58:06 +0000
Subject: [PATCH 162/664] Adding a validation function for the DynamoDb Table
StreamViewType
---
.../aws/resource_aws_dynamodb_table.go | 16 ++++++++
.../aws/resource_aws_dynamodb_table_test.go | 40 +++++++++++++++++++
2 files changed, 56 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go
index d408101e46..addf368eff 100644
--- a/builtin/providers/aws/resource_aws_dynamodb_table.go
+++ b/builtin/providers/aws/resource_aws_dynamodb_table.go
@@ -172,6 +172,7 @@ func resourceAwsDynamoDbTable() *schema.Resource {
value := v.(string)
return strings.ToUpper(value)
},
+ ValidateFunc: validateStreamViewType,
},
},
}
@@ -800,3 +801,18 @@ func waitForTableToBeActive(tableName string, meta interface{}) error {
return nil
}
+
+func validateStreamViewType(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+	viewTypes := map[string]bool{
+ "KEYS_ONLY": true,
+ "NEW_IMAGE": true,
+ "OLD_IMAGE": true,
+ "NEW_AND_OLD_IMAGES": true,
+ }
+
+ if !viewTypes[value] {
+		errors = append(errors, fmt.Errorf("%q must be a valid DynamoDB StreamViewType", k))
+ }
+ return
+}
diff --git a/builtin/providers/aws/resource_aws_dynamodb_table_test.go b/builtin/providers/aws/resource_aws_dynamodb_table_test.go
index 890243cf37..425cd204f9 100644
--- a/builtin/providers/aws/resource_aws_dynamodb_table_test.go
+++ b/builtin/providers/aws/resource_aws_dynamodb_table_test.go
@@ -53,6 +53,46 @@ func TestAccAWSDynamoDbTable_streamSpecification(t *testing.T) {
})
}
+func TestResourceAWSDynamoDbTableStreamViewType_validation(t *testing.T) {
+ cases := []struct {
+ Value string
+ ErrCount int
+ }{
+ {
+ Value: "KEYS-ONLY",
+ ErrCount: 1,
+ },
+ {
+ Value: "RANDOM-STRING",
+ ErrCount: 1,
+ },
+ {
+ Value: "KEYS_ONLY",
+ ErrCount: 0,
+ },
+ {
+ Value: "NEW_AND_OLD_IMAGES",
+ ErrCount: 0,
+ },
+ {
+ Value: "NEW_IMAGE",
+ ErrCount: 0,
+ },
+ {
+ Value: "OLD_IMAGE",
+ ErrCount: 0,
+ },
+ }
+
+ for _, tc := range cases {
+ _, errors := validateStreamViewType(tc.Value, "aws_dynamodb_table_stream_view_type")
+
+ if len(errors) != tc.ErrCount {
+ t.Fatalf("Expected the DynamoDB stream_view_type to trigger a validation error")
+ }
+ }
+}
+
func testAccCheckAWSDynamoDbTableDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).dynamodbconn
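A `ValidateFunc` receives the raw attribute value and its key, and returns warnings plus errors; validation fails when the error slice is non-empty, which is what the table-driven test above asserts. A small standalone sketch of that call contract (the validator body mirrors the one added by this patch):

```go
// Illustrative sketch, not part of the patch: invoking a schema ValidateFunc
// directly and inspecting the returned errors, the way the framework would.
package main

import "fmt"

func validateStreamViewType(v interface{}, k string) (ws []string, errors []error) {
	value := v.(string)
	viewTypes := map[string]bool{
		"KEYS_ONLY":          true,
		"NEW_IMAGE":          true,
		"OLD_IMAGE":          true,
		"NEW_AND_OLD_IMAGES": true,
	}
	if !viewTypes[value] {
		errors = append(errors, fmt.Errorf("%q must be a valid DynamoDB StreamViewType", k))
	}
	return
}

func main() {
	for _, v := range []string{"KEYS_ONLY", "KEYS-ONLY"} {
		_, errs := validateStreamViewType(v, "stream_view_type")
		fmt.Printf("%s -> %d validation error(s)\n", v, len(errs))
	}
}
```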
From 9a572e6d4ff2c2e6b69419255c9460069ad61802 Mon Sep 17 00:00:00 2001
From: Clint
Date: Tue, 8 Dec 2015 15:25:32 -0600
Subject: [PATCH 163/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 571ab0b303..ed00859369 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -29,6 +29,7 @@ BUG FIXES:
* provider/openstack: Better handling of load balancing resource state changes [GH-3926]
* provider/aws: Skip `source_security_group_id` determination logic for Classic ELBs [GH-4075]
* provider/aws: Fix issue destroy Route 53 zone/record if it no longer exists [GH-4198]
+ * provider/aws: Fix issue force destroying a versioned S3 bucket [GH-4168]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
## 0.6.8 (December 2, 2015)
From ef9838c7967186f585119eab0383650fd43ddf57 Mon Sep 17 00:00:00 2001
From: Nic Grayson
Date: Tue, 8 Dec 2015 16:03:33 -0600
Subject: [PATCH 164/664] increased openstack 10 minute timeouts to 30 minutes
---
.../openstack/resource_openstack_compute_instance_v2.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go
index d21e1afedb..f78a8d015a 100644
--- a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go
+++ b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go
@@ -404,7 +404,7 @@ func resourceComputeInstanceV2Create(d *schema.ResourceData, meta interface{}) e
Pending: []string{"BUILD"},
Target: "ACTIVE",
Refresh: ServerV2StateRefreshFunc(computeClient, server.ID),
- Timeout: 10 * time.Minute,
+ Timeout: 30 * time.Minute,
Delay: 10 * time.Second,
MinTimeout: 3 * time.Second,
}
@@ -791,7 +791,7 @@ func resourceComputeInstanceV2Delete(d *schema.ResourceData, meta interface{}) e
Pending: []string{"ACTIVE"},
Target: "DELETED",
Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()),
- Timeout: 10 * time.Minute,
+ Timeout: 30 * time.Minute,
Delay: 10 * time.Second,
MinTimeout: 3 * time.Second,
}
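The `Timeout` raised here bounds how long the state-change helper keeps polling the `Refresh` function before giving up, so slow OpenStack deployments now get 30 minutes instead of 10. A self-contained sketch of how such a config is typically driven (the refresh function here is a stand-in that flips to ACTIVE after a few seconds):

```go
// Illustrative sketch, not part of the patch: a StateChangeConf polls Refresh
// until the returned state matches Target or Timeout elapses.
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

func main() {
	start := time.Now()
	stateConf := &resource.StateChangeConf{
		Pending: []string{"BUILD"},
		Target:  "ACTIVE",
		Refresh: func() (interface{}, string, error) {
			// Stand-in for ServerV2StateRefreshFunc: pretend the server
			// finishes building after five seconds.
			if time.Since(start) < 5*time.Second {
				return "server", "BUILD", nil
			}
			return "server", "ACTIVE", nil
		},
		Timeout:    30 * time.Minute, // the value raised by this patch
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}
	if _, err := stateConf.WaitForState(); err != nil {
		fmt.Println("wait failed:", err)
		return
	}
	fmt.Println("instance is ACTIVE")
}
```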
From baeb790ed9f53e9b12b2e7029c727e75267dfd24 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 8 Dec 2015 17:48:57 -0600
Subject: [PATCH 165/664] provider/aws: fix capitalization in acctest names
---
.../aws/resource_aws_lb_cookie_stickiness_policy_test.go | 2 +-
.../providers/aws/resource_aws_opsworks_custom_layer_test.go | 2 +-
builtin/providers/aws/resource_aws_opsworks_stack_test.go | 4 ++--
builtin/providers/aws/resource_aws_vpn_connection_test.go | 2 +-
builtin/providers/aws/resource_vpn_connection_route_test.go | 2 +-
5 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_lb_cookie_stickiness_policy_test.go b/builtin/providers/aws/resource_aws_lb_cookie_stickiness_policy_test.go
index 765d2ffcd4..7417f01217 100644
--- a/builtin/providers/aws/resource_aws_lb_cookie_stickiness_policy_test.go
+++ b/builtin/providers/aws/resource_aws_lb_cookie_stickiness_policy_test.go
@@ -11,7 +11,7 @@ import (
"github.com/hashicorp/terraform/terraform"
)
-func TestAccAwsLBCookieStickinessPolicy_basic(t *testing.T) {
+func TestAccAWSLBCookieStickinessPolicy_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
diff --git a/builtin/providers/aws/resource_aws_opsworks_custom_layer_test.go b/builtin/providers/aws/resource_aws_opsworks_custom_layer_test.go
index a39b5dbdba..477bd2b866 100644
--- a/builtin/providers/aws/resource_aws_opsworks_custom_layer_test.go
+++ b/builtin/providers/aws/resource_aws_opsworks_custom_layer_test.go
@@ -11,7 +11,7 @@ import (
// These tests assume the existence of predefined Opsworks IAM roles named `aws-opsworks-ec2-role`
// and `aws-opsworks-service-role`.
-func TestAccAwsOpsworksCustomLayer(t *testing.T) {
+func TestAccAWSOpsworksCustomLayer(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
diff --git a/builtin/providers/aws/resource_aws_opsworks_stack_test.go b/builtin/providers/aws/resource_aws_opsworks_stack_test.go
index 63a27578c6..ab23dc879c 100644
--- a/builtin/providers/aws/resource_aws_opsworks_stack_test.go
+++ b/builtin/providers/aws/resource_aws_opsworks_stack_test.go
@@ -123,7 +123,7 @@ resource "aws_opsworks_stack" "tf-acc" {
}
`
-func TestAccAwsOpsworksStackNoVpc(t *testing.T) {
+func TestAccAWSOpsworksStackNoVpc(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@@ -200,7 +200,7 @@ resource "aws_opsworks_stack" "tf-acc" {
}
`
-func TestAccAwsOpsworksStackVpc(t *testing.T) {
+func TestAccAWSOpsworksStackVpc(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
diff --git a/builtin/providers/aws/resource_aws_vpn_connection_test.go b/builtin/providers/aws/resource_aws_vpn_connection_test.go
index 137694a610..123cb07e6b 100644
--- a/builtin/providers/aws/resource_aws_vpn_connection_test.go
+++ b/builtin/providers/aws/resource_aws_vpn_connection_test.go
@@ -11,7 +11,7 @@ import (
"github.com/hashicorp/terraform/terraform"
)
-func TestAccAwsVpnConnection_basic(t *testing.T) {
+func TestAccAWSVpnConnection_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
diff --git a/builtin/providers/aws/resource_vpn_connection_route_test.go b/builtin/providers/aws/resource_vpn_connection_route_test.go
index b80feaae66..dbe91649e5 100644
--- a/builtin/providers/aws/resource_vpn_connection_route_test.go
+++ b/builtin/providers/aws/resource_vpn_connection_route_test.go
@@ -11,7 +11,7 @@ import (
"github.com/hashicorp/terraform/terraform"
)
-func TestAccAwsVpnConnectionRoute_basic(t *testing.T) {
+func TestAccAWSVpnConnectionRoute_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
From 4bd4e18defcf73f22c206ce11bdde604c64d05fd Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 7 Dec 2015 18:10:30 -0600
Subject: [PATCH 166/664] core: use same logging setup for acctests
We weren't doing any log setup for acceptance tests, which made it
difficult to wrangle log output in CI.
This moves the log setup functions we use in `main` over into a helper
package so we can use them for acceptance tests as well.
This means that acceptance tests will by default be a _lot_ quieter,
only printing out actual test output. Setting `TF_LOG=trace` will
restore the full prior noise level.
The only minor behavior change is to make `ioutil.Discard` the default
return value rather than a `nil` that needs to be checked for.
---
log.go => helper/logging/logging.go | 9 +++++----
helper/resource/testing.go | 7 +++++++
main.go | 10 ++--------
3 files changed, 14 insertions(+), 12 deletions(-)
rename log.go => helper/logging/logging.go (88%)
diff --git a/log.go b/helper/logging/logging.go
similarity index 88%
rename from log.go
rename to helper/logging/logging.go
index 1077c3e558..b8de7a37fb 100644
--- a/log.go
+++ b/helper/logging/logging.go
@@ -1,7 +1,8 @@
-package main
+package logging
import (
"io"
+ "io/ioutil"
"log"
"os"
"strings"
@@ -18,9 +19,9 @@ const (
var validLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"}
-// logOutput determines where we should send logs (if anywhere) and the log level.
-func logOutput() (logOutput io.Writer, err error) {
- logOutput = nil
+// LogOutput determines where we should send logs (if anywhere) and the log level.
+func LogOutput() (logOutput io.Writer, err error) {
+ logOutput = ioutil.Discard
envLevel := os.Getenv(EnvLog)
if envLevel == "" {
return
diff --git a/helper/resource/testing.go b/helper/resource/testing.go
index 0b53c3c615..18a40553b2 100644
--- a/helper/resource/testing.go
+++ b/helper/resource/testing.go
@@ -13,6 +13,7 @@ import (
"github.com/hashicorp/go-getter"
"github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/helper/logging"
"github.com/hashicorp/terraform/terraform"
)
@@ -103,6 +104,12 @@ func Test(t TestT, c TestCase) {
return
}
+ logWriter, err := logging.LogOutput()
+ if err != nil {
+ t.Error(fmt.Errorf("error setting up logging: %s", err))
+ }
+ log.SetOutput(logWriter)
+
// We require verbose mode so that the user knows what is going on.
if !testTesting && !testing.Verbose() {
t.Fatal("Acceptance tests must be run with the -v flag on tests")
diff --git a/main.go b/main.go
index 1685831076..da71456d59 100644
--- a/main.go
+++ b/main.go
@@ -8,6 +8,7 @@ import (
"os"
"sync"
+ "github.com/hashicorp/terraform/helper/logging"
"github.com/hashicorp/terraform/plugin"
"github.com/mitchellh/cli"
"github.com/mitchellh/panicwrap"
@@ -23,14 +24,11 @@ func realMain() int {
if !panicwrap.Wrapped(&wrapConfig) {
// Determine where logs should go in general (requested by the user)
- logWriter, err := logOutput()
+ logWriter, err := logging.LogOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "Couldn't setup log output: %s", err)
return 1
}
- if logWriter == nil {
- logWriter = ioutil.Discard
- }
// We always send logs to a temporary file that we use in case
// there is a panic. Otherwise, we delete it.
@@ -42,10 +40,6 @@ func realMain() int {
defer os.Remove(logTempFile.Name())
defer logTempFile.Close()
- // Tell the logger to log to this file
- os.Setenv(EnvLog, "")
- os.Setenv(EnvLogFile, "")
-
// Setup the prefixed readers that send data properly to
// stdout/stderr.
doneCh := make(chan struct{})
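The shared helper keys its behavior off `TF_LOG`: when unset it stays silent, otherwise it filters standard-library log lines by the bracketed level tag. A standalone sketch of that level-filtering idea, assuming the hashicorp/logutils package already used by the code above:

```go
// Illustrative sketch, not part of the patch: log lines tagged like "[DEBUG] ..."
// are dropped by logutils.LevelFilter when below the minimum level from TF_LOG.
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/hashicorp/logutils"
)

func main() {
	level := os.Getenv("TF_LOG")
	if level == "" {
		// Matches the new default in the patch: silent unless TF_LOG is set.
		log.SetOutput(ioutil.Discard)
		return
	}
	log.SetOutput(&logutils.LevelFilter{
		Levels:   []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"},
		MinLevel: logutils.LogLevel(level),
		Writer:   os.Stderr,
	})
	log.Print("[DEBUG] only written when TF_LOG is DEBUG or TRACE")
	log.Print("[ERROR] written for any configured level")
}
```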
From ccf51b3b6cb38cb804c0fe95312397813b530859 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 8 Dec 2015 18:43:27 -0600
Subject: [PATCH 167/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ed00859369..c3057edecd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,7 @@ IMPROVEMENTS:
* core: Change set internals for performance improvements [GH-3992]
* core: Support HTTP basic auth in consul remote state [GH-4166]
* provider/aws: Add `placement_group` as an option for `aws_autoscaling_group` [GH-3704]
+ * provider/aws: Add support for DynamoDB Table StreamSpecifications [GH-4208]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting the restart policy on `docker_container` resources [GH-3761]
From 8271c79b40546c26a4cd340bf19030c10b8b0be2 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 8 Dec 2015 20:16:06 -0600
Subject: [PATCH 168/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c3057edecd..cd772ea608 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -26,6 +26,7 @@ IMPROVEMENTS:
BUG FIXES:
* core: skip provider input for deprecated fields [GH-4193]
+ * core: Fix issue which could cause fields that become empty to retain old values in the state [GH-3257]
* provider/docker: Fix an issue running with Docker Swarm by looking up containers by ID instead of name [GH-4148]
* provider/openstack: Better handling of load balancing resource state changes [GH-3926]
* provider/aws: Skip `source_security_group_id` determination logic for Classic ELBs [GH-4075]
From 67bfc2faef6b25fd4265d1a7ce796f10f75f55be Mon Sep 17 00:00:00 2001
From: mdeboercw
Date: Mon, 16 Nov 2015 10:44:39 -0800
Subject: [PATCH 169/664] Added folder handling for folder-qualified vm names
Added acceptance test for creation in folders
Added 'baseName' as computed schema attribute for convenience
Added 'base_name' computed attribute for convenience
Added new vsphere folder resource
Fixed folder behavior
Ensure test folders are properly removed
Avoid recreating the search index in a loop
Fix typo in vsphere.createFolder
Updated website documentation
Renamed test folders to be unique across tests
Fixes based on acc test findings; code cleanup
Added combined folder and vm acc test
Restored newline; fixed skipped acc tests
Marked 'existing_path' as computed only
Removed debug logging from tests
Changed folder read to return error
---
builtin/providers/vsphere/provider.go | 1 +
.../vsphere/resource_vsphere_folder.go | 233 +++++++++++++++
.../vsphere/resource_vsphere_folder_test.go | 279 ++++++++++++++++++
.../resource_vsphere_virtual_machine.go | 85 ++++--
.../resource_vsphere_virtual_machine_test.go | 206 ++++++++++++-
.../providers/vsphere/index.html.markdown | 8 +-
.../providers/vsphere/r/folder.html.markdown | 28 ++
7 files changed, 814 insertions(+), 26 deletions(-)
create mode 100644 builtin/providers/vsphere/resource_vsphere_folder.go
create mode 100644 builtin/providers/vsphere/resource_vsphere_folder_test.go
create mode 100644 website/source/docs/providers/vsphere/r/folder.html.markdown
diff --git a/builtin/providers/vsphere/provider.go b/builtin/providers/vsphere/provider.go
index 9a749a127b..b5a096d303 100644
--- a/builtin/providers/vsphere/provider.go
+++ b/builtin/providers/vsphere/provider.go
@@ -32,6 +32,7 @@ func Provider() terraform.ResourceProvider {
},
ResourcesMap: map[string]*schema.Resource{
+ "vsphere_folder": resourceVSphereFolder(),
"vsphere_virtual_machine": resourceVSphereVirtualMachine(),
},
diff --git a/builtin/providers/vsphere/resource_vsphere_folder.go b/builtin/providers/vsphere/resource_vsphere_folder.go
new file mode 100644
index 0000000000..3ed4d52ad5
--- /dev/null
+++ b/builtin/providers/vsphere/resource_vsphere_folder.go
@@ -0,0 +1,233 @@
+package vsphere
+
+import (
+ "fmt"
+ "log"
+ "path"
+ "strings"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/vmware/govmomi"
+ "github.com/vmware/govmomi/find"
+ "github.com/vmware/govmomi/object"
+ "golang.org/x/net/context"
+)
+
+type folder struct {
+ datacenter string
+ existingPath string
+ path string
+}
+
+func resourceVSphereFolder() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceVSphereFolderCreate,
+ Read: resourceVSphereFolderRead,
+ Delete: resourceVSphereFolderDelete,
+
+ Schema: map[string]*schema.Schema{
+ "datacenter": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "path": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "existing_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func resourceVSphereFolderCreate(d *schema.ResourceData, meta interface{}) error {
+
+ client := meta.(*govmomi.Client)
+
+ f := folder{
+ path: strings.TrimRight(d.Get("path").(string), "/"),
+ }
+
+ if v, ok := d.GetOk("datacenter"); ok {
+ f.datacenter = v.(string)
+ }
+
+ createFolder(client, &f)
+
+ d.Set("existing_path", f.existingPath)
+ d.SetId(fmt.Sprintf("%v/%v", f.datacenter, f.path))
+ log.Printf("[INFO] Created folder: %s", f.path)
+
+ return resourceVSphereFolderRead(d, meta)
+}
+
+
+func createFolder(client *govmomi.Client, f *folder) error {
+
+ finder := find.NewFinder(client.Client, true)
+
+ dc, err := finder.Datacenter(context.TODO(), f.datacenter)
+ if err != nil {
+ return fmt.Errorf("error %s", err)
+ }
+ finder = finder.SetDatacenter(dc)
+ si := object.NewSearchIndex(client.Client)
+
+ dcFolders, err := dc.Folders(context.TODO())
+ if err != nil {
+ return fmt.Errorf("error %s", err)
+ }
+
+ folder := dcFolders.VmFolder
+ var workingPath string
+
+ pathParts := strings.Split(f.path, "/")
+ for _, pathPart := range pathParts {
+ if len(workingPath) > 0 {
+ workingPath += "/"
+ }
+ workingPath += pathPart
+ subfolder, err := si.FindByInventoryPath(
+ context.TODO(), fmt.Sprintf("%v/vm/%v", f.datacenter, workingPath))
+
+ if err != nil {
+ return fmt.Errorf("error %s", err)
+ } else if subfolder == nil {
+ log.Printf("[DEBUG] folder not found; creating: %s", workingPath)
+ folder, err = folder.CreateFolder(context.TODO(), pathPart)
+ if err != nil {
+ return fmt.Errorf("Failed to create folder at %s; %s", workingPath, err)
+ }
+ } else {
+ log.Printf("[DEBUG] folder already exists: %s", workingPath)
+ f.existingPath = workingPath
+ folder = subfolder.(*object.Folder)
+ }
+ }
+ return nil
+}
+
+
+func resourceVSphereFolderRead(d *schema.ResourceData, meta interface{}) error {
+
+ log.Printf("[DEBUG] reading folder: %#v", d)
+ client := meta.(*govmomi.Client)
+
+ dc, err := getDatacenter(client, d.Get("datacenter").(string))
+ if err != nil {
+ return err
+ }
+
+ finder := find.NewFinder(client.Client, true)
+ finder = finder.SetDatacenter(dc)
+
+ folder, err := object.NewSearchIndex(client.Client).FindByInventoryPath(
+ context.TODO(), fmt.Sprintf("%v/vm/%v", d.Get("datacenter").(string),
+ d.Get("path").(string)))
+
+ if err != nil {
+ return err
+ }
+
+ if folder == nil {
+ d.SetId("")
+ }
+
+ return nil
+}
+
+func resourceVSphereFolderDelete(d *schema.ResourceData, meta interface{}) error {
+
+ f := folder{
+ path: strings.TrimRight(d.Get("path").(string), "/"),
+ existingPath: d.Get("existing_path").(string),
+ }
+
+ if v, ok := d.GetOk("datacenter"); ok {
+ f.datacenter = v.(string)
+ }
+
+ client := meta.(*govmomi.Client)
+
+ deleteFolder(client, &f)
+
+ d.SetId("")
+ return nil
+}
+
+func deleteFolder(client *govmomi.Client, f *folder) error {
+ dc, err := getDatacenter(client, f.datacenter)
+ if err != nil {
+ return err
+ }
+ var folder *object.Folder
+ currentPath := f.path
+
+ finder := find.NewFinder(client.Client, true)
+ finder = finder.SetDatacenter(dc)
+ si := object.NewSearchIndex(client.Client)
+
+ folderRef, err := si.FindByInventoryPath(
+ context.TODO(), fmt.Sprintf("%v/vm/%v", f.datacenter, f.path))
+
+ if err != nil {
+ return fmt.Errorf("[ERROR] Could not locate folder %s: %v", f.path, err)
+ } else {
+ folder = folderRef.(*object.Folder)
+ }
+
+ log.Printf("[INFO] Deleting empty sub-folders of existing path: %s", f.existingPath)
+ for currentPath != f.existingPath {
+ log.Printf("[INFO] Deleting folder: %s", currentPath)
+ children, err := folder.Children(context.TODO())
+ if err != nil {
+ return err
+ }
+
+ if len(children) > 0 {
+ return fmt.Errorf("Folder %s is non-empty and will not be deleted", currentPath)
+ } else {
+ log.Printf("[DEBUG] current folder: %#v", folder)
+ currentPath = path.Dir(currentPath)
+ if currentPath == "." {
+ currentPath = ""
+ }
+ log.Printf("[INFO] parent path of %s is calculated as %s", f.path, currentPath)
+ task, err := folder.Destroy(context.TODO())
+ if err != nil {
+ return err
+ }
+ err = task.Wait(context.TODO())
+ if err != nil {
+ return err
+ }
+ folderRef, err = si.FindByInventoryPath(
+ context.TODO(), fmt.Sprintf("%v/vm/%v", f.datacenter, currentPath))
+
+ if err != nil {
+ return err
+ } else if folderRef != nil {
+ folder = folderRef.(*object.Folder)
+ }
+ }
+ }
+ return nil
+}
+
+// getDatacenter gets datacenter object
+func getDatacenter(c *govmomi.Client, dc string) (*object.Datacenter, error) {
+ finder := find.NewFinder(c.Client, true)
+ if dc != "" {
+ d, err := finder.Datacenter(context.TODO(), dc)
+ return d, err
+ } else {
+ d, err := finder.DefaultDatacenter(context.TODO())
+ return d, err
+ }
+}
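The delete path in `deleteFolder` above climbs from the managed folder toward the pre-existing prefix recorded in `existing_path`, removing each level only if it is empty. A standalone sketch of just that walk-up bookkeeping, using the same `path.Dir` trick with made-up folder names:

```go
// Illustrative sketch, not part of the patch: the walk stops once the
// pre-existing prefix is reached, so folders Terraform did not create survive.
package main

import (
	"fmt"
	"path"
)

func main() {
	existingPath := "tf_test/tf_existing"
	current := "tf_test/tf_existing/tf_nested/tf_test"

	for current != existingPath {
		fmt.Println("would delete:", current)
		current = path.Dir(current)
		if current == "." {
			current = "" // top-level folder: nothing left above it
		}
	}
	fmt.Println("stopping at pre-existing folder:", existingPath)
}
```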
diff --git a/builtin/providers/vsphere/resource_vsphere_folder_test.go b/builtin/providers/vsphere/resource_vsphere_folder_test.go
new file mode 100644
index 0000000000..c8dd9828a4
--- /dev/null
+++ b/builtin/providers/vsphere/resource_vsphere_folder_test.go
@@ -0,0 +1,279 @@
+package vsphere
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/vmware/govmomi"
+ "github.com/vmware/govmomi/find"
+ "github.com/vmware/govmomi/object"
+ "golang.org/x/net/context"
+)
+
+// Basic top-level folder creation
+func TestAccVSphereFolder_basic(t *testing.T) {
+ var f folder
+ datacenter := os.Getenv("VSPHERE_DATACENTER")
+ testMethod := "basic"
+ resourceName := "vsphere_folder." + testMethod
+ path := "tf_test_basic"
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckVSphereFolderDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: fmt.Sprintf(
+ testAccCheckVSphereFolderConfig,
+ testMethod,
+ path,
+ datacenter,
+ ),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVSphereFolderExists(resourceName, &f),
+ resource.TestCheckResourceAttr(
+ resourceName, "path", path),
+ resource.TestCheckResourceAttr(
+ resourceName, "existing_path", ""),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccVSphereFolder_nested(t *testing.T) {
+
+ var f folder
+ datacenter := os.Getenv("VSPHERE_DATACENTER")
+ testMethod := "nested"
+ resourceName := "vsphere_folder." + testMethod
+ path := "tf_test_nested/tf_test_folder"
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckVSphereFolderDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: fmt.Sprintf(
+ testAccCheckVSphereFolderConfig,
+ testMethod,
+ path,
+ datacenter,
+ ),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVSphereFolderExists(resourceName, &f),
+ resource.TestCheckResourceAttr(
+ resourceName, "path", path),
+ resource.TestCheckResourceAttr(
+ resourceName, "existing_path", ""),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccVSphereFolder_dontDeleteExisting(t *testing.T) {
+
+ var f folder
+ datacenter := os.Getenv("VSPHERE_DATACENTER")
+ testMethod := "dontDeleteExisting"
+ resourceName := "vsphere_folder." + testMethod
+ existingPath := "tf_test_dontDeleteExisting/tf_existing"
+ path := existingPath + "/tf_nested/tf_test"
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: resource.ComposeTestCheckFunc(
+ assertVSphereFolderExists(datacenter, existingPath),
+ removeVSphereFolder(datacenter, existingPath, ""),
+ ),
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ PreConfig: func() {
+ createVSphereFolder(datacenter, existingPath)
+ },
+ Config: fmt.Sprintf(
+ testAccCheckVSphereFolderConfig,
+ testMethod,
+ path,
+ datacenter,
+ ),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVSphereFolderExistingPathExists(resourceName, &f),
+ resource.TestCheckResourceAttr(
+ resourceName, "path", path),
+ resource.TestCheckResourceAttr(
+ resourceName, "existing_path", existingPath),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckVSphereFolderDestroy(s *terraform.State) error {
+ client := testAccProvider.Meta().(*govmomi.Client)
+ finder := find.NewFinder(client.Client, true)
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "vsphere_folder" {
+ continue
+ }
+
+ dc, err := finder.Datacenter(context.TODO(), rs.Primary.Attributes["datacenter"])
+ if err != nil {
+ return fmt.Errorf("error %s", err)
+ }
+
+ dcFolders, err := dc.Folders(context.TODO())
+ if err != nil {
+ return fmt.Errorf("error %s", err)
+ }
+
+ _, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["path"])
+ if err == nil {
+ return fmt.Errorf("Record still exists")
+ }
+ }
+
+ return nil
+}
+
+func testAccCheckVSphereFolderExists(n string, f *folder) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Resource not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No ID is set")
+ }
+
+ client := testAccProvider.Meta().(*govmomi.Client)
+ finder := find.NewFinder(client.Client, true)
+
+ dc, err := finder.Datacenter(context.TODO(), rs.Primary.Attributes["datacenter"])
+ if err != nil {
+ return fmt.Errorf("error %s", err)
+ }
+
+ dcFolders, err := dc.Folders(context.TODO())
+ if err != nil {
+ return fmt.Errorf("error %s", err)
+ }
+
+ _, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["path"])
+
+
+ *f = folder{
+ path: rs.Primary.Attributes["path"],
+ }
+
+ return nil
+ }
+}
+
+func testAccCheckVSphereFolderExistingPathExists(n string, f *folder) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Resource %s not found in %#v", n, s.RootModule().Resources)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No ID is set")
+ }
+
+ client := testAccProvider.Meta().(*govmomi.Client)
+ finder := find.NewFinder(client.Client, true)
+
+ dc, err := finder.Datacenter(context.TODO(), rs.Primary.Attributes["datacenter"])
+ if err != nil {
+ return fmt.Errorf("error %s", err)
+ }
+
+ dcFolders, err := dc.Folders(context.TODO())
+ if err != nil {
+ return fmt.Errorf("error %s", err)
+ }
+
+ _, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["existing_path"])
+
+
+ *f = folder{
+ path: rs.Primary.Attributes["path"],
+ }
+
+ return nil
+ }
+}
+
+func assertVSphereFolderExists(datacenter string, folder_name string) resource.TestCheckFunc {
+
+ return func(s *terraform.State) error {
+ client := testAccProvider.Meta().(*govmomi.Client)
+ folder, err := object.NewSearchIndex(client.Client).FindByInventoryPath(
+ context.TODO(), fmt.Sprintf("%v/vm/%v", datacenter, folder_name))
+ if err != nil {
+ return fmt.Errorf("Error: %s", err)
+ } else if folder == nil {
+ return fmt.Errorf("Folder %s does not exist!", folder_name)
+ }
+
+ return nil
+ }
+}
+
+func createVSphereFolder(datacenter string, folder_name string) error {
+
+ client := testAccProvider.Meta().(*govmomi.Client)
+
+ f := folder{path: folder_name, datacenter: datacenter,}
+
+ folder, err := object.NewSearchIndex(client.Client).FindByInventoryPath(
+ context.TODO(), fmt.Sprintf("%v/vm/%v", datacenter, folder_name))
+ if err != nil {
+ return fmt.Errorf("error %s", err)
+ }
+
+ if folder == nil {
+ createFolder(client, &f)
+ } else {
+ return fmt.Errorf("Folder %s already exists", folder_name)
+ }
+
+ return nil
+}
+
+func removeVSphereFolder(datacenter string, folder_name string, existing_path string) resource.TestCheckFunc {
+
+ f := folder{path: folder_name, datacenter: datacenter, existingPath: existing_path,}
+
+ return func(s *terraform.State) error {
+
+ client := testAccProvider.Meta().(*govmomi.Client)
+ // finder := find.NewFinder(client.Client, true)
+
+ folder, _ := object.NewSearchIndex(client.Client).FindByInventoryPath(
+ context.TODO(), fmt.Sprintf("%v/vm/%v", datacenter, folder_name))
+ if folder != nil {
+ deleteFolder(client, &f)
+ }
+
+ return nil
+ }
+}
+
+const testAccCheckVSphereFolderConfig = `
+resource "vsphere_folder" "%s" {
+ path = "%s"
+ datacenter = "%s"
+}
+`
\ No newline at end of file
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index 4fbd66b891..3ecddfeb66 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -42,6 +42,7 @@ type hardDisk struct {
type virtualMachine struct {
name string
+ folder string
datacenter string
cluster string
resourcePool string
@@ -59,6 +60,18 @@ type virtualMachine struct {
customConfigurations map[string](types.AnyType)
}
+func (v virtualMachine) Path() string {
+ return vmPath(v.folder, v.name)
+}
+
+func vmPath(folder string, name string) string {
+ var path string
+ if len(folder) > 0 {
+ path += folder + "/"
+ }
+ return path + name
+}
+
func resourceVSphereVirtualMachine() *schema.Resource {
return &schema.Resource{
Create: resourceVSphereVirtualMachineCreate,
@@ -72,6 +85,12 @@ func resourceVSphereVirtualMachine() *schema.Resource {
ForceNew: true,
},
+ "folder": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+
"vcpu": &schema.Schema{
Type: schema.TypeInt,
Required: true,
@@ -228,6 +247,10 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
memoryMb: int64(d.Get("memory").(int)),
}
+ if v, ok := d.GetOk("folder"); ok {
+ vm.folder = v.(string)
+ }
+
if v, ok := d.GetOk("datacenter"); ok {
vm.datacenter = v.(string)
}
@@ -344,7 +367,7 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
stateConf := &resource.StateChangeConf{
Pending: []string{"pending"},
Target: "active",
- Refresh: waitForNetworkingActive(client, vm.datacenter, vm.name),
+ Refresh: waitForNetworkingActive(client, vm.datacenter, vm.Path()),
Timeout: 600 * time.Second,
Delay: time.Duration(v.(int)) * time.Second,
MinTimeout: 2 * time.Second,
@@ -356,13 +379,15 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
}
}
}
- d.SetId(vm.name)
+ d.SetId(vm.Path())
log.Printf("[INFO] Created virtual machine: %s", d.Id())
return resourceVSphereVirtualMachineRead(d, meta)
}
func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
+
+ log.Printf("[DEBUG] reading virtual machine: %#v", d)
client := meta.(*govmomi.Client)
dc, err := getDatacenter(client, d.Get("datacenter").(string))
if err != nil {
@@ -371,9 +396,8 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
finder := find.NewFinder(client.Client, true)
finder = finder.SetDatacenter(dc)
- vm, err := finder.VirtualMachine(context.TODO(), d.Get("name").(string))
+ vm, err := finder.VirtualMachine(context.TODO(), d.Id())
if err != nil {
- log.Printf("[ERROR] Virtual machine not found: %s", d.Get("name").(string))
d.SetId("")
return nil
}
@@ -458,7 +482,7 @@ func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{
finder := find.NewFinder(client.Client, true)
finder = finder.SetDatacenter(dc)
- vm, err := finder.VirtualMachine(context.TODO(), d.Get("name").(string))
+ vm, err := finder.VirtualMachine(context.TODO(), vmPath(d.Get("folder").(string), d.Get("name").(string)))
if err != nil {
return err
}
@@ -522,18 +546,6 @@ func waitForNetworkingActive(client *govmomi.Client, datacenter, name string) re
}
}
-// getDatacenter gets datacenter object
-func getDatacenter(c *govmomi.Client, dc string) (*object.Datacenter, error) {
- finder := find.NewFinder(c.Client, true)
- if dc != "" {
- d, err := finder.Datacenter(context.TODO(), dc)
- return d, err
- } else {
- d, err := finder.DefaultDatacenter(context.TODO())
- return d, err
- }
-}
-
// addHardDisk adds a new Hard Disk to the VirtualMachine.
func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string) error {
devices, err := vm.Device(context.TODO())
@@ -766,6 +778,7 @@ func findDatastore(c *govmomi.Client, sps types.StoragePlacementSpec) (*object.D
// createVirtualMchine creates a new VirtualMachine.
func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
dc, err := getDatacenter(c, vm.datacenter)
+
if err != nil {
return err
}
@@ -798,6 +811,21 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
return err
}
+ log.Printf("[DEBUG] folder: %#v", vm.folder)
+ folder := dcFolders.VmFolder
+ if len(vm.folder) > 0 {
+ si := object.NewSearchIndex(c.Client)
+ folderRef, err := si.FindByInventoryPath(
+ context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
+ if err != nil {
+ return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
+ } else if folderRef == nil {
+ return fmt.Errorf("Cannot find folder %s", vm.folder)
+ } else {
+ folder = folderRef.(*object.Folder)
+ }
+ }
+
// network
networkDevices := []types.BaseVirtualDeviceConfigSpec{}
for _, network := range vm.networkInterfaces {
@@ -886,7 +914,7 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
})
configSpec.Files = &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf("[%s]", mds.Name)}
- task, err := dcFolders.VmFolder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
+ task, err := folder.CreateVM(context.TODO(), configSpec, resourcePool, nil)
if err != nil {
log.Printf("[ERROR] %s", err)
}
@@ -896,7 +924,7 @@ func (vm *virtualMachine) createVirtualMachine(c *govmomi.Client) error {
log.Printf("[ERROR] %s", err)
}
- newVM, err := finder.VirtualMachine(context.TODO(), vm.name)
+ newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
if err != nil {
return err
}
@@ -954,6 +982,21 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
if err != nil {
return err
}
+
+ log.Printf("[DEBUG] folder: %#v", vm.folder)
+ folder := dcFolders.VmFolder
+ if len(vm.folder) > 0 {
+ si := object.NewSearchIndex(c.Client)
+ folderRef, err := si.FindByInventoryPath(
+ context.TODO(), fmt.Sprintf("%v/vm/%v", vm.datacenter, vm.folder))
+ if err != nil {
+ return fmt.Errorf("Error reading folder %s: %s", vm.folder, err)
+ } else if folderRef == nil {
+ return fmt.Errorf("Cannot find folder %s", vm.folder)
+ } else {
+ folder = folderRef.(*object.Folder)
+ }
+ }
var datastore *object.Datastore
if vm.datastore == "" {
@@ -1084,7 +1127,7 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
}
log.Printf("[DEBUG] clone spec: %v", cloneSpec)
- task, err := template.Clone(context.TODO(), dcFolders.VmFolder, vm.name, cloneSpec)
+ task, err := template.Clone(context.TODO(), folder, vm.name, cloneSpec)
if err != nil {
return err
}
@@ -1094,7 +1137,7 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
return err
}
- newVM, err := finder.VirtualMachine(context.TODO(), vm.name)
+ newVM, err := finder.VirtualMachine(context.TODO(), vm.Path())
if err != nil {
return err
}
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
index 130523a47b..d92a4119ea 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
@@ -191,6 +191,140 @@ func TestAccVSphereVirtualMachine_custom_configs(t *testing.T) {
})
}
+func TestAccVSphereVirtualMachine_createInExistingFolder(t *testing.T) {
+ var vm virtualMachine
+ var locationOpt string
+ var datastoreOpt string
+ var datacenter string
+
+ folder := "tf_test_createInExistingFolder"
+
+ if v := os.Getenv("VSPHERE_DATACENTER"); v != "" {
+ locationOpt += fmt.Sprintf(" datacenter = \"%s\"\n", v)
+ datacenter = v
+ }
+ if v := os.Getenv("VSPHERE_CLUSTER"); v != "" {
+ locationOpt += fmt.Sprintf(" cluster = \"%s\"\n", v)
+ }
+ if v := os.Getenv("VSPHERE_RESOURCE_POOL"); v != "" {
+ locationOpt += fmt.Sprintf(" resource_pool = \"%s\"\n", v)
+ }
+ if v := os.Getenv("VSPHERE_DATASTORE"); v != "" {
+ datastoreOpt = fmt.Sprintf(" datastore = \"%s\"\n", v)
+ }
+ template := os.Getenv("VSPHERE_TEMPLATE")
+ label := os.Getenv("VSPHERE_NETWORK_LABEL_DHCP")
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: resource.ComposeTestCheckFunc(
+ testAccCheckVSphereVirtualMachineDestroy,
+ removeVSphereFolder(datacenter, folder, ""),
+ ),
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ PreConfig: func() { createVSphereFolder(datacenter, folder) },
+ Config: fmt.Sprintf(
+ testAccCheckVSphereVirtualMachineConfig_createInFolder,
+ folder,
+ locationOpt,
+ label,
+ datastoreOpt,
+ template,
+ ),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.folder", &vm),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.folder", "name", "terraform-test-folder"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.folder", "folder", folder),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.folder", "vcpu", "2"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.folder", "memory", "4096"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.folder", "disk.#", "1"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.folder", "disk.0.template", template),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.folder", "network_interface.#", "1"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.folder", "network_interface.0.label", label),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccVSphereVirtualMachine_createWithFolder(t *testing.T) {
+ var vm virtualMachine
+ var f folder
+ var locationOpt string
+ var folderLocationOpt string
+ var datastoreOpt string
+
+ folder := "tf_test_createWithFolder"
+
+ if v := os.Getenv("VSPHERE_DATACENTER"); v != "" {
+ folderLocationOpt = fmt.Sprintf(" datacenter = \"%s\"\n", v)
+ locationOpt += folderLocationOpt
+ }
+ if v := os.Getenv("VSPHERE_CLUSTER"); v != "" {
+ locationOpt += fmt.Sprintf(" cluster = \"%s\"\n", v)
+ }
+ if v := os.Getenv("VSPHERE_RESOURCE_POOL"); v != "" {
+ locationOpt += fmt.Sprintf(" resource_pool = \"%s\"\n", v)
+ }
+ if v := os.Getenv("VSPHERE_DATASTORE"); v != "" {
+ datastoreOpt = fmt.Sprintf(" datastore = \"%s\"\n", v)
+ }
+ template := os.Getenv("VSPHERE_TEMPLATE")
+ label := os.Getenv("VSPHERE_NETWORK_LABEL_DHCP")
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: resource.ComposeTestCheckFunc(
+ testAccCheckVSphereVirtualMachineDestroy,
+ testAccCheckVSphereFolderDestroy,
+ ),
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: fmt.Sprintf(
+ testAccCheckVSphereVirtualMachineConfig_createWithFolder,
+ folder,
+ folderLocationOpt,
+ locationOpt,
+ label,
+ datastoreOpt,
+ template,
+ ),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.with_folder", &vm),
+ testAccCheckVSphereFolderExists("vsphere_folder.with_folder", &f),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.with_folder", "name", "terraform-test-with-folder"),
+ // resource.TestCheckResourceAttr(
+ // "vsphere_virtual_machine.with_folder", "folder", folder),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.with_folder", "vcpu", "2"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.with_folder", "memory", "4096"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.with_folder", "disk.#", "1"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.with_folder", "disk.0.template", template),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.with_folder", "network_interface.#", "1"),
+ resource.TestCheckResourceAttr(
+ "vsphere_virtual_machine.with_folder", "network_interface.0.label", label),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckVSphereVirtualMachineDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*govmomi.Client)
finder := find.NewFinder(client.Client, true)
@@ -210,7 +344,21 @@ func testAccCheckVSphereVirtualMachineDestroy(s *terraform.State) error {
return fmt.Errorf("error %s", err)
}
- _, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["name"])
+
+ folder := dcFolders.VmFolder
+ if len(rs.Primary.Attributes["folder"]) > 0 {
+ si := object.NewSearchIndex(client.Client)
+ folderRef, err := si.FindByInventoryPath(
+ context.TODO(), fmt.Sprintf("%v/vm/%v", rs.Primary.Attributes["datacenter"], rs.Primary.Attributes["folder"]))
+ if err != nil {
+ return err
+ } else if folderRef != nil {
+ folder = folderRef.(*object.Folder)
+ }
+ }
+
+ _, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), folder, rs.Primary.Attributes["name"])
+
if err == nil {
return fmt.Errorf("Record still exists")
}
@@ -306,9 +454,9 @@ func testAccCheckVSphereVirtualMachineExistsHasCustomConfig(n string, vm *virtua
return nil
}
}
+
func testAccCheckVSphereVirtualMachineExists(n string, vm *virtualMachine) resource.TestCheckFunc {
return func(s *terraform.State) error {
-
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
@@ -331,14 +479,26 @@ func testAccCheckVSphereVirtualMachineExists(n string, vm *virtualMachine) resou
return fmt.Errorf("error %s", err)
}
- _, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["name"])
+ folder := dcFolders.VmFolder
+ if len(rs.Primary.Attributes["folder"]) > 0 {
+ si := object.NewSearchIndex(client.Client)
+ folderRef, err := si.FindByInventoryPath(
+ context.TODO(), fmt.Sprintf("%v/vm/%v", rs.Primary.Attributes["datacenter"], rs.Primary.Attributes["folder"]))
+ if err != nil {
+ return err
+ } else if folderRef != nil {
+ folder = folderRef.(*object.Folder)
+ }
+ }
+
+ _, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), folder, rs.Primary.Attributes["name"])
+
*vm = virtualMachine{
name: rs.Primary.ID,
}
return nil
-
}
}
@@ -401,3 +561,41 @@ resource "vsphere_virtual_machine" "car" {
}
}
`
+
+const testAccCheckVSphereVirtualMachineConfig_createInFolder = `
+resource "vsphere_virtual_machine" "folder" {
+ name = "terraform-test-folder"
+ folder = "%s"
+%s
+ vcpu = 2
+ memory = 4096
+ network_interface {
+ label = "%s"
+ }
+ disk {
+%s
+ template = "%s"
+ }
+}
+`
+
+const testAccCheckVSphereVirtualMachineConfig_createWithFolder = `
+resource "vsphere_folder" "with_folder" {
+ path = "%s"
+%s
+}
+resource "vsphere_virtual_machine" "with_folder" {
+ name = "terraform-test-with-folder"
+ folder = "${vsphere_folder.with_folder.path}"
+%s
+ vcpu = 2
+ memory = 4096
+ network_interface {
+ label = "%s"
+ }
+ disk {
+%s
+ template = "%s"
+ }
+}
+`
\ No newline at end of file
diff --git a/website/source/docs/providers/vsphere/index.html.markdown b/website/source/docs/providers/vsphere/index.html.markdown
index 8cacfd36b9..428d798f2a 100644
--- a/website/source/docs/providers/vsphere/index.html.markdown
+++ b/website/source/docs/providers/vsphere/index.html.markdown
@@ -30,9 +30,15 @@ provider "vsphere" {
vsphere_server = "${var.vsphere_server}"
}
-# Create a virtual machine
+# Create a folder
+resource "vsphere_folder" "frontend" {
+ path = "frontend"
+}
+
+# Create a virtual machine within the folder
resource "vsphere_virtual_machine" "web" {
name = "terraform_web"
+ folder = "${vsphere_folder.frontend.path}"
vcpu = 2
memory = 4096
diff --git a/website/source/docs/providers/vsphere/r/folder.html.markdown b/website/source/docs/providers/vsphere/r/folder.html.markdown
new file mode 100644
index 0000000000..9825a10edb
--- /dev/null
+++ b/website/source/docs/providers/vsphere/r/folder.html.markdown
@@ -0,0 +1,28 @@
+---
+layout: "vsphere"
+page_title: "VMware vSphere: vsphere_folder"
+sidebar_current: "docs-vsphere-resource-folder"
+description: |-
+ Provides a VMware vSphere virtual machine folder resource. This can be used to create and delete virtual machine folders.
+---
+
+# vsphere\_folder
+
+Provides a VMware vSphere virtual machine folder resource. This can be used to create and delete virtual machine folders.
+
+## Example Usage
+
+```
+resource "vsphere_folder" "web" {
+ path = "terraform_web_folder"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `path` - (Required) The path of the folder to be created (relative to the datacenter root); should not begin or end with a "/"
+* `datacenter` - (Optional) The name of a Datacenter in which the folder will be created
+* `existing_path` - (Computed) The path of any parent folder segments which existed at the time this folder was created; on a
+destroy action, the (pre-) existing path is not removed.
From 377ac3e3a0749954ffad76342af5b1234f1d24ae Mon Sep 17 00:00:00 2001
From: Joe Topjian
Date: Tue, 8 Dec 2015 22:20:04 -0700
Subject: [PATCH 170/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cd772ea608..cab8c56b97 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -22,6 +22,7 @@ IMPROVEMENTS:
* provider/vsphere: Add support for custom vm params on `vsphere_virtual_machine` [GH-3867]
* provider/vsphere: Rename vcenter_server config parameter to something clearer [GH-3718]
 * provider/vsphere: Make allow_unverified_ssl configurable on the provider [GH-3933]
+ * provider/openstack: Increase instance timeout from 10 to 30 minutes [GH-4223]
BUG FIXES:
From e842ad33d6cb7c1a18fdfd748ddaca1cdfe2e23d Mon Sep 17 00:00:00 2001
From: Joe Topjian
Date: Tue, 8 Dec 2015 22:59:03 -0700
Subject: [PATCH 171/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cab8c56b97..8f7b8ef81e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -34,6 +34,7 @@ BUG FIXES:
* provider/aws: Fix issue destroy Route 53 zone/record if it no longer exists [GH-4198]
* provider/aws: Fix issue force destroying a versioned S3 bucket [GH-4168]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
+ * provider/openstack: Handle volumes in "deleting" state [GH-4204]
## 0.6.8 (December 2, 2015)
From c570af3ec2901ba11cd74021270ddfb9f7aec5f7 Mon Sep 17 00:00:00 2001
From: Clint
Date: Wed, 9 Dec 2015 09:55:20 -0600
Subject: [PATCH 172/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8f7b8ef81e..9b206f5bcc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -33,6 +33,7 @@ BUG FIXES:
* provider/aws: Skip `source_security_group_id` determination logic for Classic ELBs [GH-4075]
* provider/aws: Fix issue destroy Route 53 zone/record if it no longer exists [GH-4198]
* provider/aws: Fix issue force destroying a versioned S3 bucket [GH-4168]
+ * provider/aws: Fix issue creating AWS RDS replicas across regions [GH-4215]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
From 9da2c0a55679915e8e94fda01c1f38c8524cbfa3 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 9 Dec 2015 09:56:17 -0600
Subject: [PATCH 173/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9b206f5bcc..8944b29cfc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -22,6 +22,7 @@ IMPROVEMENTS:
* provider/vsphere: Add support for custom vm params on `vsphere_virtual_machine` [GH-3867]
* provider/vsphere: Rename vcenter_server config parameter to something clearer [GH-3718]
 * provider/vsphere: Make allow_unverified_ssl configurable on the provider [GH-3933]
+ * provider/vsphere: Add folder handling for folder-qualified vm names [GH-3939]
* provider/openstack: Increase instance timeout from 10 to 30 minutes [GH-4223]
BUG FIXES:
From dac7d7b0cafd9474f196742d2bffd8fab3e2734b Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 9 Dec 2015 10:08:54 -0600
Subject: [PATCH 174/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8944b29cfc..864321dcfa 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -37,6 +37,7 @@ BUG FIXES:
* provider/aws: Fix issue creating AWS RDS replicas across regions [GH-4215]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
+ * provider/vsphere: Create and attach additional disks before bootup [GH-4196]
## 0.6.8 (December 2, 2015)
From bd22c77c1fbcbda61186738d18e45dd7eadb0ef1 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Wed, 9 Dec 2015 10:44:22 -0600
Subject: [PATCH 175/664] providers/aws: Update Hosted Zones to fix issue with
eu-central
---
builtin/providers/aws/hosted_zones.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/builtin/providers/aws/hosted_zones.go b/builtin/providers/aws/hosted_zones.go
index 7633e06349..ec4a508a57 100644
--- a/builtin/providers/aws/hosted_zones.go
+++ b/builtin/providers/aws/hosted_zones.go
@@ -8,7 +8,7 @@ var hostedZoneIDsMap = map[string]string{
"us-west-2": "Z3BJ6K6RIION7M",
"us-west-1": "Z2F56UZL2M1ACD",
"eu-west-1": "Z1BKCTXD74EZPE",
- "central-1": "Z21DNDUVLTQW6Q",
+ "eu-central-1": "Z21DNDUVLTQW6Q",
"ap-southeast-1": "Z3O0J2DXBE1FTB",
"ap-southeast-2": "Z1WCIGYICN2BYD",
"ap-northeast-1": "Z2M4EHUR26P7ZW",
From 14f8c363bd5b55cfd2798d33d723928d4fc83d12 Mon Sep 17 00:00:00 2001
From: Clint
Date: Wed, 9 Dec 2015 10:51:43 -0600
Subject: [PATCH 176/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 864321dcfa..792ca573ae 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -35,6 +35,7 @@ BUG FIXES:
* provider/aws: Fix issue destroy Route 53 zone/record if it no longer exists [GH-4198]
* provider/aws: Fix issue force destroying a versioned S3 bucket [GH-4168]
* provider/aws: Fix issue creating AWS RDS replicas across regions [GH-4215]
+ * providers/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region [GH-4236]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
* provider/vsphere: Create and attach additional disks before bootup [GH-4196]
From b6e0b0187bbf7c28c690f4aec86a0024f01753df Mon Sep 17 00:00:00 2001
From: Clint
Date: Wed, 9 Dec 2015 10:51:54 -0600
Subject: [PATCH 177/664] Update CHANGELOG.md
---
CHANGELOG.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 792ca573ae..917c984acc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -35,7 +35,7 @@ BUG FIXES:
* provider/aws: Fix issue destroy Route 53 zone/record if it no longer exists [GH-4198]
* provider/aws: Fix issue force destroying a versioned S3 bucket [GH-4168]
* provider/aws: Fix issue creating AWS RDS replicas across regions [GH-4215]
- * providers/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region [GH-4236]
+ * provider/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region [GH-4236]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
* provider/vsphere: Create and attach additional disks before bootup [GH-4196]
From 1b6f3558bbf4199cb4afabb0378c9c68c04df960 Mon Sep 17 00:00:00 2001
From: Clint
Date: Wed, 9 Dec 2015 11:43:23 -0600
Subject: [PATCH 178/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 917c984acc..908a7a3b06 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,7 @@ IMPROVEMENTS:
* core: Support HTTP basic auth in consul remote state [GH-4166]
* provider/aws: Add `placement_group` as an option for `aws_autoscaling_group` [GH-3704]
* provider/aws: Add support for DynamoDB Table StreamSpecifications [GH-4208]
+ * provider/aws: Add `name_prefix` to Security Groups [GH-4167]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting the restart policy on `docker_container` resources [GH-3761]
From 60158742737216bd5cb0ce49be92a0eeb6665f59 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Thu, 3 Dec 2015 10:48:34 -0600
Subject: [PATCH 179/664] providers/aws: Update DB Replica to honor storage
type
DB Replica can be of a different storage type, but we were skipping that part.
Note that they are created with the default (or the primary's?) storage type
initially, and then modified to be of the correct type.
---
builtin/providers/aws/resource_aws_db_instance.go | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go
index 3e9cb9d809..c078b57912 100644
--- a/builtin/providers/aws/resource_aws_db_instance.go
+++ b/builtin/providers/aws/resource_aws_db_instance.go
@@ -290,6 +290,10 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
opts.AvailabilityZone = aws.String(attr.(string))
}
+ if attr, ok := d.GetOk("storage_type"); ok {
+ opts.StorageType = aws.String(attr.(string))
+ }
+
if attr, ok := d.GetOk("publicly_accessible"); ok {
opts.PubliclyAccessible = aws.Bool(attr.(bool))
}
@@ -297,6 +301,8 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
if attr, ok := d.GetOk("db_subnet_group_name"); ok {
opts.DBSubnetGroupName = aws.String(attr.(string))
}
+
+ log.Printf("[DEBUG] DB Instance Replica create configuration: %#v", opts)
_, err := conn.CreateDBInstanceReadReplica(&opts)
if err != nil {
return fmt.Errorf("Error creating DB Instance: %s", err)
From 5a5df8b6bb393f55d25bca7167bb498e2bd9ffe0 Mon Sep 17 00:00:00 2001
From: Clint
Date: Wed, 9 Dec 2015 13:30:45 -0600
Subject: [PATCH 180/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 908a7a3b06..b2bed3c788 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -35,6 +35,7 @@ BUG FIXES:
* provider/aws: Skip `source_security_group_id` determination logic for Classic ELBs [GH-4075]
* provider/aws: Fix issue destroy Route 53 zone/record if it no longer exists [GH-4198]
* provider/aws: Fix issue force destroying a versioned S3 bucket [GH-4168]
+ * provider/aws: Update DB Replica to honor storage type #4155
* provider/aws: Fix issue creating AWS RDS replicas across regions [GH-4215]
* provider/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region [GH-4236]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
From 385adb734a8ce21b57cae2e5ac24d3525e7c2971 Mon Sep 17 00:00:00 2001
From: Clint
Date: Wed, 9 Dec 2015 13:31:41 -0600
Subject: [PATCH 181/664] Update CHANGELOG.md
---
CHANGELOG.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b2bed3c788..e527d75ba3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -35,7 +35,7 @@ BUG FIXES:
* provider/aws: Skip `source_security_group_id` determination logic for Classic ELBs [GH-4075]
* provider/aws: Fix issue destroy Route 53 zone/record if it no longer exists [GH-4198]
* provider/aws: Fix issue force destroying a versioned S3 bucket [GH-4168]
- * provider/aws: Update DB Replica to honor storage type #4155
+ * provider/aws: Update DB Replica to honor storage type [GH-4155]
* provider/aws: Fix issue creating AWS RDS replicas across regions [GH-4215]
* provider/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region [GH-4236]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
From 5b036fbf4f363b5acf6a328c656b1ca7282e157f Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Wed, 9 Dec 2015 14:05:18 -0600
Subject: [PATCH 182/664] provider/dyn: Add Dyn to the documentation sidebar
---
website/source/assets/stylesheets/_docs.scss | 1 +
website/source/layouts/docs.erb | 4 ++++
2 files changed, 5 insertions(+)
diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss
index 01ef3d1944..ed1a598d31 100755
--- a/website/source/assets/stylesheets/_docs.scss
+++ b/website/source/assets/stylesheets/_docs.scss
@@ -16,6 +16,7 @@ body.layout-digitalocean,
body.layout-dme,
body.layout-dnsimple,
body.layout-docker,
+body.layout-dyn,
body.layout-google,
body.layout-heroku,
body.layout-mailgun,
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb
index f784ad2d25..ff088e8e8c 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -161,6 +161,10 @@
Docker
+ >
+ Dyn
+
+
>
Google Cloud
From 921944c9564b425200215bc4b6e210ae790fc633 Mon Sep 17 00:00:00 2001
From: Buck Ryan
Date: Wed, 9 Dec 2015 16:58:05 -0500
Subject: [PATCH 183/664] Make the OpsWorks cookbooks SSH key write only
Fixes #3635
This follows the suggestion of @apparentlymart in
https://github.com/hashicorp/terraform/issues/3635#issuecomment-151000068
to fix the issue of OpsWorks stacks always complaining about the custom
cookbooks SSH key needing to be changed.
Functional tests:
* Created a new stack and gave it an SSH key. The key was written to
OpsWorks properly.
* Ran "plan" again and terraform indicated it needed to change the SSH
key, which is expected since terraform cannot read what the existing
SSH key is.
* Removed the key from my resource and this time, "plan" did not have
any changes. The `tfstate` file indicated the SSH key was "" (empty
string).
* Changed an unrelated property of the stack. Previously this was not
working for me due to terraform attempting to change the SSH key.
---
builtin/providers/aws/resource_aws_opsworks_stack.go | 3 ---
1 file changed, 3 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_opsworks_stack.go b/builtin/providers/aws/resource_aws_opsworks_stack.go
index 8eeda3f05b..08fe2ab3e3 100644
--- a/builtin/providers/aws/resource_aws_opsworks_stack.go
+++ b/builtin/providers/aws/resource_aws_opsworks_stack.go
@@ -231,9 +231,6 @@ func resourceAwsOpsworksSetStackCustomCookbooksSource(d *schema.ResourceData, v
if v.Revision != nil {
m["revision"] = *v.Revision
}
- if v.SshKey != nil {
- m["ssh_key"] = *v.SshKey
- }
nv = append(nv, m)
}
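
The write-only pattern this patch relies on, as a rough sketch (the names below are illustrative, not the provider's actual code): the key is declared in the schema and sent to the API on create/update, but Read never calls `d.Set` for it, so whatever value is already in state remains authoritative and the API's missing value no longer produces a perpetual diff.

```go
// Illustrative sketch of a write-only attribute in a helper/schema provider.
// customCookbooksSSHKeySchema and resourceStackRead are assumed names.
package example

import "github.com/hashicorp/terraform/helper/schema"

func customCookbooksSSHKeySchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeString,
		Optional: true, // sent to the API on create/update only
	}
}

func resourceStackRead(d *schema.ResourceData, meta interface{}) error {
	// Read other attributes from the API as usual, but intentionally do NOT
	// d.Set("ssh_key", ...): the API never returns the key, so writing the
	// (empty) response value back would always show up as a change in plans.
	return nil
}
```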
From 5c60f7f2c146952f752ee1375d9f1d28ea5f643f Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Wed, 9 Dec 2015 15:59:36 -0600
Subject: [PATCH 184/664] provider/aws: Trap Instance error from mismatched SG
IDs and Names
---
builtin/providers/aws/resource_aws_instance.go | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_instance.go b/builtin/providers/aws/resource_aws_instance.go
index f9f2a29d72..f6b7ef69ab 100644
--- a/builtin/providers/aws/resource_aws_instance.go
+++ b/builtin/providers/aws/resource_aws_instance.go
@@ -364,6 +364,13 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
time.Sleep(2 * time.Second)
continue
}
+
+ // Warn if the AWS Error involves group ids, to help identify situation
+ // where a user uses group ids in security_groups for the Default VPC.
+ // See https://github.com/hashicorp/terraform/issues/3798
+ if awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "groupId is invalid") {
+ return fmt.Errorf("Error launching instance, possible mismatch of Security Group IDs and Names. See AWS Instance docs here: %s.\n\n\tAWS Error: %s", "https://terraform.io/docs/providers/aws/r/instance.html", awsErr.Message())
+ }
}
break
}
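
For context, the `awsErr` value tested above comes from the usual aws-sdk-go pattern of type-asserting the returned error to `awserr.Error`, which exposes `Code()` and `Message()` for matching specific API failures. A minimal sketch (the function name is made up):

```go
// Sketch of matching a specific AWS API failure by error code and message.
package example

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func friendlyRunInstancesError(err error) error {
	if awsErr, ok := err.(awserr.Error); ok {
		// Group IDs passed where group names were expected trigger this
		// particular InvalidParameterValue message.
		if awsErr.Code() == "InvalidParameterValue" &&
			strings.Contains(awsErr.Message(), "groupId is invalid") {
			return fmt.Errorf(
				"possible mismatch of Security Group IDs and Names: %s", awsErr.Message())
		}
	}
	return err
}
```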
From e4dba8609805c890cd6035cf05cdd98634ca34c1 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Wed, 9 Dec 2015 16:39:03 -0600
Subject: [PATCH 185/664] provider/aws: Fix missing AMI issue with Launch
Configurations
---
builtin/providers/aws/resource_aws_launch_configuration.go | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_launch_configuration.go b/builtin/providers/aws/resource_aws_launch_configuration.go
index 1cc010634e..a257a10b44 100644
--- a/builtin/providers/aws/resource_aws_launch_configuration.go
+++ b/builtin/providers/aws/resource_aws_launch_configuration.go
@@ -386,6 +386,11 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
}
if dn, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn); err == nil {
+ if dn == nil {
+ return fmt.Errorf(
+ "Expected to find a Root Device name for AMI (%s), but got none",
+ d.Get("image_id").(string))
+ }
blockDevices = append(blockDevices, &autoscaling.BlockDeviceMapping{
DeviceName: dn,
Ebs: ebs,
From b6626eed577806b1d635616f0481603cbbde737e Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 9 Dec 2015 18:05:49 -0600
Subject: [PATCH 186/664] config: friendlier error message on resource arity
mismatch
closes #2072
---
config/loader_hcl.go | 5 +++--
config/loader_test.go | 11 +++++++++++
config/test-fixtures/resource-arity-mistake.tf | 5 +++++
3 files changed, 19 insertions(+), 2 deletions(-)
create mode 100644 config/test-fixtures/resource-arity-mistake.tf
diff --git a/config/loader_hcl.go b/config/loader_hcl.go
index c62ca37314..59fe817952 100644
--- a/config/loader_hcl.go
+++ b/config/loader_hcl.go
@@ -406,8 +406,9 @@ func loadResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
// all of the actual resources.
for _, item := range list.Items {
if len(item.Keys) != 2 {
- // TODO: bad error message
- return nil, fmt.Errorf("resource needs exactly 2 names")
+ return nil, fmt.Errorf(
+ "position %s: resource must be followed by exactly two strings, a type and a name",
+ item.Pos())
}
t := item.Keys[0].Token.Value().(string)
diff --git a/config/loader_test.go b/config/loader_test.go
index 4c291f6e98..6dbcfbede9 100644
--- a/config/loader_test.go
+++ b/config/loader_test.go
@@ -45,6 +45,17 @@ func TestLoadFile_badType(t *testing.T) {
}
}
+func TestLoadFile_resourceArityMistake(t *testing.T) {
+ _, err := LoadFile(filepath.Join(fixtureDir, "resource-arity-mistake.tf"))
+ if err == nil {
+ t.Fatal("should have error")
+ }
+ expected := "Error loading test-fixtures/resource-arity-mistake.tf: position 2:10: resource must be followed by exactly two strings, a type and a name"
+ if err.Error() != expected {
+ t.Fatalf("expected:\n%s\ngot:\n%s", expected, err)
+ }
+}
+
func TestLoadFileWindowsLineEndings(t *testing.T) {
testFile := filepath.Join(fixtureDir, "windows-line-endings.tf")
diff --git a/config/test-fixtures/resource-arity-mistake.tf b/config/test-fixtures/resource-arity-mistake.tf
new file mode 100644
index 0000000000..14e49ddf86
--- /dev/null
+++ b/config/test-fixtures/resource-arity-mistake.tf
@@ -0,0 +1,5 @@
+# I forgot the resource name!
+resource "aws_instance" {
+ ami = "ami-abc123"
+ instance_type = "t2.micro"
+}
From 07f398fb3b1d88174b03831b02827d3ca3f3b592 Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Wed, 9 Dec 2015 18:06:09 -0800
Subject: [PATCH 187/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e527d75ba3..65a0f95edb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,7 @@ BUG FIXES:
* provider/aws: Update DB Replica to honor storage type [GH-4155]
* provider/aws: Fix issue creating AWS RDS replicas across regions [GH-4215]
* provider/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region [GH-4236]
+ * provider/aws: Opsworks stack SSH key is write-only [GH-4241]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
* provider/vsphere: Create and attach additional disks before bootup [GH-4196]
From 6fbfd99ace68db5c3e328d79571085bdde5fdf7e Mon Sep 17 00:00:00 2001
From: Grisha Trubetskoy
Date: Wed, 9 Dec 2015 23:29:38 -0500
Subject: [PATCH 188/664] Fix a bug whereby AWS elasticsearch domain
access_policy will always appear changed because of a missing
normalizeJson().
---
builtin/providers/aws/resource_aws_elasticsearch_domain.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/builtin/providers/aws/resource_aws_elasticsearch_domain.go b/builtin/providers/aws/resource_aws_elasticsearch_domain.go
index 8f2d6c9c9f..5ccbacc282 100644
--- a/builtin/providers/aws/resource_aws_elasticsearch_domain.go
+++ b/builtin/providers/aws/resource_aws_elasticsearch_domain.go
@@ -247,7 +247,7 @@ func resourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface{}
ds := out.DomainStatus
- d.Set("access_policies", *ds.AccessPolicies)
+ d.Set("access_policies", normalizeJson(*ds.AccessPolicies))
err = d.Set("advanced_options", pointersMapToStringList(ds.AdvancedOptions))
if err != nil {
return err
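
normalizeJson itself is not part of this hunk; the general idea, sketched below under the assumption that it round-trips the document through encoding/json, is that re-marshalling strips insignificant whitespace and orders object keys deterministically, so two access policies that differ only in formatting no longer register as a change.

```go
// Rough sketch of JSON normalization for diff suppression; normalizeJSONSketch
// is an assumed name, not the provider's exact implementation.
package example

import "encoding/json"

func normalizeJSONSketch(s string) string {
	var v interface{}
	if err := json.Unmarshal([]byte(s), &v); err != nil {
		return s // leave invalid JSON untouched
	}
	b, err := json.Marshal(v)
	if err != nil {
		return s
	}
	// json.Marshal emits maps with sorted keys and no extra whitespace, so the
	// result is a canonical form suitable for string comparison in the state.
	return string(b)
}
```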
From 68ac4bceafcefe17e5ad25f87df1d6dd40e974fe Mon Sep 17 00:00:00 2001
From: Takaaki Furukawa
Date: Tue, 24 Nov 2015 09:53:11 +0900
Subject: [PATCH 189/664] provider/vsphere: Change ip_address parameter to
ipv4_address and ipv6_address for ipv6 support
---
.../resource_vsphere_virtual_machine.go | 111 +++++++++++++-----
.../resource_vsphere_virtual_machine_test.go | 4 +-
.../vsphere/r/virtual_machine.html.markdown | 34 ++++--
3 files changed, 106 insertions(+), 43 deletions(-)
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index 98a5234883..9c4db401f4 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -28,11 +28,13 @@ var DefaultDNSServers = []string{
}
type networkInterface struct {
- deviceName string
- label string
- ipAddress string
- subnetMask string
- adapterType string // TODO: Make "adapter_type" argument
+ deviceName string
+ label string
+ ipv4Address string
+ ipv4PrefixLength int
+ ipv6Address string
+ ipv6PrefixLength int
+ adapterType string // TODO: Make "adapter_type" argument
}
type hardDisk struct {
@@ -148,15 +150,40 @@ func resourceVSphereVirtualMachine() *schema.Resource {
},
"ip_address": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ Deprecated: "Please use ipv4_address",
+ },
+
+ "subnet_mask": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ Deprecated: "Please use ipv4_prefix_length",
+ },
+
+ "ipv4_address": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
+ },
+
+ "ipv4_prefix_length": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Computed: true,
+ },
+
+	// TODO: Make ipv6 parameters optional
+ "ipv6_address": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
ForceNew: true,
},
- "subnet_mask": &schema.Schema{
- Type: schema.TypeString,
- Optional: true,
+ "ipv6_prefix_length": &schema.Schema{
+ Type: schema.TypeInt,
Computed: true,
ForceNew: true,
},
@@ -267,10 +294,23 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
network := v.(map[string]interface{})
networks[i].label = network["label"].(string)
if v, ok := network["ip_address"].(string); ok && v != "" {
- networks[i].ipAddress = v
+ networks[i].ipv4Address = v
}
if v, ok := network["subnet_mask"].(string); ok && v != "" {
- networks[i].subnetMask = v
+ ip := net.ParseIP(v).To4()
+ if ip != nil {
+ mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3])
+ pl, _ := mask.Size()
+ networks[i].ipv4PrefixLength = pl
+ } else {
+ return fmt.Errorf("subnet_mask parameter is invalid.")
+ }
+ }
+ if v, ok := network["ipv4_address"].(string); ok && v != "" {
+ networks[i].ipv4Address = v
+ }
+ if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 {
+ networks[i].ipv4PrefixLength = v
}
}
vm.networkInterfaces = networks
@@ -321,7 +361,7 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
}
}
- if _, ok := d.GetOk("network_interface.0.ip_address"); !ok {
+ if _, ok := d.GetOk("network_interface.0.ipv4_address"); !ok {
if v, ok := d.GetOk("boot_delay"); ok {
stateConf := &resource.StateChangeConf{
Pending: []string{"pending"},
@@ -377,15 +417,22 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
log.Printf("[DEBUG] %#v", v.Network)
networkInterface := make(map[string]interface{})
networkInterface["label"] = v.Network
- if len(v.IpAddress) > 0 {
- log.Printf("[DEBUG] %#v", v.IpAddress[0])
- networkInterface["ip_address"] = v.IpAddress[0]
-
- m := net.CIDRMask(v.IpConfig.IpAddress[0].PrefixLength, 32)
- subnetMask := net.IPv4(m[0], m[1], m[2], m[3])
- networkInterface["subnet_mask"] = subnetMask.String()
- log.Printf("[DEBUG] %#v", subnetMask.String())
+ for _, ip := range v.IpConfig.IpAddress {
+ p := net.ParseIP(ip.IpAddress)
+ if p.To4() != nil {
+ log.Printf("[DEBUG] %#v", p.String())
+ log.Printf("[DEBUG] %#v", ip.PrefixLength)
+ networkInterface["ipv4_address"] = p.String()
+ networkInterface["ipv4_prefix_length"] = ip.PrefixLength
+ } else if p.To16() != nil {
+ log.Printf("[DEBUG] %#v", p.String())
+ log.Printf("[DEBUG] %#v", ip.PrefixLength)
+ networkInterface["ipv6_address"] = p.String()
+ networkInterface["ipv6_prefix_length"] = ip.PrefixLength
+ }
+ log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
}
+ log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
networkInterfaces = append(networkInterfaces, networkInterface)
}
}
@@ -420,14 +467,6 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{})
d.Set("cpu", mvm.Summary.Config.NumCpu)
d.Set("datastore", rootDatastore)
- // Initialize the connection info
- if len(networkInterfaces) > 0 {
- d.SetConnInfo(map[string]string{
- "type": "ssh",
- "host": networkInterfaces[0]["ip_address"].(string),
- })
- }
-
return nil
}
@@ -967,23 +1006,31 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
}
networkDevices = append(networkDevices, nd)
+ // TODO: IPv6 support
var ipSetting types.CustomizationIPSettings
- if network.ipAddress == "" {
+ if network.ipv4Address == "" {
ipSetting = types.CustomizationIPSettings{
Ip: &types.CustomizationDhcpIpGenerator{},
}
} else {
+ if network.ipv4PrefixLength == 0 {
+ return fmt.Errorf("Error: ipv4_prefix_length argument is empty.")
+ }
+ m := net.CIDRMask(network.ipv4PrefixLength, 32)
+ sm := net.IPv4(m[0], m[1], m[2], m[3])
+ subnetMask := sm.String()
log.Printf("[DEBUG] gateway: %v", vm.gateway)
- log.Printf("[DEBUG] ip address: %v", network.ipAddress)
- log.Printf("[DEBUG] subnet mask: %v", network.subnetMask)
+ log.Printf("[DEBUG] ipv4 address: %v", network.ipv4Address)
+ log.Printf("[DEBUG] ipv4 prefix length: %v", network.ipv4PrefixLength)
+ log.Printf("[DEBUG] ipv4 subnet mask: %v", subnetMask)
ipSetting = types.CustomizationIPSettings{
Gateway: []string{
vm.gateway,
},
Ip: &types.CustomizationFixedIp{
- IpAddress: network.ipAddress,
+ IpAddress: network.ipv4Address,
},
- SubnetMask: network.subnetMask,
+ SubnetMask: subnetMask,
}
}
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
index 66d6ea44f8..102db69939 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
@@ -198,8 +198,8 @@ resource "vsphere_virtual_machine" "foo" {
gateway = "%s"
network_interface {
label = "%s"
- ip_address = "%s"
- subnet_mask = "255.255.255.0"
+ ipv4_address = "%s"
+ ipv4_prefix_length = 24
}
disk {
%s
diff --git a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
index 19421aaa9c..4c704bf17e 100644
--- a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
+++ b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
@@ -49,21 +49,37 @@ The following arguments are supported:
* `disk` - (Required) Configures virtual disks; see [Disks](#disks) below for details
* `boot_delay` - (Optional) Time in seconds to wait for machine network to be ready.
-
-## Network Interfaces
-
-Network interfaces support the following attributes:
+The `network_interface` block supports:
* `label` - (Required) Label to assign to this network interface
-* `ip_address` - (Optional) Static IP to assign to this network interface. Interface will use DHCP if this is left blank. Currently only IPv4 IP addresses are supported.
-* `subnet_mask` - (Optional) Subnet mask to use when statically assigning an IP.
+* `ipv4_address` - (Optional) Static IP to assign to this network interface. Interface will use DHCP if this is left blank. Currently only IPv4 IP addresses are supported.
+* `ipv4_prefix_length` - (Optional) Prefix length to use when statically assigning an IP.
-
-## Disks
+The following arguments are maintained for backwards compatibility and may be
+removed in a future version:
-Disks support the following attributes:
+* `ip_address` - _Deprecated, please use `ipv4_address` instead_.
+* `subnet_mask` - _Deprecated, please use `ipv4_prefix_length` instead_.
+
+
+The `disk` block supports:
* `template` - (Required if size not provided) Template for this disk.
* `datastore` - (Optional) Datastore for this disk
* `size` - (Required if template not provided) Size of this disk (in GB).
* `iops` - (Optional) Number of virtual iops to allocate for this disk.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The instance ID.
+* `name` - See Argument Reference above.
+* `vcpu` - See Argument Reference above.
+* `memory` - See Argument Reference above.
+* `datacenter` - See Argument Reference above.
+* `network_interface/label` - See Argument Reference above.
+* `network_interface/ipv4_address` - See Argument Reference above.
+* `network_interface/ipv4_prefix_length` - See Argument Reference above.
+* `network_interface/ipv6_address` - Assigned static IPv6 address.
+* `network_interface/ipv6_prefix_length` - Prefix length of assigned static IPv6 address.
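
The backwards-compatibility path above hinges on two conversions with the standard net package: a deprecated dotted-quad `subnet_mask` becomes an `ipv4_prefix_length` on the way in, and the prefix length is expanded back to a dotted mask when building the vSphere customization spec. A self-contained sketch (helper names are made up):

```go
// Illustrative helpers for converting between dotted-quad masks and prefix lengths.
package example

import (
	"fmt"
	"net"
)

// maskToPrefixLength converts e.g. "255.255.255.0" to 24.
func maskToPrefixLength(mask string) (int, error) {
	ip := net.ParseIP(mask).To4()
	if ip == nil {
		return 0, fmt.Errorf("subnet_mask %q is not a valid IPv4 mask", mask)
	}
	ones, _ := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3]).Size()
	return ones, nil
}

// prefixLengthToMask converts e.g. 24 back to "255.255.255.0".
func prefixLengthToMask(prefix int) string {
	m := net.CIDRMask(prefix, 32)
	return net.IPv4(m[0], m[1], m[2], m[3]).String()
}
```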
From b23e6ed57e2f6c2ca20ee8f3994e248626d2a1e9 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Thu, 10 Dec 2015 08:38:50 -0600
Subject: [PATCH 190/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 65a0f95edb..ac9177b750 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,7 @@ IMPROVEMENTS:
* core: Change set internals for performance improvements [GH-3992]
* core: Support HTTP basic auth in consul remote state [GH-4166]
+ * core: Improve error message on resource arity mismatch [GH-4244]
* provider/aws: Add `placement_group` as an option for `aws_autoscaling_group` [GH-3704]
* provider/aws: Add support for DynamoDB Table StreamSpecifications [GH-4208]
* provider/aws: Add `name_prefix` to Security Groups [GH-4167]
From 21aa9dd5bac85b77b0d08e4f51dea63250cd7b59 Mon Sep 17 00:00:00 2001
From: Clint
Date: Thu, 10 Dec 2015 08:56:17 -0600
Subject: [PATCH 191/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ac9177b750..479d0ce4ee 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -39,6 +39,7 @@ BUG FIXES:
* provider/aws: Update DB Replica to honor storage type [GH-4155]
* provider/aws: Fix issue creating AWS RDS replicas across regions [GH-4215]
* provider/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region [GH-4236]
+ * provider/aws: Fix missing AMI issue with Launch Configurations [GH-4242]
* provider/aws: Opsworks stack SSH key is write-only [GH-4241]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
From b11966bca65b28e611179649d9c0d2d40a5f79a9 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Thu, 10 Dec 2015 12:02:55 -0600
Subject: [PATCH 192/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 479d0ce4ee..d03fbd5b2a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,6 +25,7 @@ IMPROVEMENTS:
* provider/vsphere: Rename vcenter_server config parameter to something clearer [GH-3718]
 * provider/vsphere: Make allow_unverified_ssl configurable on the provider [GH-3933]
* provider/vsphere: Add folder handling for folder-qualified vm names [GH-3939]
+ * provider/vsphere: Change ip_address parameter for ipv6 support [GH-4035]
* provider/openstack: Increase instance timeout from 10 to 30 minutes [GH-4223]
BUG FIXES:
From cfea7c8e2d3d189adf453ca02c67801892941107 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 2 Dec 2015 14:08:14 -0600
Subject: [PATCH 193/664] provider/vsphere: restore vcenter_server as
deprecated field
As promised in my comment in #3718, this preserves backwards
compatibility while warning users of the new proper name for the field.
---
builtin/providers/vsphere/provider.go | 29 +++++++++++++++++++++++----
1 file changed, 25 insertions(+), 4 deletions(-)
diff --git a/builtin/providers/vsphere/provider.go b/builtin/providers/vsphere/provider.go
index febd39ecd5..5c98d31c01 100644
--- a/builtin/providers/vsphere/provider.go
+++ b/builtin/providers/vsphere/provider.go
@@ -1,6 +1,8 @@
package vsphere
import (
+ "fmt"
+
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
)
@@ -25,21 +27,26 @@ func Provider() terraform.ResourceProvider {
"vsphere_server": &schema.Schema{
Type: schema.TypeString,
- Required: true,
+ Optional: true,
DefaultFunc: schema.EnvDefaultFunc("VSPHERE_SERVER", nil),
Description: "The vSphere Server name for vSphere API operations.",
},
-
"allow_unverified_ssl": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("VSPHERE_ALLOW_UNVERIFIED_SSL", false),
Description: "If set, VMware vSphere client will permit unverifiable SSL certificates.",
},
+ "vcenter_server": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ DefaultFunc: schema.EnvDefaultFunc("VSPHERE_VCENTER", nil),
+ Deprecated: "This field has been renamed to vsphere_server.",
+ },
},
ResourcesMap: map[string]*schema.Resource{
- "vsphere_folder": resourceVSphereFolder(),
+ "vsphere_folder": resourceVSphereFolder(),
"vsphere_virtual_machine": resourceVSphereVirtualMachine(),
},
@@ -48,11 +55,25 @@ func Provider() terraform.ResourceProvider {
}
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
+ // Handle backcompat support for vcenter_server; once that is removed,
+ // vsphere_server can just become a Required field that is referenced inline
+ // in Config below.
+ server := d.Get("vsphere_server").(string)
+
+ if server == "" {
+ server = d.Get("vcenter_server").(string)
+ }
+
+ if server == "" {
+ return nil, fmt.Errorf(
+ "One of vsphere_server or [deprecated] vcenter_server must be provided.")
+ }
+
config := Config{
User: d.Get("user").(string),
Password: d.Get("password").(string),
- VSphereServer: d.Get("vsphere_server").(string),
InsecureFlag: d.Get("allow_unverified_ssl").(bool),
+ VSphereServer: server,
}
return config.Client()
From e976d6e787fff2c5baf9f04217137d17df78b54e Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Thu, 10 Dec 2015 13:09:57 -0500
Subject: [PATCH 194/664] testing: Use a copy of pre-destroy state in destroy
check
In the acceptance testing framework, it is necessary to provide a copy
of the state _before_ the destroy is applied to the check in order that
it can loop over resources to verify their destruction. This patch makes
a deep copy of the state prior to applying test steps which have the
Destroy option set and then passes that to the destroy check.
---
helper/resource/testing.go | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/helper/resource/testing.go b/helper/resource/testing.go
index 18a40553b2..db74d8d2ee 100644
--- a/helper/resource/testing.go
+++ b/helper/resource/testing.go
@@ -247,6 +247,11 @@ func testStep(
log.Printf("[WARN] Test: Step plan: %s", p)
}
+ // We need to keep a copy of the state prior to destroying
+ // such that destroy steps can verify their behaviour in the check
+ // function
+ stateBeforeApplication := state.DeepCopy()
+
// Apply!
state, err = ctx.Apply()
if err != nil {
@@ -255,8 +260,14 @@ func testStep(
// Check! Excitement!
if step.Check != nil {
- if err := step.Check(state); err != nil {
- return state, fmt.Errorf("Check failed: %s", err)
+ if step.Destroy {
+ if err := step.Check(stateBeforeApplication); err != nil {
+ return state, fmt.Errorf("Check failed: %s", err)
+ }
+ } else {
+ if err := step.Check(state); err != nil {
+ return state, fmt.Errorf("Check failed: %s", err)
+ }
}
}
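
The reason the pre-destroy copy matters: a destroy step's Check typically walks the resources that existed before the destroy and asserts each one is gone remotely, and the post-apply state would leave it nothing to iterate over. A rough sketch of such a check (checkAllGone and remoteStillExists are assumed names):

```go
// Sketch of a destroy-step check that needs the pre-destroy resource list.
package example

import (
	"fmt"

	"github.com/hashicorp/terraform/terraform"
)

func checkAllGone(remoteStillExists func(id string) bool) func(*terraform.State) error {
	return func(s *terraform.State) error {
		// Iterate the resources recorded in the (pre-destroy) state and verify
		// that none of them still exist on the remote API.
		for name, rs := range s.RootModule().Resources {
			if rs.Primary == nil {
				continue
			}
			if remoteStillExists(rs.Primary.ID) {
				return fmt.Errorf("%s (%s) still exists", name, rs.Primary.ID)
			}
		}
		return nil
	}
}
```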
From a6e8590cd3b794efbaff5ef348eddd55434a3a99 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Thu, 10 Dec 2015 14:09:47 -0500
Subject: [PATCH 195/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d03fbd5b2a..78cd8a634c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -42,6 +42,7 @@ BUG FIXES:
* provider/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region [GH-4236]
* provider/aws: Fix missing AMI issue with Launch Configurations [GH-4242]
* provider/aws: Opsworks stack SSH key is write-only [GH-4241]
+ * provider/aws: Fix issue with ElasticSearch Domain `access_policies` always appear changed [GH-4245]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
* provider/vsphere: Create and attach additional disks before bootup [GH-4196]
From 5796b133730973d16ad2143d4aca1e081d13f19e Mon Sep 17 00:00:00 2001
From: stack72
Date: Tue, 10 Nov 2015 23:05:07 +0000
Subject: [PATCH 196/664] Adding skip_final_snapshot bool to the db_instance.
This allows us to specify directly whether a snapshot is needed, rather
than checking for an empty string.
---
.../providers/aws/resource_aws_db_instance.go | 16 +++++++++++-----
.../providers/aws/r/db_instance.html.markdown | 1 +
2 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go
index c078b57912..ac6205cd2c 100644
--- a/builtin/providers/aws/resource_aws_db_instance.go
+++ b/builtin/providers/aws/resource_aws_db_instance.go
@@ -188,6 +188,12 @@ func resourceAwsDbInstance() *schema.Resource {
},
},
+ "skip_final_snapshot": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: false,
+ },
+
"copy_tags_to_snapshot": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
@@ -619,11 +625,11 @@ func resourceAwsDbInstanceDelete(d *schema.ResourceData, meta interface{}) error
opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())}
- finalSnapshot := d.Get("final_snapshot_identifier").(string)
- if finalSnapshot == "" {
- opts.SkipFinalSnapshot = aws.Bool(true)
- } else {
- opts.FinalDBSnapshotIdentifier = aws.String(finalSnapshot)
+ skipFinalSnapshot := d.Get("skip_final_snapshot").(bool)
+ opts.SkipFinalSnapshot = aws.Bool(skipFinalSnapshot)
+
+ if name, present := d.GetOk("final_snapshot_identifier"); present && !skipFinalSnapshot {
+ opts.FinalDBSnapshotIdentifier = aws.String(name.(string))
}
log.Printf("[DEBUG] DB Instance destroy configuration: %v", opts)
diff --git a/website/source/docs/providers/aws/r/db_instance.html.markdown b/website/source/docs/providers/aws/r/db_instance.html.markdown
index 7f36f33858..8cf95de3a7 100644
--- a/website/source/docs/providers/aws/r/db_instance.html.markdown
+++ b/website/source/docs/providers/aws/r/db_instance.html.markdown
@@ -45,6 +45,7 @@ The following arguments are supported:
* `final_snapshot_identifier` - (Optional) The name of your final DB snapshot
when this DB instance is deleted. If omitted, no final snapshot will be
made.
+* `skip_final_snapshot` - (Optional) Determines whether a final DB snapshot is created before the DB instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB snapshot is created before the DB instance is deleted. Default is false.
* `copy_tags_to_snapshot` – (Optional, boolean) On delete, copy all Instance `tags` to
the final snapshot (if `final_snapshot_identifier` is specified). Default
`false`
From 6082e3e732cb4a1f7405e27757a4217ed61b9446 Mon Sep 17 00:00:00 2001
From: stack72
Date: Wed, 11 Nov 2015 16:39:24 +0000
Subject: [PATCH 197/664] Changing the db_instance to throw an error if a final
snapshot is required but no identifier is given
---
builtin/providers/aws/resource_aws_db_instance.go | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go
index ac6205cd2c..4699112bcd 100644
--- a/builtin/providers/aws/resource_aws_db_instance.go
+++ b/builtin/providers/aws/resource_aws_db_instance.go
@@ -628,8 +628,12 @@ func resourceAwsDbInstanceDelete(d *schema.ResourceData, meta interface{}) error
skipFinalSnapshot := d.Get("skip_final_snapshot").(bool)
opts.SkipFinalSnapshot = aws.Bool(skipFinalSnapshot)
- if name, present := d.GetOk("final_snapshot_identifier"); present && !skipFinalSnapshot {
- opts.FinalDBSnapshotIdentifier = aws.String(name.(string))
+ if !skipFinalSnapshot {
+ if name, present := d.GetOk("final_snapshot_identifier"); present {
+ opts.FinalDBSnapshotIdentifier = aws.String(name.(string))
+ } else {
+ return fmt.Errorf("DB Instance FinalSnapshotIdentifier is required when a final snapshot is required")
+ }
}
log.Printf("[DEBUG] DB Instance destroy configuration: %v", opts)
From 69c839d2ae2240cb0a1970893cc8e71f491d8b9a Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Thu, 10 Dec 2015 15:33:38 -0500
Subject: [PATCH 198/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 78cd8a634c..4fbcdc8a09 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@ IMPROVEMENTS:
* provider/aws: Add `placement_group` as an option for `aws_autoscaling_group` [GH-3704]
* provider/aws: Add support for DynamoDB Table StreamSpecifications [GH-4208]
* provider/aws: Add `name_prefix` to Security Groups [GH-4167]
+ * provider/aws: Add support for removing nodes from `aws_elasticache_cluster` [GH-3809]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting the restart policy on `docker_container` resources [GH-3761]
From c5d066b828a4d0bea7ab6040034b2ddf87db912c Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Thu, 10 Dec 2015 15:35:46 -0500
Subject: [PATCH 199/664] provider/aws: Add note about cluster size reduction
---
.../docs/providers/aws/r/elasticache_cluster.html.markdown | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown
index e39d6172a7..9d547b5dc1 100644
--- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown
+++ b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown
@@ -47,7 +47,8 @@ supported node types
* `num_cache_nodes` – (Required) The initial number of cache nodes that the
cache cluster will have. For Redis, this value must be 1. For Memcache, this
-value must be between 1 and 20
+value must be between 1 and 20. If this number is reduced on subsequent runs,
+the highest numbered nodes will be removed.
* `parameter_group_name` – (Required) Name of the parameter group to associate
with this cache cluster
From 2b0c7aa4e9a039734ca4193c20594ac0c9df6955 Mon Sep 17 00:00:00 2001
From: stack72
Date: Wed, 11 Nov 2015 17:24:59 +0000
Subject: [PATCH 200/664] Making the changes to db_instance skip_final_snapshot
based on the feedback from @catsby
---
.../providers/aws/resource_aws_db_instance.go | 2 +-
.../aws/resource_aws_db_instance_test.go | 183 ++++++++++++++++++
.../providers/aws/r/db_instance.html.markdown | 2 +-
3 files changed, 185 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go
index 4699112bcd..a034b7953a 100644
--- a/builtin/providers/aws/resource_aws_db_instance.go
+++ b/builtin/providers/aws/resource_aws_db_instance.go
@@ -191,7 +191,7 @@ func resourceAwsDbInstance() *schema.Resource {
"skip_final_snapshot": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
- Default: false,
+ Default: true,
},
"copy_tags_to_snapshot": &schema.Schema{
diff --git a/builtin/providers/aws/resource_aws_db_instance_test.go b/builtin/providers/aws/resource_aws_db_instance_test.go
index a2c2f69cad..74ed455f72 100644
--- a/builtin/providers/aws/resource_aws_db_instance_test.go
+++ b/builtin/providers/aws/resource_aws_db_instance_test.go
@@ -12,6 +12,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/rds"
+ "log"
)
func TestAccAWSDBInstance_basic(t *testing.T) {
@@ -67,6 +68,42 @@ func TestAccAWSDBInstanceReplica(t *testing.T) {
})
}
+func TestAccAWSDBInstanceSnapshot(t *testing.T) {
+ var snap rds.DBInstance
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSDBInstanceSnapshot,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccSnapshotInstanceConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSDBInstanceExists("aws_db_instance.snapshot", &snap),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAWSDBInstanceNoSnapshot(t *testing.T) {
+ var nosnap rds.DBInstance
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSDBInstanceNoSnapshot,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccNoSnapshotInstanceConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSDBInstanceExists("aws_db_instance.no_snapshot", &nosnap),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckAWSDBInstanceDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).rdsconn
@@ -132,6 +169,104 @@ func testAccCheckAWSDBInstanceReplicaAttributes(source, replica *rds.DBInstance)
}
}
+func testAccCheckAWSDBInstanceSnapshot(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).rdsconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_db_instance" {
+ continue
+ }
+
+ var err error
+ resp, err := conn.DescribeDBInstances(
+ &rds.DescribeDBInstancesInput{
+ DBInstanceIdentifier: aws.String(rs.Primary.ID),
+ })
+
+ if err != nil {
+ newerr, _ := err.(awserr.Error)
+ if newerr.Code() != "DBInstanceNotFound" {
+ return err
+ }
+
+ } else {
+ if len(resp.DBInstances) != 0 &&
+ *resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID {
+ return fmt.Errorf("DB Instance still exists")
+ }
+ }
+
+ log.Printf("[INFO] Trying to locate the DBInstance Final Snapshot")
+ snapshot_identifier := "foobarbaz-test-terraform-final-snapshot-1"
+ _, snapErr := conn.DescribeDBSnapshots(
+ &rds.DescribeDBSnapshotsInput{
+ DBSnapshotIdentifier: aws.String(snapshot_identifier),
+ })
+
+ if snapErr != nil {
+ newerr, _ := snapErr.(awserr.Error)
+ if newerr.Code() == "DBSnapshotNotFound" {
+ return fmt.Errorf("Snapshot %s not found", snapshot_identifier)
+ }
+ } else {
+ log.Printf("[INFO] Deleting the Snapshot %s", snapshot_identifier)
+ _, snapDeleteErr := conn.DeleteDBSnapshot(
+ &rds.DeleteDBSnapshotInput{
+ DBSnapshotIdentifier: aws.String(snapshot_identifier),
+ })
+ if snapDeleteErr != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func testAccCheckAWSDBInstanceNoSnapshot(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).rdsconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_db_instance" {
+ continue
+ }
+
+ var err error
+ resp, err := conn.DescribeDBInstances(
+ &rds.DescribeDBInstancesInput{
+ DBInstanceIdentifier: aws.String(rs.Primary.ID),
+ })
+
+ if err != nil {
+ newerr, _ := err.(awserr.Error)
+ if newerr.Code() != "DBInstanceNotFound" {
+ return err
+ }
+
+ } else {
+ if len(resp.DBInstances) != 0 &&
+ *resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID {
+ return fmt.Errorf("DB Instance still exists")
+ }
+ }
+
+ snapshot_identifier := "foobarbaz-test-terraform-final-snapshot-2"
+ _, snapErr := conn.DescribeDBSnapshots(
+ &rds.DescribeDBSnapshotsInput{
+ DBSnapshotIdentifier: aws.String(snapshot_identifier),
+ })
+
+ if snapErr != nil {
+ newerr, _ := snapErr.(awserr.Error)
+ if newerr.Code() != "DBSnapshotNotFound" {
+ return fmt.Errorf("Snapshot %s found and it shouldn't have been", snapshot_identifier)
+ }
+ }
+ }
+
+ return nil
+}
+
func testAccCheckAWSDBInstanceExists(n string, v *rds.DBInstance) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
@@ -226,3 +361,51 @@ func testAccReplicaInstanceConfig(val int) string {
}
`, val, val)
}
+
+var testAccSnapshotInstanceConfig = `
+provider "aws" {
+ region = "us-east-1"
+}
+resource "aws_db_instance" "snapshot" {
+ identifier = "foobarbaz-test-terraform-snapshot-1"
+
+ allocated_storage = 5
+ engine = "mysql"
+ engine_version = "5.6.21"
+ instance_class = "db.t1.micro"
+ name = "baz"
+ password = "barbarbarbar"
+ username = "foo"
+ security_group_names = ["default"]
+ backup_retention_period = 1
+
+ parameter_group_name = "default.mysql5.6"
+
+ skip_final_snapshot = false
+ final_snapshot_identifier = "foobarbaz-test-terraform-final-snapshot-1"
+}
+`
+
+var testAccNoSnapshotInstanceConfig = `
+provider "aws" {
+ region = "us-east-1"
+}
+resource "aws_db_instance" "no_snapshot" {
+ identifier = "foobarbaz-test-terraform-snapshot-2"
+
+ allocated_storage = 5
+ engine = "mysql"
+ engine_version = "5.6.21"
+ instance_class = "db.t1.micro"
+ name = "baz"
+ password = "barbarbarbar"
+ username = "foo"
+ security_group_names = ["default"]
+ backup_retention_period = 1
+
+ parameter_group_name = "default.mysql5.6"
+
+ skip_final_snapshot = true
+ final_snapshot_identifier = "foobarbaz-test-terraform-final-snapshot-2"
+}
+`
diff --git a/website/source/docs/providers/aws/r/db_instance.html.markdown b/website/source/docs/providers/aws/r/db_instance.html.markdown
index 8cf95de3a7..2ff282d105 100644
--- a/website/source/docs/providers/aws/r/db_instance.html.markdown
+++ b/website/source/docs/providers/aws/r/db_instance.html.markdown
@@ -45,7 +45,7 @@ The following arguments are supported:
* `final_snapshot_identifier` - (Optional) The name of your final DB snapshot
when this DB instance is deleted. If omitted, no final snapshot will be
made.
-* `skip_final_snapshot` - (Optional) Determines whether a final DB snapshot is created before the DB instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB snapshot is created before the DB instance is deleted. Default is false.
+* `skip_final_snapshot` - (Optional) Determines whether a final DB snapshot is created before the DB instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB snapshot is created before the DB instance is deleted. Default is true.
* `copy_tags_to_snapshot` – (Optional, boolean) On delete, copy all Instance `tags` to
the final snapshot (if `final_snapshot_identifier` is specified). Default
`false`
From 3330da00b997968a2272bc99be101019e397bc1f Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Thu, 10 Dec 2015 18:39:47 -0500
Subject: [PATCH 201/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4fbcdc8a09..2c23a1c90e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@ IMPROVEMENTS:
* provider/aws: Add support for DynamoDB Table StreamSpecifications [GH-4208]
* provider/aws: Add `name_prefix` to Security Groups [GH-4167]
* provider/aws: Add support for removing nodes to `aws_elasticache_cluster` [GH-3809]
+ * provider/aws: Add support for `skip_final_snapshot` to `aws_db_instance` [GH-3853]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting the restart policy on `docker_container` resources [GH-3761]
From a7c8d0714eabbcbb5eecf1be3042c6abc0c373b9 Mon Sep 17 00:00:00 2001
From: aboschke
Date: Fri, 11 Dec 2015 01:24:04 -0800
Subject: [PATCH 202/664] Trivial change for AWS upgraded T2 instance type and
AMI ami-5189a661
---
.../source/intro/getting-started/build.html.md | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/website/source/intro/getting-started/build.html.md b/website/source/intro/getting-started/build.html.md
index a40369fade..0c380d4fb3 100644
--- a/website/source/intro/getting-started/build.html.md
+++ b/website/source/intro/getting-started/build.html.md
@@ -59,8 +59,8 @@ provider "aws" {
}
resource "aws_instance" "example" {
- ami = "ami-408c7f28"
- instance_type = "t1.micro"
+ ami = "ami-5189a661"
+ instance_type = "t2.micro"
}
```
@@ -95,7 +95,7 @@ Within the resource block itself is configuration for that
resource. This is dependent on each resource provider and
is fully documented within our
[providers reference](/docs/providers/index.html). For our EC2 instance, we specify
-an AMI for Ubuntu, and request a "t1.micro" instance so we
+an AMI for Ubuntu, and request a "t2.micro" instance so we
qualify under the free tier.
## Execution Plan
@@ -111,9 +111,9 @@ $ terraform plan
...
+ aws_instance.example
- ami: "" => "ami-408c7f28"
+ ami: "" => "ami-5189a661"
availability_zone: "" => ""
- instance_type: "" => "t1.micro"
+ instance_type: "" => "t2.micro"
key_name: "" => ""
private_dns: "" => ""
private_ip: "" => ""
@@ -148,8 +148,8 @@ since Terraform waits for the EC2 instance to become available.
```
$ terraform apply
aws_instance.example: Creating...
- ami: "" => "ami-408c7f28"
- instance_type: "" => "t1.micro"
+ ami: "" => "ami-5189a661"
+ instance_type: "" => "t2.micro"
Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
@@ -172,9 +172,9 @@ You can inspect the state using `terraform show`:
$ terraform show
aws_instance.example:
id = i-e60900cd
- ami = ami-408c7f28
+ ami = ami-5189a661
availability_zone = us-east-1c
- instance_type = t1.micro
+ instance_type = t2.micro
key_name =
private_dns = domU-12-31-39-12-38-AB.compute-1.internal
private_ip = 10.200.59.89
From d84d6796c46c97257c4bde57a3904c9bbdd2fa23 Mon Sep 17 00:00:00 2001
From: stack72
Date: Fri, 11 Dec 2015 00:18:59 +0000
Subject: [PATCH 203/664] Initial CRUD work for the Autoscaling Group Scheduled
Actions
---
builtin/providers/aws/provider.go | 1 +
.../aws/resource_aws_autoscaling_schedule.go | 117 ++++++++++++++++++
2 files changed, 118 insertions(+)
create mode 100644 builtin/providers/aws/resource_aws_autoscaling_schedule.go
diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index c123cc184c..04f94cc347 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -174,6 +174,7 @@ func Provider() terraform.ResourceProvider {
"aws_autoscaling_group": resourceAwsAutoscalingGroup(),
"aws_autoscaling_notification": resourceAwsAutoscalingNotification(),
"aws_autoscaling_policy": resourceAwsAutoscalingPolicy(),
+ "aws_autoscaling_schedule": resourceAwsAutoscalingSchedule(),
"aws_cloudformation_stack": resourceAwsCloudFormationStack(),
"aws_cloudtrail": resourceAwsCloudTrail(),
"aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(),
diff --git a/builtin/providers/aws/resource_aws_autoscaling_schedule.go b/builtin/providers/aws/resource_aws_autoscaling_schedule.go
new file mode 100644
index 0000000000..4de57333c6
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_autoscaling_schedule.go
@@ -0,0 +1,117 @@
+package aws
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/autoscaling"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsAutoscalingSchedule() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsAutoscalingScheduleCreate,
+ Read: resourceAwsAutoscalingScheduleRead,
+ Update: resourceAwsAutoscalingScheduleCreate,
+ Delete: resourceAwsAutoscalingScheduleDelete,
+
+ Schema: map[string]*schema.Schema{
+ "scheduled_action_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "autoscaling_group_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+ "end_time": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+ "recurrence": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+ "min_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Computed: true,
+ },
+ "max_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Computed: true,
+ },
+ "desired_capacity": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func resourceAwsAutoscalingScheduleCreate(d *schema.ResourceData, meta interface{}) error {
+ autoscalingconn := meta.(*AWSClient).autoscalingconn
+ params := autoscaling.PutScheduledUpdateGroupActionInput{
+ AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
+ ScheduledActionName: aws.String(d.Get("scheduled_action_name").(string)),
+ }
+
+ if attr, ok := d.GetOk("start_time"); ok {
+ params.StartTime = aws.Time()
+ }
+
+ if attr, ok := d.GetOk("min_size"); ok {
+ params.MinSize = aws.Int(int64(attr.(int)))
+ }
+
+ if attr, ok := d.GetOk("max_size"); ok {
+ params.MaxSize = aws.Int(int64(attr.(int)))
+ }
+
+ if attr, ok := d.GetOk("desired_capacity"); ok {
+ params.DesiredCapacity = aws.Int(int64(attr.(int)))
+ }
+
+ log.Printf("[INFO] Creating Autoscaling Scheduled Action: %s", d.Get("scheduled_action_name").(string))
+ _, err := autoscalingconn.PutScheduledUpdateGroupAction(params)
+ if err != nil {
+ return fmt.Errorf("Error Creating Autoscaling Scheduled Action: %s", err.Error())
+ }
+
+ d.SetId(d.Get("scheduled_action_name").(string))
+
+ return resourceAwsAutoscalingScheduleRead(d, meta)
+}
+
+func resourceAwsAutoscalingScheduleRead(d *schema.ResourceData, meta interface{}) error {
+ return nil
+}
+
+func resourceAwsAutoscalingScheduleDelete(d *schema.ResourceData, meta interface{}) error {
+ autoscalingconn := meta.(*AWSClient).autoscalingconn
+
+ params := autoscaling.DeleteScheduledActionInput{
+ AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
+ ScheduledActionName: aws.String(d.Get("scheduled_action_name").(string)),
+ }
+
+ log.Printf("[INFO] Deleting Autoscaling Scheduled Action: %s", d.Get("scheduled_action_name").(string))
+ _, err := autoscalingconn.DeleteScheduledAction(params)
+ if err != nil {
+ return fmt.Errorf("Error deleting Autoscaling Scheduled Action: %s", err.Error())
+ }
+
+ return nil
+}
From 67c1971e63b9bfcff152b8979ad9a5ee9c91e0c7 Mon Sep 17 00:00:00 2001
From: stack72
Date: Fri, 11 Dec 2015 12:07:50 +0000
Subject: [PATCH 204/664] Adding support to DB Parameter Group for Tags
---
.../aws/resource_aws_db_parameter_group.go | 56 +++++++++++++++++++
.../resource_aws_db_parameter_group_test.go | 13 ++++-
.../aws/r/db_parameter_group.html.markdown | 2 +
3 files changed, 70 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/aws/resource_aws_db_parameter_group.go b/builtin/providers/aws/resource_aws_db_parameter_group.go
index b4f07e43de..0513be0b54 100644
--- a/builtin/providers/aws/resource_aws_db_parameter_group.go
+++ b/builtin/providers/aws/resource_aws_db_parameter_group.go
@@ -14,6 +14,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/rds"
)
@@ -24,6 +25,10 @@ func resourceAwsDbParameterGroup() *schema.Resource {
Update: resourceAwsDbParameterGroupUpdate,
Delete: resourceAwsDbParameterGroupDelete,
Schema: map[string]*schema.Schema{
+ "arn": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
"name": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
@@ -71,17 +76,21 @@ func resourceAwsDbParameterGroup() *schema.Resource {
},
Set: resourceAwsDbParameterHash,
},
+
+ "tags": tagsSchema(),
},
}
}
func resourceAwsDbParameterGroupCreate(d *schema.ResourceData, meta interface{}) error {
rdsconn := meta.(*AWSClient).rdsconn
+ tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
createOpts := rds.CreateDBParameterGroupInput{
DBParameterGroupName: aws.String(d.Get("name").(string)),
DBParameterGroupFamily: aws.String(d.Get("family").(string)),
Description: aws.String(d.Get("description").(string)),
+ Tags: tags,
}
log.Printf("[DEBUG] Create DB Parameter Group: %#v", createOpts)
@@ -136,6 +145,31 @@ func resourceAwsDbParameterGroupRead(d *schema.ResourceData, meta interface{}) e
d.Set("parameter", flattenParameters(describeParametersResp.Parameters))
+ paramGroup := describeResp.DBParameterGroups[0]
+ arn, err := buildRDSPGARN(d, meta)
+ if err != nil {
+ name := ""
+ if paramGroup.DBParameterGroupName != nil && *paramGroup.DBParameterGroupName != "" {
+ name = *paramGroup.DBParameterGroupName
+ }
+ log.Printf("[DEBUG] Error building ARN for DB Parameter Group, not setting Tags for Param Group %s", name)
+ } else {
+ d.Set("arn", arn)
+ resp, err := rdsconn.ListTagsForResource(&rds.ListTagsForResourceInput{
+ ResourceName: aws.String(arn),
+ })
+
+ if err != nil {
+ log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
+ }
+
+ var dt []*rds.Tag
+ if len(resp.TagList) > 0 {
+ dt = resp.TagList
+ }
+ d.Set("tags", tagsToMapRDS(dt))
+ }
+
return nil
}
@@ -177,6 +211,14 @@ func resourceAwsDbParameterGroupUpdate(d *schema.ResourceData, meta interface{})
d.SetPartial("parameter")
}
+ if arn, err := buildRDSPGARN(d, meta); err == nil {
+ if err := setTagsRDS(rdsconn, d, arn); err != nil {
+ return err
+ } else {
+ d.SetPartial("tags")
+ }
+ }
+
d.Partial(false)
return resourceAwsDbParameterGroupRead(d, meta)
@@ -230,6 +272,20 @@ func resourceAwsDbParameterHash(v interface{}) int {
return hashcode.String(buf.String())
}
+func buildRDSPGARN(d *schema.ResourceData, meta interface{}) (string, error) {
+ iamconn := meta.(*AWSClient).iamconn
+ region := meta.(*AWSClient).region
+	// A zero-value GetUserInput{} defers to the currently logged-in user
+ resp, err := iamconn.GetUser(&iam.GetUserInput{})
+ if err != nil {
+ return "", err
+ }
+ userARN := *resp.User.Arn
+ accountID := strings.Split(userARN, ":")[4]
+ arn := fmt.Sprintf("arn:aws:rds:%s:%s:pg:%s", region, accountID, d.Id())
+ return arn, nil
+}
+
func validateDbParamGroupName(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
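
The `buildRDSPGARN` helper above derives the account ID from the caller's IAM user ARN and composes the parameter group ARN by hand. A self-contained sketch of just that string manipulation, with a made-up caller ARN and no AWS call involved:

```
package main

import (
	"fmt"
	"strings"
)

// rdsParameterGroupARN mirrors the arn:aws:rds:<region>:<account>:pg:<name>
// format used above; the account ID is the fifth colon-separated field of
// the caller's ARN.
func rdsParameterGroupARN(callerARN, region, name string) (string, error) {
	parts := strings.Split(callerARN, ":")
	if len(parts) < 5 {
		return "", fmt.Errorf("unexpected ARN format: %s", callerARN)
	}
	return fmt.Sprintf("arn:aws:rds:%s:%s:pg:%s", region, parts[4], name), nil
}

func main() {
	arn, err := rdsParameterGroupARN("arn:aws:iam::123456789012:user/example", "us-east-1", "default.mysql5.6")
	fmt.Println(arn, err) // arn:aws:rds:us-east-1:123456789012:pg:default.mysql5.6 <nil>
}
```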
diff --git a/builtin/providers/aws/resource_aws_db_parameter_group_test.go b/builtin/providers/aws/resource_aws_db_parameter_group_test.go
index d0042df232..c2a8b9538f 100644
--- a/builtin/providers/aws/resource_aws_db_parameter_group_test.go
+++ b/builtin/providers/aws/resource_aws_db_parameter_group_test.go
@@ -44,6 +44,8 @@ func TestAccAWSDBParameterGroup_basic(t *testing.T) {
"aws_db_parameter_group.bar", "parameter.2478663599.name", "character_set_client"),
resource.TestCheckResourceAttr(
"aws_db_parameter_group.bar", "parameter.2478663599.value", "utf8"),
+ resource.TestCheckResourceAttr(
+ "aws_db_parameter_group.bar", "tags.#", "1"),
),
},
resource.TestStep{
@@ -77,6 +79,8 @@ func TestAccAWSDBParameterGroup_basic(t *testing.T) {
"aws_db_parameter_group.bar", "parameter.2478663599.name", "character_set_client"),
resource.TestCheckResourceAttr(
"aws_db_parameter_group.bar", "parameter.2478663599.value", "utf8"),
+ resource.TestCheckResourceAttr(
+ "aws_db_parameter_group.bar", "tags.#", "2"),
),
},
},
@@ -174,7 +178,7 @@ func testAccCheckAWSDBParameterGroupDestroy(s *terraform.State) error {
if !ok {
return err
}
- if newerr.Code() != "InvalidDBParameterGroup.NotFound" {
+ if newerr.Code() != "DBParameterGroupNotFound" {
return err
}
}
@@ -262,6 +266,9 @@ resource "aws_db_parameter_group" "bar" {
name = "character_set_results"
value = "utf8"
}
+ tags {
+ foo = "bar"
+ }
}
`
@@ -290,6 +297,10 @@ resource "aws_db_parameter_group" "bar" {
name = "collation_connection"
value = "utf8_unicode_ci"
}
+ tags {
+ foo = "bar"
+ baz = "foo"
+ }
}
`
diff --git a/website/source/docs/providers/aws/r/db_parameter_group.html.markdown b/website/source/docs/providers/aws/r/db_parameter_group.html.markdown
index 41e2f7b860..8fa5b3b6c5 100644
--- a/website/source/docs/providers/aws/r/db_parameter_group.html.markdown
+++ b/website/source/docs/providers/aws/r/db_parameter_group.html.markdown
@@ -36,6 +36,7 @@ The following arguments are supported:
* `family` - (Required) The family of the DB parameter group.
* `description` - (Required) The description of the DB parameter group.
* `parameter` - (Optional) A list of DB parameters to apply.
+* `tags` - (Optional) A mapping of tags to assign to the resource.
Parameter blocks support the following:
@@ -50,3 +51,4 @@ Parameter blocks support the following:
The following attributes are exported:
* `id` - The db parameter group name.
+* `arn` - The ARN of the db parameter group.
From 474d6080f06d40cf73cf84365d99cb7e4d0cf7f5 Mon Sep 17 00:00:00 2001
From: stack72
Date: Fri, 11 Dec 2015 12:28:24 +0000
Subject: [PATCH 205/664] Adding support for Tags to the DB Security Group
---
.../aws/resource_aws_db_security_group.go | 67 +++++++++++++++++++
.../resource_aws_db_security_group_test.go | 8 ++-
.../aws/r/db_security_group.html.markdown | 2 +
3 files changed, 76 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/aws/resource_aws_db_security_group.go b/builtin/providers/aws/resource_aws_db_security_group.go
index 367400ae77..070f1ec71a 100644
--- a/builtin/providers/aws/resource_aws_db_security_group.go
+++ b/builtin/providers/aws/resource_aws_db_security_group.go
@@ -4,10 +4,12 @@ import (
"bytes"
"fmt"
"log"
+ "strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/rds"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/helper/hashcode"
@@ -19,9 +21,15 @@ func resourceAwsDbSecurityGroup() *schema.Resource {
return &schema.Resource{
Create: resourceAwsDbSecurityGroupCreate,
Read: resourceAwsDbSecurityGroupRead,
+ Update: resourceAwsDbSecurityGroupUpdate,
Delete: resourceAwsDbSecurityGroupDelete,
Schema: map[string]*schema.Schema{
+ "arn": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
@@ -66,12 +74,15 @@ func resourceAwsDbSecurityGroup() *schema.Resource {
},
Set: resourceAwsDbSecurityGroupIngressHash,
},
+
+ "tags": tagsSchema(),
},
}
}
func resourceAwsDbSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).rdsconn
+ tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
var err error
var errs []error
@@ -79,6 +90,7 @@ func resourceAwsDbSecurityGroupCreate(d *schema.ResourceData, meta interface{})
opts := rds.CreateDBSecurityGroupInput{
DBSecurityGroupName: aws.String(d.Get("name").(string)),
DBSecurityGroupDescription: aws.String(d.Get("description").(string)),
+ Tags: tags,
}
log.Printf("[DEBUG] DB Security Group create configuration: %#v", opts)
@@ -157,9 +169,50 @@ func resourceAwsDbSecurityGroupRead(d *schema.ResourceData, meta interface{}) er
d.Set("ingress", rules)
+ conn := meta.(*AWSClient).rdsconn
+ arn, err := buildRDSSecurityGroupARN(d, meta)
+ if err != nil {
+ name := ""
+ if sg.DBSecurityGroupName != nil && *sg.DBSecurityGroupName != "" {
+ name = *sg.DBSecurityGroupName
+ }
+ log.Printf("[DEBUG] Error building ARN for DB Security Group, not setting Tags for DB Security Group %s", name)
+ } else {
+ d.Set("arn", arn)
+ resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
+ ResourceName: aws.String(arn),
+ })
+
+ if err != nil {
+ log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
+ }
+
+ var dt []*rds.Tag
+ if len(resp.TagList) > 0 {
+ dt = resp.TagList
+ }
+ d.Set("tags", tagsToMapRDS(dt))
+ }
+
return nil
}
+func resourceAwsDbSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).rdsconn
+
+ d.Partial(true)
+ if arn, err := buildRDSSecurityGroupARN(d, meta); err == nil {
+ if err := setTagsRDS(conn, d, arn); err != nil {
+ return err
+ } else {
+ d.SetPartial("tags")
+ }
+ }
+ d.Partial(false)
+
+ return resourceAwsDbSecurityGroupRead(d, meta)
+}
+
func resourceAwsDbSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).rdsconn
@@ -290,3 +343,17 @@ func resourceAwsDbSecurityGroupStateRefreshFunc(
return v, "authorized", nil
}
}
+
+func buildRDSSecurityGroupARN(d *schema.ResourceData, meta interface{}) (string, error) {
+ iamconn := meta.(*AWSClient).iamconn
+ region := meta.(*AWSClient).region
+	// A zero-value GetUserInput{} defers to the currently logged-in user
+ resp, err := iamconn.GetUser(&iam.GetUserInput{})
+ if err != nil {
+ return "", err
+ }
+ userARN := *resp.User.Arn
+ accountID := strings.Split(userARN, ":")[4]
+ arn := fmt.Sprintf("arn:aws:rds:%s:%s:secgrp:%s", region, accountID, d.Id())
+ return arn, nil
+}
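
The new Update function uses the partial-state mechanism: `Partial(true)` before the tag call, `SetPartial("tags")` only once the call succeeds, `Partial(false)` at the end. A hedged sketch of that shape; the helper below is illustrative and needs a real `*schema.ResourceData` from a provider context to do anything useful:

```
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
)

// updateTagsOnly wraps a tag update in partial-state mode so that only the
// "tags" key is recorded as successfully persisted when applyTags succeeds.
func updateTagsOnly(d *schema.ResourceData, applyTags func() error) error {
	d.Partial(true)
	defer d.Partial(false)

	if err := applyTags(); err != nil {
		return err
	}
	d.SetPartial("tags")
	return nil
}

func main() {
	fmt.Println("sketch only; in the provider, Update passes its ResourceData and a closure around setTagsRDS")
}
```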
diff --git a/builtin/providers/aws/resource_aws_db_security_group_test.go b/builtin/providers/aws/resource_aws_db_security_group_test.go
index bf1db6e37b..7ab269fb36 100644
--- a/builtin/providers/aws/resource_aws_db_security_group_test.go
+++ b/builtin/providers/aws/resource_aws_db_security_group_test.go
@@ -32,6 +32,8 @@ func TestAccAWSDBSecurityGroup_basic(t *testing.T) {
"aws_db_security_group.bar", "ingress.3363517775.cidr", "10.0.0.1/24"),
resource.TestCheckResourceAttr(
"aws_db_security_group.bar", "ingress.#", "1"),
+ resource.TestCheckResourceAttr(
+ "aws_db_security_group.bar", "tags.#", "1"),
),
},
},
@@ -64,7 +66,7 @@ func testAccCheckAWSDBSecurityGroupDestroy(s *terraform.State) error {
if !ok {
return err
}
- if newerr.Code() != "InvalidDBSecurityGroup.NotFound" {
+ if newerr.Code() != "DBSecurityGroupNotFound" {
return err
}
}
@@ -149,5 +151,9 @@ resource "aws_db_security_group" "bar" {
ingress {
cidr = "10.0.0.1/24"
}
+
+ tags {
+ foo = "bar"
+ }
}
`
diff --git a/website/source/docs/providers/aws/r/db_security_group.html.markdown b/website/source/docs/providers/aws/r/db_security_group.html.markdown
index 7a92426778..1c7f8183e8 100644
--- a/website/source/docs/providers/aws/r/db_security_group.html.markdown
+++ b/website/source/docs/providers/aws/r/db_security_group.html.markdown
@@ -33,6 +33,7 @@ The following arguments are supported:
* `name` - (Required) The name of the DB security group.
* `description` - (Required) The description of the DB security group.
* `ingress` - (Optional) A list of ingress rules.
+* `tags` - (Optional) A mapping of tags to assign to the resource.
Ingress blocks support the following:
@@ -47,4 +48,5 @@ Ingress blocks support the following:
The following attributes are exported:
* `id` - The db security group ID.
+* `arn` - The arn of the DB security group.
From dec5a27f19661cba845173fed494b9a14906cacb Mon Sep 17 00:00:00 2001
From: stack72
Date: Fri, 11 Dec 2015 12:39:15 +0000
Subject: [PATCH 206/664] Adding the ARN as an output of the DB Subnet Group
---
builtin/providers/aws/resource_aws_db_subnet_group.go | 6 ++++++
.../docs/providers/aws/r/db_subnet_group.html.markdown | 1 +
2 files changed, 7 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_db_subnet_group.go b/builtin/providers/aws/resource_aws_db_subnet_group.go
index cbfed609a9..0127cfd48a 100644
--- a/builtin/providers/aws/resource_aws_db_subnet_group.go
+++ b/builtin/providers/aws/resource_aws_db_subnet_group.go
@@ -23,6 +23,11 @@ func resourceAwsDbSubnetGroup() *schema.Resource {
Delete: resourceAwsDbSubnetGroupDelete,
Schema: map[string]*schema.Schema{
+ "arn": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
"name": &schema.Schema{
Type: schema.TypeString,
ForceNew: true,
@@ -142,6 +147,7 @@ func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) erro
if err != nil {
log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", *subnetGroup.DBSubnetGroupName)
} else {
+ d.Set("arn", arn)
resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
ResourceName: aws.String(arn),
})
diff --git a/website/source/docs/providers/aws/r/db_subnet_group.html.markdown b/website/source/docs/providers/aws/r/db_subnet_group.html.markdown
index e3dcd18ed9..1a539ffa2b 100644
--- a/website/source/docs/providers/aws/r/db_subnet_group.html.markdown
+++ b/website/source/docs/providers/aws/r/db_subnet_group.html.markdown
@@ -37,4 +37,5 @@ The following arguments are supported:
The following attributes are exported:
* `id` - The db subnet group name.
+* `arn` - The ARN of the db subnet group.
From a59ffd7e9ea48e735959041bdbaaed0039b8f551 Mon Sep 17 00:00:00 2001
From: Clint
Date: Fri, 11 Dec 2015 08:34:23 -0600
Subject: [PATCH 207/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2c23a1c90e..3ddc38ae35 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,7 @@ IMPROVEMENTS:
* provider/aws: Add `name_prefix` to Security Groups [GH-4167]
* provider/aws: Add support for removing nodes to `aws_elasticache_cluster` [GH-3809]
* provider/aws: Add support for `skip_final_snapshot` to `aws_db_instance` [GH-3853]
+ * provider/aws: DB Subnet group arn output [GH-4261]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting the restart policy on `docker_container` resources [GH-3761]
From b788ad702d8b6edd53f261dcbb2e355bb746fc15 Mon Sep 17 00:00:00 2001
From: Clint
Date: Fri, 11 Dec 2015 08:49:16 -0600
Subject: [PATCH 208/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3ddc38ae35..d1e5774fe8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,7 @@ IMPROVEMENTS:
* provider/aws: Add `name_prefix` to Security Groups [GH-4167]
* provider/aws: Add support for removing nodes to `aws_elasticache_cluster` [GH-3809]
* provider/aws: Add support for `skip_final_snapshot` to `aws_db_instance` [GH-3853]
+ * provider/aws: Adding support for Tags to DB SecurityGroup [GH-4260]
* provider/aws: DB Subnet group arn output [GH-4261]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
From b26a85e5b227b5adaf30f22222c235ca2f8f40ad Mon Sep 17 00:00:00 2001
From: Clint
Date: Fri, 11 Dec 2015 08:58:49 -0600
Subject: [PATCH 209/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d1e5774fe8..93577f2151 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -18,6 +18,7 @@ IMPROVEMENTS:
* provider/aws: Add support for removing nodes to `aws_elasticache_cluster` [GH-3809]
* provider/aws: Add support for `skip_final_snapshot` to `aws_db_instance` [GH-3853]
* provider/aws: Adding support for Tags to DB SecurityGroup [GH-4260]
+ * provider/aws: Adding Tag support for DB Param Groups [GH-4259]
* provider/aws: DB Subnet group arn output [GH-4261]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
From f0ceb7fb9dbd24a92e3acbf67637d3b9e74fac04 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Fri, 11 Dec 2015 10:07:06 -0600
Subject: [PATCH 210/664] update docs for updated us-east AMIs
---
website/source/intro/getting-started/build.html.md | 8 ++++----
website/source/intro/getting-started/change.html.md | 8 ++++----
.../intro/getting-started/dependencies.html.md | 12 ++++++------
.../source/intro/getting-started/provision.html.md | 8 ++++----
.../source/intro/getting-started/variables.html.md | 6 +++---
5 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/website/source/intro/getting-started/build.html.md b/website/source/intro/getting-started/build.html.md
index 0c380d4fb3..633db888b4 100644
--- a/website/source/intro/getting-started/build.html.md
+++ b/website/source/intro/getting-started/build.html.md
@@ -59,7 +59,7 @@ provider "aws" {
}
resource "aws_instance" "example" {
- ami = "ami-5189a661"
+ ami = "ami-d05e75b8"
instance_type = "t2.micro"
}
```
@@ -111,7 +111,7 @@ $ terraform plan
...
+ aws_instance.example
- ami: "" => "ami-5189a661"
+ ami: "" => "ami-d05e75b8"
availability_zone: "" => ""
instance_type: "" => "t2.micro"
key_name: "" => ""
@@ -148,7 +148,7 @@ since Terraform waits for the EC2 instance to become available.
```
$ terraform apply
aws_instance.example: Creating...
- ami: "" => "ami-5189a661"
+ ami: "" => "ami-d05e75b8"
instance_type: "" => "t2.micro"
Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
@@ -172,7 +172,7 @@ You can inspect the state using `terraform show`:
$ terraform show
aws_instance.example:
id = i-e60900cd
- ami = ami-5189a661
+ ami = ami-d05e75b8
availability_zone = us-east-1c
instance_type = t2.micro
key_name =
diff --git a/website/source/intro/getting-started/change.html.md b/website/source/intro/getting-started/change.html.md
index 4850bc808e..60d14fd4b6 100644
--- a/website/source/intro/getting-started/change.html.md
+++ b/website/source/intro/getting-started/change.html.md
@@ -28,8 +28,8 @@ resource in your configuration and change it to the following:
```
resource "aws_instance" "example" {
- ami = "ami-aa7ab6c2"
- instance_type = "t1.micro"
+ ami = "ami-8eb061e6"
+ instance_type = "t2.micro"
}
```
@@ -47,7 +47,7 @@ $ terraform plan
...
-/+ aws_instance.example
- ami: "ami-408c7f28" => "ami-aa7ab6c2" (forces new resource)
+ ami: "ami-d05e75b8" => "ami-8eb061e6" (forces new resource)
availability_zone: "us-east-1c" => ""
key_name: "" => ""
private_dns: "domU-12-31-39-12-38-AB.compute-1.internal" => ""
@@ -79,7 +79,7 @@ the change.
$ terraform apply
aws_instance.example: Destroying...
aws_instance.example: Modifying...
- ami: "ami-408c7f28" => "ami-aa7ab6c2"
+ ami: "ami-d05e75b8" => "ami-8eb061e6"
Apply complete! Resources: 0 added, 1 changed, 1 destroyed.
diff --git a/website/source/intro/getting-started/dependencies.html.md b/website/source/intro/getting-started/dependencies.html.md
index fe3397afed..75cc9e4eb5 100644
--- a/website/source/intro/getting-started/dependencies.html.md
+++ b/website/source/intro/getting-started/dependencies.html.md
@@ -67,9 +67,9 @@ $ terraform plan
public_ip: "" => ""
+ aws_instance.example
- ami: "" => "ami-aa7ab6c2"
+ ami: "" => "ami-8eb061e6"
availability_zone: "" => ""
- instance_type: "" => "t1.micro"
+ instance_type: "" => "t2.micro"
key_name: "" => ""
private_dns: "" => ""
private_ip: "" => ""
@@ -90,8 +90,8 @@ following:
```
aws_instance.example: Creating...
- ami: "" => "ami-aa7ab6c2"
- instance_type: "" => "t1.micro"
+ ami: "" => "ami-8eb061e6"
+ instance_type: "" => "t2.micro"
aws_eip.ip: Creating...
instance: "" => "i-0e737b25"
@@ -144,8 +144,8 @@ created in parallel to everything else.
```
resource "aws_instance" "another" {
- ami = "ami-aa7ab6c2"
- instance_type = "t1.micro"
+ ami = "ami-8eb061e6"
+ instance_type = "t2.micro"
}
```
diff --git a/website/source/intro/getting-started/provision.html.md b/website/source/intro/getting-started/provision.html.md
index 4c6a5cfeeb..24684ef785 100644
--- a/website/source/intro/getting-started/provision.html.md
+++ b/website/source/intro/getting-started/provision.html.md
@@ -25,8 +25,8 @@ To define a provisioner, modify the resource block defining the
```
resource "aws_instance" "example" {
- ami = "ami-aa7ab6c2"
- instance_type = "t1.micro"
+ ami = "ami-8eb061e6"
+ instance_type = "t2.micro"
provisioner "local-exec" {
command = "echo ${aws_instance.example.public_ip} > file.txt"
@@ -61,8 +61,8 @@ then run `apply`:
```
$ terraform apply
aws_instance.example: Creating...
- ami: "" => "ami-aa7ab6c2"
- instance_type: "" => "t1.micro"
+ ami: "" => "ami-8eb061e6"
+ instance_type: "" => "t2.micro"
aws_eip.ip: Creating...
instance: "" => "i-213f350a"
diff --git a/website/source/intro/getting-started/variables.html.md b/website/source/intro/getting-started/variables.html.md
index 24154ca25d..9062a08cf1 100644
--- a/website/source/intro/getting-started/variables.html.md
+++ b/website/source/intro/getting-started/variables.html.md
@@ -123,8 +123,8 @@ support for the "us-west-2" region as well:
```
variable "amis" {
default = {
- us-east-1 = "ami-aa7ab6c2"
- us-west-2 = "ami-23f78e13"
+ us-east-1 = "ami-8eb061e6"
+ us-west-2 = "ami-ef5e24df"
}
}
```
@@ -137,7 +137,7 @@ Then, replace the "aws\_instance" with the following:
```
resource "aws_instance" "example" {
ami = "${lookup(var.amis, var.region)}"
- instance_type = "t1.micro"
+ instance_type = "t2.micro"
}
```
From bedd020deee5134e181a0e3e9551271a8afcf636 Mon Sep 17 00:00:00 2001
From: toshihisa
Date: Sat, 12 Dec 2015 01:15:54 +0900
Subject: [PATCH 211/664] issue #4137 vpc_endpoint bug fix.
---
builtin/providers/aws/resource_aws_vpc_endpoint.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_vpc_endpoint.go b/builtin/providers/aws/resource_aws_vpc_endpoint.go
index 06ba0bf005..c35e5f9489 100644
--- a/builtin/providers/aws/resource_aws_vpc_endpoint.go
+++ b/builtin/providers/aws/resource_aws_vpc_endpoint.go
@@ -119,12 +119,12 @@ func resourceAwsVPCEndpointUpdate(d *schema.ResourceData, meta interface{}) erro
os := o.(*schema.Set)
ns := n.(*schema.Set)
- add := expandStringList(os.Difference(ns).List())
+ add := expandStringList(ns.Difference(os).List())
if len(add) > 0 {
input.AddRouteTableIds = add
}
- remove := expandStringList(ns.Difference(os).List())
+ remove := expandStringList(os.Difference(ns).List())
if len(remove) > 0 {
input.RemoveRouteTableIds = remove
}
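
The fix above is a straight swap of the two set differences: IDs present in the new set but not the old one are the ones to add, and the reverse difference gives the ones to remove. A standalone sketch with plain slices standing in for `*schema.Set`:

```
package main

import "fmt"

// difference returns the elements of a that do not appear in b.
func difference(a, b []string) []string {
	seen := make(map[string]bool, len(b))
	for _, id := range b {
		seen[id] = true
	}
	var out []string
	for _, id := range a {
		if !seen[id] {
			out = append(out, id)
		}
	}
	return out
}

func main() {
	oldIDs := []string{"rtb-aaa", "rtb-bbb"}
	newIDs := []string{"rtb-bbb", "rtb-ccc"}

	fmt.Println("add:", difference(newIDs, oldIDs))    // [rtb-ccc]
	fmt.Println("remove:", difference(oldIDs, newIDs)) // [rtb-aaa]
}
```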
From a1a5788ed8625203220330fbc02e9ac83cb7e7f6 Mon Sep 17 00:00:00 2001
From: Lars Wander
Date: Fri, 11 Dec 2015 11:41:02 -0500
Subject: [PATCH 212/664] provider/google: provide assigned_nat_ip as well as
nat_ip
---
builtin/providers/google/resource_compute_instance.go | 11 ++++++++---
.../providers/google/r/compute_instance.html.markdown | 4 ++++
.../docs/providers/google/r/dns_record_set.markdown | 2 +-
3 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go
index 808c5de789..f7b6e05b16 100644
--- a/builtin/providers/google/resource_compute_instance.go
+++ b/builtin/providers/google/resource_compute_instance.go
@@ -136,9 +136,13 @@ func resourceComputeInstance() *schema.Resource {
Schema: map[string]*schema.Schema{
"nat_ip": &schema.Schema{
Type: schema.TypeString,
- Computed: true,
Optional: true,
},
+
+ "assigned_nat_ip": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
},
},
},
@@ -629,9 +633,10 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
var natIP string
accessConfigs := make(
[]map[string]interface{}, 0, len(iface.AccessConfigs))
- for _, config := range iface.AccessConfigs {
+ for j, config := range iface.AccessConfigs {
accessConfigs = append(accessConfigs, map[string]interface{}{
- "nat_ip": config.NatIP,
+ "nat_ip": d.Get(fmt.Sprintf("network_interface.%d.access_config.%d.nat_ip", i, j)),
+ "assigned_nat_ip": config.NatIP,
})
if natIP == "" {
diff --git a/website/source/docs/providers/google/r/compute_instance.html.markdown b/website/source/docs/providers/google/r/compute_instance.html.markdown
index c7bc410015..1074d01176 100644
--- a/website/source/docs/providers/google/r/compute_instance.html.markdown
+++ b/website/source/docs/providers/google/r/compute_instance.html.markdown
@@ -133,6 +133,10 @@ The `access_config` block supports:
* `nat_ip` - (Optional) The IP address that will be 1:1 mapped to the instance's network ip. If not
given, one will be generated.
+* `assigned_nat_ip` - (Optional) The IP address that is assigned to the
+ instance. If `nat_ip` is filled, it will appear here. If `nat_ip` is left
+ blank, the ephemeral assigned IP will appear here.
+
(DEPRECATED) The `network` block supports:
* `source` - (Required) The name of the network to attach this interface to.
diff --git a/website/source/docs/providers/google/r/dns_record_set.markdown b/website/source/docs/providers/google/r/dns_record_set.markdown
index 79ad2eb308..a4fd97af47 100644
--- a/website/source/docs/providers/google/r/dns_record_set.markdown
+++ b/website/source/docs/providers/google/r/dns_record_set.markdown
@@ -40,7 +40,7 @@ resource "google_dns_record_set" "frontend" {
name = "frontend.${google_dns_managed_zone.prod.dns_name}"
type = "A"
ttl = 300
- rrdatas = ["${google_compute_instance.frontend.network_interface.0.access_config.0.nat_ip}"]
+ rrdatas = ["${google_compute_instance.frontend.network_interface.0.access_config.0.assigned_nat_ip}"]
}
```
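
The read-side change above keeps whatever the user configured under `nat_ip` (so leaving it blank does not produce a perpetual diff) and reports the address GCE actually handed out under the new computed `assigned_nat_ip`. A small sketch of that split, using made-up addresses:

```
package main

import "fmt"

// flattenAccessConfig echoes the configured value back into nat_ip and
// exposes the provider-assigned address under a separate key.
func flattenAccessConfig(configuredNatIP, assignedNatIP string) map[string]interface{} {
	return map[string]interface{}{
		"nat_ip":          configuredNatIP, // what the user wrote, possibly ""
		"assigned_nat_ip": assignedNatIP,   // what the API reports
	}
}

func main() {
	// nat_ip left blank in the config; an ephemeral address was assigned.
	fmt.Println(flattenAccessConfig("", "203.0.113.7"))
}
```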
From a1676f9eb11151c7060e9c0c2899c0c3e14813b7 Mon Sep 17 00:00:00 2001
From: Lars Wander
Date: Fri, 11 Dec 2015 12:59:13 -0500
Subject: [PATCH 213/664] provider/google: gofmt
---
builtin/providers/google/config.go | 4 ++--
builtin/providers/google/provider.go | 4 ++--
.../google/resource_pubsub_subscription.go | 18 ++++++++----------
.../resource_pubsub_subscription_test.go | 1 -
.../providers/google/resource_pubsub_topic.go | 12 +++++-------
5 files changed, 17 insertions(+), 22 deletions(-)
diff --git a/builtin/providers/google/config.go b/builtin/providers/google/config.go
index 5467c6483b..159a57e093 100644
--- a/builtin/providers/google/config.go
+++ b/builtin/providers/google/config.go
@@ -16,9 +16,9 @@ import (
"google.golang.org/api/compute/v1"
"google.golang.org/api/container/v1"
"google.golang.org/api/dns/v1"
+ "google.golang.org/api/pubsub/v1"
"google.golang.org/api/sqladmin/v1beta4"
"google.golang.org/api/storage/v1"
- "google.golang.org/api/pubsub/v1"
)
// Config is the configuration structure used to instantiate the Google
@@ -33,7 +33,7 @@ type Config struct {
clientDns *dns.Service
clientStorage *storage.Service
clientSqlAdmin *sqladmin.Service
- clientPubsub *pubsub.Service
+ clientPubsub *pubsub.Service
}
func (c *Config) loadAndValidate() error {
diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go
index 3fa46c7d56..adec631d7e 100644
--- a/builtin/providers/google/provider.go
+++ b/builtin/providers/google/provider.go
@@ -70,8 +70,8 @@ func Provider() terraform.ResourceProvider {
"google_dns_record_set": resourceDnsRecordSet(),
"google_sql_database": resourceSqlDatabase(),
"google_sql_database_instance": resourceSqlDatabaseInstance(),
- "google_pubsub_topic": resourcePubsubTopic(),
- "google_pubsub_subscription": resourcePubsubSubscription(),
+ "google_pubsub_topic": resourcePubsubTopic(),
+ "google_pubsub_subscription": resourcePubsubSubscription(),
"google_storage_bucket": resourceStorageBucket(),
"google_storage_bucket_acl": resourceStorageBucketAcl(),
"google_storage_bucket_object": resourceStorageBucketObject(),
diff --git a/builtin/providers/google/resource_pubsub_subscription.go b/builtin/providers/google/resource_pubsub_subscription.go
index 6a1f19da73..9301aa4df1 100644
--- a/builtin/providers/google/resource_pubsub_subscription.go
+++ b/builtin/providers/google/resource_pubsub_subscription.go
@@ -2,8 +2,8 @@ package google
import (
"fmt"
- "google.golang.org/api/pubsub/v1"
"github.com/hashicorp/terraform/helper/schema"
+ "google.golang.org/api/pubsub/v1"
)
func resourcePubsubSubscription() *schema.Resource {
@@ -29,7 +29,7 @@ func resourcePubsubSubscription() *schema.Resource {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
- Elem: &schema.Resource{
+ Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"attributes": &schema.Schema{
Type: schema.TypeMap,
@@ -52,14 +52,13 @@ func resourcePubsubSubscription() *schema.Resource {
Required: true,
ForceNew: true,
},
-
},
}
}
func cleanAdditionalArgs(args map[string]interface{}) map[string]string {
cleaned_args := make(map[string]string)
- for k,v := range args {
+ for k, v := range args {
cleaned_args[k] = v.(string)
}
return cleaned_args
@@ -91,7 +90,7 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{})
attributesClean := cleanAdditionalArgs(attributes)
pushConfig := &pubsub.PushConfig{Attributes: attributesClean, PushEndpoint: push_config["push_endpoint"].(string)}
subscription = &pubsub.Subscription{AckDeadlineSeconds: ackDeadlineSeconds, Topic: computed_topic_name, PushConfig: pushConfig}
- } else {
+ } else {
subscription = &pubsub.Subscription{AckDeadlineSeconds: ackDeadlineSeconds, Topic: computed_topic_name}
}
@@ -100,7 +99,7 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{})
if err != nil {
return err
}
-
+
d.SetId(res.Name)
return nil
@@ -108,7 +107,7 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{})
func resourcePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
-
+
name := d.Id()
call := config.clientPubsub.Projects.Subscriptions.Get(name)
_, err := call.Do()
@@ -119,7 +118,6 @@ func resourcePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) er
return nil
}
-
func resourcePubsubSubscriptionDelete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
@@ -127,8 +125,8 @@ func resourcePubsubSubscriptionDelete(d *schema.ResourceData, meta interface{})
call := config.clientPubsub.Projects.Subscriptions.Delete(name)
_, err := call.Do()
if err != nil {
- return err
+ return err
}
-
+
return nil
}
diff --git a/builtin/providers/google/resource_pubsub_subscription_test.go b/builtin/providers/google/resource_pubsub_subscription_test.go
index b0eb2a25ba..0bbed3aed7 100644
--- a/builtin/providers/google/resource_pubsub_subscription_test.go
+++ b/builtin/providers/google/resource_pubsub_subscription_test.go
@@ -71,4 +71,3 @@ resource "google_pubsub_subscription" "foobar_sub" {
name = "foobar_sub"
topic = "${google_pubsub_topic.foobar_sub.name}"
}`
-
diff --git a/builtin/providers/google/resource_pubsub_topic.go b/builtin/providers/google/resource_pubsub_topic.go
index c6ec7cf0fe..e5ac7ab905 100644
--- a/builtin/providers/google/resource_pubsub_topic.go
+++ b/builtin/providers/google/resource_pubsub_topic.go
@@ -2,8 +2,8 @@ package google
import (
"fmt"
- "google.golang.org/api/pubsub/v1"
"github.com/hashicorp/terraform/helper/schema"
+ "google.golang.org/api/pubsub/v1"
)
func resourcePubsubTopic() *schema.Resource {
@@ -18,7 +18,6 @@ func resourcePubsubTopic() *schema.Resource {
Required: true,
ForceNew: true,
},
-
},
}
}
@@ -34,7 +33,7 @@ func resourcePubsubTopicCreate(d *schema.ResourceData, meta interface{}) error {
if err != nil {
return err
}
-
+
d.SetId(res.Name)
return nil
@@ -42,7 +41,7 @@ func resourcePubsubTopicCreate(d *schema.ResourceData, meta interface{}) error {
func resourcePubsubTopicRead(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
-
+
name := d.Id()
call := config.clientPubsub.Projects.Topics.Get(name)
_, err := call.Do()
@@ -53,7 +52,6 @@ func resourcePubsubTopicRead(d *schema.ResourceData, meta interface{}) error {
return nil
}
-
func resourcePubsubTopicDelete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
@@ -61,8 +59,8 @@ func resourcePubsubTopicDelete(d *schema.ResourceData, meta interface{}) error {
call := config.clientPubsub.Projects.Topics.Delete(name)
_, err := call.Do()
if err != nil {
- return err
+ return err
}
-
+
return nil
}
From cda549ab17cc7af74d289a782afb70c81ee3708a Mon Sep 17 00:00:00 2001
From: Lars Wander
Date: Fri, 11 Dec 2015 13:19:37 -0500
Subject: [PATCH 214/664] provider/google: Update docs for acquiring service
account credentials
---
website/source/docs/providers/google/index.html.markdown | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/website/source/docs/providers/google/index.html.markdown b/website/source/docs/providers/google/index.html.markdown
index 14a208d6a2..2dfc69cfde 100644
--- a/website/source/docs/providers/google/index.html.markdown
+++ b/website/source/docs/providers/google/index.html.markdown
the process more straightforward, it is documented here:
1. Log into the [Google Developers Console](https://console.developers.google.com)
and select a project.
-2. Under the "APIs & Auth" section, click "Credentials."
+2. Click the menu button in the top left corner, and navigate to "Permissions",
+ then "Service accounts", and finally "Create service account".
-3. Create a new OAuth client ID and select "Service account" as the type
- of account. Once created, and after a P12 key is downloaded, a JSON file should be downloaded. This is your _account file_.
+3. Provide a name and ID in the corresponding fields, select
+ "Furnish a new private key", and select "JSON" as the key type.
+
+4. Clicking "Create" will download your `credentials`.
From 1549adfccd0a19f2635bab5353f1f6c73038a89b Mon Sep 17 00:00:00 2001
From: Ross Duggan
Date: Fri, 11 Dec 2015 18:15:07 +0000
Subject: [PATCH 215/664] Change wording of aws_route53_health_check Both may
be specified, which enables health checking of an IP address with the Host
header specified.
Defined in the API documentation:
http://docs.aws.amazon.com/Route53/latest/APIReference/API_GetHealthCheck.html
---
.../docs/providers/aws/r/route53_health_check.html.markdown | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/docs/providers/aws/r/route53_health_check.html.markdown b/website/source/docs/providers/aws/r/route53_health_check.html.markdown
index 07f8dc751a..3456bcb112 100644
--- a/website/source/docs/providers/aws/r/route53_health_check.html.markdown
+++ b/website/source/docs/providers/aws/r/route53_health_check.html.markdown
@@ -38,5 +38,5 @@ The following arguments are supported:
* `search_string` - (Optional) String searched in response body for the check to be considered healthy.
* `tags` - (Optional) A mapping of tags to assign to the health check.
-Exactly one of `fqdn` or `ip_address` must be specified.
+At least one of `fqdn` or `ip_address` must be specified.
From c965d2278ee35268ca0b22add624a24a24c71dcd Mon Sep 17 00:00:00 2001
From: stack72
Date: Fri, 11 Dec 2015 18:13:52 +0000
Subject: [PATCH 216/664] Adding a resource for aws_autoscaling_schedule
---
builtin/providers/aws/provider.go | 2 +-
.../aws/resource_aws_autoscaling_schedule.go | 100 ++++++++++++---
.../resource_aws_autoscaling_schedule_test.go | 117 ++++++++++++++++++
.../aws/r/autoscaling_schedule.html.markdown | 55 ++++++++
website/source/layouts/aws.erb | 4 +
5 files changed, 263 insertions(+), 15 deletions(-)
create mode 100644 builtin/providers/aws/resource_aws_autoscaling_schedule_test.go
create mode 100644 website/source/docs/providers/aws/r/autoscaling_schedule.html.markdown
diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index 04f94cc347..313f74b18a 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -174,7 +174,7 @@ func Provider() terraform.ResourceProvider {
"aws_autoscaling_group": resourceAwsAutoscalingGroup(),
"aws_autoscaling_notification": resourceAwsAutoscalingNotification(),
"aws_autoscaling_policy": resourceAwsAutoscalingPolicy(),
- "aws_autoscaling_schedule": resourceAwsAutoscalingSchedule(),
+ "aws_autoscaling_schedule": resourceAwsAutoscalingSchedule(),
"aws_cloudformation_stack": resourceAwsCloudFormationStack(),
"aws_cloudtrail": resourceAwsCloudTrail(),
"aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(),
diff --git a/builtin/providers/aws/resource_aws_autoscaling_schedule.go b/builtin/providers/aws/resource_aws_autoscaling_schedule.go
index 4de57333c6..b8a1135dee 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_schedule.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_schedule.go
@@ -3,12 +3,15 @@ package aws
import (
"fmt"
"log"
+ "time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/hashicorp/terraform/helper/schema"
)
+const awsAutoscalingScheduleTimeLayout = "2006-01-02T15:04:05Z"
+
func resourceAwsAutoscalingSchedule() *schema.Resource {
return &schema.Resource{
Create: resourceAwsAutoscalingScheduleCreate,
@@ -17,6 +20,10 @@ func resourceAwsAutoscalingSchedule() *schema.Resource {
Delete: resourceAwsAutoscalingScheduleDelete,
Schema: map[string]*schema.Schema{
+ "arn": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
"scheduled_action_name": &schema.Schema{
Type: schema.TypeString,
Required: true,
@@ -28,14 +35,16 @@ func resourceAwsAutoscalingSchedule() *schema.Resource {
ForceNew: true,
},
"start_time": &schema.Schema{
- Type: schema.TypeString,
- Optional: true,
- Computed: true,
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ValidateFunc: validateASGScheduleTimestamp,
},
"end_time": &schema.Schema{
- Type: schema.TypeString,
- Optional: true,
- Computed: true,
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ValidateFunc: validateASGScheduleTimestamp,
},
"recurrence": &schema.Schema{
Type: schema.TypeString,
@@ -63,25 +72,41 @@ func resourceAwsAutoscalingSchedule() *schema.Resource {
func resourceAwsAutoscalingScheduleCreate(d *schema.ResourceData, meta interface{}) error {
autoscalingconn := meta.(*AWSClient).autoscalingconn
- params := autoscaling.PutScheduledUpdateGroupActionInput{
+ params := &autoscaling.PutScheduledUpdateGroupActionInput{
AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
ScheduledActionName: aws.String(d.Get("scheduled_action_name").(string)),
}
if attr, ok := d.GetOk("start_time"); ok {
- params.StartTime = aws.Time()
+ t, err := time.Parse(awsAutoscalingScheduleTimeLayout, attr.(string))
+ if err != nil {
+ return fmt.Errorf("Error Parsing AWS Autoscaling Group Schedule Start Time: %s", err.Error())
+ }
+ params.StartTime = aws.Time(t)
+ }
+
+ if attr, ok := d.GetOk("end_time"); ok {
+ t, err := time.Parse(awsAutoscalingScheduleTimeLayout, attr.(string))
+ if err != nil {
+ return fmt.Errorf("Error Parsing AWS Autoscaling Group Schedule End Time: %s", err.Error())
+ }
+ params.EndTime = aws.Time(t)
+ }
+
+	if attr, ok := d.GetOk("recurrence"); ok {
+ params.Recurrence = aws.String(attr.(string))
}
if attr, ok := d.GetOk("min_size"); ok {
- params.MinSize = aws.Int(int64(attr.(int)))
+ params.MinSize = aws.Int64(int64(attr.(int)))
}
if attr, ok := d.GetOk("max_size"); ok {
- params.MaxSize = aws.Int(int64(attr.(int)))
+ params.MaxSize = aws.Int64(int64(attr.(int)))
}
if attr, ok := d.GetOk("desired_capacity"); ok {
- params.DesiredCapacity = aws.Int(int64(attr.(int)))
+ params.DesiredCapacity = aws.Int64(int64(attr.(int)))
}
log.Printf("[INFO] Creating Autoscaling Scheduled Action: %s", d.Get("scheduled_action_name").(string))
@@ -96,18 +121,32 @@ func resourceAwsAutoscalingScheduleCreate(d *schema.ResourceData, meta interface
}
func resourceAwsAutoscalingScheduleRead(d *schema.ResourceData, meta interface{}) error {
+ sa, err := resourceAwsASGScheduledActionRetrieve(d, meta)
+ if err != nil {
+ return err
+ }
+
+ d.Set("autoscaling_group_name", sa.AutoScalingGroupName)
+ d.Set("arn", sa.ScheduledActionARN)
+ d.Set("desired_capacity", sa.DesiredCapacity)
+ d.Set("min_size", sa.MinSize)
+ d.Set("max_size", sa.MaxSize)
+ d.Set("recurrance", sa.Recurrence)
+ d.Set("start_time", sa.StartTime.Format(awsAutoscalingScheduleTimeLayout))
+ d.Set("end_time", sa.EndTime.Format(awsAutoscalingScheduleTimeLayout))
+
return nil
}
func resourceAwsAutoscalingScheduleDelete(d *schema.ResourceData, meta interface{}) error {
autoscalingconn := meta.(*AWSClient).autoscalingconn
- params := autoscaling.DeleteScheduledActionInput{
+ params := &autoscaling.DeleteScheduledActionInput{
AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
- ScheduledActionName: aws.String(d.Get("scheduled_action_name").(string)),
+ ScheduledActionName: aws.String(d.Id()),
}
- log.Printf("[INFO] Deleting Autoscaling Scheduled Action: %s", d.Get("scheduled_action_name").(string))
+ log.Printf("[INFO] Deleting Autoscaling Scheduled Action: %s", d.Id())
_, err := autoscalingconn.DeleteScheduledAction(params)
if err != nil {
return fmt.Errorf("Error deleting Autoscaling Scheduled Action: %s", err.Error())
@@ -115,3 +154,36 @@ func resourceAwsAutoscalingScheduleDelete(d *schema.ResourceData, meta interface
return nil
}
+
+func resourceAwsASGScheduledActionRetrieve(d *schema.ResourceData, meta interface{}) (*autoscaling.ScheduledUpdateGroupAction, error) {
+ autoscalingconn := meta.(*AWSClient).autoscalingconn
+
+ params := &autoscaling.DescribeScheduledActionsInput{
+ AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
+ ScheduledActionNames: []*string{aws.String(d.Id())},
+ }
+
+ log.Printf("[INFO] Describing Autoscaling Scheduled Action: %+v", params)
+ actions, err := autoscalingconn.DescribeScheduledActions(params)
+ if err != nil {
+ return nil, fmt.Errorf("Error retrieving Autoscaling Scheduled Actions: %s", err)
+ }
+
+ if len(actions.ScheduledUpdateGroupActions) != 1 ||
+ *actions.ScheduledUpdateGroupActions[0].ScheduledActionName != d.Id() {
+ return nil, fmt.Errorf("Unable to find Autoscaling Scheduled Action: %#v", actions.ScheduledUpdateGroupActions)
+ }
+
+ return actions.ScheduledUpdateGroupActions[0], nil
+}
+
+func validateASGScheduleTimestamp(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ _, err := time.Parse(awsAutoscalingScheduleTimeLayout, value)
+ if err != nil {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be parsed as iso8601 Timestamp Format", value))
+ }
+
+ return
+}
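
Both the `ValidateFunc` and the create path above lean on the same reference layout, `2006-01-02T15:04:05Z`, to reject malformed timestamps early and to convert valid ones for the API call. A standalone sketch of that round trip; the timestamps below are arbitrary examples:

```
package main

import (
	"fmt"
	"time"
)

const scheduleTimeLayout = "2006-01-02T15:04:05Z"

// parseScheduleTime validates and parses a UTC timestamp in the same layout
// the schedule resource expects.
func parseScheduleTime(value string) (time.Time, error) {
	t, err := time.Parse(scheduleTimeLayout, value)
	if err != nil {
		return time.Time{}, fmt.Errorf("%q cannot be parsed as an ISO 8601 timestamp: %s", value, err)
	}
	return t, nil
}

func main() {
	fmt.Println(parseScheduleTime("2016-12-11T18:00:00Z")) // accepted
	fmt.Println(parseScheduleTime("11/12/2016 18:00"))     // rejected with an error
}
```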
diff --git a/builtin/providers/aws/resource_aws_autoscaling_schedule_test.go b/builtin/providers/aws/resource_aws_autoscaling_schedule_test.go
new file mode 100644
index 0000000000..3bd0315267
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_autoscaling_schedule_test.go
@@ -0,0 +1,117 @@
+package aws
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/autoscaling"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSAutoscalingSchedule_basic(t *testing.T) {
+ var schedule autoscaling.ScheduledUpdateGroupAction
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSAutoscalingScheduleDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSAutoscalingScheduleConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckScalingScheduleExists("aws_autoscaling_schedule.foobar", &schedule),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckScalingScheduleExists(n string, policy *autoscaling.ScheduledUpdateGroupAction) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ autoScalingGroup, _ := rs.Primary.Attributes["autoscaling_group_name"]
+ conn := testAccProvider.Meta().(*AWSClient).autoscalingconn
+ params := &autoscaling.DescribeScheduledActionsInput{
+ AutoScalingGroupName: aws.String(autoScalingGroup),
+ ScheduledActionNames: []*string{aws.String(rs.Primary.ID)},
+ }
+
+ resp, err := conn.DescribeScheduledActions(params)
+ if err != nil {
+ return err
+ }
+ if len(resp.ScheduledUpdateGroupActions) == 0 {
+ return fmt.Errorf("Scaling Schedule not found")
+ }
+
+ return nil
+ }
+}
+
+func testAccCheckAWSAutoscalingScheduleDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).autoscalingconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_autoscaling_schedule" {
+ continue
+ }
+
+ autoScalingGroup, _ := rs.Primary.Attributes["autoscaling_group_name"]
+ params := &autoscaling.DescribeScheduledActionsInput{
+ AutoScalingGroupName: aws.String(autoScalingGroup),
+ ScheduledActionNames: []*string{aws.String(rs.Primary.ID)},
+ }
+
+ resp, err := conn.DescribeScheduledActions(params)
+
+ if err == nil {
+ if len(resp.ScheduledUpdateGroupActions) != 0 &&
+ *resp.ScheduledUpdateGroupActions[0].ScheduledActionName == rs.Primary.ID {
+ return fmt.Errorf("Scaling Schedule Still Exists: %s", rs.Primary.ID)
+ }
+ }
+ }
+
+ return nil
+}
+
+var testAccAWSAutoscalingScheduleConfig = fmt.Sprintf(`
+resource "aws_launch_configuration" "foobar" {
+ name = "terraform-test-foobar5"
+ image_id = "ami-21f78e11"
+ instance_type = "t1.micro"
+}
+
+resource "aws_autoscaling_group" "foobar" {
+ availability_zones = ["us-west-2a"]
+ name = "terraform-test-foobar5"
+ max_size = 1
+ min_size = 1
+ health_check_grace_period = 300
+ health_check_type = "ELB"
+ force_delete = true
+ termination_policies = ["OldestInstance"]
+ launch_configuration = "${aws_launch_configuration.foobar.name}"
+ tag {
+ key = "Foo"
+ value = "foo-bar"
+ propagate_at_launch = true
+ }
+}
+
+resource "aws_autoscaling_schedule" "foobar" {
+ scheduled_action_name = "foobar"
+ min_size = 0
+ max_size = 1
+ desired_capacity = 0
+ start_time = "2016-12-11T18:00:00Z"
+ end_time = "2016-12-12T06:00:00Z"
+ autoscaling_group_name = "${aws_autoscaling_group.foobar.name}"
+}
+`)
diff --git a/website/source/docs/providers/aws/r/autoscaling_schedule.html.markdown b/website/source/docs/providers/aws/r/autoscaling_schedule.html.markdown
new file mode 100644
index 0000000000..4ad9472932
--- /dev/null
+++ b/website/source/docs/providers/aws/r/autoscaling_schedule.html.markdown
@@ -0,0 +1,55 @@
+---
+layout: "aws"
+page_title: "AWS: aws_autoscaling_schedule"
+sidebar_current: "docs-aws-resource-autoscaling-schedule"
+description: |-
+ Provides an AutoScaling Schedule resource.
+---
+
+# aws\_autoscaling\_schedule
+
+Provides an AutoScaling Schedule resource.
+
+## Example Usage
+```
+resource "aws_autoscaling_group" "foobar" {
+ availability_zones = ["us-west-2a"]
+ name = "terraform-test-foobar5"
+ max_size = 1
+ min_size = 1
+ health_check_grace_period = 300
+ health_check_type = "ELB"
+ force_delete = true
+ termination_policies = ["OldestInstance"]
+}
+
+resource "aws_autoscaling_schedule" "foobar" {
+ scheduled_action_name = "foobar"
+ min_size = 0
+ max_size = 1
+ desired_capacity = 0
+ start_time = "2016-12-11T18:00:00Z"
+ end_time = "2016-12-12T06:00:00Z"
+ autoscaling_group_name = "${aws_autoscaling_group.foobar.name}"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `autoscaling_group_name` - (Required) The name or Amazon Resource Name (ARN) of the Auto Scaling group.
+* `scheduled_action_name` - (Required) The name of this scaling action.
+* `start_time` - (Optional) The time for this action to start, in "YYYY-MM-DDThh:mm:ssZ" format in UTC/GMT only (for example, 2014-06-01T00:00:00Z).
+  If you try to schedule your action in the past, Auto Scaling returns an error message.
+* `end_time` - (Optional) The time for this action to end, in "YYYY-MM-DDThh:mm:ssZ" format in UTC/GMT only (for example, 2014-06-01T00:00:00Z).
+  If you try to schedule your action in the past, Auto Scaling returns an error message.
+* `recurrence` - (Optional) The schedule for recurring future actions, specified in Unix cron syntax format.
+* `min_size` - (Optional) The minimum size for the Auto Scaling group.
+* `max_size` - (Optional) The maximum size for the Auto Scaling group.
+* `desired_capacity` - (Optional) The number of EC2 instances that should be running in the group.
+
+~> **NOTE:** When `start_time` and `end_time` are specified with `recurrence`, they form the boundaries of when the recurring action will start and stop.
+
+## Attribute Reference
+* `arn` - The ARN assigned by AWS to the autoscaling schedule.
\ No newline at end of file
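As an aside (not part of the patch): this resource presumably maps onto the Auto Scaling `PutScheduledUpdateGroupAction` API call. Below is a minimal Go sketch of that call using aws-sdk-go, reusing the example values from the usage above; the client setup and the exact call site are assumptions, since the provider implementation is not part of this excerpt.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	// Example values mirroring the documentation above.
	start, _ := time.Parse(time.RFC3339, "2016-12-11T18:00:00Z")
	end, _ := time.Parse(time.RFC3339, "2016-12-12T06:00:00Z")

	svc := autoscaling.New(session.Must(session.NewSession()))

	// A scheduled action is created (or replaced) with a single call; this is
	// the operation an aws_autoscaling_schedule resource corresponds to.
	_, err := svc.PutScheduledUpdateGroupAction(&autoscaling.PutScheduledUpdateGroupActionInput{
		AutoScalingGroupName: aws.String("terraform-test-foobar5"),
		ScheduledActionName:  aws.String("foobar"),
		StartTime:            aws.Time(start),
		EndTime:              aws.Time(end),
		MinSize:              aws.Int64(0),
		MaxSize:              aws.Int64(1),
		DesiredCapacity:      aws.Int64(0),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("scheduled action created")
}
```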
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb
index 1da203dca2..c2df5bbf5c 100644
--- a/website/source/layouts/aws.erb
+++ b/website/source/layouts/aws.erb
@@ -128,6 +128,10 @@
aws_autoscaling_policy
+                <li<%= sidebar_current("docs-aws-resource-autoscaling-schedule") %>>
+                    <a href="/docs/providers/aws/r/autoscaling_schedule.html">aws_autoscaling_schedule</a>
+                </li>
+
>
aws_ebs_volume
From 0a73c2e6299521dfe3b93a9b7f20525d212260d3 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Fri, 11 Dec 2015 16:56:51 -0500
Subject: [PATCH 217/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 93577f2151..3edd01c266 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,7 @@
FEATURES:
* **New provider: `vcd` - VMware vCloud Director** [GH-3785]
* **New provider: `postgresql` - Create PostgreSQL databases and roles** [GH-3653]
+ * **New resource: `aws_autoscaling_schedule`** [GH-4256]
* **New resource: `google_pubsub_topic`** [GH-3671]
* **New resource: `google_pubsub_subscription`** [GH-3671]
* **New resource: `tls_locally_signed_cert`** [GH-3930]
From ed8c5cdeea3407a58c435ef5cf77a6490deaabdf Mon Sep 17 00:00:00 2001
From: stack72
Date: Sat, 12 Dec 2015 11:00:54 +0000
Subject: [PATCH 218/664] Fixing some gofmt errors that keep appearing on my
master branch
---
builtin/providers/google/resource_pubsub_subscription.go | 1 +
builtin/providers/google/resource_pubsub_topic.go | 1 +
2 files changed, 2 insertions(+)
diff --git a/builtin/providers/google/resource_pubsub_subscription.go b/builtin/providers/google/resource_pubsub_subscription.go
index 9301aa4df1..03e6f31238 100644
--- a/builtin/providers/google/resource_pubsub_subscription.go
+++ b/builtin/providers/google/resource_pubsub_subscription.go
@@ -2,6 +2,7 @@ package google
import (
"fmt"
+
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/pubsub/v1"
)
diff --git a/builtin/providers/google/resource_pubsub_topic.go b/builtin/providers/google/resource_pubsub_topic.go
index e5ac7ab905..9d6a6a8797 100644
--- a/builtin/providers/google/resource_pubsub_topic.go
+++ b/builtin/providers/google/resource_pubsub_topic.go
@@ -2,6 +2,7 @@ package google
import (
"fmt"
+
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/pubsub/v1"
)
From 47164516172c673da8420875ec6537e169a856ef Mon Sep 17 00:00:00 2001
From: Joe Topjian
Date: Sat, 12 Dec 2015 18:51:21 +0000
Subject: [PATCH 219/664] provider/openstack: Convert block_device from TypeSet
to TypeList
This change better reflects how block devices are passed to the Nova API
and allows for future enablement of block_device features. It also resolves
an interpolation bug.
---
.../resource_openstack_compute_instance_v2.go | 21 +++------
...urce_openstack_compute_instance_v2_test.go | 45 +++++++++++++++++--
2 files changed, 47 insertions(+), 19 deletions(-)
diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go
index f78a8d015a..c9a0f98328 100644
--- a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go
+++ b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go
@@ -176,12 +176,7 @@ func resourceComputeInstanceV2() *schema.Resource {
ForceNew: true,
},
"block_device": &schema.Schema{
- // TODO: This is a set because we don't support singleton
- // sub-resources today. We'll enforce that the set only ever has
- // length zero or one below. When TF gains support for
- // sub-resources this can be converted.
- // As referenced in resource_aws_instance.go
- Type: schema.TypeSet,
+ Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Resource{
@@ -213,10 +208,6 @@ func resourceComputeInstanceV2() *schema.Resource {
},
},
},
- Set: func(v interface{}) int {
- // there can only be one bootable block device; no need to hash anything
- return 0
- },
},
"volume": &schema.Schema{
Type: schema.TypeSet,
@@ -352,9 +343,8 @@ func resourceComputeInstanceV2Create(d *schema.ResourceData, meta interface{}) e
}
}
- if v, ok := d.GetOk("block_device"); ok {
- vL := v.(*schema.Set).List()
- for _, v := range vL {
+ if vL, ok := d.GetOk("block_device"); ok {
+ for _, v := range vL.([]interface{}) {
blockDeviceRaw := v.(map[string]interface{})
blockDevice := resourceInstanceBlockDeviceV2(d, blockDeviceRaw)
createOpts = &bootfromvolume.CreateOptsExt{
@@ -1239,9 +1229,8 @@ func checkVolumeConfig(d *schema.ResourceData) error {
}
}
- if v, ok := d.GetOk("block_device"); ok {
- vL := v.(*schema.Set).List()
- if len(vL) > 1 {
+ if vL, ok := d.GetOk("block_device"); ok {
+ if len(vL.([]interface{})) > 1 {
return fmt.Errorf("Can only specify one block device to boot from.")
}
}
diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go
index fa5533508f..63f8714460 100644
--- a/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go
+++ b/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go
@@ -253,9 +253,9 @@ func TestAccComputeV2Instance_multi_secgroups(t *testing.T) {
})
}
-func TestAccComputeV2Instance_bootFromVolume(t *testing.T) {
+func TestAccComputeV2Instance_bootFromVolumeImage(t *testing.T) {
var instance servers.Server
- var testAccComputeV2Instance_bootFromVolume = fmt.Sprintf(`
+ var testAccComputeV2Instance_bootFromVolumeImage = fmt.Sprintf(`
resource "openstack_compute_instance_v2" "foo" {
name = "terraform-test"
security_groups = ["default"]
@@ -276,7 +276,46 @@ func TestAccComputeV2Instance_bootFromVolume(t *testing.T) {
CheckDestroy: testAccCheckComputeV2InstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeV2Instance_bootFromVolume,
+ Config: testAccComputeV2Instance_bootFromVolumeImage,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance),
+ testAccCheckComputeV2InstanceBootVolumeAttachment(&instance),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccComputeV2Instance_bootFromVolumeVolume(t *testing.T) {
+ var instance servers.Server
+ var testAccComputeV2Instance_bootFromVolumeVolume = fmt.Sprintf(`
+ resource "openstack_blockstorage_volume_v1" "foo" {
+ name = "terraform-test"
+ size = 5
+ image_id = "%s"
+ }
+
+ resource "openstack_compute_instance_v2" "foo" {
+ name = "terraform-test"
+ security_groups = ["default"]
+ block_device {
+ uuid = "${openstack_blockstorage_volume_v1.foo.id}"
+ source_type = "volume"
+ volume_size = 5
+ boot_index = 0
+ destination_type = "volume"
+ delete_on_termination = true
+ }
+ }`,
+ os.Getenv("OS_IMAGE_ID"))
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckComputeV2InstanceDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccComputeV2Instance_bootFromVolumeVolume,
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance),
testAccCheckComputeV2InstanceBootVolumeAttachment(&instance),
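A brief aside on the TypeSet-to-TypeList change above (not part of the patch): list elements keep their configuration order and can be referenced by index (for example `block_device.0.uuid`), which is what resolves the interpolation bug mentioned in the commit message. A minimal sketch of the read pattern, using the same helper/schema API as the diff; the helper name `readBlockDevices` is hypothetical.

```go
package openstack

import "github.com/hashicorp/terraform/helper/schema"

// readBlockDevices shows how a TypeList block is consumed: a plain, ordered
// []interface{} rather than an unordered *schema.Set that must be hashed.
func readBlockDevices(d *schema.ResourceData) []map[string]interface{} {
	var out []map[string]interface{}
	if vL, ok := d.GetOk("block_device"); ok {
		for _, raw := range vL.([]interface{}) {
			// Each element is the nested resource's attribute map,
			// e.g. {"uuid": "...", "source_type": "image", ...}.
			out = append(out, raw.(map[string]interface{}))
		}
	}
	return out
}
```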
From c0b91689cc29ba07433967f143124c3331a96766 Mon Sep 17 00:00:00 2001
From: Michael Mell
Date: Sat, 12 Dec 2015 14:22:40 -0800
Subject: [PATCH 220/664] fix typo in snapshot_retention_limit
---
.../docs/providers/aws/r/elasticache_cluster.html.markdown | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown
index 9d547b5dc1..3f511f84d6 100644
--- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown
+++ b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown
@@ -77,7 +77,7 @@ Example: `arn:aws:s3:::my_bucket/snapshot1.rdb`
* `snapshot_window` - (Optional) The daily time range (in UTC) during which ElastiCache will
begin taking a daily snapshot of your cache cluster. Can only be used for the Redis engine. Example: 05:00-09:00
-* `snapshow_retention_limit` - (Optional) The number of days for which ElastiCache will
+* `snapshot_retention_limit` - (Optional) The number of days for which ElastiCache will
retain automatic cache cluster snapshots before deleting them. For example, if you set
SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days
before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
From 7cbd18a88d1b970e3057eef68ef344d95580fad7 Mon Sep 17 00:00:00 2001
From: Joe Topjian
Date: Sat, 12 Dec 2015 23:07:18 +0000
Subject: [PATCH 221/664] provider/openstack: DevStack Deploy Script
This commit includes a script to deploy a standardized devstack environment
for use with development and testing.
---
.../providers/openstack/devstack/deploy.sh | 125 ++++++++++++++++++
.../providers/openstack/index.html.markdown | 14 +-
2 files changed, 138 insertions(+), 1 deletion(-)
create mode 100644 builtin/providers/openstack/devstack/deploy.sh
diff --git a/builtin/providers/openstack/devstack/deploy.sh b/builtin/providers/openstack/devstack/deploy.sh
new file mode 100644
index 0000000000..2225478e1f
--- /dev/null
+++ b/builtin/providers/openstack/devstack/deploy.sh
@@ -0,0 +1,125 @@
+#!/bin/bash
+
+sudo apt-get update
+sudo apt-get install -y git make mercurial
+
+GOPKG=go1.5.2.linux-amd64.tar.gz
+wget https://storage.googleapis.com/golang/$GOPKG
+sudo tar -xvf $GOPKG -C /usr/local/
+
+mkdir ~/go
+echo 'export GOPATH=$HOME/go' >> .bashrc
+echo 'export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin' >> .bashrc
+source .bashrc
+export GOPATH=$HOME/go
+export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
+
+go get github.com/hashicorp/terraform
+cd $GOPATH/src/github.com/hashicorp/terraform
+make updatedeps
+
+cd
+git clone https://git.openstack.org/openstack-dev/devstack -b stable/liberty
+cd devstack
+cat >local.conf <> openrc
+echo export OS_IMAGE_ID="$_IMAGE_ID" >> openrc
+echo export OS_NETWORK_ID=$_NETWORK_ID >> openrc
+echo export OS_POOL_NAME="public" >> openrc
+echo export OS_FLAVOR_ID=99 >> openrc
+source openrc demo
+
+cd $GOPATH/src/github.com/hashicorp/terraform
+make updatedeps
+
+# Replace the below lines with the repo/branch you want to test
+#git remote add jtopjian https://github.com/jtopjian/terraform
+#git fetch jtopjian
+#git checkout --track jtopjian/openstack-acctest-fixes
+#make testacc TEST=./builtin/providers/openstack TESTARGS='-run=AccBlockStorageV1'
+#make testacc TEST=./builtin/providers/openstack TESTARGS='-run=AccCompute'
+#make testacc TEST=./builtin/providers/openstack
diff --git a/website/source/docs/providers/openstack/index.html.markdown b/website/source/docs/providers/openstack/index.html.markdown
index be918a4655..e248571931 100644
--- a/website/source/docs/providers/openstack/index.html.markdown
+++ b/website/source/docs/providers/openstack/index.html.markdown
@@ -64,7 +64,7 @@ The following arguments are supported:
service catalog. It can be set using the OS_ENDPOINT_TYPE environment
variable. If not set, public endpoints is used.
-## Testing
+## Testing and Development
In order to run the Acceptance Tests for development, the following environment
variables must also be set:
@@ -79,3 +79,15 @@ variables must also be set:
* `OS_POOL_NAME` - The name of a Floating IP pool.
* `OS_NETWORK_ID` - The UUID of a network in your test environment.
+
+To make development easier, the `builtin/providers/openstack/devstack/deploy.sh`
+script will assist in installing and configuring a standardized
+[DevStack](http://docs.openstack.org/developer/devstack/) environment along with
+Golang, Terraform, and all development dependencies. It will also set the required
+environment variables in the `devstack/openrc` file.
+
+Do not run the `deploy.sh` script on your workstation or any type of production
+server. Instead, run the script within a disposable virtual machine.
+[Here's](https://github.com/berendt/terraform-configurations) an example of a
+Terraform configuration that will create an OpenStack instance and then install and
+configure DevStack inside.
From 1ff403347b42dd02a099f80815e06e2d98e04363 Mon Sep 17 00:00:00 2001
From: Joe Topjian
Date: Sun, 13 Dec 2015 03:15:52 +0000
Subject: [PATCH 222/664] provider/openstack: Fix set hash for security group
test
---
.../openstack/resource_openstack_compute_secgroup_v2_test.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2_test.go
index 4cb99fa741..28223fa1bb 100644
--- a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2_test.go
+++ b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2_test.go
@@ -97,9 +97,9 @@ func TestAccComputeV2SecGroup_self(t *testing.T) {
testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.test_group_1", &secgroup),
testAccCheckComputeV2SecGroupGroupIDMatch(t, &secgroup, &secgroup),
resource.TestCheckResourceAttr(
- "openstack_compute_secgroup_v2.test_group_1", "rule.1118853483.self", "true"),
+ "openstack_compute_secgroup_v2.test_group_1", "rule.3170486100.self", "true"),
resource.TestCheckResourceAttr(
- "openstack_compute_secgroup_v2.test_group_1", "rule.1118853483.from_group_id", ""),
+ "openstack_compute_secgroup_v2.test_group_1", "rule.3170486100.from_group_id", ""),
),
},
},
From e25a0cafa238299f83842e2f425017fca2c9de8c Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Sun, 13 Dec 2015 14:59:24 -0800
Subject: [PATCH 223/664] Update for breaking change to Azure library.
vmImageClient.ListVirtualMachineImages takes a parameter as of
68d50cb53a73edfeb7f17f5e86cdc8eb359a9528 in Azure/azure-sdk-for-go.
Passing in a parameters object whose members are all empty strings seems
to restore the previous behavior.
---
builtin/providers/azure/resource_azure_instance.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/builtin/providers/azure/resource_azure_instance.go b/builtin/providers/azure/resource_azure_instance.go
index 8a643931c3..c30b07ea41 100644
--- a/builtin/providers/azure/resource_azure_instance.go
+++ b/builtin/providers/azure/resource_azure_instance.go
@@ -682,7 +682,7 @@ func retrieveImageDetails(
func retrieveVMImageDetails(
vmImageClient virtualmachineimage.Client,
label string) (func(*virtualmachine.Role) error, string, []string, error) {
- imgs, err := vmImageClient.ListVirtualMachineImages()
+ imgs, err := vmImageClient.ListVirtualMachineImages(virtualmachineimage.ListParameters{})
if err != nil {
return nil, "", nil, fmt.Errorf("Error retrieving image details: %s", err)
}
From e9d152d586169571c7e24b620b5cb431ad7775ef Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Sun, 13 Dec 2015 15:07:54 -0800
Subject: [PATCH 224/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3edd01c266..0a609bbdd9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -49,6 +49,7 @@ BUG FIXES:
* provider/aws: Fix missing AMI issue with Launch Configurations [GH-4242]
* provider/aws: Opsworks stack SSH key is write-only [GH-4241]
* provider/aws: Fix issue with ElasticSearch Domain `access_policies` always appear changed [GH-4245]
+ * provider/azure: Update for [breaking change to upstream client library](https://github.com/Azure/azure-sdk-for-go/commit/68d50cb53a73edfeb7f17f5e86cdc8eb359a9528). [GH-4300]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
* provider/vsphere: Create and attach additional disks before bootup [GH-4196]
From 53aa3fb049ffcf395a6c4d6bb6f354cf09d4782b Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Wed, 26 Aug 2015 16:50:12 -0700
Subject: [PATCH 225/664] Entry point for chef provider.
---
builtin/bins/provider-chef/main.go | 12 ++++
builtin/providers/chef/provider.go | 79 +++++++++++++++++++++++++
builtin/providers/chef/provider_test.go | 62 +++++++++++++++++++
3 files changed, 153 insertions(+)
create mode 100644 builtin/bins/provider-chef/main.go
create mode 100644 builtin/providers/chef/provider.go
create mode 100644 builtin/providers/chef/provider_test.go
diff --git a/builtin/bins/provider-chef/main.go b/builtin/bins/provider-chef/main.go
new file mode 100644
index 0000000000..b1bd8b537e
--- /dev/null
+++ b/builtin/bins/provider-chef/main.go
@@ -0,0 +1,12 @@
+package main
+
+import (
+ "github.com/hashicorp/terraform/builtin/providers/chef"
+ "github.com/hashicorp/terraform/plugin"
+)
+
+func main() {
+ plugin.Serve(&plugin.ServeOpts{
+ ProviderFunc: chef.Provider,
+ })
+}
diff --git a/builtin/providers/chef/provider.go b/builtin/providers/chef/provider.go
new file mode 100644
index 0000000000..2319d7639b
--- /dev/null
+++ b/builtin/providers/chef/provider.go
@@ -0,0 +1,79 @@
+package chef
+
+import (
+ "io/ioutil"
+ "os"
+ "time"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+
+ chefc "github.com/go-chef/chef"
+)
+
+func Provider() terraform.ResourceProvider {
+ return &schema.Provider{
+ Schema: map[string]*schema.Schema{
+ "server_url": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: schema.EnvDefaultFunc("CHEF_SERVER_URL", nil),
+ Description: "URL of the root of the target Chef server or organization.",
+ },
+ "client_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: schema.EnvDefaultFunc("CHEF_CLIENT_NAME", nil),
+ Description: "Name of a registered client within the Chef server.",
+ },
+ "private_key_pem": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: providerPrivateKeyEnvDefault,
+ Description: "PEM-formatted private key for client authentication.",
+ },
+ "allow_unverified_ssl": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Description: "If set, the Chef client will permit unverifiable SSL certificates.",
+ },
+ },
+
+ ResourcesMap: map[string]*schema.Resource{
+ //"chef_acl": resourceChefAcl(),
+ //"chef_client": resourceChefClient(),
+ //"chef_cookbook": resourceChefCookbook(),
+ //"chef_data_bag": resourceChefDataBag(),
+ //"chef_data_bag_item": resourceChefDataBagItem(),
+ //"chef_environment": resourceChefEnvironment(),
+ //"chef_node": resourceChefNode(),
+ //"chef_role": resourceChefRole(),
+ },
+
+ ConfigureFunc: providerConfigure,
+ }
+}
+
+func providerConfigure(d *schema.ResourceData) (interface{}, error) {
+ config := &chefc.Config{
+ Name: d.Get("client_name").(string),
+ Key: d.Get("private_key_pem").(string),
+ BaseURL: d.Get("server_url").(string),
+ SkipSSL: d.Get("allow_unverified_ssl").(bool),
+ Timeout: 10 * time.Second,
+ }
+
+ return chefc.NewClient(config)
+}
+
+func providerPrivateKeyEnvDefault() (interface{}, error) {
+ if fn := os.Getenv("CHEF_PRIVATE_KEY_FILE"); fn != "" {
+ contents, err := ioutil.ReadFile(fn)
+ if err != nil {
+ return nil, err
+ }
+ return string(contents), nil
+ }
+
+ return nil, nil
+}
diff --git a/builtin/providers/chef/provider_test.go b/builtin/providers/chef/provider_test.go
new file mode 100644
index 0000000000..1d12945f46
--- /dev/null
+++ b/builtin/providers/chef/provider_test.go
@@ -0,0 +1,62 @@
+package chef
+
+import (
+ "os"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// To run these acceptance tests, you will need access to a Chef server.
+// An easy way to get one is to sign up for a hosted Chef server account
+// at https://manage.chef.io/signup , after which your base URL will
+// be something like https://api.opscode.com/organizations/example/ .
+// You will also need to create a "client" and write its private key to
+// a file somewhere.
+//
+// You can then set the following environment variables to make these
+// tests work:
+// CHEF_SERVER_URL to the base URL as described above.
+// CHEF_CLIENT_NAME to the name of the client object you created.
+// CHEF_PRIVATE_KEY_FILE to the path to the private key file you created.
+//
+// You will probably need to edit the global permissions on your Chef
+// Server account to allow this client (or all clients, if you're lazy)
+// to have both List and Create access on all types of object:
+// https://manage.chef.io/organizations/saymedia/global_permissions
+//
+// With all of that done, you can run like this:
+// make testacc TEST=./builtin/providers/chef
+
+var testAccProviders map[string]terraform.ResourceProvider
+var testAccProvider *schema.Provider
+
+func init() {
+ testAccProvider = Provider().(*schema.Provider)
+ testAccProviders = map[string]terraform.ResourceProvider{
+ "chef": testAccProvider,
+ }
+}
+
+func TestProvider(t *testing.T) {
+ if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestProvider_impl(t *testing.T) {
+ var _ terraform.ResourceProvider = Provider()
+}
+
+func testAccPreCheck(t *testing.T) {
+ if v := os.Getenv("CHEF_SERVER_URL"); v == "" {
+ t.Fatal("CHEF_SERVER_URL must be set for acceptance tests")
+ }
+ if v := os.Getenv("CHEF_CLIENT_NAME"); v == "" {
+ t.Fatal("CHEF_CLIENT_NAME must be set for acceptance tests")
+ }
+ if v := os.Getenv("CHEF_PRIVATE_KEY_FILE"); v == "" {
+ t.Fatal("CHEF_PRIVATE_KEY_FILE must be set for acceptance tests")
+ }
+}
From d583b936b2a0db4a449af32de679f0115cc3a35c Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Wed, 26 Aug 2015 17:37:09 -0700
Subject: [PATCH 226/664] chef_data_bag resource.
---
builtin/providers/chef/provider.go | 2 +-
builtin/providers/chef/resource_data_bag.go | 77 +++++++++++++++++++
.../providers/chef/resource_data_bag_test.go | 70 +++++++++++++++++
3 files changed, 148 insertions(+), 1 deletion(-)
create mode 100644 builtin/providers/chef/resource_data_bag.go
create mode 100644 builtin/providers/chef/resource_data_bag_test.go
diff --git a/builtin/providers/chef/provider.go b/builtin/providers/chef/provider.go
index 2319d7639b..9f3e41255c 100644
--- a/builtin/providers/chef/provider.go
+++ b/builtin/providers/chef/provider.go
@@ -43,7 +43,7 @@ func Provider() terraform.ResourceProvider {
//"chef_acl": resourceChefAcl(),
//"chef_client": resourceChefClient(),
//"chef_cookbook": resourceChefCookbook(),
- //"chef_data_bag": resourceChefDataBag(),
+ "chef_data_bag": resourceChefDataBag(),
//"chef_data_bag_item": resourceChefDataBagItem(),
//"chef_environment": resourceChefEnvironment(),
//"chef_node": resourceChefNode(),
diff --git a/builtin/providers/chef/resource_data_bag.go b/builtin/providers/chef/resource_data_bag.go
new file mode 100644
index 0000000000..a9c08748cd
--- /dev/null
+++ b/builtin/providers/chef/resource_data_bag.go
@@ -0,0 +1,77 @@
+package chef
+
+import (
+ "github.com/hashicorp/terraform/helper/schema"
+
+ chefc "github.com/go-chef/chef"
+)
+
+func resourceChefDataBag() *schema.Resource {
+ return &schema.Resource{
+ Create: CreateDataBag,
+ Read: ReadDataBag,
+ Delete: DeleteDataBag,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "api_uri": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func CreateDataBag(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ dataBag := &chefc.DataBag{
+ Name: d.Get("name").(string),
+ }
+
+ result, err := client.DataBags.Create(dataBag)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(dataBag.Name)
+ d.Set("api_uri", result.URI)
+ return nil
+}
+
+func ReadDataBag(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ // The Chef API provides no API to read a data bag's metadata,
+ // but we can try to read its items and use that as a proxy for
+ // whether it still exists.
+
+ name := d.Id()
+
+ _, err := client.DataBags.ListItems(name)
+ if err != nil {
+ if errRes, ok := err.(*chefc.ErrorResponse); ok {
+ if errRes.Response.StatusCode == 404 {
+ d.SetId("")
+ return nil
+ }
+ }
+ }
+ return err
+}
+
+func DeleteDataBag(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ name := d.Id()
+
+ _, err := client.DataBags.Delete(name)
+ if err == nil {
+ d.SetId("")
+ }
+ return err
+}
diff --git a/builtin/providers/chef/resource_data_bag_test.go b/builtin/providers/chef/resource_data_bag_test.go
new file mode 100644
index 0000000000..92b74e5df6
--- /dev/null
+++ b/builtin/providers/chef/resource_data_bag_test.go
@@ -0,0 +1,70 @@
+package chef
+
+import (
+ "fmt"
+ "testing"
+
+ chefc "github.com/go-chef/chef"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccDataBag_basic(t *testing.T) {
+ var dataBagName string
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccDataBagCheckDestroy(dataBagName),
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccDataBagConfig_basic,
+ Check: testAccDataBagCheckExists("chef_data_bag.test", &dataBagName),
+ },
+ },
+ })
+}
+
+func testAccDataBagCheckExists(rn string, name *string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[rn]
+ if !ok {
+ return fmt.Errorf("resource not found: %s", rn)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("data bag id not set")
+ }
+
+ client := testAccProvider.Meta().(*chefc.Client)
+ _, err := client.DataBags.ListItems(rs.Primary.ID)
+ if err != nil {
+ return fmt.Errorf("error getting data bag: %s", err)
+ }
+
+ *name = rs.Primary.ID
+
+ return nil
+ }
+}
+
+func testAccDataBagCheckDestroy(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ client := testAccProvider.Meta().(*chefc.Client)
+ result, err := client.DataBags.ListItems(name)
+ if err == nil && len(*result) != 0 {
+ return fmt.Errorf("data bag still exists")
+ }
+ if _, ok := err.(*chefc.ErrorResponse); err != nil && !ok {
+ return fmt.Errorf("got something other than an HTTP error (%v) when getting data bag", err)
+ }
+
+ return nil
+ }
+}
+
+const testAccDataBagConfig_basic = `
+resource "chef_data_bag" "test" {
+ name = "terraform-acc-test-basic"
+}
+`
From 406aba4a6289595c4573b9b3692719deac098b0e Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Thu, 27 Aug 2015 10:02:25 -0700
Subject: [PATCH 227/664] chef_data_bag_item resource.
---
builtin/providers/chef/provider.go | 20 ++-
.../providers/chef/resource_data_bag_item.go | 120 ++++++++++++++++++
.../chef/resource_data_bag_item_test.go | 95 ++++++++++++++
3 files changed, 234 insertions(+), 1 deletion(-)
create mode 100644 builtin/providers/chef/resource_data_bag_item.go
create mode 100644 builtin/providers/chef/resource_data_bag_item_test.go
diff --git a/builtin/providers/chef/provider.go b/builtin/providers/chef/provider.go
index 9f3e41255c..668d9e2080 100644
--- a/builtin/providers/chef/provider.go
+++ b/builtin/providers/chef/provider.go
@@ -1,6 +1,7 @@
package chef
import (
+ "encoding/json"
"io/ioutil"
"os"
"time"
@@ -44,7 +45,7 @@ func Provider() terraform.ResourceProvider {
//"chef_client": resourceChefClient(),
//"chef_cookbook": resourceChefCookbook(),
"chef_data_bag": resourceChefDataBag(),
- //"chef_data_bag_item": resourceChefDataBagItem(),
+ "chef_data_bag_item": resourceChefDataBagItem(),
//"chef_environment": resourceChefEnvironment(),
//"chef_node": resourceChefNode(),
//"chef_role": resourceChefRole(),
@@ -77,3 +78,20 @@ func providerPrivateKeyEnvDefault() (interface{}, error) {
return nil, nil
}
+
+func jsonStateFunc(value interface{}) string {
+ // Parse and re-stringify the JSON to make sure it's always kept
+ // in a normalized form.
+ in, ok := value.(string)
+ if !ok {
+ return "null"
+ }
+ var tmp map[string]interface{}
+
+ // Assuming the value must be valid JSON since it passed okay through
+ // our prepareDataBagItemContent function earlier.
+ json.Unmarshal([]byte(in), &tmp)
+
+ jsonValue, _ := json.Marshal(&tmp)
+ return string(jsonValue)
+}
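To make the effect of `jsonStateFunc` concrete, here is a small standalone illustration (not part of the patch) of the parse-and-re-marshal normalization it performs; the `normalize` closure is simply a restatement of the function above.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Two syntactically different but semantically identical documents.
	a := `{"something_else": true,   "id": "terraform_acc_test"}`
	b := "{\n  \"id\": \"terraform_acc_test\",\n  \"something_else\": true\n}"

	normalize := func(in string) string {
		var tmp map[string]interface{}
		json.Unmarshal([]byte(in), &tmp) // input assumed valid, as in jsonStateFunc
		out, _ := json.Marshal(&tmp)
		return string(out)
	}

	// Both normalize to the same string, so refreshing state produces no
	// spurious diff for equivalent JSON.
	fmt.Println(normalize(a) == normalize(b)) // true
}
```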
diff --git a/builtin/providers/chef/resource_data_bag_item.go b/builtin/providers/chef/resource_data_bag_item.go
new file mode 100644
index 0000000000..ff6f7ac673
--- /dev/null
+++ b/builtin/providers/chef/resource_data_bag_item.go
@@ -0,0 +1,120 @@
+package chef
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/hashicorp/terraform/helper/schema"
+
+ chefc "github.com/go-chef/chef"
+)
+
+func resourceChefDataBagItem() *schema.Resource {
+ return &schema.Resource{
+ Create: CreateDataBagItem,
+ Read: ReadDataBagItem,
+ Delete: DeleteDataBagItem,
+
+ Schema: map[string]*schema.Schema{
+ "data_bag_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "content_json": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ StateFunc: jsonStateFunc,
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func CreateDataBagItem(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ dataBagName := d.Get("data_bag_name").(string)
+ itemId, itemContent, err := prepareDataBagItemContent(d.Get("content_json").(string))
+ if err != nil {
+ return err
+ }
+
+ err = client.DataBags.CreateItem(dataBagName, itemContent)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(itemId)
+ d.Set("id", itemId)
+ return nil
+}
+
+func ReadDataBagItem(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ // The Chef API provides no API to read a data bag's metadata,
+ // but we can try to read its items and use that as a proxy for
+ // whether it still exists.
+
+ itemId := d.Id()
+ dataBagName := d.Get("data_bag_name").(string)
+
+ value, err := client.DataBags.GetItem(dataBagName, itemId)
+ if err != nil {
+ if errRes, ok := err.(*chefc.ErrorResponse); ok {
+ if errRes.Response.StatusCode == 404 {
+ d.SetId("")
+ return nil
+ }
+ } else {
+ return err
+ }
+ }
+
+ jsonContent, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+
+ d.Set("content_json", string(jsonContent))
+
+ return nil
+}
+
+func DeleteDataBagItem(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ itemId := d.Id()
+ dataBagName := d.Get("data_bag_name").(string)
+
+ err := client.DataBags.DeleteItem(dataBagName, itemId)
+ if err == nil {
+ d.SetId("")
+ d.Set("id", "")
+ }
+ return err
+}
+
+func prepareDataBagItemContent(contentJson string) (string, interface{}, error) {
+ var value map[string]interface{}
+ err := json.Unmarshal([]byte(contentJson), &value)
+ if err != nil {
+ return "", nil, err
+ }
+
+ var itemId string
+ if itemIdI, ok := value["id"]; ok {
+ itemId, _ = itemIdI.(string)
+ }
+
+ if itemId == "" {
+ return "", nil, fmt.Errorf("content_json must have id attribute, set to a string")
+ }
+
+ return itemId, value, nil
+}
diff --git a/builtin/providers/chef/resource_data_bag_item_test.go b/builtin/providers/chef/resource_data_bag_item_test.go
new file mode 100644
index 0000000000..9630d8b6c8
--- /dev/null
+++ b/builtin/providers/chef/resource_data_bag_item_test.go
@@ -0,0 +1,95 @@
+package chef
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ chefc "github.com/go-chef/chef"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccDataBagItem_basic(t *testing.T) {
+ var dataBagItemName string
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccDataBagItemCheckDestroy(dataBagItemName),
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccDataBagItemConfig_basic,
+ Check: testAccDataBagItemCheck(
+ "chef_data_bag_item.test", &dataBagItemName,
+ ),
+ },
+ },
+ })
+}
+
+func testAccDataBagItemCheck(rn string, name *string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[rn]
+ if !ok {
+ return fmt.Errorf("resource not found: %s", rn)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("data bag item id not set")
+ }
+
+ client := testAccProvider.Meta().(*chefc.Client)
+ content, err := client.DataBags.GetItem("terraform-acc-test-bag-item-basic", rs.Primary.ID)
+ if err != nil {
+ return fmt.Errorf("error getting data bag item: %s", err)
+ }
+
+ expectedContent := map[string]interface{}{
+ "id": "terraform_acc_test",
+ "something_else": true,
+ }
+ if !reflect.DeepEqual(content, expectedContent) {
+ return fmt.Errorf("wrong content: expected %#v, got %#v", expectedContent, content)
+ }
+
+ if expected := "terraform_acc_test"; rs.Primary.Attributes["id"] != expected {
+ return fmt.Errorf("wrong id; expected %#v, got %#v", expected, rs.Primary.Attributes["id"])
+ }
+
+ *name = rs.Primary.ID
+
+ return nil
+ }
+}
+
+func testAccDataBagItemCheckDestroy(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ client := testAccProvider.Meta().(*chefc.Client)
+ _, err := client.DataBags.GetItem("terraform-acc-test-bag-item-basic", name)
+ if err == nil {
+ return fmt.Errorf("data bag item still exists")
+ }
+ if _, ok := err.(*chefc.ErrorResponse); err != nil && !ok {
+ return fmt.Errorf("got something other than an HTTP error (%v) when getting data bag item", err)
+ }
+
+ return nil
+ }
+}
+
+const testAccDataBagItemConfig_basic = `
+resource "chef_data_bag" "test" {
+ name = "terraform-acc-test-bag-item-basic"
+}
+resource "chef_data_bag_item" "test" {
+ data_bag_name = "terraform-acc-test-bag-item-basic"
+ depends_on = ["chef_data_bag.test"]
+  content_json = <<EOF
+{
+    "id": "terraform_acc_test",
+    "something_else": true
+}
+EOF
+}
+`
Date: Thu, 27 Aug 2015 18:04:22 -0700
Subject: [PATCH 228/664] chef_environment resource.
---
builtin/providers/chef/provider.go | 2 +-
.../providers/chef/resource_environment.go | 183 ++++++++++++++++++
.../chef/resource_environment_test.go | 120 ++++++++++++
3 files changed, 304 insertions(+), 1 deletion(-)
create mode 100644 builtin/providers/chef/resource_environment.go
create mode 100644 builtin/providers/chef/resource_environment_test.go
diff --git a/builtin/providers/chef/provider.go b/builtin/providers/chef/provider.go
index 668d9e2080..6a7f8e5401 100644
--- a/builtin/providers/chef/provider.go
+++ b/builtin/providers/chef/provider.go
@@ -46,7 +46,7 @@ func Provider() terraform.ResourceProvider {
//"chef_cookbook": resourceChefCookbook(),
"chef_data_bag": resourceChefDataBag(),
"chef_data_bag_item": resourceChefDataBagItem(),
- //"chef_environment": resourceChefEnvironment(),
+ "chef_environment": resourceChefEnvironment(),
//"chef_node": resourceChefNode(),
//"chef_role": resourceChefRole(),
},
diff --git a/builtin/providers/chef/resource_environment.go b/builtin/providers/chef/resource_environment.go
new file mode 100644
index 0000000000..605f037acb
--- /dev/null
+++ b/builtin/providers/chef/resource_environment.go
@@ -0,0 +1,183 @@
+package chef
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/hashicorp/terraform/helper/schema"
+
+ chefc "github.com/go-chef/chef"
+)
+
+func resourceChefEnvironment() *schema.Resource {
+ return &schema.Resource{
+ Create: CreateEnvironment,
+ Update: UpdateEnvironment,
+ Read: ReadEnvironment,
+ Delete: DeleteEnvironment,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "Managed by Terraform",
+ },
+ "default_attributes_json": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "{}",
+ StateFunc: jsonStateFunc,
+ },
+ "override_attributes_json": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "{}",
+ StateFunc: jsonStateFunc,
+ },
+ "cookbook_constraints": &schema.Schema{
+ Type: schema.TypeMap,
+ Optional: true,
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ },
+ }
+}
+
+func CreateEnvironment(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ env, err := environmentFromResourceData(d)
+ if err != nil {
+ return err
+ }
+
+ _, err = client.Environments.Create(env)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(env.Name)
+ return ReadEnvironment(d, meta)
+}
+
+func UpdateEnvironment(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ env, err := environmentFromResourceData(d)
+ if err != nil {
+ return err
+ }
+
+ _, err = client.Environments.Put(env)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(env.Name)
+ return ReadEnvironment(d, meta)
+}
+
+func ReadEnvironment(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ name := d.Id()
+
+ env, err := client.Environments.Get(name)
+ if err != nil {
+ if errRes, ok := err.(*chefc.ErrorResponse); ok {
+ if errRes.Response.StatusCode == 404 {
+ d.SetId("")
+ return nil
+ }
+ } else {
+ return err
+ }
+ }
+
+ d.Set("name", env.Name)
+ d.Set("description", env.Description)
+
+ defaultAttrJson, err := json.Marshal(env.DefaultAttributes)
+ if err != nil {
+ return err
+ }
+ d.Set("default_attributes_json", defaultAttrJson)
+
+ overrideAttrJson, err := json.Marshal(env.OverrideAttributes)
+ if err != nil {
+ return err
+ }
+ d.Set("override_attributes_json", overrideAttrJson)
+
+ cookbookVersionsI := map[string]interface{}{}
+ for k, v := range env.CookbookVersions {
+ cookbookVersionsI[k] = v
+ }
+ d.Set("cookbook_constraints", cookbookVersionsI)
+
+ return nil
+}
+
+func DeleteEnvironment(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ name := d.Id()
+
+ // For some reason Environments.Delete is not exposed by the
+ // underlying client library, so we have to do this manually.
+
+ path := fmt.Sprintf("environments/%s", name)
+
+ httpReq, err := client.NewRequest("DELETE", path, nil)
+ if err != nil {
+ return err
+ }
+
+ _, err = client.Do(httpReq, nil)
+ if err == nil {
+ d.SetId("")
+ }
+
+ return err
+}
+
+func environmentFromResourceData(d *schema.ResourceData) (*chefc.Environment, error) {
+
+ env := &chefc.Environment{
+ Name: d.Get("name").(string),
+ Description: d.Get("description").(string),
+ ChefType: "environment",
+ }
+
+ var err error
+
+ err = json.Unmarshal(
+ []byte(d.Get("default_attributes_json").(string)),
+ &env.DefaultAttributes,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("default_attributes_json: %s", err)
+ }
+
+ err = json.Unmarshal(
+ []byte(d.Get("override_attributes_json").(string)),
+ &env.OverrideAttributes,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("override_attributes_json: %s", err)
+ }
+
+ env.CookbookVersions = make(map[string]string)
+ for k, vI := range d.Get("cookbook_constraints").(map[string]interface{}) {
+ env.CookbookVersions[k] = vI.(string)
+ }
+
+ return env, nil
+}
diff --git a/builtin/providers/chef/resource_environment_test.go b/builtin/providers/chef/resource_environment_test.go
new file mode 100644
index 0000000000..b441d2ffc4
--- /dev/null
+++ b/builtin/providers/chef/resource_environment_test.go
@@ -0,0 +1,120 @@
+package chef
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ chefc "github.com/go-chef/chef"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccEnvironment_basic(t *testing.T) {
+ var env chefc.Environment
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccEnvironmentCheckDestroy(&env),
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccEnvironmentConfig_basic,
+ Check: resource.ComposeTestCheckFunc(
+ testAccEnvironmentCheckExists("chef_environment.test", &env),
+ func(s *terraform.State) error {
+
+ if expected := "terraform-acc-test-basic"; env.Name != expected {
+ return fmt.Errorf("wrong name; expected %v, got %v", expected, env.Name)
+ }
+ if expected := "Terraform Acceptance Tests"; env.Description != expected {
+ return fmt.Errorf("wrong description; expected %v, got %v", expected, env.Description)
+ }
+
+ expectedConstraints := map[string]string{
+ "terraform": "= 1.0.0",
+ }
+ if !reflect.DeepEqual(env.CookbookVersions, expectedConstraints) {
+ return fmt.Errorf("wrong cookbook constraints; expected %#v, got %#v", expectedConstraints, env.CookbookVersions)
+ }
+
+ var expectedAttributes interface{}
+ expectedAttributes = map[string]interface{}{
+ "terraform_acc_test": true,
+ }
+ if !reflect.DeepEqual(env.DefaultAttributes, expectedAttributes) {
+ return fmt.Errorf("wrong default attributes; expected %#v, got %#v", expectedAttributes, env.DefaultAttributes)
+ }
+ if !reflect.DeepEqual(env.OverrideAttributes, expectedAttributes) {
+ return fmt.Errorf("wrong override attributes; expected %#v, got %#v", expectedAttributes, env.OverrideAttributes)
+ }
+
+ return nil
+ },
+ ),
+ },
+ },
+ })
+}
+
+func testAccEnvironmentCheckExists(rn string, env *chefc.Environment) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[rn]
+ if !ok {
+ return fmt.Errorf("resource not found: %s", rn)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("environment id not set")
+ }
+
+ client := testAccProvider.Meta().(*chefc.Client)
+ gotEnv, err := client.Environments.Get(rs.Primary.ID)
+ if err != nil {
+ return fmt.Errorf("error getting environment: %s", err)
+ }
+
+ *env = *gotEnv
+
+ return nil
+ }
+}
+
+func testAccEnvironmentCheckDestroy(env *chefc.Environment) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ client := testAccProvider.Meta().(*chefc.Client)
+ _, err := client.Environments.Get(env.Name)
+ if err == nil {
+ return fmt.Errorf("environment still exists")
+ }
+ if _, ok := err.(*chefc.ErrorResponse); !ok {
+ // A more specific check is tricky because Chef Server can return
+ // a few different error codes in this case depending on which
+ // part of its stack catches the error.
+ return fmt.Errorf("got something other than an HTTP error (%v) when getting environment", err)
+ }
+
+ return nil
+ }
+}
+
+const testAccEnvironmentConfig_basic = `
+resource "chef_environment" "test" {
+ name = "terraform-acc-test-basic"
+ description = "Terraform Acceptance Tests"
+  default_attributes_json = <<EOF
+{
+    "terraform_acc_test": true
+}
+EOF
+  override_attributes_json = <<EOF
+{
+    "terraform_acc_test": true
+}
+EOF
+  cookbook_constraints = {
+    "terraform" = "= 1.0.0"
+  }
+}
+`
Date: Thu, 27 Aug 2015 18:29:25 -0700
Subject: [PATCH 229/664] chef_role resource.
---
builtin/providers/chef/provider.go | 17 +-
builtin/providers/chef/resource_role.go | 185 +++++++++++++++++++
builtin/providers/chef/resource_role_test.go | 120 ++++++++++++
3 files changed, 321 insertions(+), 1 deletion(-)
create mode 100644 builtin/providers/chef/resource_role.go
create mode 100644 builtin/providers/chef/resource_role_test.go
diff --git a/builtin/providers/chef/provider.go b/builtin/providers/chef/provider.go
index 6a7f8e5401..a362b0071f 100644
--- a/builtin/providers/chef/provider.go
+++ b/builtin/providers/chef/provider.go
@@ -2,8 +2,10 @@ package chef
import (
"encoding/json"
+ "fmt"
"io/ioutil"
"os"
+ "strings"
"time"
"github.com/hashicorp/terraform/helper/schema"
@@ -48,7 +50,7 @@ func Provider() terraform.ResourceProvider {
"chef_data_bag_item": resourceChefDataBagItem(),
"chef_environment": resourceChefEnvironment(),
//"chef_node": resourceChefNode(),
- //"chef_role": resourceChefRole(),
+ "chef_role": resourceChefRole(),
},
ConfigureFunc: providerConfigure,
@@ -95,3 +97,16 @@ func jsonStateFunc(value interface{}) string {
jsonValue, _ := json.Marshal(&tmp)
return string(jsonValue)
}
+
+func runListEntryStateFunc(value interface{}) string {
+ // Recipes in run lists can either be naked, like "foo", or can
+ // be explicitly qualified as "recipe[foo]". Whichever form we use,
+ // the server will always normalize to the explicit form,
+ // so we'll normalize too and then we won't generate unnecessary
+ // diffs when we refresh.
+ in := value.(string)
+ if !strings.Contains(in, "[") {
+ return fmt.Sprintf("recipe[%s]", in)
+ }
+ return in
+}
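Similarly, a standalone illustration (not part of the patch) of the run-list normalization performed by `runListEntryStateFunc`; `normalizeRunListEntry` restates the same logic so the example compiles on its own.

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeRunListEntry restates runListEntryStateFunc: naked recipe names
// are qualified, already-qualified entries are left untouched.
func normalizeRunListEntry(in string) string {
	if !strings.Contains(in, "[") {
		return fmt.Sprintf("recipe[%s]", in)
	}
	return in
}

func main() {
	fmt.Println(normalizeRunListEntry("consul"))         // recipe[consul]
	fmt.Println(normalizeRunListEntry("recipe[consul]")) // recipe[consul] (unchanged)
	fmt.Println(normalizeRunListEntry("role[foo]"))      // role[foo] (unchanged)
}
```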
diff --git a/builtin/providers/chef/resource_role.go b/builtin/providers/chef/resource_role.go
new file mode 100644
index 0000000000..8e3f743163
--- /dev/null
+++ b/builtin/providers/chef/resource_role.go
@@ -0,0 +1,185 @@
+package chef
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/hashicorp/terraform/helper/schema"
+
+ chefc "github.com/go-chef/chef"
+)
+
+func resourceChefRole() *schema.Resource {
+ return &schema.Resource{
+ Create: CreateRole,
+ Update: UpdateRole,
+ Read: ReadRole,
+ Delete: DeleteRole,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "Managed by Terraform",
+ },
+ "default_attributes_json": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "{}",
+ StateFunc: jsonStateFunc,
+ },
+ "override_attributes_json": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "{}",
+ StateFunc: jsonStateFunc,
+ },
+ "run_list": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ StateFunc: runListEntryStateFunc,
+ },
+ },
+ },
+ }
+}
+
+func CreateRole(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ role, err := roleFromResourceData(d)
+ if err != nil {
+ return err
+ }
+
+ _, err = client.Roles.Create(role)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(role.Name)
+ return ReadRole(d, meta)
+}
+
+func UpdateRole(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ role, err := roleFromResourceData(d)
+ if err != nil {
+ return err
+ }
+
+ _, err = client.Roles.Put(role)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(role.Name)
+ return ReadRole(d, meta)
+}
+
+func ReadRole(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ name := d.Id()
+
+ role, err := client.Roles.Get(name)
+ if err != nil {
+ if errRes, ok := err.(*chefc.ErrorResponse); ok {
+ if errRes.Response.StatusCode == 404 {
+ d.SetId("")
+ return nil
+ }
+ } else {
+ return err
+ }
+ }
+
+ d.Set("name", role.Name)
+ d.Set("description", role.Description)
+
+ defaultAttrJson, err := json.Marshal(role.DefaultAttributes)
+ if err != nil {
+ return err
+ }
+ d.Set("default_attributes_json", defaultAttrJson)
+
+ overrideAttrJson, err := json.Marshal(role.OverrideAttributes)
+ if err != nil {
+ return err
+ }
+ d.Set("override_attributes_json", overrideAttrJson)
+
+ runListI := make([]interface{}, len(role.RunList))
+ for i, v := range role.RunList {
+ runListI[i] = v
+ }
+ d.Set("run_list", runListI)
+
+ return nil
+}
+
+func DeleteRole(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ name := d.Id()
+
+ // For some reason Roles.Delete is not exposed by the
+ // underlying client library, so we have to do this manually.
+
+ path := fmt.Sprintf("roles/%s", name)
+
+ httpReq, err := client.NewRequest("DELETE", path, nil)
+ if err != nil {
+ return err
+ }
+
+ _, err = client.Do(httpReq, nil)
+ if err == nil {
+ d.SetId("")
+ }
+
+ return err
+}
+
+func roleFromResourceData(d *schema.ResourceData) (*chefc.Role, error) {
+
+ role := &chefc.Role{
+ Name: d.Get("name").(string),
+ Description: d.Get("description").(string),
+ ChefType: "role",
+ }
+
+ var err error
+
+ err = json.Unmarshal(
+ []byte(d.Get("default_attributes_json").(string)),
+ &role.DefaultAttributes,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("default_attributes_json: %s", err)
+ }
+
+ err = json.Unmarshal(
+ []byte(d.Get("override_attributes_json").(string)),
+ &role.OverrideAttributes,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("override_attributes_json: %s", err)
+ }
+
+ runListI := d.Get("run_list").([]interface{})
+ role.RunList = make([]string, len(runListI))
+ for i, vI := range runListI {
+ role.RunList[i] = vI.(string)
+ }
+
+ return role, nil
+}
diff --git a/builtin/providers/chef/resource_role_test.go b/builtin/providers/chef/resource_role_test.go
new file mode 100644
index 0000000000..3859e4e858
--- /dev/null
+++ b/builtin/providers/chef/resource_role_test.go
@@ -0,0 +1,120 @@
+package chef
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ chefc "github.com/go-chef/chef"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccRole_basic(t *testing.T) {
+ var role chefc.Role
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccRoleCheckDestroy(&role),
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccRoleConfig_basic,
+ Check: resource.ComposeTestCheckFunc(
+ testAccRoleCheckExists("chef_role.test", &role),
+ func(s *terraform.State) error {
+
+ if expected := "terraform-acc-test-basic"; role.Name != expected {
+ return fmt.Errorf("wrong name; expected %v, got %v", expected, role.Name)
+ }
+ if expected := "Terraform Acceptance Tests"; role.Description != expected {
+ return fmt.Errorf("wrong description; expected %v, got %v", expected, role.Description)
+ }
+
+ expectedRunList := chefc.RunList{
+ "recipe[terraform@1.0.0]",
+ "recipe[consul]",
+ "role[foo]",
+ }
+ if !reflect.DeepEqual(role.RunList, expectedRunList) {
+ return fmt.Errorf("wrong runlist; expected %#v, got %#v", expectedRunList, role.RunList)
+ }
+
+ var expectedAttributes interface{}
+ expectedAttributes = map[string]interface{}{
+ "terraform_acc_test": true,
+ }
+ if !reflect.DeepEqual(role.DefaultAttributes, expectedAttributes) {
+ return fmt.Errorf("wrong default attributes; expected %#v, got %#v", expectedAttributes, role.DefaultAttributes)
+ }
+ if !reflect.DeepEqual(role.OverrideAttributes, expectedAttributes) {
+ return fmt.Errorf("wrong override attributes; expected %#v, got %#v", expectedAttributes, role.OverrideAttributes)
+ }
+
+ return nil
+ },
+ ),
+ },
+ },
+ })
+}
+
+func testAccRoleCheckExists(rn string, role *chefc.Role) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[rn]
+ if !ok {
+ return fmt.Errorf("resource not found: %s", rn)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("role id not set")
+ }
+
+ client := testAccProvider.Meta().(*chefc.Client)
+ gotRole, err := client.Roles.Get(rs.Primary.ID)
+ if err != nil {
+ return fmt.Errorf("error getting role: %s", err)
+ }
+
+ *role = *gotRole
+
+ return nil
+ }
+}
+
+func testAccRoleCheckDestroy(role *chefc.Role) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ client := testAccProvider.Meta().(*chefc.Client)
+ _, err := client.Roles.Get(role.Name)
+ if err == nil {
+ return fmt.Errorf("role still exists")
+ }
+ if _, ok := err.(*chefc.ErrorResponse); !ok {
+ // A more specific check is tricky because Chef Server can return
+ // a few different error codes in this case depending on which
+ // part of its stack catches the error.
+ return fmt.Errorf("got something other than an HTTP error (%v) when getting role", err)
+ }
+
+ return nil
+ }
+}
+
+const testAccRoleConfig_basic = `
+resource "chef_role" "test" {
+ name = "terraform-acc-test-basic"
+ description = "Terraform Acceptance Tests"
+  default_attributes_json = <<EOF
+{
+    "terraform_acc_test": true
+}
+EOF
+  override_attributes_json = <<EOF
+{
+    "terraform_acc_test": true
+}
+EOF
+  run_list = ["recipe[terraform@1.0.0]", "recipe[consul]", "role[foo]"]
+}
+`
Date: Thu, 27 Aug 2015 18:54:52 -0700
Subject: [PATCH 230/664] chef_node resource.
---
builtin/providers/chef/provider.go | 2 +-
builtin/providers/chef/resource_node.go | 216 +++++++++++++++++++
builtin/providers/chef/resource_node_test.go | 139 ++++++++++++
3 files changed, 356 insertions(+), 1 deletion(-)
create mode 100644 builtin/providers/chef/resource_node.go
create mode 100644 builtin/providers/chef/resource_node_test.go
diff --git a/builtin/providers/chef/provider.go b/builtin/providers/chef/provider.go
index a362b0071f..7a04b97758 100644
--- a/builtin/providers/chef/provider.go
+++ b/builtin/providers/chef/provider.go
@@ -49,7 +49,7 @@ func Provider() terraform.ResourceProvider {
"chef_data_bag": resourceChefDataBag(),
"chef_data_bag_item": resourceChefDataBagItem(),
"chef_environment": resourceChefEnvironment(),
- //"chef_node": resourceChefNode(),
+ "chef_node": resourceChefNode(),
"chef_role": resourceChefRole(),
},
diff --git a/builtin/providers/chef/resource_node.go b/builtin/providers/chef/resource_node.go
new file mode 100644
index 0000000000..6ded52ce2f
--- /dev/null
+++ b/builtin/providers/chef/resource_node.go
@@ -0,0 +1,216 @@
+package chef
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/hashicorp/terraform/helper/schema"
+
+ chefc "github.com/go-chef/chef"
+)
+
+func resourceChefNode() *schema.Resource {
+ return &schema.Resource{
+ Create: CreateNode,
+ Update: UpdateNode,
+ Read: ReadNode,
+ Delete: DeleteNode,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "environment_name": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "_default",
+ },
+ "automatic_attributes_json": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "{}",
+ StateFunc: jsonStateFunc,
+ },
+ "normal_attributes_json": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "{}",
+ StateFunc: jsonStateFunc,
+ },
+ "default_attributes_json": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "{}",
+ StateFunc: jsonStateFunc,
+ },
+ "override_attributes_json": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "{}",
+ StateFunc: jsonStateFunc,
+ },
+ "run_list": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ StateFunc: runListEntryStateFunc,
+ },
+ },
+ },
+ }
+}
+
+func CreateNode(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ node, err := nodeFromResourceData(d)
+ if err != nil {
+ return err
+ }
+
+ _, err = client.Nodes.Post(*node)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(node.Name)
+ return ReadNode(d, meta)
+}
+
+func UpdateNode(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ node, err := nodeFromResourceData(d)
+ if err != nil {
+ return err
+ }
+
+ _, err = client.Nodes.Put(*node)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(node.Name)
+ return ReadNode(d, meta)
+}
+
+func ReadNode(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ name := d.Id()
+
+ node, err := client.Nodes.Get(name)
+ if err != nil {
+ if errRes, ok := err.(*chefc.ErrorResponse); ok {
+ if errRes.Response.StatusCode == 404 {
+ d.SetId("")
+ return nil
+ }
+ } else {
+ return err
+ }
+ }
+
+ d.Set("name", node.Name)
+ d.Set("environment_name", node.Environment)
+
+ automaticAttrJson, err := json.Marshal(node.AutomaticAttributes)
+ if err != nil {
+ return err
+ }
+ d.Set("automatic_attributes_json", automaticAttrJson)
+
+ normalAttrJson, err := json.Marshal(node.NormalAttributes)
+ if err != nil {
+ return err
+ }
+ d.Set("normal_attributes_json", normalAttrJson)
+
+ defaultAttrJson, err := json.Marshal(node.DefaultAttributes)
+ if err != nil {
+ return err
+ }
+ d.Set("default_attributes_json", defaultAttrJson)
+
+ overrideAttrJson, err := json.Marshal(node.OverrideAttributes)
+ if err != nil {
+ return err
+ }
+ d.Set("override_attributes_json", overrideAttrJson)
+
+ runListI := make([]interface{}, len(node.RunList))
+ for i, v := range node.RunList {
+ runListI[i] = v
+ }
+ d.Set("run_list", runListI)
+
+ return nil
+}
+
+func DeleteNode(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ name := d.Id()
+ err := client.Nodes.Delete(name)
+
+ if err == nil {
+ d.SetId("")
+ }
+
+ return err
+}
+
+func nodeFromResourceData(d *schema.ResourceData) (*chefc.Node, error) {
+
+ node := &chefc.Node{
+ Name: d.Get("name").(string),
+ Environment: d.Get("environment_name").(string),
+ ChefType: "node",
+ JsonClass: "Chef::Node",
+ }
+
+ var err error
+
+ err = json.Unmarshal(
+ []byte(d.Get("automatic_attributes_json").(string)),
+ &node.AutomaticAttributes,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("automatic_attributes_json: %s", err)
+ }
+
+ err = json.Unmarshal(
+ []byte(d.Get("normal_attributes_json").(string)),
+ &node.NormalAttributes,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("normal_attributes_json: %s", err)
+ }
+
+ err = json.Unmarshal(
+ []byte(d.Get("default_attributes_json").(string)),
+ &node.DefaultAttributes,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("default_attributes_json: %s", err)
+ }
+
+ err = json.Unmarshal(
+ []byte(d.Get("override_attributes_json").(string)),
+ &node.OverrideAttributes,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("override_attributes_json: %s", err)
+ }
+
+ runListI := d.Get("run_list").([]interface{})
+ node.RunList = make([]string, len(runListI))
+ for i, vI := range runListI {
+ node.RunList[i] = vI.(string)
+ }
+
+ return node, nil
+}
diff --git a/builtin/providers/chef/resource_node_test.go b/builtin/providers/chef/resource_node_test.go
new file mode 100644
index 0000000000..ace6c75a3a
--- /dev/null
+++ b/builtin/providers/chef/resource_node_test.go
@@ -0,0 +1,139 @@
+package chef
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ chefc "github.com/go-chef/chef"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccNode_basic(t *testing.T) {
+ var node chefc.Node
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccNodeCheckDestroy(&node),
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccNodeConfig_basic,
+ Check: resource.ComposeTestCheckFunc(
+ testAccNodeCheckExists("chef_node.test", &node),
+ func(s *terraform.State) error {
+
+ if expected := "terraform-acc-test-basic"; node.Name != expected {
+ return fmt.Errorf("wrong name; expected %v, got %v", expected, node.Name)
+ }
+ if expected := "terraform-acc-test-node-basic"; node.Environment != expected {
+ return fmt.Errorf("wrong environment; expected %v, got %v", expected, node.Environment)
+ }
+
+ expectedRunList := []string{
+ "recipe[terraform@1.0.0]",
+ "recipe[consul]",
+ "role[foo]",
+ }
+ if !reflect.DeepEqual(node.RunList, expectedRunList) {
+ return fmt.Errorf("wrong runlist; expected %#v, got %#v", expectedRunList, node.RunList)
+ }
+
+ var expectedAttributes interface{}
+ expectedAttributes = map[string]interface{}{
+ "terraform_acc_test": true,
+ }
+ if !reflect.DeepEqual(node.AutomaticAttributes, expectedAttributes) {
+ return fmt.Errorf("wrong automatic attributes; expected %#v, got %#v", expectedAttributes, node.AutomaticAttributes)
+ }
+ if !reflect.DeepEqual(node.NormalAttributes, expectedAttributes) {
+ return fmt.Errorf("wrong normal attributes; expected %#v, got %#v", expectedAttributes, node.NormalAttributes)
+ }
+ if !reflect.DeepEqual(node.DefaultAttributes, expectedAttributes) {
+ return fmt.Errorf("wrong default attributes; expected %#v, got %#v", expectedAttributes, node.DefaultAttributes)
+ }
+ if !reflect.DeepEqual(node.OverrideAttributes, expectedAttributes) {
+ return fmt.Errorf("wrong override attributes; expected %#v, got %#v", expectedAttributes, node.OverrideAttributes)
+ }
+
+ return nil
+ },
+ ),
+ },
+ },
+ })
+}
+
+func testAccNodeCheckExists(rn string, node *chefc.Node) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[rn]
+ if !ok {
+ return fmt.Errorf("resource not found: %s", rn)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("node id not set")
+ }
+
+ client := testAccProvider.Meta().(*chefc.Client)
+ gotNode, err := client.Nodes.Get(rs.Primary.ID)
+ if err != nil {
+ return fmt.Errorf("error getting node: %s", err)
+ }
+
+ *node = gotNode
+
+ return nil
+ }
+}
+
+func testAccNodeCheckDestroy(node *chefc.Node) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ client := testAccProvider.Meta().(*chefc.Client)
+ _, err := client.Nodes.Get(node.Name)
+ if err == nil {
+ return fmt.Errorf("node still exists")
+ }
+ if _, ok := err.(*chefc.ErrorResponse); !ok {
+ // A more specific check is tricky because Chef Server can return
+ // a few different error codes in this case depending on which
+ // part of its stack catches the error.
+ return fmt.Errorf("got something other than an HTTP error (%v) when getting node", err)
+ }
+
+ return nil
+ }
+}
+
+const testAccNodeConfig_basic = `
+resource "chef_environment" "test" {
+ name = "terraform-acc-test-node-basic"
+}
+resource "chef_node" "test" {
+ name = "terraform-acc-test-basic"
+ environment_name = "terraform-acc-test-node-basic"
+ automatic_attributes_json = <
Date: Sat, 29 Aug 2015 09:16:50 -0700
Subject: [PATCH 231/664] Documentation for the Chef provider.
---
website/source/assets/stylesheets/_docs.scss | 1 +
.../docs/providers/chef/index.html.markdown | 60 +++++++++++++++++++
.../providers/chef/r/data_bag.html.markdown | 38 ++++++++++++
.../chef/r/data_bag_item.html.markdown | 48 +++++++++++++++
.../chef/r/environment.html.markdown | 40 +++++++++++++
.../docs/providers/chef/r/node.html.markdown | 48 +++++++++++++++
.../docs/providers/chef/r/role.html.markdown | 40 +++++++++++++
website/source/layouts/chef.erb | 38 ++++++++++++
website/source/layouts/docs.erb | 4 ++
9 files changed, 317 insertions(+)
create mode 100644 website/source/docs/providers/chef/index.html.markdown
create mode 100644 website/source/docs/providers/chef/r/data_bag.html.markdown
create mode 100644 website/source/docs/providers/chef/r/data_bag_item.html.markdown
create mode 100644 website/source/docs/providers/chef/r/environment.html.markdown
create mode 100644 website/source/docs/providers/chef/r/node.html.markdown
create mode 100644 website/source/docs/providers/chef/r/role.html.markdown
create mode 100644 website/source/layouts/chef.erb
diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss
index ed1a598d31..017ea474a0 100755
--- a/website/source/assets/stylesheets/_docs.scss
+++ b/website/source/assets/stylesheets/_docs.scss
@@ -9,6 +9,7 @@ body.page-sub{
body.layout-atlas,
body.layout-aws,
body.layout-azure,
+body.layout-chef,
body.layout-cloudflare,
body.layout-cloudstack,
body.layout-consul,
diff --git a/website/source/docs/providers/chef/index.html.markdown b/website/source/docs/providers/chef/index.html.markdown
new file mode 100644
index 0000000000..91bcf99826
--- /dev/null
+++ b/website/source/docs/providers/chef/index.html.markdown
@@ -0,0 +1,60 @@
+---
+layout: "chef"
+page_title: "Provider: Chef"
+sidebar_current: "docs-chef-index"
+description: |-
+ Chef is a systems and cloud infrastructure automation framework.
+---
+
+# Chef Provider
+
+[Chef](https://www.chef.io/) is a systems and cloud infrastructure automation
+framework. The Chef provider allows Terraform to manage various resources
+that exist within [Chef Server](http://docs.chef.io/chef_server.html).
+
+Use the navigation to the left to read about the available resources.
+
+## Example Usage
+
+```
+# Configure the Chef provider
+provider "chef" {
+ "server_url" = "https://api.opscode.com/organizations/example/"
+
+ // You can set up a "Client" within the Chef Server management console.
+ "client_name" = "terraform"
+ "private_key_pem" = "${file(\"chef-terraform.pem\")}"
+}
+
+# Create a Chef Environment
+resource "chef_environment" "production" {
+ name = "production"
+}
+
+# Create a Chef Role
+resource "chef_role" "app_server" {
+ name = "app_server"
+ run_list = [
+ "recipe[terraform]"
+ ]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `server_url` - (Required) The HTTP(S) API URL of the Chef server to use. If
+ the target Chef server supports organizations, use the full URL of the
+ organization you wish to configure. May be provided instead via the
+ ``CHEF_SERVER_URL`` environment variable.
+* `client_name` - (Required) The name of the client account to use when making
+ requests. This must have been already configured on the Chef server.
+ May be provided instead via the ``CHEF_CLIENT_NAME`` environment variable.
+* `private_key_pem` - (Required) The PEM-formatted private key belonging to
+ the configured client. This is issued by the server when a new client object
+ is created. May be provided instead in a file whose path is in the
+ ``CHEF_PRIVATE_KEY_FILE`` environment variable.
+* `allow_unverified_ssl` - (Optional) Boolean indicating whether to make
+ requests to a Chef server whose SSL certificate cannot be verified. Defaults
+ to ``false``.
diff --git a/website/source/docs/providers/chef/r/data_bag.html.markdown b/website/source/docs/providers/chef/r/data_bag.html.markdown
new file mode 100644
index 0000000000..6df60d84f5
--- /dev/null
+++ b/website/source/docs/providers/chef/r/data_bag.html.markdown
@@ -0,0 +1,38 @@
+---
+layout: "chef"
+page_title: "Chef: chef_data_bag"
+sidebar_current: "docs-chef-resource-data-bag"
+description: |-
+ Creates and manages a data bag in Chef Server.
+---
+
+# chef\_data\_bag
+
+A [data bag](http://docs.chef.io/data_bags.html) is a collection of
+configuration objects that are stored as JSON in Chef Server and can be
+retrieved and used in Chef recipes.
+
+This resource creates the data bag itself. Inside each data bag is a collection
+of items which can be created using the ``chef_data_bag_item`` resource.
+
+## Example Usage
+
+```
+resource "chef_data_bag" "example" {
+ name = "example-data-bag"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The unique name to assign to the data bag. This is the
+ name that other server clients will use to find and retrieve data from the
+ data bag.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `api_url` - The URL representing this data bag in the Chef server API.
diff --git a/website/source/docs/providers/chef/r/data_bag_item.html.markdown b/website/source/docs/providers/chef/r/data_bag_item.html.markdown
new file mode 100644
index 0000000000..2265c16e4f
--- /dev/null
+++ b/website/source/docs/providers/chef/r/data_bag_item.html.markdown
@@ -0,0 +1,48 @@
+---
+layout: "chef"
+page_title: "Chef: chef_data_bag_item"
+sidebar_current: "docs-chef-resource-data-bag-item"
+description: |-
+ Creates and manages an object within a data bag in Chef Server.
+---
+
+# chef\_data\_bag\_item
+
+A [data bag](http://docs.chef.io/data_bags.html) is a collection of
+configuration objects that are stored as JSON in Chef Server and can be
+retrieved and used in Chef recipes.
+
+This resource creates objects within an existing data bag. To create the
+data bag itself, use the ``chef_data_bag`` resource.
+
+## Example Usage
+
+```
+resource "chef_data_bag_item" "example" {
+ data_bag_name = "example-data-bag"
+ content_json = <
+ <% content_for :sidebar do %>
+
+ <% end %>
+
+ <%= yield %>
+ <% end %>
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb
index ff088e8e8c..3deb5be98f 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -133,6 +133,10 @@
Azure
+ >
+ Chef
+
+
>
CloudFlare
From 98fc16ec06128a5ae954861b6acb1be49d2d8582 Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Sun, 13 Dec 2015 17:38:08 -0800
Subject: [PATCH 232/664] Only refresh project name if Rundeck server provides
it.
It seems that not all Rundeck servers consistently return the project name
when retrieving a job. Not yet sure in what situations it is or isn't
returned, but since jobs are not allowed to move between projects anyway
it doesn't hurt to just skip refreshing it if the server provides no
value.
---
builtin/providers/rundeck/resource_job.go | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/rundeck/resource_job.go b/builtin/providers/rundeck/resource_job.go
index c9af25b0b7..5ef863bd24 100644
--- a/builtin/providers/rundeck/resource_job.go
+++ b/builtin/providers/rundeck/resource_job.go
@@ -463,7 +463,14 @@ func jobToResourceData(job *rundeck.JobDetail, d *schema.ResourceData) error {
d.Set("id", job.ID)
d.Set("name", job.Name)
d.Set("group_name", job.GroupName)
- d.Set("project_name", job.ProjectName)
+
+ // The project name is not consistently returned in all rundeck versions,
+ // so we'll only update it if it's set. Jobs can't move between projects
+ // anyway, so this is harmless.
+ if job.ProjectName != "" {
+ d.Set("project_name", job.ProjectName)
+ }
+
d.Set("description", job.Description)
d.Set("log_level", job.LogLevel)
d.Set("allow_concurrent_executions", job.AllowConcurrentExecutions)
From 38509d583dc6abddd6067d5a9a1c58071fb4ae00 Mon Sep 17 00:00:00 2001
From: justnom
Date: Fri, 11 Dec 2015 12:04:06 -0500
Subject: [PATCH 233/664] Fixes #4253
---
.../aws/resource_aws_network_interface.go | 50 ++++++++++++++++++-
1 file changed, 49 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/aws/resource_aws_network_interface.go b/builtin/providers/aws/resource_aws_network_interface.go
index d994e56545..0b7ac4cfee 100644
--- a/builtin/providers/aws/resource_aws_network_interface.go
+++ b/builtin/providers/aws/resource_aws_network_interface.go
@@ -33,7 +33,6 @@ func resourceAwsNetworkInterface() *schema.Resource {
"private_ips": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
- ForceNew: true,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
@@ -200,6 +199,14 @@ func resourceAwsNetworkInterfaceDetach(oa *schema.Set, meta interface{}, eniId s
return nil
}
+func convertToAwsStringSlice(s []interface{}) []*string {
+ var b []*string
+ for _, i := range s {
+ b = append(b, aws.String(i.(string)))
+ }
+ return b
+}
+
func resourceAwsNetworkInterfaceUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
d.Partial(true)
@@ -230,6 +237,47 @@ func resourceAwsNetworkInterfaceUpdate(d *schema.ResourceData, meta interface{})
d.SetPartial("attachment")
}
+ if d.HasChange("private_ips") {
+ o, n := d.GetChange("private_ips")
+ if o == nil {
+ o = new(schema.Set)
+ }
+ if n == nil {
+ n = new(schema.Set)
+ }
+
+ os := o.(*schema.Set)
+ ns := n.(*schema.Set)
+
+ // Unassign old IP addresses
+ unassignIps := os.Difference(ns)
+ if unassignIps.Len() != 0 {
+ input := &ec2.UnassignPrivateIpAddressesInput{
+ NetworkInterfaceId: aws.String(d.Id()),
+ PrivateIpAddresses: convertToAwsStringSlice(unassignIps.List()),
+ }
+ _, err := conn.UnassignPrivateIpAddresses(input)
+ if err != nil {
+ return fmt.Errorf("Failure to unassign Private IPs: %s", err)
+ }
+ }
+
+ // Assign new IP addresses
+ assignIps := ns.Difference(os)
+ if assignIps.Len() != 0 {
+ input := &ec2.AssignPrivateIpAddressesInput{
+ NetworkInterfaceId: aws.String(d.Id()),
+ PrivateIpAddresses: convertToAwsStringSlice(assignIps.List()),
+ }
+ _, err := conn.AssignPrivateIpAddresses(input)
+ if err != nil {
+ return fmt.Errorf("Failure to assign Private IPs: %s", err)
+ }
+ }
+
+ d.SetPartial("private_ips")
+ }
+
request := &ec2.ModifyNetworkInterfaceAttributeInput{
NetworkInterfaceId: aws.String(d.Id()),
SourceDestCheck: &ec2.AttributeBooleanValue{Value: aws.Bool(d.Get("source_dest_check").(bool))},
From 063d770e517b2273c2766f7135613f9adf814b0e Mon Sep 17 00:00:00 2001
From: Spencer Nelson
Date: Mon, 14 Dec 2015 11:26:44 -0500
Subject: [PATCH 234/664] provider/aws: Kinesis DescribeStream pagination
Each call to the Kinesis DescribeStream API returns a limited number of
shards. When interrogating AWS for the state of a Kinesis stream, the
client needs to page through the API's responses to get the true number
of shards.
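For readers unfamiliar with the API: DescribeStream marks a truncated response with `HasMoreShards` and expects the last shard ID it returned to be passed back as `ExclusiveStartShardId` on the next call. The patch below relies on the SDK's `DescribeStreamPages` helper, but a hand-rolled equivalent would look roughly like this sketch (the `countShards` function and package name are illustrative, not part of the patch):

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

// countShards pages through DescribeStream until the stream description
// reports no further shards, accumulating the true shard count.
func countShards(conn *kinesis.Kinesis, name string) (int, error) {
	total := 0
	var startAfter *string
	for {
		out, err := conn.DescribeStream(&kinesis.DescribeStreamInput{
			StreamName:            aws.String(name),
			ExclusiveStartShardId: startAfter,
		})
		if err != nil {
			return 0, err
		}
		desc := out.StreamDescription
		total += len(desc.Shards)
		if !aws.BoolValue(desc.HasMoreShards) || len(desc.Shards) == 0 {
			return total, nil
		}
		// Resume the next page after the last shard already seen.
		startAfter = desc.Shards[len(desc.Shards)-1].ShardId
	}
}
```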
---
.../aws/resource_aws_kinesis_stream.go | 48 ++++++++++++-------
1 file changed, 32 insertions(+), 16 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_kinesis_stream.go b/builtin/providers/aws/resource_aws_kinesis_stream.go
index 1abb9dbc32..76beb3ac45 100644
--- a/builtin/providers/aws/resource_aws_kinesis_stream.go
+++ b/builtin/providers/aws/resource_aws_kinesis_stream.go
@@ -74,9 +74,10 @@ func resourceAwsKinesisStreamCreate(d *schema.ResourceData, meta interface{}) er
sn, err)
}
- s := streamRaw.(*kinesis.StreamDescription)
- d.SetId(*s.StreamARN)
- d.Set("arn", s.StreamARN)
+ s := streamRaw.(kinesisStreamState)
+ d.SetId(s.arn)
+ d.Set("arn", s.arn)
+ d.Set("shard_count", s.shardCount)
return resourceAwsKinesisStreamUpdate(d, meta)
}
@@ -98,10 +99,8 @@ func resourceAwsKinesisStreamUpdate(d *schema.ResourceData, meta interface{}) er
func resourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).kinesisconn
sn := d.Get("name").(string)
- describeOpts := &kinesis.DescribeStreamInput{
- StreamName: aws.String(sn),
- }
- resp, err := conn.DescribeStream(describeOpts)
+
+ state, err := readKinesisStreamState(conn, sn)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "ResourceNotFoundException" {
@@ -111,11 +110,10 @@ func resourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) erro
return fmt.Errorf("[WARN] Error reading Kinesis Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code())
}
return err
- }
- s := resp.StreamDescription
- d.Set("arn", *s.StreamARN)
- d.Set("shard_count", len(s.Shards))
+ }
+ d.Set("arn", state.arn)
+ d.Set("shard_count", state.shardCount)
// set tags
describeTagsOpts := &kinesis.ListTagsForStreamInput{
@@ -162,12 +160,30 @@ func resourceAwsKinesisStreamDelete(d *schema.ResourceData, meta interface{}) er
return nil
}
+type kinesisStreamState struct {
+ arn string
+ status string
+ shardCount int
+}
+
+func readKinesisStreamState(conn *kinesis.Kinesis, sn string) (kinesisStreamState, error) {
+ describeOpts := &kinesis.DescribeStreamInput{
+ StreamName: aws.String(sn),
+ }
+
+ var state kinesisStreamState
+ err := conn.DescribeStreamPages(describeOpts, func(page *kinesis.DescribeStreamOutput, last bool) (shouldContinue bool) {
+ state.arn = aws.StringValue(page.StreamDescription.StreamARN)
+ state.status = aws.StringValue(page.StreamDescription.StreamStatus)
+ state.shardCount += len(page.StreamDescription.Shards)
+ return !last
+ })
+ return state, err
+}
+
func streamStateRefreshFunc(conn *kinesis.Kinesis, sn string) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
- describeOpts := &kinesis.DescribeStreamInput{
- StreamName: aws.String(sn),
- }
- resp, err := conn.DescribeStream(describeOpts)
+ state, err := readKinesisStreamState(conn, sn)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "ResourceNotFoundException" {
@@ -178,6 +194,6 @@ func streamStateRefreshFunc(conn *kinesis.Kinesis, sn string) resource.StateRefr
return nil, "failed", err
}
- return resp.StreamDescription, *resp.StreamDescription.StreamStatus, nil
+ return state, state.status, nil
}
}
From 559aa50a318d454e363df8d8aeaa9b2f8f96a3c8 Mon Sep 17 00:00:00 2001
From: Chris Marchesi
Date: Mon, 14 Dec 2015 10:42:08 -0800
Subject: [PATCH 235/664] Retry MalformedPolicy errors due to newly created
principals
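A note on the retry helper used here: at this point in the codebase, `resource.Retry` re-invokes its callback whenever the callback returns a plain error, and stops immediately when the error is wrapped in `resource.RetryError`. A stripped-down sketch of the pattern, with a hypothetical `putPolicy` callback standing in for the S3 request:

```go
package example

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/hashicorp/terraform/helper/resource"
)

// retryOnMalformedPolicy retries for up to a minute while the API reports
// MalformedPolicy, which can occur when a referenced IAM principal has not
// finished propagating yet.
func retryOnMalformedPolicy(putPolicy func() error) error {
	return resource.Retry(1*time.Minute, func() error {
		if err := putPolicy(); err != nil {
			if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "MalformedPolicy" {
				return err // plain error: retry
			}
			return resource.RetryError{Err: err} // wrapped error: give up
		}
		return nil // success
	})
}
```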
---
.../providers/aws/resource_aws_s3_bucket.go | 19 ++++++++++++++++++-
1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/aws/resource_aws_s3_bucket.go b/builtin/providers/aws/resource_aws_s3_bucket.go
index ec57452022..22f3544ff1 100644
--- a/builtin/providers/aws/resource_aws_s3_bucket.go
+++ b/builtin/providers/aws/resource_aws_s3_bucket.go
@@ -5,7 +5,9 @@ import (
"encoding/json"
"fmt"
"log"
+ "time"
+ "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/aws/aws-sdk-go/aws"
@@ -466,9 +468,24 @@ func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) erro
if policy != "" {
log.Printf("[DEBUG] S3 bucket: %s, put policy: %s", bucket, policy)
- _, err := s3conn.PutBucketPolicy(&s3.PutBucketPolicyInput{
+ params := &s3.PutBucketPolicyInput{
Bucket: aws.String(bucket),
Policy: aws.String(policy),
+ }
+
+ err := resource.Retry(1*time.Minute, func() error {
+ if _, err := s3conn.PutBucketPolicy(params); err != nil {
+ if awserr, ok := err.(awserr.Error); ok {
+ if awserr.Code() == "MalformedPolicy" {
+ // Retryable
+ return awserr
+ }
+ }
+ // Not retryable
+ return resource.RetryError{Err: err}
+ }
+ // No error
+ return nil
})
if err != nil {
From bfa4a881700c1498cb1b0085aff65c47a13db2b7 Mon Sep 17 00:00:00 2001
From: Chris Marchesi
Date: Mon, 14 Dec 2015 11:12:06 -0800
Subject: [PATCH 236/664] Retry InvalidParameterValueException errors due to
newly created resources
---
...esource_aws_lambda_event_source_mapping.go | 25 ++++++++++++++++---
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go b/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go
index 70ca3a01c8..4adb3f0448 100644
--- a/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go
+++ b/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go
@@ -3,10 +3,13 @@ package aws
import (
"fmt"
"log"
+ "time"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/lambda"
+ "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
@@ -88,14 +91,28 @@ func resourceAwsLambdaEventSourceMappingCreate(d *schema.ResourceData, meta inte
Enabled: aws.Bool(d.Get("enabled").(bool)),
}
- eventSourceMappingConfiguration, err := conn.CreateEventSourceMapping(params)
+ err := resource.Retry(1*time.Minute, func() error {
+ eventSourceMappingConfiguration, err := conn.CreateEventSourceMapping(params)
+ if err != nil {
+ if awserr, ok := err.(awserr.Error); ok {
+ if awserr.Code() == "InvalidParameterValueException" {
+ // Retryable
+ return awserr
+ }
+ }
+ // Not retryable
+ return resource.RetryError{Err: err}
+ }
+ // No error
+ d.Set("uuid", eventSourceMappingConfiguration.UUID)
+ d.SetId(*eventSourceMappingConfiguration.UUID)
+ return nil
+ })
+
if err != nil {
return fmt.Errorf("Error creating Lambda event source mapping: %s", err)
}
- d.Set("uuid", eventSourceMappingConfiguration.UUID)
- d.SetId(*eventSourceMappingConfiguration.UUID)
-
return resourceAwsLambdaEventSourceMappingRead(d, meta)
}
From d4892065300f9a5c417323d4492aa349615b5fac Mon Sep 17 00:00:00 2001
From: captainill
Date: Mon, 14 Dec 2015 12:40:21 -0800
Subject: [PATCH 237/664] smaller font-size in doc headers for readability with
var names as titles
---
website/source/assets/stylesheets/_docs.scss | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss
index 017ea474a0..5f2b070212 100755
--- a/website/source/assets/stylesheets/_docs.scss
+++ b/website/source/assets/stylesheets/_docs.scss
@@ -202,7 +202,9 @@ body.layout-intro{
h1{
color: $purple;
+ font-size: 36px;
text-transform: uppercase;
+ word-wrap: break-word;
padding-bottom: 24px;
margin-top: 40px;
margin-bottom: 24px;
@@ -219,7 +221,6 @@ body.layout-intro{
}
}
-
@media (max-width: 992px) {
body.layout-docs,
body.layout-inner,
@@ -280,6 +281,7 @@ body.layout-intro{
.bs-docs-section{
h1{
+ font-size: 32px;
padding-top: 24px;
border-top: 1px solid #eeeeee;
}
@@ -293,7 +295,7 @@ body.layout-intro{
}
h1{
- font-size: 32px;
+ font-size: 28px;
}
}
}
From d7a73937eb847a63a3a11c33ea409a8bd0e6efaf Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Mon, 14 Dec 2015 14:41:54 -0600
Subject: [PATCH 238/664] provider/aws: Guard against nil DB Parameter values
---
builtin/providers/aws/structure.go | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go
index b5ca83a797..748ecc88be 100644
--- a/builtin/providers/aws/structure.go
+++ b/builtin/providers/aws/structure.go
@@ -399,10 +399,16 @@ func flattenEcsContainerDefinitions(definitions []*ecs.ContainerDefinition) (str
func flattenParameters(list []*rds.Parameter) []map[string]interface{} {
result := make([]map[string]interface{}, 0, len(list))
for _, i := range list {
- result = append(result, map[string]interface{}{
- "name": strings.ToLower(*i.ParameterName),
- "value": strings.ToLower(*i.ParameterValue),
- })
+ if i.ParameterName != nil {
+ r := make(map[string]interface{})
+ r["name"] = strings.ToLower(*i.ParameterName)
+ // Default empty string, guard against nil parameter values
+ r["value"] = ""
+ if i.ParameterValue != nil {
+ r["value"] = strings.ToLower(*i.ParameterValue)
+ }
+ result = append(result, r)
+ }
}
return result
}
From 81cf5294cc9e5d7dfb5eb3a6d4aa26ec051713a3 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Mon, 14 Dec 2015 18:49:20 -0500
Subject: [PATCH 239/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0361c6aadb..a83bb182cd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -50,6 +50,7 @@ BUG FIXES:
* provider/aws: Fix missing AMI issue with Launch Configurations [GH-4242]
* provider/aws: Opsworks stack SSH key is write-only [GH-4241]
* provider/aws: Fix issue with ElasticSearch Domain `access_policies` always appear changed [GH-4245]
+ * provider/aws: Fix issue with nil parameter group value causing panic in `aws_db_parameter_group` [GH-4318]
* provider/azure: Update for [breaking change to upstream client library](https://github.com/Azure/azure-sdk-for-go/commit/68d50cb53a73edfeb7f17f5e86cdc8eb359a9528). [GH-4300]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
From c72342eefc213f889b4c03f787ae2d872e2f25da Mon Sep 17 00:00:00 2001
From: Sander van Harmelen
Date: Tue, 15 Dec 2015 16:39:23 +0100
Subject: [PATCH 240/664] Add SSH agent support for Windows
The Windows support is limited to the Pageant SSH authentication agent.
This fixes #3423
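The portability comes from `github.com/xanzy/ssh-agent`: its `New()` talks to the `SSH_AUTH_SOCK` socket on Unix-like systems and to Pageant on Windows, returning a nil connection in the Pageant case. A rough usage sketch (the `agentAuth` wrapper is illustrative, not part of this patch):

```go
package example

import (
	"golang.org/x/crypto/ssh"

	sshagent "github.com/xanzy/ssh-agent"
)

// agentAuth builds an ssh.AuthMethod backed by whichever agent is available,
// along with a cleanup function for the agent connection.
func agentAuth() (ssh.AuthMethod, func() error, error) {
	ag, conn, err := sshagent.New()
	if err != nil {
		return nil, nil, err
	}
	closer := func() error {
		// Pageant exposes no underlying socket, so conn may be nil here.
		if conn != nil {
			return conn.Close()
		}
		return nil
	}
	return ssh.PublicKeysCallback(ag.Signers), closer, nil
}
```

This is also why the `Close` method in the diff below gains a nil check on the connection.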
---
communicator/ssh/provisioner.go | 18 +++++++++---------
.../docs/provisioners/connection.html.markdown | 4 +++-
2 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/communicator/ssh/provisioner.go b/communicator/ssh/provisioner.go
index f9f889037e..48eaafe388 100644
--- a/communicator/ssh/provisioner.go
+++ b/communicator/ssh/provisioner.go
@@ -11,6 +11,7 @@ import (
"github.com/hashicorp/terraform/helper/pathorcontents"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/mapstructure"
+ "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
)
@@ -245,22 +246,17 @@ func connectToAgent(connInfo *connectionInfo) (*sshAgent, error) {
return nil, nil
}
- sshAuthSock := os.Getenv("SSH_AUTH_SOCK")
-
- if sshAuthSock == "" {
- return nil, fmt.Errorf("SSH Requested but SSH_AUTH_SOCK not-specified")
- }
-
- conn, err := net.Dial("unix", sshAuthSock)
+ agent, conn, err := sshagent.New()
if err != nil {
- return nil, fmt.Errorf("Error connecting to SSH_AUTH_SOCK: %v", err)
+ return nil, err
}
// connection close is handled over in Communicator
return &sshAgent{
- agent: agent.NewClient(conn),
+ agent: agent,
conn: conn,
}, nil
+
}
// A tiny wrapper around an agent.Agent to expose the ability to close its
@@ -271,6 +267,10 @@ type sshAgent struct {
}
func (a *sshAgent) Close() error {
+ if a.conn == nil {
+ return nil
+ }
+
return a.conn.Close()
}
diff --git a/website/source/docs/provisioners/connection.html.markdown b/website/source/docs/provisioners/connection.html.markdown
index 83fa8ebb4a..52f7be7589 100644
--- a/website/source/docs/provisioners/connection.html.markdown
+++ b/website/source/docs/provisioners/connection.html.markdown
@@ -73,7 +73,9 @@ provisioner "file" {
function](/docs/configuration/interpolation.html#file_path_). This takes
preference over the password if provided.
-* `agent` - Set to false to disable using ssh-agent to authenticate.
+* `agent` - Set to false to disable using ssh-agent to authenticate. On Windows the
+ only supported SSH authentication agent is
+ [Pageant](http://the.earth.li/~sgtatham/putty/0.66/htmldoc/Chapter9.html#pageant)
**Additional arguments only supported by the "winrm" connection type:**
From 43fb403bcea744b30a39519500c13282373e63fa Mon Sep 17 00:00:00 2001
From: Lars Wander
Date: Tue, 15 Dec 2015 11:39:31 -0500
Subject: [PATCH 241/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a83bb182cd..88e4137b06 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -34,6 +34,7 @@ IMPROVEMENTS:
* provider/vsphere: Add folder handling for folder-qualified vm names [GH-3939]
* provider/vsphere: Change ip_address parameter for ipv6 support [GH-4035]
* provider/openstack: Increase instance timeout from 10 to 30 minutes [GH-4223]
+ * provider/google: Add `restart_policy` attribute to `google_managed_instance_group` [GH-3892]
BUG FIXES:
From 5f5459a1fbc5efc0fde9f8edea47f7549eff9375 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Thu, 10 Dec 2015 15:43:13 -0600
Subject: [PATCH 242/664] provider/aws: Refactor AWS Authentication chain
- update auth checking to check metadata header
- refactor tests to not export os env vars
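The heart of the refactor is a lazily evaluated credentials chain that only gains the EC2 role provider when a quick probe of the metadata endpoint answers with an EC2-style `Server` header. A condensed sketch of that decision, assuming the same aws-sdk-go packages that appear in the diff:

```go
package example

import (
	"net/http"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

// chainCreds consults static keys, then environment variables, then the
// shared credentials file, and appends instance-profile credentials only
// when the metadata endpoint looks like a real EC2 metadata service.
func chainCreds(key, secret, token, metadataURL string) *credentials.Credentials {
	providers := []credentials.Provider{
		&credentials.StaticProvider{Value: credentials.Value{
			AccessKeyID:     key,
			SecretAccessKey: secret,
			SessionToken:    token,
		}},
		&credentials.EnvProvider{},
		&credentials.SharedCredentialsProvider{},
	}

	client := http.Client{Timeout: 100 * time.Millisecond}
	if resp, err := client.Get(metadataURL); err == nil {
		if strings.Contains(resp.Header.Get("Server"), "EC2") {
			providers = append(providers, &ec2rolecreds.EC2RoleProvider{
				Client: ec2metadata.New(session.New(&aws.Config{
					Endpoint: aws.String(metadataURL),
				})),
			})
		}
	}
	return credentials.NewChainCredentials(providers)
}
```

Making the metadata URL overridable is also what lets the new tests point the chain at a local `httptest` server instead of 169.254.169.254.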
---
builtin/providers/aws/config.go | 66 +++++-
builtin/providers/aws/config_test.go | 299 +++++++++++++++++++++++++++
builtin/providers/aws/provider.go | 95 +--------
3 files changed, 369 insertions(+), 91 deletions(-)
create mode 100644 builtin/providers/aws/config_test.go
diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go
index d8a9ff862d..e7c7628dc4 100644
--- a/builtin/providers/aws/config.go
+++ b/builtin/providers/aws/config.go
@@ -3,14 +3,19 @@ package aws
import (
"fmt"
"log"
+ "net/http"
+ "os"
"strings"
+ "time"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-multierror"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/credentials"
+ awsCredentials "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/cloudformation"
@@ -104,9 +109,14 @@ func (c *Config) Client() (interface{}, error) {
client.region = c.Region
log.Println("[INFO] Building AWS auth structure")
- // We fetched all credential sources in Provider. If they are
- // available, they'll already be in c. See Provider definition.
- creds := credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token)
+ creds := getCreds(c.AccessKey, c.SecretKey, c.Token)
+ // Call Get to check for credential provider. If nothing found, we'll get an
+ // error, and we can present it nicely to the user
+ _, err = creds.Get()
+ if err != nil {
+ errs = append(errs, fmt.Errorf("Error loading credentials for AWS Provider: %s", err))
+ return nil, &multierror.Error{Errors: errs}
+ }
awsConfig := &aws.Config{
Credentials: creds,
Region: aws.String(c.Region),
@@ -118,7 +128,7 @@ func (c *Config) Client() (interface{}, error) {
sess := session.New(awsConfig)
client.iamconn = iam.New(sess)
- err := c.ValidateCredentials(client.iamconn)
+ err = c.ValidateCredentials(client.iamconn)
if err != nil {
errs = append(errs, err)
}
@@ -316,3 +326,49 @@ func (c *Config) ValidateAccountId(iamconn *iam.IAM) error {
return nil
}
+
+// This function is responsible for reading credentials from the
+// environment in the case that they're not explicitly specified
+// in the Terraform configuration.
+func getCreds(key, secret, token string) *awsCredentials.Credentials {
+ // build a chain provider, lazy-evaluated by aws-sdk
+ providers := []awsCredentials.Provider{
+ &awsCredentials.StaticProvider{Value: awsCredentials.Value{
+ AccessKeyID: key,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ }},
+ &awsCredentials.EnvProvider{},
+ &awsCredentials.SharedCredentialsProvider{},
+ }
+
+ // We only look in the EC2 metadata API if we can connect
+ // to the metadata service within a reasonable amount of time
+ metadataURL := os.Getenv("AWS_METADATA_URL")
+ if metadataURL == "" {
+ metadataURL = "http://169.254.169.254:80/latest"
+ }
+ c := http.Client{
+ Timeout: 100 * time.Millisecond,
+ }
+
+ r, err := c.Get(metadataURL)
+ var useIAM bool
+ if err == nil {
+ if r.Header["Server"] != nil && strings.Contains(r.Header["Server"][0], "EC2") {
+ useIAM = true
+ }
+ }
+
+ if useIAM {
+ log.Printf("[DEBUG] EC2 Metadata service found, adding EC2 Role Credential Provider")
+ providers = append(providers, &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.New(session.New(&aws.Config{
+ Endpoint: aws.String(metadataURL),
+ })),
+ })
+ } else {
+ log.Printf("[DEBUG] EC2 Metadata service not found, not adding EC2 Role Credential Provider")
+ }
+ return awsCredentials.NewChainCredentials(providers)
+}
diff --git a/builtin/providers/aws/config_test.go b/builtin/providers/aws/config_test.go
new file mode 100644
index 0000000000..316bf18939
--- /dev/null
+++ b/builtin/providers/aws/config_test.go
@@ -0,0 +1,299 @@
+package aws
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+func TestAWSConfig_shouldError(t *testing.T) {
+ resetEnv := unsetEnv(t)
+ defer resetEnv()
+ cfg := Config{}
+
+ c := getCreds(cfg.AccessKey, cfg.SecretKey, cfg.Token)
+ _, err := c.Get()
+ if awsErr, ok := err.(awserr.Error); ok {
+ if awsErr.Code() != "NoCredentialProviders" {
+ t.Fatalf("Expected NoCredentialProviders error")
+ }
+ }
+ if err == nil {
+ t.Fatalf("Expected an error with empty env, keys, and IAM in AWS Config")
+ }
+}
+
+func TestAWSConfig_shouldBeStatic(t *testing.T) {
+ simple := []struct {
+ Key, Secret, Token string
+ }{
+ {
+ Key: "test",
+ Secret: "secret",
+ }, {
+ Key: "test",
+ Secret: "test",
+ Token: "test",
+ },
+ }
+
+ for _, c := range simple {
+ cfg := Config{
+ AccessKey: c.Key,
+ SecretKey: c.Secret,
+ Token: c.Token,
+ }
+
+ creds := getCreds(cfg.AccessKey, cfg.SecretKey, cfg.Token)
+ if creds == nil {
+ t.Fatalf("Expected a static creds provider to be returned")
+ }
+ v, err := creds.Get()
+ if err != nil {
+ t.Fatalf("Error gettings creds: %s", err)
+ }
+ if v.AccessKeyID != c.Key {
+ t.Fatalf("AccessKeyID mismatch, expected: (%s), got (%s)", c.Key, v.AccessKeyID)
+ }
+ if v.SecretAccessKey != c.Secret {
+ t.Fatalf("SecretAccessKey mismatch, expected: (%s), got (%s)", c.Secret, v.SecretAccessKey)
+ }
+ if v.SessionToken != c.Token {
+ t.Fatalf("SessionToken mismatch, expected: (%s), got (%s)", c.Token, v.SessionToken)
+ }
+ }
+}
+
+// TestAWSConfig_shouldIAM is designed to test the scenario of running Terraform
+// from an EC2 instance, without environment variables or manually supplied
+// credentials.
+func TestAWSConfig_shouldIAM(t *testing.T) {
+ // clear AWS_* environment variables
+ resetEnv := unsetEnv(t)
+ defer resetEnv()
+
+ // capture the test server's close method, to call after the test returns
+ ts := awsEnv(t)
+ defer ts()
+
+ // An empty config, no key supplied
+ cfg := Config{}
+
+ creds := getCreds(cfg.AccessKey, cfg.SecretKey, cfg.Token)
+ if creds == nil {
+ t.Fatalf("Expected a static creds provider to be returned")
+ }
+
+ v, err := creds.Get()
+ if err != nil {
+ t.Fatalf("Error gettings creds: %s", err)
+ }
+ if v.AccessKeyID != "somekey" {
+ t.Fatalf("AccessKeyID mismatch, expected: (somekey), got (%s)", v.AccessKeyID)
+ }
+ if v.SecretAccessKey != "somesecret" {
+ t.Fatalf("SecretAccessKey mismatch, expected: (somesecret), got (%s)", v.SecretAccessKey)
+ }
+ if v.SessionToken != "sometoken" {
+ t.Fatalf("SessionToken mismatch, expected: (sometoken), got (%s)", v.SessionToken)
+ }
+}
+
+// TestAWSConfig_shouldIgnoreIAM is designed to test the scenario of running
+// Terraform from an EC2 instance while static credentials are also supplied;
+// the static credentials should take precedence over the metadata service.
+func TestAWSConfig_shouldIgnoreIAM(t *testing.T) {
+ resetEnv := unsetEnv(t)
+ defer resetEnv()
+ // capture the test server's close method, to call after the test returns
+ ts := awsEnv(t)
+ defer ts()
+ simple := []struct {
+ Key, Secret, Token string
+ }{
+ {
+ Key: "test",
+ Secret: "secret",
+ }, {
+ Key: "test",
+ Secret: "test",
+ Token: "test",
+ },
+ }
+
+ for _, c := range simple {
+ cfg := Config{
+ AccessKey: c.Key,
+ SecretKey: c.Secret,
+ Token: c.Token,
+ }
+
+ creds := getCreds(cfg.AccessKey, cfg.SecretKey, cfg.Token)
+ if creds == nil {
+ t.Fatalf("Expected a static creds provider to be returned")
+ }
+ v, err := creds.Get()
+ if err != nil {
+ t.Fatalf("Error gettings creds: %s", err)
+ }
+ if v.AccessKeyID != c.Key {
+ t.Fatalf("AccessKeyID mismatch, expected: (%s), got (%s)", c.Key, v.AccessKeyID)
+ }
+ if v.SecretAccessKey != c.Secret {
+ t.Fatalf("SecretAccessKey mismatch, expected: (%s), got (%s)", c.Secret, v.SecretAccessKey)
+ }
+ if v.SessionToken != c.Token {
+ t.Fatalf("SessionToken mismatch, expected: (%s), got (%s)", c.Token, v.SessionToken)
+ }
+ }
+}
+
+func TestAWSConfig_shouldBeENV(t *testing.T) {
+ // need to set the environment variables to a dummy string, as we don't know
+ // what they may be at runtime without hardcoding here
+ s := "some_env"
+ resetEnv := setEnv(s, t)
+ defer resetEnv()
+
+ cfg := Config{}
+ creds := getCreds(cfg.AccessKey, cfg.SecretKey, cfg.Token)
+ if creds == nil {
+ t.Fatalf("Expected a static creds provider to be returned")
+ }
+ v, err := creds.Get()
+ if err != nil {
+ t.Fatalf("Error gettings creds: %s", err)
+ }
+ if v.AccessKeyID != s {
+ t.Fatalf("AccessKeyID mismatch, expected: (%s), got (%s)", s, v.AccessKeyID)
+ }
+ if v.SecretAccessKey != s {
+ t.Fatalf("SecretAccessKey mismatch, expected: (%s), got (%s)", s, v.SecretAccessKey)
+ }
+ if v.SessionToken != s {
+ t.Fatalf("SessionToken mismatch, expected: (%s), got (%s)", s, v.SessionToken)
+ }
+}
+
+// unsetEnv unsets environment variables for testing a "clean slate" with no
+// credentials in the environment
+func unsetEnv(t *testing.T) func() {
+ // Grab any existing AWS keys and preserve. In some tests we'll unset these, so
+ // we need to have them and restore them after
+ e := getEnv()
+ if err := os.Unsetenv("AWS_ACCESS_KEY_ID"); err != nil {
+ t.Fatalf("Error unsetting env var AWS_ACCESS_KEY_ID: %s", err)
+ }
+ if err := os.Unsetenv("AWS_SECRET_ACCESS_KEY"); err != nil {
+ t.Fatalf("Error unsetting env var AWS_SECRET_ACCESS_KEY: %s", err)
+ }
+ if err := os.Unsetenv("AWS_SESSION_TOKEN"); err != nil {
+ t.Fatalf("Error unsetting env var AWS_SESSION_TOKEN: %s", err)
+ }
+
+ return func() {
+ // re-set all the envs we unset above
+ if err := os.Setenv("AWS_ACCESS_KEY_ID", e.Key); err != nil {
+ t.Fatalf("Error resetting env var AWS_ACCESS_KEY_ID: %s", err)
+ }
+ if err := os.Setenv("AWS_SECRET_ACCESS_KEY", e.Secret); err != nil {
+ t.Fatalf("Error resetting env var AWS_SECRET_ACCESS_KEY: %s", err)
+ }
+ if err := os.Setenv("AWS_SESSION_TOKEN", e.Token); err != nil {
+ t.Fatalf("Error resetting env var AWS_SESSION_TOKEN: %s", err)
+ }
+ }
+}
+
+func setEnv(s string, t *testing.T) func() {
+ e := getEnv()
+ // Set all the envs to a dummy value
+ if err := os.Setenv("AWS_ACCESS_KEY_ID", s); err != nil {
+ t.Fatalf("Error setting env var AWS_ACCESS_KEY_ID: %s", err)
+ }
+ if err := os.Setenv("AWS_SECRET_ACCESS_KEY", s); err != nil {
+ t.Fatalf("Error setting env var AWS_SECRET_ACCESS_KEY: %s", err)
+ }
+ if err := os.Setenv("AWS_SESSION_TOKEN", s); err != nil {
+ t.Fatalf("Error setting env var AWS_SESSION_TOKEN: %s", err)
+ }
+
+ return func() {
+ // re-set all the envs we unset above
+ if err := os.Setenv("AWS_ACCESS_KEY_ID", e.Key); err != nil {
+ t.Fatalf("Error resetting env var AWS_ACCESS_KEY_ID: %s", err)
+ }
+ if err := os.Setenv("AWS_SECRET_ACCESS_KEY", e.Secret); err != nil {
+ t.Fatalf("Error resetting env var AWS_SECRET_ACCESS_KEY: %s", err)
+ }
+ if err := os.Setenv("AWS_SESSION_TOKEN", e.Token); err != nil {
+ t.Fatalf("Error resetting env var AWS_SESSION_TOKEN: %s", err)
+ }
+ }
+}
+
+// awsEnv establishes a httptest server to mock out the internal AWS Metadata
+// service. IAM Credentials are retrieved by the EC2RoleProvider, which makes
+// API calls to this internal URL. By replacing the server with a test server,
+// we can simulate an AWS environment
+func awsEnv(t *testing.T) func() {
+ routes := routes{}
+ if err := json.Unmarshal([]byte(aws_routes), &routes); err != nil {
+ t.Fatalf("Failed to unmarshal JSON in AWS ENV test: %s", err)
+ }
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/plain")
+ w.Header().Add("Server", "MockEC2")
+ for _, e := range routes.Endpoints {
+ if r.RequestURI == e.Uri {
+ fmt.Fprintln(w, e.Body)
+ }
+ }
+ }))
+
+ os.Setenv("AWS_METADATA_URL", ts.URL+"/latest")
+ return ts.Close
+}
+
+func getEnv() *currentEnv {
+ // Grab any existing AWS keys and preserve. In some tests we'll unset these, so
+ // we need to have them and restore them after
+ return &currentEnv{
+ Key: os.Getenv("AWS_ACCESS_KEY_ID"),
+ Secret: os.Getenv("AWS_SECRET_ACCESS_KEY"),
+ Token: os.Getenv("AWS_SESSION_TOKEN"),
+ }
+}
+
+// struct to preserve the current environment
+type currentEnv struct {
+ Key, Secret, Token string
+}
+
+type routes struct {
+ Endpoints []*endpoint `json:"endpoints"`
+}
+type endpoint struct {
+ Uri string `json:"uri"`
+ Body string `json:"body"`
+}
+
+const aws_routes = `
+{
+ "endpoints": [
+ {
+ "uri": "/latest/meta-data/iam/security-credentials",
+ "body": "test_role"
+ },
+ {
+ "uri": "/latest/meta-data/iam/security-credentials/test_role",
+ "body": "{\"Code\":\"Success\",\"LastUpdated\":\"2015-12-11T17:17:25Z\",\"Type\":\"AWS-HMAC\",\"AccessKeyId\":\"somekey\",\"SecretAccessKey\":\"somesecret\",\"Token\":\"sometoken\"}"
+ }
+ ]
+}
+`
diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index 313f74b18a..2edb94b066 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -1,19 +1,10 @@
package aws
import (
- "net"
- "sync"
- "time"
-
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/mutexkv"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
-
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
- "github.com/aws/aws-sdk-go/aws/ec2metadata"
- "github.com/aws/aws-sdk-go/aws/session"
)
// Provider returns a terraform.ResourceProvider.
@@ -21,95 +12,27 @@ func Provider() terraform.ResourceProvider {
// TODO: Move the validation to this, requires conditional schemas
// TODO: Move the configuration to this, requires validation
- // These variables are closed within the `getCreds` function below.
- // This function is responsible for reading credentials from the
- // environment in the case that they're not explicitly specified
- // in the Terraform configuration.
- //
- // By using the getCreds function here instead of making the default
- // empty, we avoid asking for input on credentials if they're available
- // in the environment.
- var credVal credentials.Value
- var credErr error
- var once sync.Once
- getCreds := func() {
- // Build the list of providers to look for creds in
- providers := []credentials.Provider{
- &credentials.EnvProvider{},
- &credentials.SharedCredentialsProvider{},
- }
-
- // We only look in the EC2 metadata API if we can connect
- // to the metadata service within a reasonable amount of time
- conn, err := net.DialTimeout("tcp", "169.254.169.254:80", 100*time.Millisecond)
- if err == nil {
- conn.Close()
- providers = append(providers, &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())})
- }
-
- credVal, credErr = credentials.NewChainCredentials(providers).Get()
-
- // If we didn't successfully find any credentials, just
- // set the error to nil.
- if credErr == credentials.ErrNoValidProvidersFoundInChain {
- credErr = nil
- }
- }
-
- // getCredDefault is a function used by DefaultFunc below to
- // get the default value for various parts of the credentials.
- // This function properly handles loading the credentials, checking
- // for errors, etc.
- getCredDefault := func(def interface{}, f func() string) (interface{}, error) {
- once.Do(getCreds)
-
- // If there was an error, that is always first
- if credErr != nil {
- return nil, credErr
- }
-
- // If the value is empty string, return nil (not set)
- val := f()
- if val == "" {
- return def, nil
- }
-
- return val, nil
- }
-
// The actual provider
return &schema.Provider{
Schema: map[string]*schema.Schema{
"access_key": &schema.Schema{
- Type: schema.TypeString,
- Required: true,
- DefaultFunc: func() (interface{}, error) {
- return getCredDefault(nil, func() string {
- return credVal.AccessKeyID
- })
- },
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "",
Description: descriptions["access_key"],
},
"secret_key": &schema.Schema{
- Type: schema.TypeString,
- Required: true,
- DefaultFunc: func() (interface{}, error) {
- return getCredDefault(nil, func() string {
- return credVal.SecretAccessKey
- })
- },
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "",
Description: descriptions["secret_key"],
},
"token": &schema.Schema{
- Type: schema.TypeString,
- Optional: true,
- DefaultFunc: func() (interface{}, error) {
- return getCredDefault("", func() string {
- return credVal.SessionToken
- })
- },
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "",
Description: descriptions["token"],
},
From adf417809a7fdd44da0241efc76164b36e6e24be Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 15 Dec 2015 10:49:23 -0600
Subject: [PATCH 243/664] add some comments on auth refactoring
---
builtin/providers/aws/config.go | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go
index e7c7628dc4..e3e2243f1c 100644
--- a/builtin/providers/aws/config.go
+++ b/builtin/providers/aws/config.go
@@ -353,8 +353,12 @@ func getCreds(key, secret, token string) *awsCredentials.Credentials {
}
r, err := c.Get(metadataURL)
+ // Flag to determine if we should add the EC2Meta data provider. Default false
var useIAM bool
if err == nil {
+ // AWS will add a "Server: EC2ws" header value for the metadata request. We
+ // check the headers for this value to ensure something else didn't just
+ // happent to be listening on that IP:Port
if r.Header["Server"] != nil && strings.Contains(r.Header["Server"][0], "EC2") {
useIAM = true
}
From 04e7c1abb3a61a6e7f07820e2e4ee455867e5879 Mon Sep 17 00:00:00 2001
From: Clint
Date: Tue, 15 Dec 2015 11:34:20 -0600
Subject: [PATCH 244/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 88e4137b06..ade2d61b58 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -21,6 +21,7 @@ IMPROVEMENTS:
* provider/aws: Add support for `skip_final_snapshot` to `aws_db_instance` [GH-3853]
* provider/aws: Adding support for Tags to DB SecurityGroup [GH-4260]
* provider/aws: Adding Tag support for DB Param Groups [GH-4259]
+ * provider/aws: Validate IOPs for EBS Volumes [GH-4146]
* provider/aws: DB Subnet group arn output [GH-4261]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
From 6236e8d7208d425c87980d3a204c2f0c29458795 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 15 Dec 2015 15:58:28 -0600
Subject: [PATCH 245/664] Update doc to use valid AMI and t2.micro instance
types
---
website/source/docs/configuration/override.html.md | 2 +-
website/source/docs/configuration/resources.html.md | 4 ++--
website/source/docs/modules/usage.html.markdown | 6 +++---
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/website/source/docs/configuration/override.html.md b/website/source/docs/configuration/override.html.md
index a667adcd58..1f841af08a 100644
--- a/website/source/docs/configuration/override.html.md
+++ b/website/source/docs/configuration/override.html.md
@@ -37,7 +37,7 @@ If you have a Terraform configuration `example.tf` with the contents:
```
resource "aws_instance" "web" {
- ami = "ami-1234567"
+ ami = "ami-d05e75b8"
}
```
diff --git a/website/source/docs/configuration/resources.html.md b/website/source/docs/configuration/resources.html.md
index d5e087fec4..3bf2031ac2 100644
--- a/website/source/docs/configuration/resources.html.md
+++ b/website/source/docs/configuration/resources.html.md
@@ -25,8 +25,8 @@ A resource configuration looks like the following:
```
resource "aws_instance" "web" {
- ami = "ami-123456"
- instance_type = "m1.small"
+ ami = "ami-d05e75b8"
+ instance_type = "t2.micro"
}
```
diff --git a/website/source/docs/modules/usage.html.markdown b/website/source/docs/modules/usage.html.markdown
index 8f98a9d6b3..65ce75cf33 100644
--- a/website/source/docs/modules/usage.html.markdown
+++ b/website/source/docs/modules/usage.html.markdown
@@ -87,9 +87,9 @@ For example:
```
resource "aws_instance" "client" {
- ami = "ami-123456"
- instance_type = "m1.small"
- availability_zone = "${module.consul.server_availability_zone}"
+ ami = "ami-d05e75b8"
+ instance_type = "t2.micro"
+ availability_zone = "${module.consul.server_availability_zone}"
}
```
From e59f39df5ffdfee913fd2322aab97f6e2351a2c5 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 15 Dec 2015 16:12:31 -0600
Subject: [PATCH 246/664] other m1/ami-1234 clean ups
---
.../docs/providers/aws/r/instance.html.markdown | 8 ++++----
.../aws/r/launch_configuration.html.markdown | 12 ++++++------
website/source/index.html.erb | 6 +++---
3 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/website/source/docs/providers/aws/r/instance.html.markdown b/website/source/docs/providers/aws/r/instance.html.markdown
index e9e8356454..79e4d22070 100644
--- a/website/source/docs/providers/aws/r/instance.html.markdown
+++ b/website/source/docs/providers/aws/r/instance.html.markdown
@@ -14,11 +14,11 @@ and deleted. Instances also support [provisioning](/docs/provisioners/index.html
## Example Usage
```
-# Create a new instance of the ami-1234 on an m1.small node
-# with an AWS Tag naming it "HelloWorld"
+# Create a new instance of the `ami-d05e75b8` (Ubuntu 14.04) on an
+# t2.micro node with an AWS Tag naming it "HelloWorld"
resource "aws_instance" "web" {
- ami = "ami-1234"
- instance_type = "m1.small"
+ ami = "ami-d05e75b8"
+ instance_type = "t2.micro"
tags {
Name = "HelloWorld"
}
diff --git a/website/source/docs/providers/aws/r/launch_configuration.html.markdown b/website/source/docs/providers/aws/r/launch_configuration.html.markdown
index 413f1b4a1e..4f820b7f67 100644
--- a/website/source/docs/providers/aws/r/launch_configuration.html.markdown
+++ b/website/source/docs/providers/aws/r/launch_configuration.html.markdown
@@ -15,8 +15,8 @@ Provides a resource to create a new launch configuration, used for autoscaling g
```
resource "aws_launch_configuration" "as_conf" {
name = "web_config"
- image_id = "ami-1234"
- instance_type = "m1.small"
+ ami = "ami-d05e75b8"
+ instance_type = "t2.micro"
}
```
@@ -33,8 +33,8 @@ with `name_prefix`. Example:
```
resource "aws_launch_configuration" "as_conf" {
name_prefix = "terraform-lc-example-"
- image_id = "ami-1234"
- instance_type = "m1.small"
+ ami = "ami-d05e75b8"
+ instance_type = "t2.micro"
lifecycle {
create_before_destroy = true
@@ -66,8 +66,8 @@ for more information or how to launch [Spot Instances][3] with Terraform.
```
resource "aws_launch_configuration" "as_conf" {
- image_id = "ami-1234"
- instance_type = "m1.small"
+ ami = "ami-d05e75b8"
+ instance_type = "t2.micro"
spot_price = "0.001"
lifecycle {
create_before_destroy = true
diff --git a/website/source/index.html.erb b/website/source/index.html.erb
index 92a6c3f17b..353bb94fa4 100644
--- a/website/source/index.html.erb
+++ b/website/source/index.html.erb
@@ -195,9 +195,9 @@
resource "aws_instance" "app" {
count = 5
-
- ami = "ami-043a5034"
- instance_type = "m1.small"
+
+ ami = "ami-d05e75b8"
+ instance_type = "t2.micro"
}
From 35a69fd322c5adc4c3cbc60cd5e1b36ffb50b283 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 15 Dec 2015 16:20:24 -0600
Subject: [PATCH 247/664] fix whitespace
---
website/source/index.html.erb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/index.html.erb b/website/source/index.html.erb
index 353bb94fa4..52c8b747bf 100644
--- a/website/source/index.html.erb
+++ b/website/source/index.html.erb
@@ -195,7 +195,7 @@
resource "aws_instance" "app" {
count = 5
-
+
ami = "ami-d05e75b8"
instance_type = "t2.micro"
}
From c279adfc55e4126756529789ff648a1620d4ab49 Mon Sep 17 00:00:00 2001
From: Nashwan Azhari
Date: Tue, 8 Dec 2015 17:30:12 -0500
Subject: [PATCH 248/664] provider/azurerm: Initial commit.
This commit brings some of the work over from #3808, but rearchitects to
use a separate provider for Azure Resource Manager. This is in line with
the decisions made by the Azure Powershell Cmdlets, and is important for
usability since the sets of required fields change between the ASM and
ARM APIs.
Currently `azurerm_resource_group` and `azurerm_virtual_network` are
implemented, more resources will follow.
---
builtin/bins/provider-azurerm/main.go | 12 +
builtin/bins/provider-azurerm/main_test.go | 1 +
builtin/providers/azurerm/config.go | 227 +++++++++++++++
builtin/providers/azurerm/provider.go | 108 +++++++
builtin/providers/azurerm/provider_test.go | 195 +++++++++++++
.../azurerm/resourceArmResourceGroup.go | 140 +++++++++
.../azurerm/resourceArmVirtualNetwork.go | 270 ++++++++++++++++++
7 files changed, 953 insertions(+)
create mode 100644 builtin/bins/provider-azurerm/main.go
create mode 100644 builtin/bins/provider-azurerm/main_test.go
create mode 100644 builtin/providers/azurerm/config.go
create mode 100644 builtin/providers/azurerm/provider.go
create mode 100644 builtin/providers/azurerm/provider_test.go
create mode 100644 builtin/providers/azurerm/resourceArmResourceGroup.go
create mode 100644 builtin/providers/azurerm/resourceArmVirtualNetwork.go
diff --git a/builtin/bins/provider-azurerm/main.go b/builtin/bins/provider-azurerm/main.go
new file mode 100644
index 0000000000..f81707338f
--- /dev/null
+++ b/builtin/bins/provider-azurerm/main.go
@@ -0,0 +1,12 @@
+package main
+
+import (
+ "github.com/hashicorp/terraform/builtin/providers/azurerm"
+ "github.com/hashicorp/terraform/plugin"
+)
+
+func main() {
+ plugin.Serve(&plugin.ServeOpts{
+ ProviderFunc: azurerm.Provider,
+ })
+}
diff --git a/builtin/bins/provider-azurerm/main_test.go b/builtin/bins/provider-azurerm/main_test.go
new file mode 100644
index 0000000000..06ab7d0f9a
--- /dev/null
+++ b/builtin/bins/provider-azurerm/main_test.go
@@ -0,0 +1 @@
+package main
diff --git a/builtin/providers/azurerm/config.go b/builtin/providers/azurerm/config.go
new file mode 100644
index 0000000000..669e4631d6
--- /dev/null
+++ b/builtin/providers/azurerm/config.go
@@ -0,0 +1,227 @@
+package azurerm
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/azure-sdk-for-go/arm/compute"
+ "github.com/Azure/azure-sdk-for-go/arm/network"
+ "github.com/Azure/azure-sdk-for-go/arm/resources"
+ "github.com/Azure/azure-sdk-for-go/arm/scheduler"
+ "github.com/Azure/azure-sdk-for-go/arm/storage"
+ "github.com/hashicorp/terraform/helper/pathorcontents"
+)
+
+// ArmClient contains the handles to all the specific Azure Resource Manager
+// resource classes' respective clients.
+type ArmClient struct {
+ availSetClient compute.AvailabilitySetsClient
+ usageOpsClient compute.UsageOperationsClient
+ vmExtensionImageClient compute.VirtualMachineExtensionImagesClient
+ vmExtensionClient compute.VirtualMachineExtensionsClient
+ vmImageClient compute.VirtualMachineImagesClient
+ vmClient compute.VirtualMachinesClient
+
+ appGatewayClient network.ApplicationGatewaysClient
+ ifaceClient network.InterfacesClient
+ loadBalancerClient network.LoadBalancersClient
+ localNetConnClient network.LocalNetworkGatewaysClient
+ publicIPClient network.PublicIPAddressesClient
+ secGroupClient network.SecurityGroupsClient
+ secRuleClient network.SecurityRulesClient
+ subnetClient network.SubnetsClient
+ netUsageClient network.UsagesClient
+ vnetGatewayConnectionsClient network.VirtualNetworkGatewayConnectionsClient
+ vnetGatewayClient network.VirtualNetworkGatewaysClient
+ vnetClient network.VirtualNetworksClient
+
+ resourceGroupClient resources.GroupsClient
+ tagsClient resources.TagsClient
+
+ jobsClient scheduler.JobsClient
+ jobsCollectionsClient scheduler.JobCollectionsClient
+
+ storageServiceClient storage.AccountsClient
+ storageUsageClient storage.UsageOperationsClient
+}
+
+// getArmClient is a helper method which returns a fully instantiated
+// *ArmClient based on the Config's current settings.
+func (c *Config) getArmClient() (*ArmClient, error) {
+ // first; check that all the necessary credentials were provided:
+ if !c._armCredentialsProvided() {
+ return nil, fmt.Errorf("Not all ARM-required fields have been provided.")
+ }
+
+ spt, err := azure.NewServicePrincipalToken(c.ClientID, c.ClientSecret, c.TenantID, azure.AzureResourceManagerScope)
+ if err != nil {
+ return nil, err
+ }
+
+ // client declarations:
+ client := ArmClient{}
+
+ // NOTE: these declarations should be left separate for clarity should the
+ // clients ever need to be configured with custom Responders/PollingModes etc...
+ asc := compute.NewAvailabilitySetsClient(c.SubscriptionID)
+ asc.Authorizer = spt
+ client.availSetClient = asc
+
+ uoc := compute.NewUsageOperationsClient(c.SubscriptionID)
+ uoc.Authorizer = spt
+ client.usageOpsClient = uoc
+
+ vmeic := compute.NewVirtualMachineExtensionImagesClient(c.SubscriptionID)
+ vmeic.Authorizer = spt
+ client.vmExtensionImageClient = vmeic
+
+ vmec := compute.NewVirtualMachineExtensionsClient(c.SubscriptionID)
+ vmec.Authorizer = spt
+ client.vmExtensionClient = vmec
+
+ vmic := compute.NewVirtualMachineImagesClient(c.SubscriptionID)
+ vmic.Authorizer = spt
+ client.vmImageClient = vmic
+
+ vmc := compute.NewVirtualMachinesClient(c.SubscriptionID)
+ vmc.Authorizer = spt
+ client.vmClient = vmc
+
+ agc := network.NewApplicationGatewaysClient(c.SubscriptionID)
+ agc.Authorizer = spt
+ client.appGatewayClient = agc
+
+ ifc := network.NewInterfacesClient(c.SubscriptionID)
+ ifc.Authorizer = spt
+ client.ifaceClient = ifc
+
+ lbc := network.NewLoadBalancersClient(c.SubscriptionID)
+ lbc.Authorizer = spt
+ client.loadBalancerClient = lbc
+
+ lgc := network.NewLocalNetworkGatewaysClient(c.SubscriptionID)
+ lgc.Authorizer = spt
+ client.localNetConnClient = lgc
+
+ pipc := network.NewPublicIPAddressesClient(c.SubscriptionID)
+ pipc.Authorizer = spt
+ client.publicIPClient = pipc
+
+ sgc := network.NewSecurityGroupsClient(c.SubscriptionID)
+ sgc.Authorizer = spt
+ client.secGroupClient = sgc
+
+ src := network.NewSecurityRulesClient(c.SubscriptionID)
+ src.Authorizer = spt
+ client.secRuleClient = src
+
+ snc := network.NewSubnetsClient(c.SubscriptionID)
+ snc.Authorizer = spt
+ client.subnetClient = snc
+
+ vgcc := network.NewVirtualNetworkGatewayConnectionsClient(c.SubscriptionID)
+ vgcc.Authorizer = spt
+ client.vnetGatewayConnectionsClient = vgcc
+
+ vgc := network.NewVirtualNetworkGatewaysClient(c.SubscriptionID)
+ vgc.Authorizer = spt
+ client.vnetGatewayClient = vgc
+
+ vnc := network.NewVirtualNetworksClient(c.SubscriptionID)
+ vnc.Authorizer = spt
+ client.vnetClient = vnc
+
+ rgc := resources.NewGroupsClient(c.SubscriptionID)
+ rgc.Authorizer = spt
+ client.resourceGroupClient = rgc
+
+ tc := resources.NewTagsClient(c.SubscriptionID)
+ tc.Authorizer = spt
+ client.tagsClient = tc
+
+ jc := scheduler.NewJobsClient(c.SubscriptionID)
+ jc.Authorizer = spt
+ client.jobsClient = jc
+
+ jcc := scheduler.NewJobCollectionsClient(c.SubscriptionID)
+ jcc.Authorizer = spt
+ client.jobsCollectionsClient = jcc
+
+ ssc := storage.NewAccountsClient(c.SubscriptionID)
+ ssc.Authorizer = spt
+ client.storageServiceClient = ssc
+
+ suc := storage.NewUsageOperationsClient(c.SubscriptionID)
+ suc.Authorizer = spt
+ client.storageUsageClient = suc
+
+ return &client, nil
+}
+
+// armCredentialsProvided is a helper method which indicates whether or not the
+// credentials required for authenticating against the ARM APIs were provided.
+func (c *Config) armCredentialsProvided() bool {
+ return c.ArmConfig != "" || c._armCredentialsProvided()
+}
+func (c *Config) _armCredentialsProvided() bool {
+ return !(c.SubscriptionID == "" || c.ClientID == "" || c.ClientSecret == "" || c.TenantID == "")
+}
+
+// readArmSettings is a helper method which; given the contents of the ARM
+// credentials file, loads all the data into the Config.
+func (c *Config) readArmSettings(contents string) error {
+ data := &armConfigData{}
+ err := json.Unmarshal([]byte(contents), data)
+
+ c.SubscriptionID = data.SubscriptionID
+ c.ClientID = data.ClientID
+ c.ClientSecret = data.ClientSecret
+ c.TenantID = data.TenantID
+
+ return err
+}
+
+// configFileContentsWarning represents the warning message returned when the
+// path to the 'arm_config_file' is provided instead of its sourced contents.
+var configFileContentsWarning = `
+The path to the 'arm_config_file' was provided instead of its contents.
+Support for accepting filepaths instead of their contents will be removed
+in the near future. Do please consider switching over to using
+'${file("/path/to/config.arm")}' instead.
+`[1:]
+
+// validateArmConfigFile is a helper function which verifies that
+// the provided ARM configuration file is valid.
+func validateArmConfigFile(v interface{}, _ string) (ws []string, es []error) {
+ value := v.(string)
+ if value == "" {
+ return nil, nil
+ }
+
+ pathOrContents, wasPath, err := pathorcontents.Read(v.(string))
+ if err != nil {
+ es = append(es, fmt.Errorf("Error reading 'arm_config_file': %s", err))
+ }
+
+ if wasPath {
+ ws = append(ws, configFileContentsWarning)
+ }
+
+ data := armConfigData{}
+ err = json.Unmarshal([]byte(pathOrContents), &data)
+ if err != nil {
+ es = append(es, fmt.Errorf("Error unmarshalling the provided 'arm_config_file': %s", err))
+ }
+
+ return
+}
+
+// armConfigData is a private struct which represents the expected layout of
+// an ARM configuration file. It is used for unmarshalling purposes.
+type armConfigData struct {
+ ClientID string `json:"clientID"`
+ ClientSecret string `json:"clientSecret"`
+ SubscriptionID string `json:"subscriptionID"`
+ TenantID string `json:"tenantID"`
+}
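
For reference, a minimal sketch of the JSON layout that readArmSettings and armConfigData expect from an 'arm_config_file'. The struct name armFileData and all credential values below are illustrative placeholders, not part of the provider:

package main

import (
	"encoding/json"
	"fmt"
)

// armFileData mirrors the armConfigData layout defined in config.go.
type armFileData struct {
	ClientID       string `json:"clientID"`
	ClientSecret   string `json:"clientSecret"`
	SubscriptionID string `json:"subscriptionID"`
	TenantID       string `json:"tenantID"`
}

// sampleArmConfig holds placeholder values only; a real file would carry the
// service principal credentials for the target subscription.
const sampleArmConfig = `{
  "clientID": "00000000-0000-0000-0000-000000000000",
  "clientSecret": "placeholder-secret",
  "subscriptionID": "11111111-1111-1111-1111-111111111111",
  "tenantID": "22222222-2222-2222-2222-222222222222"
}`

func main() {
	var data armFileData
	if err := json.Unmarshal([]byte(sampleArmConfig), &data); err != nil {
		fmt.Println("unmarshal error:", err)
		return
	}
	fmt.Println(data.SubscriptionID) // 11111111-1111-1111-1111-111111111111
}
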
diff --git a/builtin/providers/azurerm/provider.go b/builtin/providers/azurerm/provider.go
new file mode 100644
index 0000000000..8b559c0b12
--- /dev/null
+++ b/builtin/providers/azurerm/provider.go
@@ -0,0 +1,108 @@
+package azurerm
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Provider returns a terraform.ResourceProvider.
+func Provider() terraform.ResourceProvider {
+ return &schema.Provider{
+ Schema: map[string]*schema.Schema{
+ "arm_config_file": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "",
+ DefaultFunc: schema.EnvDefaultFunc("ARM_CONFIG_FILE", nil),
+ ValidateFunc: validateArmConfigFile,
+ },
+
+ "subscription_id": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ DefaultFunc: schema.EnvDefaultFunc("AZURE_SUBSCRIPTION_ID", ""),
+ },
+
+ "client_id": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""),
+ },
+
+ "client_secret": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""),
+ },
+
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""),
+ },
+ },
+
+ ResourcesMap: map[string]*schema.Resource{
+ "azurerm_resource_group": resourceArmResourceGroup(),
+ "azurerm_virtual_network": resourceArmVirtualNetwork(),
+ },
+
+ ConfigureFunc: providerConfigure,
+ }
+}
+
+// Config is the configuration structure used to instantiate a
+// new Azure management client.
+type Config struct {
+ ManagementURL string
+
+ ArmConfig string
+
+ SubscriptionID string
+ ClientID string
+ ClientSecret string
+ TenantID string
+}
+
+const noConfigError = `Credentials must be provided either via arm_config_file, or via
+subscription_id, client_id, client_secret and tenant_id. Please see
+the provider documentation for more information on how to obtain these
+credentials.`
+
+func providerConfigure(d *schema.ResourceData) (interface{}, error) {
+ config := Config{
+ SubscriptionID: d.Get("subscription_id").(string),
+ ClientID: d.Get("client_id").(string),
+ ClientSecret: d.Get("client_secret").(string),
+ TenantID: d.Get("tenant_id").(string),
+ }
+
+ // check if credentials file is provided:
+ armConfig := d.Get("arm_config_file").(string)
+ if armConfig != "" {
+ // then, load the settings from that:
+ if err := config.readArmSettings(armConfig); err != nil {
+ return nil, err
+ }
+ }
+
+ // then; check whether the ARM credentials were provided:
+ if !config.armCredentialsProvided() {
+ return nil, fmt.Errorf(noConfigError)
+ }
+
+ client, err := config.getArmClient()
+ if err != nil {
+ return nil, err
+ }
+
+ return client, nil
+}
+
+func azureRMNormalizeLocation(location interface{}) string {
+ input := location.(string)
+ return strings.Replace(strings.ToLower(input), " ", "", -1)
+}
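
The StateFunc used for locations collapses Azure's display names onto their compact form so that "West US" and "westus" do not produce a spurious diff. A standalone sketch of the same transformation (the function name normalizeLocation is illustrative):

package main

import (
	"fmt"
	"strings"
)

// normalizeLocation applies the same normalization as azureRMNormalizeLocation:
// lower-case the location name and strip spaces.
func normalizeLocation(location string) string {
	return strings.Replace(strings.ToLower(location), " ", "", -1)
}

func main() {
	fmt.Println(normalizeLocation("West US")) // westus
	fmt.Println(normalizeLocation("westus"))  // westus
}
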
diff --git a/builtin/providers/azurerm/provider_test.go b/builtin/providers/azurerm/provider_test.go
new file mode 100644
index 0000000000..333d8fa3e6
--- /dev/null
+++ b/builtin/providers/azurerm/provider_test.go
@@ -0,0 +1,195 @@
+package azurerm
+
+import (
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/mitchellh/go-homedir"
+)
+
+var testAccProviders map[string]terraform.ResourceProvider
+var testAccProvider *schema.Provider
+
+const (
+ testAccSecurityGroupName = "terraform-security-group"
+ testAccHostedServiceName = "terraform-testing-service"
+)
+
+// testAccStorageServiceName is used as the name for the Storage Service
+// created in all storage-related tests.
+// It is much more convenient to provide a Storage Service which
+// has been created beforehand as the creation of one takes a long time
+// and would greatly impede the multitude of tests which rely on one.
+// NOTE: the storage container should be located in `West US`.
+var testAccStorageServiceName = os.Getenv("AZURE_STORAGE")
+
+const testAccStorageContainerName = "terraform-testing-container"
+
+func init() {
+ testAccProvider = Provider().(*schema.Provider)
+ testAccProviders = map[string]terraform.ResourceProvider{
+ "azure": testAccProvider,
+ }
+}
+
+func TestProvider(t *testing.T) {
+ if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestProvider_impl(t *testing.T) {
+ var _ terraform.ResourceProvider = Provider()
+}
+
+func testAccPreCheck(t *testing.T) {
+ if v := os.Getenv("AZURE_PUBLISH_SETTINGS"); v == "" {
+ subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
+ certificate := os.Getenv("AZURE_CERTIFICATE")
+
+ if subscriptionID == "" || certificate == "" {
+ t.Fatal("either AZURE_PUBLISH_SETTINGS, or AZURE_SUBSCRIPTION_ID " +
+ "and AZURE_CERTIFICATE must be set for acceptance tests")
+ }
+ }
+
+ if v := os.Getenv("AZURE_STORAGE"); v == "" {
+ t.Fatal("AZURE_STORAGE must be set for acceptance tests")
+ }
+}
+
+func TestAzure_validateSettingsFile(t *testing.T) {
+ f, err := ioutil.TempFile("", "tf-test")
+ if err != nil {
+ t.Fatalf("Error creating temporary file in TestAzure_validateSettingsFile: %s", err)
+ }
+ defer os.Remove(f.Name())
+
+ fx, err := ioutil.TempFile("", "tf-test-xml")
+ if err != nil {
+ t.Fatalf("Error creating temporary file with XML in TestAzure_validateSettingsFile: %s", err)
+ }
+ defer os.Remove(fx.Name())
+ _, err = io.WriteString(fx, " ")
+ if err != nil {
+ t.Fatalf("Error writing XML File: %s", err)
+ }
+ fx.Close()
+
+ home, err := homedir.Dir()
+ if err != nil {
+ t.Fatalf("Error fetching homedir: %s", err)
+ }
+ fh, err := ioutil.TempFile(home, "tf-test-home")
+ if err != nil {
+ t.Fatalf("Error creating homedir-based temporary file: %s", err)
+ }
+ defer os.Remove(fh.Name())
+ _, err = io.WriteString(fh, " ")
+ if err != nil {
+ t.Fatalf("Error writing XML File: %s", err)
+ }
+ fh.Close()
+
+ r := strings.NewReplacer(home, "~")
+ homePath := r.Replace(fh.Name())
+
+ cases := []struct {
+ Input string // String of XML or a path to an XML file
+ W int // expected count of warnings
+ E int // expected count of errors
+ }{
+ {"test", 0, 1},
+ {f.Name(), 1, 1},
+ {fx.Name(), 1, 0},
+ {homePath, 1, 0},
+ {" ", 0, 0},
+ }
+
+ for _, tc := range cases {
+ w, e := validateSettingsFile(tc.Input, "")
+
+ if len(w) != tc.W {
+ t.Errorf("Error in TestAzureValidateSettingsFile: input: %s , warnings: %v, errors: %v", tc.Input, w, e)
+ }
+ if len(e) != tc.E {
+ t.Errorf("Error in TestAzureValidateSettingsFile: input: %s , warnings: %v, errors: %v", tc.Input, w, e)
+ }
+ }
+}
+
+func TestAzure_providerConfigure(t *testing.T) {
+ home, err := homedir.Dir()
+ if err != nil {
+ t.Fatalf("Error fetching homedir: %s", err)
+ }
+ fh, err := ioutil.TempFile(home, "tf-test-home")
+ if err != nil {
+ t.Fatalf("Error creating homedir-based temporary file: %s", err)
+ }
+ defer os.Remove(fh.Name())
+
+ _, err = io.WriteString(fh, testAzurePublishSettingsStr)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ fh.Close()
+
+ r := strings.NewReplacer(home, "~")
+ homePath := r.Replace(fh.Name())
+
+ cases := []struct {
+ SettingsFile string // String of XML or a path to an XML file
+ NilMeta bool // whether meta is expected to be nil
+ }{
+ {testAzurePublishSettingsStr, false},
+ {homePath, false},
+ }
+
+ for _, tc := range cases {
+ rp := Provider()
+ raw := map[string]interface{}{
+ "settings_file": tc.SettingsFile,
+ }
+
+ rawConfig, err := config.NewRawConfig(raw)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = rp.Configure(terraform.NewResourceConfig(rawConfig))
+ meta := rp.(*schema.Provider).Meta()
+ if (meta == nil) != tc.NilMeta {
+ t.Fatalf("expected NilMeta: %t, got meta: %#v, settings_file: %q",
+ tc.NilMeta, meta, tc.SettingsFile)
+ }
+ }
+}
+
+func genRandInt() int {
+ return rand.New(rand.NewSource(time.Now().UnixNano())).Int() % 100000
+}
+
+// testAzurePublishSettingsStr is a revoked publishsettings file
+const testAzurePublishSettingsStr = `
+
+
+
+
+
+
+`
diff --git a/builtin/providers/azurerm/resourceArmResourceGroup.go b/builtin/providers/azurerm/resourceArmResourceGroup.go
new file mode 100644
index 0000000000..2e321aab6c
--- /dev/null
+++ b/builtin/providers/azurerm/resourceArmResourceGroup.go
@@ -0,0 +1,140 @@
+package azurerm
+
+import (
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/arm/resources"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+// resourceArmResourceGroup returns the *schema.Resource
+// associated to resource group resources on ARM.
+func resourceArmResourceGroup() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceArmResourceGroupCreate,
+ Read: resourceArmResourceGroupRead,
+ Exists: resourceArmResourceGroupExists,
+ Delete: resourceArmResourceGroupDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+				//TODO(jen20) - implement validation func: {resource-group-name} must uniquely identify the resource group within the subscription. It must be no longer than 80 characters. It can only contain alphanumeric characters, dash, underscore, opening parenthesis, closing parenthesis or period. The name cannot end with a period.
+ },
+ "location": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ StateFunc: azureRMNormalizeLocation,
+ },
+ },
+ }
+}
+
+// resourceArmResourceGroupCreate goes ahead and creates the specified ARM resource group.
+func resourceArmResourceGroupCreate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*ArmClient)
+ resGroupClient := client.resourceGroupClient
+
+ name := d.Get("name").(string)
+ location := d.Get("location").(string)
+
+ log.Printf("[INFO] Issuing Azure ARM creation request for resource group '%s'.", name)
+
+ rg := resources.ResourceGroup{
+ Name: &name,
+ Location: &location,
+ }
+
+ _, err := resGroupClient.CreateOrUpdate(name, rg)
+ if err != nil {
+ return fmt.Errorf("Error issuing Azure ARM create request for resource group '%s': %s", name, err)
+ }
+
+ d.SetId(*rg.Name)
+
+ // Wait for the resource group to become available
+ // TODO(jen20): Is there any need for this?
+ log.Printf("[DEBUG] Waiting for Resource Group (%s) to become available", d.Id())
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"Accepted"},
+ Target: "Succeeded",
+ Refresh: resourceGroupStateRefreshFunc(client, d.Id()),
+ Timeout: 10 * time.Minute,
+ }
+ if _, err := stateConf.WaitForState(); err != nil {
+ return fmt.Errorf("Error waiting for Resource Group (%s) to become available: %s", d.Id(), err)
+ }
+
+ return resourceArmResourceGroupRead(d, meta)
+}
+
+// resourceArmResourceGroupRead goes ahead and reads the state of the corresponding ARM resource group.
+func resourceArmResourceGroupRead(d *schema.ResourceData, meta interface{}) error {
+ resGroupClient := meta.(*ArmClient).resourceGroupClient
+
+ name := d.Id()
+ log.Printf("[INFO] Issuing read request to Azure ARM for resource group '%s'.", name)
+
+ res, err := resGroupClient.Get(name)
+ if err != nil {
+ return fmt.Errorf("Error issuing read request to Azure ARM for resource group '%s': %s", name, err)
+ }
+
+ d.Set("name", *res.Name)
+ d.Set("location", *res.Location)
+
+ return nil
+}
+
+// resourceArmResourceGroupExists goes ahead and checks for the existence of the corresponding ARM resource group.
+func resourceArmResourceGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ resGroupClient := meta.(*ArmClient).resourceGroupClient
+
+ name := d.Id()
+
+ resp, err := resGroupClient.CheckExistence(name)
+ if err != nil {
+ // TODO(aznashwan): implement some error switching helpers in the SDK
+ // to avoid HTTP error checks such as the below:
+ if resp.StatusCode != 200 {
+ return false, err
+ }
+
+ return true, nil
+ }
+
+ return true, nil
+}
+
+// resourceArmResourceGroupDelete deletes the specified ARM resource group.
+func resourceArmResourceGroupDelete(d *schema.ResourceData, meta interface{}) error {
+ resGroupClient := meta.(*ArmClient).resourceGroupClient
+
+ name := d.Id()
+
+ _, err := resGroupClient.Delete(name)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// resourceGroupStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
+// a resource group.
+func resourceGroupStateRefreshFunc(client *ArmClient, id string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ res, err := client.resourceGroupClient.Get(id)
+ if err != nil {
+ return nil, "", fmt.Errorf("Error issuing read request in resourceGroupStateRefreshFunc to Azure ARM for resource group '%s': %s", id, err)
+ }
+
+ return res, *res.Properties.ProvisioningState, nil
+ }
+}
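
The create path above blocks until the resource group's provisioning state leaves "Accepted" and reaches "Succeeded". A minimal, self-contained sketch of how resource.StateChangeConf drives a refresh function; the in-memory refresh below is only a stand-in for resourceGroupStateRefreshFunc:

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

func main() {
	calls := 0
	stateConf := &resource.StateChangeConf{
		Pending: []string{"Accepted"},
		Target:  "Succeeded",
		// Stand-in refresh: reports "Accepted" twice, then "Succeeded".
		Refresh: func() (interface{}, string, error) {
			calls++
			if calls < 3 {
				return struct{}{}, "Accepted", nil
			}
			return struct{}{}, "Succeeded", nil
		},
		Timeout:    time.Minute,
		MinTimeout: time.Second,
	}
	if _, err := stateConf.WaitForState(); err != nil {
		fmt.Println("wait error:", err)
		return
	}
	fmt.Println("reached Succeeded after", calls, "refreshes")
}
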
diff --git a/builtin/providers/azurerm/resourceArmVirtualNetwork.go b/builtin/providers/azurerm/resourceArmVirtualNetwork.go
new file mode 100644
index 0000000000..c4471b37b1
--- /dev/null
+++ b/builtin/providers/azurerm/resourceArmVirtualNetwork.go
@@ -0,0 +1,270 @@
+package azurerm
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/arm/network"
+ "github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceArmVirtualNetwork() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceArmVirtualNetworkCreate,
+ Read: resourceArmVirtualNetworkRead,
+ Update: resourceArmVirtualNetworkUpdate,
+ Delete: resourceArmVirtualNetworkDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "address_space": &schema.Schema{
+ Type: schema.TypeList,
+ Required: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+
+ "dns_servers_names": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+
+ "subnet": &schema.Schema{
+ Type: schema.TypeSet,
+ Required: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "address_prefix": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "security_group": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ },
+ },
+ Set: resourceAzureSubnetHash,
+ },
+
+ "location": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ StateFunc: azureRMNormalizeLocation,
+ },
+
+ "resource_group_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+// resourceArmVirtualNetworkCreate creates the specified ARM virtual network.
+func resourceArmVirtualNetworkCreate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*ArmClient)
+ vnetClient := client.vnetClient
+
+ log.Printf("[INFO] preparing arguments for Azure ARM virtual network creation.")
+
+ name := d.Get("name").(string)
+ location := d.Get("location").(string)
+ resGroup := d.Get("resource_group_name").(string)
+
+ vnet := network.VirtualNetwork{
+ Name: &name,
+ Location: &location,
+ Properties: getVirtualNetworkProperties(d),
+ }
+
+ log.Printf("[INFO] Sending virtual network create request to ARM.")
+ _, err := vnetClient.CreateOrUpdate(resGroup, name, vnet)
+ if err != nil {
+ return err
+ }
+
+ // if res.Response.StatusCode != http.StatusAccepted {
+	// 	return fmt.Errorf("Creation request was denied: code: %d", res.Response.StatusCode)
+ // }
+
+ d.SetId(name)
+ d.Set("resGroup", resGroup)
+
+ // Wait for the resource group to become available
+ // TODO(jen20): Is there any need for this?
+ log.Printf("[DEBUG] Waiting for Virtual Network (%s) to become available", d.Id())
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"Accepted", "Updating"},
+ Target: "Succeeded",
+ Refresh: virtualNetworkStateRefreshFunc(client, resGroup, name),
+ Timeout: 10 * time.Minute,
+ }
+ if _, err := stateConf.WaitForState(); err != nil {
+ return fmt.Errorf("Error waiting for Virtual Network (%s) to become available: %s", d.Id(), err)
+ }
+
+ return resourceArmVirtualNetworkRead(d, meta)
+}
+
+// resourceArmVirtualNetworkRead goes ahead and reads the state of the corresponding ARM virtual network.
+func resourceArmVirtualNetworkRead(d *schema.ResourceData, meta interface{}) error {
+ vnetClient := meta.(*ArmClient).vnetClient
+
+ name := d.Get("name").(string)
+ resGroup := d.Get("resource_group_name").(string)
+
+ log.Printf("[INFO] Sending virtual network read request to ARM.")
+
+ resp, err := vnetClient.Get(resGroup, name)
+ if resp.StatusCode == http.StatusNotFound {
+ // it means the virtual network has been deleted in the meantime;
+ // so we must go ahead and remove it here:
+ d.SetId("")
+ return nil
+ }
+ if err != nil {
+ return fmt.Errorf("Error making Read request on Azure virtual network %s: %s", name, err)
+ }
+ vnet := *resp.Properties
+
+ // update all the appropriate values:
+ d.Set("address_space", vnet.AddressSpace.AddressPrefixes)
+
+ // read state of subnets:
+ subnets := &schema.Set{
+ F: resourceAzureSubnetHash,
+ }
+
+ for _, subnet := range *vnet.Subnets {
+ s := map[string]interface{}{}
+
+ s["name"] = *subnet.Name
+ s["address_prefix"] = *subnet.Properties.AddressPrefix
+ // NOTE(aznashwan): ID's necessary?
+ if subnet.Properties.NetworkSecurityGroup != nil {
+ s["security_group"] = *subnet.Properties.NetworkSecurityGroup.ID
+ }
+
+ subnets.Add(s)
+ }
+ d.Set("subnet", subnets)
+
+ // now; dns servers:
+ dnses := []string{}
+ for _, dns := range *vnet.DhcpOptions.DNSServers {
+ dnses = append(dnses, dns)
+ }
+ d.Set("dns_servers_names", dnses)
+
+ return nil
+}
+
+// resourceArmVirtualNetworkUpdate goes ahead and updates the corresponding ARM virtual network.
+func resourceArmVirtualNetworkUpdate(d *schema.ResourceData, meta interface{}) error {
+ // considering Create's idempotency, Update is simply a proxy for it...
+ // Update has been left as a separate function here for utmost clarity:
+ return resourceArmVirtualNetworkCreate(d, meta)
+}
+
+// resourceArmVirtualNetworkDelete deletes the specified ARM virtual network.
+func resourceArmVirtualNetworkDelete(d *schema.ResourceData, meta interface{}) error {
+ vnetClient := meta.(*ArmClient).vnetClient
+
+ name := d.Get("name").(string)
+ resGroup := d.Get("resource_group_name").(string)
+ _, err := vnetClient.Delete(resGroup, name)
+
+ return err
+}
+
+// getVirtualNetworkProperties is a helper function which returns the
+// VirtualNetworkPropertiesFormat of the network resource.
+func getVirtualNetworkProperties(d *schema.ResourceData) *network.VirtualNetworkPropertiesFormat {
+ // first; get address space prefixes:
+ prefixes := []string{}
+ for _, prefix := range d.Get("address_space").([]interface{}) {
+ prefixes = append(prefixes, prefix.(string))
+ }
+
+ // then; the dns servers:
+ dnses := []string{}
+ for _, dns := range d.Get("dns_servers_names").([]interface{}) {
+ dnses = append(dnses, dns.(string))
+ }
+
+ // then; the subnets:
+ subnets := []network.Subnet{}
+ if subs := d.Get("subnet").(*schema.Set); subs.Len() > 0 {
+ for _, subnet := range subs.List() {
+ subnet := subnet.(map[string]interface{})
+
+ name := subnet["name"].(string)
+ prefix := subnet["address_prefix"].(string)
+ secGroup := subnet["security_group"].(string)
+
+ var subnetObj network.Subnet
+ subnetObj.Name = &name
+ subnetObj.Properties = &network.SubnetPropertiesFormat{}
+ subnetObj.Properties.AddressPrefix = &prefix
+
+ if secGroup != "" {
+ subnetObj.Properties.NetworkSecurityGroup = &network.SubResource{
+ ID: &secGroup,
+ }
+ }
+
+ subnets = append(subnets, subnetObj)
+ }
+ }
+
+ // finally; return the struct:
+ return &network.VirtualNetworkPropertiesFormat{
+ AddressSpace: &network.AddressSpace{
+ AddressPrefixes: &prefixes,
+ },
+ DhcpOptions: &network.DhcpOptions{
+ DNSServers: &dnses,
+ },
+ Subnets: &subnets,
+ }
+}
+
+func resourceAzureSubnetHash(v interface{}) int {
+ m := v.(map[string]interface{})
+ subnet := m["name"].(string) + m["address_prefix"].(string)
+ if securityGroup, present := m["security_group"]; present {
+ subnet = subnet + securityGroup.(string)
+ }
+ return hashcode.String(subnet)
+}
+
+// virtualNetworkStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
+// a virtual network.
+func virtualNetworkStateRefreshFunc(client *ArmClient, resourceGroupName string, networkName string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ res, err := client.vnetClient.Get(resourceGroupName, networkName)
+ if err != nil {
+ return nil, "", fmt.Errorf("Error issuing read request in virtualNetworkStateRefreshFunc to Azure ARM for virtual network '%s' (RG: '%s'): %s", networkName, resourceGroupName, err)
+ }
+
+ return res, *res.Properties.ProvisioningState, nil
+ }
+}
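
The subnet block is a schema.TypeSet keyed by resourceAzureSubnetHash, so reordering subnets in configuration does not register as a change, while editing a name, prefix or security group does. A standalone sketch of the same keying (the function name subnetHash and the example prefixes are illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/hashcode"
)

// subnetHash mirrors resourceAzureSubnetHash: the set key is built from
// name + address_prefix, plus security_group when present.
func subnetHash(m map[string]interface{}) int {
	key := m["name"].(string) + m["address_prefix"].(string)
	if sg, ok := m["security_group"]; ok {
		key += sg.(string)
	}
	return hashcode.String(key)
}

func main() {
	a := map[string]interface{}{"name": "subnet1", "address_prefix": "10.0.1.0/24"}
	b := map[string]interface{}{"name": "subnet1", "address_prefix": "10.0.2.0/24"}
	fmt.Println(subnetHash(a) == subnetHash(a)) // true: identical subnets share a key
	fmt.Println(subnetHash(a) == subnetHash(b)) // false: a prefix change is a new element
}
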
From 63bc8e98526cbed553437ed76833e4a4b78ad816 Mon Sep 17 00:00:00 2001
From: Nashwan Azhari
Date: Tue, 8 Dec 2015 18:50:48 -0500
Subject: [PATCH 249/664] provider/azurerm: Tidy up minor issues
This commit cleans up some of the work on the Azure ARM provider
following review by @phinze. Specifically:
- Unnecessary ASM-targeted tests are removed
- Validation is added to the `resource_group` resource
- `dns_servers_names` -> `dns_servers` as per the API documentation
- AZURE_SUBSCRIPTION_ID environment variable is renamed to
  ARM_SUBSCRIPTION_ID to match the other environment variables
---
builtin/providers/azurerm/config.go | 15 +-
builtin/providers/azurerm/provider.go | 2 +-
builtin/providers/azurerm/provider_test.go | 151 ++----------------
.../azurerm/resourceArmResourceGroup.go | 30 +++-
.../azurerm/resourceArmVirtualNetwork.go | 7 +-
5 files changed, 40 insertions(+), 165 deletions(-)
diff --git a/builtin/providers/azurerm/config.go b/builtin/providers/azurerm/config.go
index 669e4631d6..4e6a750969 100644
--- a/builtin/providers/azurerm/config.go
+++ b/builtin/providers/azurerm/config.go
@@ -182,15 +182,6 @@ func (c *Config) readArmSettings(contents string) error {
return err
}
-// configFileContentsWarning represents the warning message returned when the
-// path to the 'arm_config_file' is provided instead of its sourced contents.
-var configFileContentsWarning = `
-The path to the 'arm_config_file' was provided instead of its contents.
-Support for accepting filepaths instead of their contents will be removed
-in the near future. Do please consider switching over to using
-'${file("/path/to/config.arm")}' instead.
-`[1:]
-
// validateArmConfigFile is a helper function which verifies that
// the provided ARM configuration file is valid.
func validateArmConfigFile(v interface{}, _ string) (ws []string, es []error) {
@@ -199,15 +190,11 @@ func validateArmConfigFile(v interface{}, _ string) (ws []string, es []error) {
return nil, nil
}
- pathOrContents, wasPath, err := pathorcontents.Read(v.(string))
+ pathOrContents, _, err := pathorcontents.Read(v.(string))
if err != nil {
es = append(es, fmt.Errorf("Error reading 'arm_config_file': %s", err))
}
- if wasPath {
- ws = append(ws, configFileContentsWarning)
- }
-
data := armConfigData{}
err = json.Unmarshal([]byte(pathOrContents), &data)
if err != nil {
diff --git a/builtin/providers/azurerm/provider.go b/builtin/providers/azurerm/provider.go
index 8b559c0b12..b9b86ad607 100644
--- a/builtin/providers/azurerm/provider.go
+++ b/builtin/providers/azurerm/provider.go
@@ -23,7 +23,7 @@ func Provider() terraform.ResourceProvider {
"subscription_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
- DefaultFunc: schema.EnvDefaultFunc("AZURE_SUBSCRIPTION_ID", ""),
+ DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""),
},
"client_id": &schema.Schema{
diff --git a/builtin/providers/azurerm/provider_test.go b/builtin/providers/azurerm/provider_test.go
index 333d8fa3e6..ff15860fb3 100644
--- a/builtin/providers/azurerm/provider_test.go
+++ b/builtin/providers/azurerm/provider_test.go
@@ -1,18 +1,11 @@
package azurerm
import (
- "io"
- "io/ioutil"
- "math/rand"
"os"
- "strings"
"testing"
- "time"
- "github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
- "github.com/mitchellh/go-homedir"
)
var testAccProviders map[string]terraform.ResourceProvider
@@ -36,7 +29,7 @@ const testAccStorageContainerName = "terraform-testing-container"
func init() {
testAccProvider = Provider().(*schema.Provider)
testAccProviders = map[string]terraform.ResourceProvider{
- "azure": testAccProvider,
+ "azurerm": testAccProvider,
}
}
@@ -51,13 +44,15 @@ func TestProvider_impl(t *testing.T) {
}
func testAccPreCheck(t *testing.T) {
- if v := os.Getenv("AZURE_PUBLISH_SETTINGS"); v == "" {
- subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
- certificate := os.Getenv("AZURE_CERTIFICATE")
+ if v := os.Getenv("ARM_CREDENTIALS_FILE"); v == "" {
+ subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID")
+ clientID := os.Getenv("ARM_CLIENT_ID")
+ clientSecret := os.Getenv("ARM_CLIENT_SECRET")
+ tenantID := os.Getenv("ARM_TENANT_ID")
- if subscriptionID == "" || certificate == "" {
- t.Fatal("either AZURE_PUBLISH_SETTINGS, or AZURE_SUBSCRIPTION_ID " +
- "and AZURE_CERTIFICATE must be set for acceptance tests")
+ if subscriptionID == "" || clientID == "" || clientSecret == "" || tenantID == "" {
+ t.Fatal("Either ARM_CREDENTIALS_FILE or ARM_SUBSCRIPTION_ID, ARM_CLIENT_ID, " +
+ "ARM_CLIENT_SECRET and ARM_TENANT_ID must be set for acceptance tests")
}
}
@@ -65,131 +60,3 @@ func testAccPreCheck(t *testing.T) {
t.Fatal("AZURE_STORAGE must be set for acceptance tests")
}
}
-
-func TestAzure_validateSettingsFile(t *testing.T) {
- f, err := ioutil.TempFile("", "tf-test")
- if err != nil {
- t.Fatalf("Error creating temporary file in TestAzure_validateSettingsFile: %s", err)
- }
- defer os.Remove(f.Name())
-
- fx, err := ioutil.TempFile("", "tf-test-xml")
- if err != nil {
- t.Fatalf("Error creating temporary file with XML in TestAzure_validateSettingsFile: %s", err)
- }
- defer os.Remove(fx.Name())
- _, err = io.WriteString(fx, " ")
- if err != nil {
- t.Fatalf("Error writing XML File: %s", err)
- }
- fx.Close()
-
- home, err := homedir.Dir()
- if err != nil {
- t.Fatalf("Error fetching homedir: %s", err)
- }
- fh, err := ioutil.TempFile(home, "tf-test-home")
- if err != nil {
- t.Fatalf("Error creating homedir-based temporary file: %s", err)
- }
- defer os.Remove(fh.Name())
- _, err = io.WriteString(fh, " ")
- if err != nil {
- t.Fatalf("Error writing XML File: %s", err)
- }
- fh.Close()
-
- r := strings.NewReplacer(home, "~")
- homePath := r.Replace(fh.Name())
-
- cases := []struct {
- Input string // String of XML or a path to an XML file
- W int // expected count of warnings
- E int // expected count of errors
- }{
- {"test", 0, 1},
- {f.Name(), 1, 1},
- {fx.Name(), 1, 0},
- {homePath, 1, 0},
- {" ", 0, 0},
- }
-
- for _, tc := range cases {
- w, e := validateSettingsFile(tc.Input, "")
-
- if len(w) != tc.W {
- t.Errorf("Error in TestAzureValidateSettingsFile: input: %s , warnings: %v, errors: %v", tc.Input, w, e)
- }
- if len(e) != tc.E {
- t.Errorf("Error in TestAzureValidateSettingsFile: input: %s , warnings: %v, errors: %v", tc.Input, w, e)
- }
- }
-}
-
-func TestAzure_providerConfigure(t *testing.T) {
- home, err := homedir.Dir()
- if err != nil {
- t.Fatalf("Error fetching homedir: %s", err)
- }
- fh, err := ioutil.TempFile(home, "tf-test-home")
- if err != nil {
- t.Fatalf("Error creating homedir-based temporary file: %s", err)
- }
- defer os.Remove(fh.Name())
-
- _, err = io.WriteString(fh, testAzurePublishSettingsStr)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- fh.Close()
-
- r := strings.NewReplacer(home, "~")
- homePath := r.Replace(fh.Name())
-
- cases := []struct {
- SettingsFile string // String of XML or a path to an XML file
- NilMeta bool // whether meta is expected to be nil
- }{
- {testAzurePublishSettingsStr, false},
- {homePath, false},
- }
-
- for _, tc := range cases {
- rp := Provider()
- raw := map[string]interface{}{
- "settings_file": tc.SettingsFile,
- }
-
- rawConfig, err := config.NewRawConfig(raw)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- err = rp.Configure(terraform.NewResourceConfig(rawConfig))
- meta := rp.(*schema.Provider).Meta()
- if (meta == nil) != tc.NilMeta {
- t.Fatalf("expected NilMeta: %t, got meta: %#v, settings_file: %q",
- tc.NilMeta, meta, tc.SettingsFile)
- }
- }
-}
-
-func genRandInt() int {
- return rand.New(rand.NewSource(time.Now().UnixNano())).Int() % 100000
-}
-
-// testAzurePublishSettingsStr is a revoked publishsettings file
-const testAzurePublishSettingsStr = `
-
-
-
-
-
-
-`
diff --git a/builtin/providers/azurerm/resourceArmResourceGroup.go b/builtin/providers/azurerm/resourceArmResourceGroup.go
index 2e321aab6c..3155b68727 100644
--- a/builtin/providers/azurerm/resourceArmResourceGroup.go
+++ b/builtin/providers/azurerm/resourceArmResourceGroup.go
@@ -3,6 +3,8 @@ package azurerm
import (
"fmt"
"log"
+ "regexp"
+ "strings"
"time"
"github.com/Azure/azure-sdk-for-go/arm/resources"
@@ -21,10 +23,10 @@ func resourceArmResourceGroup() *schema.Resource {
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
- Type: schema.TypeString,
- Required: true,
- ForceNew: true,
-				//TODO(jen20) - implement validation func: {resource-group-name} must uniquely identify the resource group within the subscription. It must be no longer than 80 characters. It can only contain alphanumeric characters, dash, underscore, opening parenthesis, closing parenthesis or period. The name cannot end with a period.
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ ValidateFunc: validateArmResourceGroupName,
},
"location": &schema.Schema{
Type: schema.TypeString,
@@ -36,6 +38,26 @@ func resourceArmResourceGroup() *schema.Resource {
}
}
+// validateArmResourceGroupName validates inputs to the name argument against the requirements
+// documented in the ARM REST API guide: http://bit.ly/1NEXclG
+func validateArmResourceGroupName(v interface{}, k string) (ws []string, es []error) {
+ value := v.(string)
+
+ if len(value) > 80 {
+ es = append(es, fmt.Errorf("%q may not exceed 80 characters in length", k))
+ }
+
+ if strings.HasSuffix(value, ".") {
+ es = append(es, fmt.Errorf("%q may not end with a period", k))
+ }
+
+ if matched := regexp.MustCompile(`^[\(\)\.a-zA-Z0-9_-]$`).Match([]byte(value)); !matched {
+ es = append(es, fmt.Errorf("%q may only contain alphanumeric characters, dash, underscores, parentheses and periods", k))
+ }
+
+ return
+}
+
// resourceArmResourceGroupCreate goes ahead and creates the specified ARM resource group.
func resourceArmResourceGroupCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
diff --git a/builtin/providers/azurerm/resourceArmVirtualNetwork.go b/builtin/providers/azurerm/resourceArmVirtualNetwork.go
index c4471b37b1..d7ce438bfe 100644
--- a/builtin/providers/azurerm/resourceArmVirtualNetwork.go
+++ b/builtin/providers/azurerm/resourceArmVirtualNetwork.go
@@ -32,7 +32,7 @@ func resourceArmVirtualNetwork() *schema.Resource {
Elem: &schema.Schema{Type: schema.TypeString},
},
- "dns_servers_names": &schema.Schema{
+ "dns_servers": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{
@@ -158,7 +158,6 @@ func resourceArmVirtualNetworkRead(d *schema.ResourceData, meta interface{}) err
s["name"] = *subnet.Name
s["address_prefix"] = *subnet.Properties.AddressPrefix
- // NOTE(aznashwan): ID's necessary?
if subnet.Properties.NetworkSecurityGroup != nil {
s["security_group"] = *subnet.Properties.NetworkSecurityGroup.ID
}
@@ -172,7 +171,7 @@ func resourceArmVirtualNetworkRead(d *schema.ResourceData, meta interface{}) err
for _, dns := range *vnet.DhcpOptions.DNSServers {
dnses = append(dnses, dns)
}
- d.Set("dns_servers_names", dnses)
+ d.Set("dns_servers", dnses)
return nil
}
@@ -206,7 +205,7 @@ func getVirtualNetworkProperties(d *schema.ResourceData) *network.VirtualNetwork
// then; the dns servers:
dnses := []string{}
- for _, dns := range d.Get("dns_servers_names").([]interface{}) {
+ for _, dns := range d.Get("dns_servers").([]interface{}) {
dnses = append(dnses, dns.(string))
}
From 805c4896bda01b5e099aa1cbaa2d3b968cd5c123 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 8 Dec 2015 20:25:05 -0500
Subject: [PATCH 250/664] provider/azurerm: Clean up work for base provider
- Add documentation for resources
- Rename files to match standard patterns
- Add acceptance tests for resource groups
- Add acceptance tests for vnets
- Remove ARM_CREDENTIALS file - as discussed this does not appear to be
an Azure standard, and there is scope for confusion with the
azureProfile.json file which the CLI generates. If a standard emerges
we can reconsider this.
- Validate credentials in the schema
- Remove storage testing artefacts
- Use ARM IDs as Terraform IDs
- Use autorest hooks for logging
---
builtin/bins/provider-azurerm/main_test.go | 1 -
builtin/providers/azurerm/config.go | 99 ++++++----------
builtin/providers/azurerm/provider.go | 38 +------
builtin/providers/azurerm/provider_test.go | 34 +-----
...roup.go => resource_arm_resource_group.go} | 58 +++++-----
.../resource_arm_resource_group_test.go | 82 ++++++++++++++
...ork.go => resource_arm_virtual_network.go} | 59 ++++------
.../resource_arm_virtual_network_test.go | 100 ++++++++++++++++
builtin/providers/azurerm/resourceid.go | 82 ++++++++++++++
builtin/providers/azurerm/resourceid_test.go | 107 ++++++++++++++++++
website/Vagrantfile | 2 +-
website/source/assets/stylesheets/_docs.scss | 1 +
.../providers/azurerm/index.html.markdown | 80 +++++++++++++
.../azurerm/r/resource_group.html.markdown | 36 ++++++
.../azurerm/r/virtual_network.html.markdown | 76 +++++++++++++
website/source/layouts/azurerm.erb | 30 +++++
website/source/layouts/docs.erb | 6 +-
17 files changed, 696 insertions(+), 195 deletions(-)
delete mode 100644 builtin/bins/provider-azurerm/main_test.go
rename builtin/providers/azurerm/{resourceArmResourceGroup.go => resource_arm_resource_group.go} (67%)
create mode 100644 builtin/providers/azurerm/resource_arm_resource_group_test.go
rename builtin/providers/azurerm/{resourceArmVirtualNetwork.go => resource_arm_virtual_network.go} (75%)
create mode 100644 builtin/providers/azurerm/resource_arm_virtual_network_test.go
create mode 100644 builtin/providers/azurerm/resourceid.go
create mode 100644 builtin/providers/azurerm/resourceid_test.go
create mode 100644 website/source/docs/providers/azurerm/index.html.markdown
create mode 100644 website/source/docs/providers/azurerm/r/resource_group.html.markdown
create mode 100644 website/source/docs/providers/azurerm/r/virtual_network.html.markdown
create mode 100644 website/source/layouts/azurerm.erb
diff --git a/builtin/bins/provider-azurerm/main_test.go b/builtin/bins/provider-azurerm/main_test.go
deleted file mode 100644
index 06ab7d0f9a..0000000000
--- a/builtin/bins/provider-azurerm/main_test.go
+++ /dev/null
@@ -1 +0,0 @@
-package main
diff --git a/builtin/providers/azurerm/config.go b/builtin/providers/azurerm/config.go
index 4e6a750969..12911512b1 100644
--- a/builtin/providers/azurerm/config.go
+++ b/builtin/providers/azurerm/config.go
@@ -1,8 +1,8 @@
package azurerm
import (
- "encoding/json"
- "fmt"
+ "log"
+ "net/http"
"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/azure-sdk-for-go/arm/compute"
@@ -10,7 +10,7 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/resources"
"github.com/Azure/azure-sdk-for-go/arm/scheduler"
"github.com/Azure/azure-sdk-for-go/arm/storage"
- "github.com/hashicorp/terraform/helper/pathorcontents"
+ "github.com/Azure/go-autorest/autorest"
)
// ArmClient contains the handles to all the specific Azure Resource Manager
@@ -46,14 +46,20 @@ type ArmClient struct {
storageUsageClient storage.UsageOperationsClient
}
+func withRequestLogging() autorest.SendDecorator {
+ return func(s autorest.Sender) autorest.Sender {
+ return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
+ log.Printf("[DEBUG] Sending Azure RM Request %s to %s\n", r.Method, r.URL)
+ resp, err := s.Do(r)
+ log.Printf("[DEBUG] Received Azure RM Request status code %s for %s\n", resp.Status, r.URL)
+ return resp, err
+ })
+ }
+}
+
// getArmClient is a helper method which returns a fully instantiated
// *ArmClient based on the Config's current settings.
func (c *Config) getArmClient() (*ArmClient, error) {
- // first; check that all the necessary credentials were provided:
- if !c._armCredentialsProvided() {
- return nil, fmt.Errorf("Not all ARM-required fields have been provided.")
- }
-
spt, err := azure.NewServicePrincipalToken(c.ClientID, c.ClientSecret, c.TenantID, azure.AzureResourceManagerScope)
if err != nil {
return nil, err
@@ -66,149 +72,118 @@ func (c *Config) getArmClient() (*ArmClient, error) {
 	// clients need to be configured with custom Responders/PollingModes etc...
asc := compute.NewAvailabilitySetsClient(c.SubscriptionID)
asc.Authorizer = spt
+ asc.Sender = autorest.CreateSender(withRequestLogging())
client.availSetClient = asc
uoc := compute.NewUsageOperationsClient(c.SubscriptionID)
uoc.Authorizer = spt
+ uoc.Sender = autorest.CreateSender(withRequestLogging())
client.usageOpsClient = uoc
vmeic := compute.NewVirtualMachineExtensionImagesClient(c.SubscriptionID)
vmeic.Authorizer = spt
+ vmeic.Sender = autorest.CreateSender(withRequestLogging())
client.vmExtensionImageClient = vmeic
vmec := compute.NewVirtualMachineExtensionsClient(c.SubscriptionID)
vmec.Authorizer = spt
+ vmec.Sender = autorest.CreateSender(withRequestLogging())
client.vmExtensionClient = vmec
vmic := compute.NewVirtualMachineImagesClient(c.SubscriptionID)
vmic.Authorizer = spt
+ vmic.Sender = autorest.CreateSender(withRequestLogging())
client.vmImageClient = vmic
vmc := compute.NewVirtualMachinesClient(c.SubscriptionID)
vmc.Authorizer = spt
+ vmc.Sender = autorest.CreateSender(withRequestLogging())
client.vmClient = vmc
agc := network.NewApplicationGatewaysClient(c.SubscriptionID)
agc.Authorizer = spt
+ agc.Sender = autorest.CreateSender(withRequestLogging())
client.appGatewayClient = agc
ifc := network.NewInterfacesClient(c.SubscriptionID)
ifc.Authorizer = spt
+ ifc.Sender = autorest.CreateSender(withRequestLogging())
client.ifaceClient = ifc
lbc := network.NewLoadBalancersClient(c.SubscriptionID)
lbc.Authorizer = spt
+ lbc.Sender = autorest.CreateSender(withRequestLogging())
client.loadBalancerClient = lbc
lgc := network.NewLocalNetworkGatewaysClient(c.SubscriptionID)
lgc.Authorizer = spt
+ lgc.Sender = autorest.CreateSender(withRequestLogging())
client.localNetConnClient = lgc
pipc := network.NewPublicIPAddressesClient(c.SubscriptionID)
pipc.Authorizer = spt
+ pipc.Sender = autorest.CreateSender(withRequestLogging())
client.publicIPClient = pipc
sgc := network.NewSecurityGroupsClient(c.SubscriptionID)
sgc.Authorizer = spt
+ sgc.Sender = autorest.CreateSender(withRequestLogging())
client.secGroupClient = sgc
src := network.NewSecurityRulesClient(c.SubscriptionID)
src.Authorizer = spt
+ src.Sender = autorest.CreateSender(withRequestLogging())
client.secRuleClient = src
snc := network.NewSubnetsClient(c.SubscriptionID)
snc.Authorizer = spt
+ snc.Sender = autorest.CreateSender(withRequestLogging())
client.subnetClient = snc
vgcc := network.NewVirtualNetworkGatewayConnectionsClient(c.SubscriptionID)
vgcc.Authorizer = spt
+ vgcc.Sender = autorest.CreateSender(withRequestLogging())
client.vnetGatewayConnectionsClient = vgcc
vgc := network.NewVirtualNetworkGatewaysClient(c.SubscriptionID)
vgc.Authorizer = spt
+ vgc.Sender = autorest.CreateSender(withRequestLogging())
client.vnetGatewayClient = vgc
vnc := network.NewVirtualNetworksClient(c.SubscriptionID)
vnc.Authorizer = spt
+ vnc.Sender = autorest.CreateSender(withRequestLogging())
client.vnetClient = vnc
rgc := resources.NewGroupsClient(c.SubscriptionID)
rgc.Authorizer = spt
+ rgc.Sender = autorest.CreateSender(withRequestLogging())
client.resourceGroupClient = rgc
tc := resources.NewTagsClient(c.SubscriptionID)
tc.Authorizer = spt
+ tc.Sender = autorest.CreateSender(withRequestLogging())
client.tagsClient = tc
jc := scheduler.NewJobsClient(c.SubscriptionID)
jc.Authorizer = spt
+ jc.Sender = autorest.CreateSender(withRequestLogging())
client.jobsClient = jc
jcc := scheduler.NewJobCollectionsClient(c.SubscriptionID)
jcc.Authorizer = spt
+ jcc.Sender = autorest.CreateSender(withRequestLogging())
client.jobsCollectionsClient = jcc
ssc := storage.NewAccountsClient(c.SubscriptionID)
ssc.Authorizer = spt
+ ssc.Sender = autorest.CreateSender(withRequestLogging())
client.storageServiceClient = ssc
suc := storage.NewUsageOperationsClient(c.SubscriptionID)
suc.Authorizer = spt
+ suc.Sender = autorest.CreateSender(withRequestLogging())
client.storageUsageClient = suc
return &client, nil
}
-
-// armCredentialsProvided is a helper method which indicates whether or not the
-// credentials required for authenticating against the ARM APIs were provided.
-func (c *Config) armCredentialsProvided() bool {
- return c.ArmConfig != "" || c._armCredentialsProvided()
-}
-func (c *Config) _armCredentialsProvided() bool {
- return !(c.SubscriptionID == "" || c.ClientID == "" || c.ClientSecret == "" || c.TenantID == "")
-}
-
-// readArmSettings is a helper method which; given the contents of the ARM
-// credentials file, loads all the data into the Config.
-func (c *Config) readArmSettings(contents string) error {
- data := &armConfigData{}
- err := json.Unmarshal([]byte(contents), data)
-
- c.SubscriptionID = data.SubscriptionID
- c.ClientID = data.ClientID
- c.ClientSecret = data.ClientSecret
- c.TenantID = data.TenantID
-
- return err
-}
-
-// validateArmConfigFile is a helper function which verifies that
-// the provided ARM configuration file is valid.
-func validateArmConfigFile(v interface{}, _ string) (ws []string, es []error) {
- value := v.(string)
- if value == "" {
- return nil, nil
- }
-
- pathOrContents, _, err := pathorcontents.Read(v.(string))
- if err != nil {
- es = append(es, fmt.Errorf("Error reading 'arm_config_file': %s", err))
- }
-
- data := armConfigData{}
- err = json.Unmarshal([]byte(pathOrContents), &data)
- if err != nil {
- es = append(es, fmt.Errorf("Error unmarshalling the provided 'arm_config_file': %s", err))
- }
-
- return
-}
-
-// armConfigData is a private struct which represents the expected layout of
-// an ARM configuration file. It is used for unmarshalling purposes.
-type armConfigData struct {
- ClientID string `json:"clientID"`
- ClientSecret string `json:"clientSecret"`
- SubscriptionID string `json:"subscriptionID"`
- TenantID string `json:"tenantID"`
-}
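
The request logging above relies on autorest's SendDecorator hook: CreateSender composes any number of decorators around the HTTP transport used by the generated clients. A short sketch of chaining a second decorator; withUserAgent and the header value are illustrative, not part of this change:

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// withUserAgent is a hypothetical second hook, composed the same way
// withRequestLogging is wired onto each client's Sender above.
func withUserAgent(ua string) autorest.SendDecorator {
	return func(s autorest.Sender) autorest.Sender {
		return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
			r.Header.Set("User-Agent", ua)
			return s.Do(r)
		})
	}
}

func main() {
	sender := autorest.CreateSender(withUserAgent("terraform-example"))
	req, _ := http.NewRequest("GET", "https://management.azure.com/", nil)
	resp, err := sender.Do(req) // performs a real HTTP round trip when executed
	fmt.Println(resp != nil, err)
}
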
diff --git a/builtin/providers/azurerm/provider.go b/builtin/providers/azurerm/provider.go
index b9b86ad607..53c5c97a68 100644
--- a/builtin/providers/azurerm/provider.go
+++ b/builtin/providers/azurerm/provider.go
@@ -1,7 +1,6 @@
package azurerm
import (
- "fmt"
"strings"
"github.com/hashicorp/terraform/helper/schema"
@@ -12,35 +11,27 @@ import (
func Provider() terraform.ResourceProvider {
return &schema.Provider{
Schema: map[string]*schema.Schema{
- "arm_config_file": &schema.Schema{
- Type: schema.TypeString,
- Optional: true,
- Default: "",
- DefaultFunc: schema.EnvDefaultFunc("ARM_CONFIG_FILE", nil),
- ValidateFunc: validateArmConfigFile,
- },
-
"subscription_id": &schema.Schema{
Type: schema.TypeString,
- Optional: true,
+ Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""),
},
"client_id": &schema.Schema{
Type: schema.TypeString,
- Optional: true,
+ Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""),
},
"client_secret": &schema.Schema{
Type: schema.TypeString,
- Optional: true,
+ Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""),
},
"tenant_id": &schema.Schema{
Type: schema.TypeString,
- Optional: true,
+ Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""),
},
},
@@ -59,19 +50,12 @@ func Provider() terraform.ResourceProvider {
type Config struct {
ManagementURL string
- ArmConfig string
-
SubscriptionID string
ClientID string
ClientSecret string
TenantID string
}
-const noConfigError = `Credentials must be provided either via arm_config_file, or via
-subscription_id, client_id, client_secret and tenant_id. Please see
-the provider documentation for more information on how to obtain these
-credentials.`
-
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
config := Config{
SubscriptionID: d.Get("subscription_id").(string),
@@ -80,20 +64,6 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
TenantID: d.Get("tenant_id").(string),
}
- // check if credentials file is provided:
- armConfig := d.Get("arm_config_file").(string)
- if armConfig != "" {
- // then, load the settings from that:
- if err := config.readArmSettings(armConfig); err != nil {
- return nil, err
- }
- }
-
- // then; check whether the ARM credentials were provided:
- if !config.armCredentialsProvided() {
- return nil, fmt.Errorf(noConfigError)
- }
-
client, err := config.getArmClient()
if err != nil {
return nil, err
diff --git a/builtin/providers/azurerm/provider_test.go b/builtin/providers/azurerm/provider_test.go
index ff15860fb3..a26249f588 100644
--- a/builtin/providers/azurerm/provider_test.go
+++ b/builtin/providers/azurerm/provider_test.go
@@ -11,21 +11,6 @@ import (
var testAccProviders map[string]terraform.ResourceProvider
var testAccProvider *schema.Provider
-const (
- testAccSecurityGroupName = "terraform-security-group"
- testAccHostedServiceName = "terraform-testing-service"
-)
-
-// testAccStorageServiceName is used as the name for the Storage Service
-// created in all storage-related tests.
-// It is much more convenient to provide a Storage Service which
-// has been created beforehand as the creation of one takes a long time
-// and would greatly impede the multitude of tests which rely on one.
-// NOTE: the storage container should be located in `West US`.
-var testAccStorageServiceName = os.Getenv("AZURE_STORAGE")
-
-const testAccStorageContainerName = "terraform-testing-container"
-
func init() {
testAccProvider = Provider().(*schema.Provider)
testAccProviders = map[string]terraform.ResourceProvider{
@@ -44,19 +29,12 @@ func TestProvider_impl(t *testing.T) {
}
func testAccPreCheck(t *testing.T) {
- if v := os.Getenv("ARM_CREDENTIALS_FILE"); v == "" {
- subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID")
- clientID := os.Getenv("ARM_CLIENT_ID")
- clientSecret := os.Getenv("ARM_CLIENT_SECRET")
- tenantID := os.Getenv("ARM_TENANT_ID")
+ subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID")
+ clientID := os.Getenv("ARM_CLIENT_ID")
+ clientSecret := os.Getenv("ARM_CLIENT_SECRET")
+ tenantID := os.Getenv("ARM_TENANT_ID")
- if subscriptionID == "" || clientID == "" || clientSecret == "" || tenantID == "" {
- t.Fatal("Either ARM_CREDENTIALS_FILE or ARM_SUBSCRIPTION_ID, ARM_CLIENT_ID, " +
- "ARM_CLIENT_SECRET and ARM_TENANT_ID must be set for acceptance tests")
- }
- }
-
- if v := os.Getenv("AZURE_STORAGE"); v == "" {
- t.Fatal("AZURE_STORAGE must be set for acceptance tests")
+ if subscriptionID == "" || clientID == "" || clientSecret == "" || tenantID == "" {
+ t.Fatal("ARM_SUBSCRIPTION_ID, ARM_CLIENT_ID, ARM_CLIENT_SECRET and ARM_TENANT_ID must be set for acceptance tests")
}
}
diff --git a/builtin/providers/azurerm/resourceArmResourceGroup.go b/builtin/providers/azurerm/resource_arm_resource_group.go
similarity index 67%
rename from builtin/providers/azurerm/resourceArmResourceGroup.go
rename to builtin/providers/azurerm/resource_arm_resource_group.go
index 3155b68727..a4304c8d4a 100644
--- a/builtin/providers/azurerm/resourceArmResourceGroup.go
+++ b/builtin/providers/azurerm/resource_arm_resource_group.go
@@ -3,6 +3,7 @@ package azurerm
import (
"fmt"
"log"
+ "net/http"
"regexp"
"strings"
"time"
@@ -12,8 +13,6 @@ import (
"github.com/hashicorp/terraform/helper/schema"
)
-// resourceArmResourceGroup returns the *schema.Resource
-// associated to resource group resources on ARM.
func resourceArmResourceGroup() *schema.Resource {
return &schema.Resource{
Create: resourceArmResourceGroupCreate,
@@ -38,8 +37,6 @@ func resourceArmResourceGroup() *schema.Resource {
}
}
-// validateArmResourceGroupName validates inputs to the name argument against the requirements
-// documented in the ARM REST API guide: http://bit.ly/1NEXclG
func validateArmResourceGroupName(v interface{}, k string) (ws []string, es []error) {
value := v.(string)
@@ -51,14 +48,13 @@ func validateArmResourceGroupName(v interface{}, k string) (ws []string, es []er
es = append(es, fmt.Errorf("%q may not end with a period", k))
}
- if matched := regexp.MustCompile(`^[\(\)\.a-zA-Z0-9_-]$`).Match([]byte(value)); !matched {
+ if matched := regexp.MustCompile(`[\(\)\.a-zA-Z0-9_-]`).Match([]byte(value)); !matched {
es = append(es, fmt.Errorf("%q may only contain alphanumeric characters, dash, underscores, parentheses and periods", k))
}
return
}
-// resourceArmResourceGroupCreate goes ahead and creates the specified ARM resource group.
func resourceArmResourceGroupCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
resGroupClient := client.resourceGroupClient
@@ -66,64 +62,67 @@ func resourceArmResourceGroupCreate(d *schema.ResourceData, meta interface{}) er
name := d.Get("name").(string)
location := d.Get("location").(string)
- log.Printf("[INFO] Issuing Azure ARM creation request for resource group '%s'.", name)
-
rg := resources.ResourceGroup{
Name: &name,
Location: &location,
}
- _, err := resGroupClient.CreateOrUpdate(name, rg)
+ resp, err := resGroupClient.CreateOrUpdate(name, rg)
if err != nil {
return fmt.Errorf("Error issuing Azure ARM create request for resource group '%s': %s", name, err)
}
- d.SetId(*rg.Name)
+ d.SetId(*resp.ID)
- // Wait for the resource group to become available
- // TODO(jen20): Is there any need for this?
- log.Printf("[DEBUG] Waiting for Resource Group (%s) to become available", d.Id())
+ log.Printf("[DEBUG] Waiting for Resource Group (%s) to become available", name)
stateConf := &resource.StateChangeConf{
Pending: []string{"Accepted"},
Target: "Succeeded",
- Refresh: resourceGroupStateRefreshFunc(client, d.Id()),
+ Refresh: resourceGroupStateRefreshFunc(client, name),
Timeout: 10 * time.Minute,
}
if _, err := stateConf.WaitForState(); err != nil {
- return fmt.Errorf("Error waiting for Resource Group (%s) to become available: %s", d.Id(), err)
+ return fmt.Errorf("Error waiting for Resource Group (%s) to become available: %s", name, err)
}
return resourceArmResourceGroupRead(d, meta)
}
-// resourceArmResourceGroupRead goes ahead and reads the state of the corresponding ARM resource group.
func resourceArmResourceGroupRead(d *schema.ResourceData, meta interface{}) error {
resGroupClient := meta.(*ArmClient).resourceGroupClient
- name := d.Id()
- log.Printf("[INFO] Issuing read request to Azure ARM for resource group '%s'.", name)
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+ name := id.ResourceGroup
res, err := resGroupClient.Get(name)
if err != nil {
+ if res.StatusCode == http.StatusNotFound {
+ d.SetId("")
+ return nil
+ }
return fmt.Errorf("Error issuing read request to Azure ARM for resource group '%s': %s", name, err)
}
- d.Set("name", *res.Name)
- d.Set("location", *res.Location)
+ d.Set("name", res.Name)
+ d.Set("location", res.Location)
return nil
}
-// resourceArmResourceGroupExists goes ahead and checks for the existence of the corresponding ARM resource group.
func resourceArmResourceGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) {
resGroupClient := meta.(*ArmClient).resourceGroupClient
- name := d.Id()
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return false, err
+ }
+ name := id.ResourceGroup
resp, err := resGroupClient.CheckExistence(name)
if err != nil {
- // TODO(aznashwan): implement some error switching helpers in the SDK
- // to avoid HTTP error checks such as the below:
if resp.StatusCode != 200 {
return false, err
}
@@ -134,13 +133,16 @@ func resourceArmResourceGroupExists(d *schema.ResourceData, meta interface{}) (b
return true, nil
}
-// resourceArmResourceGroupDelete deletes the specified ARM resource group.
func resourceArmResourceGroupDelete(d *schema.ResourceData, meta interface{}) error {
resGroupClient := meta.(*ArmClient).resourceGroupClient
- name := d.Id()
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+ name := id.ResourceGroup
- _, err := resGroupClient.Delete(name)
+ _, err = resGroupClient.Delete(name)
if err != nil {
return err
}
@@ -148,8 +150,6 @@ func resourceArmResourceGroupDelete(d *schema.ResourceData, meta interface{}) er
return nil
}
-// resourceGroupStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
-// a resource group.
func resourceGroupStateRefreshFunc(client *ArmClient, id string) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
res, err := client.resourceGroupClient.Get(id)
diff --git a/builtin/providers/azurerm/resource_arm_resource_group_test.go b/builtin/providers/azurerm/resource_arm_resource_group_test.go
new file mode 100644
index 0000000000..2f7f80ab89
--- /dev/null
+++ b/builtin/providers/azurerm/resource_arm_resource_group_test.go
@@ -0,0 +1,82 @@
+package azurerm
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/Azure/azure-sdk-for-go/core/http"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAzureRMResourceGroup_basic(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMResourceGroupDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureRMResourceGroup_basic,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMResourceGroupExists("azurerm_resource_group.test"),
+ ),
+ },
+ },
+ })
+}
+
+func testCheckAzureRMResourceGroupExists(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ // Ensure we have enough information in state to look up in API
+ rs, ok := s.RootModule().Resources[name]
+ if !ok {
+ return fmt.Errorf("Not found: %s", name)
+ }
+
+ resourceGroup := rs.Primary.Attributes["name"]
+
+ // Ensure resource group exists in API
+ conn := testAccProvider.Meta().(*ArmClient).resourceGroupClient
+
+ resp, err := conn.Get(resourceGroup)
+ if err != nil {
+ return fmt.Errorf("Bad: Get on resourceGroupClient: %s", err)
+ }
+
+ if resp.StatusCode == http.StatusNotFound {
+			return fmt.Errorf("Bad: resource group %q does not exist", resourceGroup)
+ }
+
+ return nil
+ }
+}
+
+func testCheckAzureRMResourceGroupDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*ArmClient).resourceGroupClient
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "azurerm_resource_group" {
+ continue
+ }
+
+ resourceGroup := rs.Primary.ID
+
+ resp, err := conn.Get(resourceGroup)
+ if err != nil {
+ return nil
+ }
+
+ if resp.StatusCode != http.StatusNotFound {
+ return fmt.Errorf("Resource Group still exists:\n%#v", resp.Properties)
+ }
+ }
+
+ return nil
+}
+
+var testAccAzureRMResourceGroup_basic = `
+resource "azurerm_resource_group" "test" {
+ name = "acceptanceTestResourceGroup1_basic"
+ location = "West US"
+}
+`
diff --git a/builtin/providers/azurerm/resourceArmVirtualNetwork.go b/builtin/providers/azurerm/resource_arm_virtual_network.go
similarity index 75%
rename from builtin/providers/azurerm/resourceArmVirtualNetwork.go
rename to builtin/providers/azurerm/resource_arm_virtual_network.go
index d7ce438bfe..305af5a766 100644
--- a/builtin/providers/azurerm/resourceArmVirtualNetwork.go
+++ b/builtin/providers/azurerm/resource_arm_virtual_network.go
@@ -16,7 +16,7 @@ func resourceArmVirtualNetwork() *schema.Resource {
return &schema.Resource{
Create: resourceArmVirtualNetworkCreate,
Read: resourceArmVirtualNetworkRead,
- Update: resourceArmVirtualNetworkUpdate,
+ Update: resourceArmVirtualNetworkCreate,
Delete: resourceArmVirtualNetworkDelete,
Schema: map[string]*schema.Schema{
@@ -78,7 +78,6 @@ func resourceArmVirtualNetwork() *schema.Resource {
}
}
-// resourceArmVirtualNetworkCreate creates the specified ARM virtual network.
func resourceArmVirtualNetworkCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
vnetClient := client.vnetClient
@@ -95,22 +94,14 @@ func resourceArmVirtualNetworkCreate(d *schema.ResourceData, meta interface{}) e
Properties: getVirtualNetworkProperties(d),
}
- log.Printf("[INFO] Sending virtual network create request to ARM.")
- _, err := vnetClient.CreateOrUpdate(resGroup, name, vnet)
+ resp, err := vnetClient.CreateOrUpdate(resGroup, name, vnet)
if err != nil {
return err
}
- // if res.Response.StatusCode != http.StatusAccepted {
- // return fmt.Errorf("Creation request was denies: code: %d", res.Response.StatusCode)
- // }
+ d.SetId(*resp.ID)
- d.SetId(name)
- d.Set("resGroup", resGroup)
-
- // Wait for the resource group to become available
- // TODO(jen20): Is there any need for this?
- log.Printf("[DEBUG] Waiting for Virtual Network (%s) to become available", d.Id())
+ log.Printf("[DEBUG] Waiting for Virtual Network (%s) to become available", name)
stateConf := &resource.StateChangeConf{
Pending: []string{"Accepted", "Updating"},
Target: "Succeeded",
@@ -118,25 +109,24 @@ func resourceArmVirtualNetworkCreate(d *schema.ResourceData, meta interface{}) e
Timeout: 10 * time.Minute,
}
if _, err := stateConf.WaitForState(); err != nil {
- return fmt.Errorf("Error waiting for Virtual Network (%s) to become available: %s", d.Id(), err)
+ return fmt.Errorf("Error waiting for Virtual Network (%s) to become available: %s", name, err)
}
return resourceArmVirtualNetworkRead(d, meta)
}
-// resourceArmVirtualNetworkRead goes ahead and reads the state of the corresponding ARM virtual network.
func resourceArmVirtualNetworkRead(d *schema.ResourceData, meta interface{}) error {
vnetClient := meta.(*ArmClient).vnetClient
- name := d.Get("name").(string)
- resGroup := d.Get("resource_group_name").(string)
-
- log.Printf("[INFO] Sending virtual network read request to ARM.")
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+ resGroup := id.ResourceGroup
+ name := id.Path["virtualNetworks"]
resp, err := vnetClient.Get(resGroup, name)
if resp.StatusCode == http.StatusNotFound {
- // it means the virtual network has been deleted in the meantime;
- // so we must go ahead and remove it here:
d.SetId("")
return nil
}
@@ -145,10 +135,9 @@ func resourceArmVirtualNetworkRead(d *schema.ResourceData, meta interface{}) err
}
vnet := *resp.Properties
- // update all the appropriate values:
+ // update appropriate values
d.Set("address_space", vnet.AddressSpace.AddressPrefixes)
- // read state of subnets:
subnets := &schema.Set{
F: resourceAzureSubnetHash,
}
@@ -166,7 +155,6 @@ func resourceArmVirtualNetworkRead(d *schema.ResourceData, meta interface{}) err
}
d.Set("subnet", subnets)
- // now; dns servers:
dnses := []string{}
for _, dns := range *vnet.DhcpOptions.DNSServers {
dnses = append(dnses, dns)
@@ -176,26 +164,21 @@ func resourceArmVirtualNetworkRead(d *schema.ResourceData, meta interface{}) err
return nil
}
-// resourceArmVirtualNetworkUpdate goes ahead and updates the corresponding ARM virtual network.
-func resourceArmVirtualNetworkUpdate(d *schema.ResourceData, meta interface{}) error {
- // considering Create's idempotency, Update is simply a proxy for it...
- // Update has been left as a separate function here for utmost clarity:
- return resourceArmVirtualNetworkCreate(d, meta)
-}
-
-// resourceArmVirtualNetworkDelete deletes the specified ARM virtual network.
func resourceArmVirtualNetworkDelete(d *schema.ResourceData, meta interface{}) error {
vnetClient := meta.(*ArmClient).vnetClient
- name := d.Get("name").(string)
- resGroup := d.Get("resource_group_name").(string)
- _, err := vnetClient.Delete(resGroup, name)
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+ resGroup := id.ResourceGroup
+ name := id.Path["virtualNetworks"]
+
+ _, err = vnetClient.Delete(resGroup, name)
return err
}
-// getVirtualNetworkProperties is a helper function which returns the
-// VirtualNetworkPropertiesFormat of the network resource.
func getVirtualNetworkProperties(d *schema.ResourceData) *network.VirtualNetworkPropertiesFormat {
// first; get address space prefixes:
prefixes := []string{}
@@ -255,8 +238,6 @@ func resourceAzureSubnetHash(v interface{}) int {
return hashcode.String(subnet)
}
-// virtualNetworkStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
-// a virtual network.
func virtualNetworkStateRefreshFunc(client *ArmClient, resourceGroupName string, networkName string) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
res, err := client.vnetClient.Get(resourceGroupName, networkName)
diff --git a/builtin/providers/azurerm/resource_arm_virtual_network_test.go b/builtin/providers/azurerm/resource_arm_virtual_network_test.go
new file mode 100644
index 0000000000..41be2fa7dd
--- /dev/null
+++ b/builtin/providers/azurerm/resource_arm_virtual_network_test.go
@@ -0,0 +1,100 @@
+package azurerm
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/Azure/azure-sdk-for-go/core/http"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAzureRMVirtualNetwork_basic(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMVirtualNetworkDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureRMVirtualNetwork_basic,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMVirtualNetworkExists("azurerm_virtual_network.test"),
+ ),
+ },
+ },
+ })
+}
+
+func testCheckAzureRMVirtualNetworkExists(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ // Ensure we have enough information in state to look up in API
+ rs, ok := s.RootModule().Resources[name]
+ if !ok {
+ return fmt.Errorf("Not found: %s", name)
+ }
+
+ virtualNetworkName := rs.Primary.Attributes["name"]
+ resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"]
+ if !hasResourceGroup {
+ return fmt.Errorf("Bad: no resource group found in state for virtual network: %s", virtualNetworkName)
+ }
+
+ // Ensure resource group/virtual network combination exists in API
+ conn := testAccProvider.Meta().(*ArmClient).vnetClient
+
+ resp, err := conn.Get(resourceGroup, virtualNetworkName)
+ if err != nil {
+ return fmt.Errorf("Bad: Get on vnetClient: %s", err)
+ }
+
+ if resp.StatusCode == http.StatusNotFound {
+ return fmt.Errorf("Bad: Virtual Network %q (resource group: %q) does not exist", name, resourceGroup)
+ }
+
+ return nil
+ }
+}
+
+func testCheckAzureRMVirtualNetworkDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*ArmClient).vnetClient
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "azurerm_virtual_network" {
+ continue
+ }
+
+ name := rs.Primary.Attributes["name"]
+ resourceGroup := rs.Primary.Attributes["resource_group_name"]
+
+ resp, err := conn.Get(resourceGroup, name)
+
+ if err != nil {
+ return nil
+ }
+
+ if resp.StatusCode != http.StatusNotFound {
+			return fmt.Errorf("Virtual Network still exists:\n%#v", resp.Properties)
+ }
+ }
+
+ return nil
+}
+
+var testAccAzureRMVirtualNetwork_basic = `
+resource "azurerm_resource_group" "test" {
+ name = "acceptanceTestResourceGroup1"
+ location = "West US"
+}
+
+resource "azurerm_virtual_network" "test" {
+ name = "acceptanceTestVirtualNetwork1"
+ address_space = ["10.0.0.0/16"]
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+
+ subnet {
+ name = "subnet1"
+ address_prefix = "10.0.1.0/24"
+ }
+}
+`
diff --git a/builtin/providers/azurerm/resourceid.go b/builtin/providers/azurerm/resourceid.go
new file mode 100644
index 0000000000..fd3e0718cc
--- /dev/null
+++ b/builtin/providers/azurerm/resourceid.go
@@ -0,0 +1,82 @@
+package azurerm
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+// ResourceID represents a parsed long-form Azure Resource Manager ID
+// with the Subscription ID, Resource Group and the Provider as top-
+// level fields, and other key-value pairs available via a map in the
+// Path field.
+type ResourceID struct {
+ SubscriptionID string
+ ResourceGroup string
+ Provider string
+ Path map[string]string
+}
+
+// parseAzureResourceID converts a long-form Azure Resource Manager ID
+// into a ResourceID. We make assumptions about the structure of URLs,
+// which is obviously not good, but the best thing available given the
+// SDK.
+func parseAzureResourceID(id string) (*ResourceID, error) {
+ idURL, err := url.ParseRequestURI(id)
+ if err != nil {
+ return nil, fmt.Errorf("Cannot parse Azure Id: %s", err)
+ }
+
+ path := idURL.Path
+
+ path = strings.TrimSpace(path)
+ if strings.HasPrefix(path, "/") {
+ path = path[1:]
+ }
+
+ if strings.HasSuffix(path, "/") {
+ path = path[:len(path)-1]
+ }
+
+ components := strings.Split(path, "/")
+
+ // We should have an even number of key-value pairs.
+ if len(components)%2 != 0 {
+ return nil, fmt.Errorf("The number of path segments is not divisible by 2 in %q", path)
+ }
+
+ // Put the constituent key-value pairs into a map
+ componentMap := make(map[string]string, len(components)/2)
+ for current := 0; current < len(components); current += 2 {
+ key := components[current]
+ value := components[current+1]
+
+ componentMap[key] = value
+ }
+
+ // Build up a ResourceID from the map
+ idObj := &ResourceID{}
+ idObj.Path = componentMap
+
+ if subscription, ok := componentMap["subscriptions"]; ok {
+ idObj.SubscriptionID = subscription
+ delete(componentMap, "subscriptions")
+ } else {
+ return nil, fmt.Errorf("No subscription ID found in: %q", path)
+ }
+
+ if resourceGroup, ok := componentMap["resourceGroups"]; ok {
+ idObj.ResourceGroup = resourceGroup
+ delete(componentMap, "resourceGroups")
+ } else {
+ return nil, fmt.Errorf("No resource group name found in: %q", path)
+ }
+
+ // It is OK not to have a provider in the case of a resource group
+ if provider, ok := componentMap["providers"]; ok {
+ idObj.Provider = provider
+ delete(componentMap, "providers")
+ }
+
+ return idObj, nil
+}
diff --git a/builtin/providers/azurerm/resourceid_test.go b/builtin/providers/azurerm/resourceid_test.go
new file mode 100644
index 0000000000..15caad8005
--- /dev/null
+++ b/builtin/providers/azurerm/resourceid_test.go
@@ -0,0 +1,107 @@
+package azurerm
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestParseAzureResourceID(t *testing.T) {
+ testCases := []struct {
+ id string
+ expectedResourceID *ResourceID
+ expectError bool
+ }{
+ {
+ "random",
+ nil,
+ true,
+ },
+ {
+ "/subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038",
+ nil,
+ true,
+ },
+ {
+ "subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038",
+ nil,
+ true,
+ },
+ {
+ "/subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038/resourceGroups/testGroup1",
+ &ResourceID{
+ SubscriptionID: "6d74bdd2-9f84-11e5-9bd9-7831c1c4c038",
+ ResourceGroup: "testGroup1",
+ Provider: "",
+ Path: map[string]string{},
+ },
+ false,
+ },
+ {
+ "/subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038/resourceGroups/testGroup1/providers/Microsoft.Network",
+ &ResourceID{
+ SubscriptionID: "6d74bdd2-9f84-11e5-9bd9-7831c1c4c038",
+ ResourceGroup: "testGroup1",
+ Provider: "Microsoft.Network",
+ Path: map[string]string{},
+ },
+ false,
+ },
+ {
+ // Missing leading /
+ "subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038/resourceGroups/testGroup1/providers/Microsoft.Network/virtualNetworks/virtualNetwork1/",
+ nil,
+ true,
+ },
+ {
+ "/subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038/resourceGroups/testGroup1/providers/Microsoft.Network/virtualNetworks/virtualNetwork1",
+ &ResourceID{
+ SubscriptionID: "6d74bdd2-9f84-11e5-9bd9-7831c1c4c038",
+ ResourceGroup: "testGroup1",
+ Provider: "Microsoft.Network",
+ Path: map[string]string{
+ "virtualNetworks": "virtualNetwork1",
+ },
+ },
+ false,
+ },
+ {
+ "/subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038/resourceGroups/testGroup1/providers/Microsoft.Network/virtualNetworks/virtualNetwork1?api-version=2006-01-02-preview",
+ &ResourceID{
+ SubscriptionID: "6d74bdd2-9f84-11e5-9bd9-7831c1c4c038",
+ ResourceGroup: "testGroup1",
+ Provider: "Microsoft.Network",
+ Path: map[string]string{
+ "virtualNetworks": "virtualNetwork1",
+ },
+ },
+ false,
+ },
+ {
+ "/subscriptions/6d74bdd2-9f84-11e5-9bd9-7831c1c4c038/resourceGroups/testGroup1/providers/Microsoft.Network/virtualNetworks/virtualNetwork1/subnets/publicInstances1?api-version=2006-01-02-preview",
+ &ResourceID{
+ SubscriptionID: "6d74bdd2-9f84-11e5-9bd9-7831c1c4c038",
+ ResourceGroup: "testGroup1",
+ Provider: "Microsoft.Network",
+ Path: map[string]string{
+ "virtualNetworks": "virtualNetwork1",
+ "subnets": "publicInstances1",
+ },
+ },
+ false,
+ },
+ }
+
+ for _, test := range testCases {
+ parsed, err := parseAzureResourceID(test.id)
+ if test.expectError && err != nil {
+ continue
+ }
+ if err != nil {
+ t.Fatalf("Unexpected error: %s", err)
+ }
+
+ if !reflect.DeepEqual(test.expectedResourceID, parsed) {
+ t.Fatalf("Unexpected resource ID:\nExpected: %+v\nGot: %+v\n", test.expectedResourceID, parsed)
+ }
+ }
+}
diff --git a/website/Vagrantfile b/website/Vagrantfile
index 4bfc410e20..6507bea16a 100644
--- a/website/Vagrantfile
+++ b/website/Vagrantfile
@@ -28,7 +28,7 @@ bundle exec middleman server &
SCRIPT
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- config.vm.box = "chef/ubuntu-12.04"
+ config.vm.box = "bento/ubuntu-12.04"
config.vm.network "private_network", ip: "33.33.30.10"
config.vm.provision "shell", inline: $script, privileged: false
config.vm.synced_folder ".", "/vagrant", type: "rsync"
diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss
index 5f2b070212..49e9e164f8 100755
--- a/website/source/assets/stylesheets/_docs.scss
+++ b/website/source/assets/stylesheets/_docs.scss
@@ -10,6 +10,7 @@ body.layout-atlas,
body.layout-aws,
body.layout-azure,
body.layout-chef,
+body.layout-azurerm,
body.layout-cloudflare,
body.layout-cloudstack,
body.layout-consul,
diff --git a/website/source/docs/providers/azurerm/index.html.markdown b/website/source/docs/providers/azurerm/index.html.markdown
new file mode 100644
index 0000000000..0655a48433
--- /dev/null
+++ b/website/source/docs/providers/azurerm/index.html.markdown
@@ -0,0 +1,80 @@
+---
+layout: "azurerm"
+page_title: "Provider: Azure Resource Manager"
+sidebar_current: "docs-azurerm-index"
+description: |-
+  The Azure Resource Manager provider is used to interact with the many resources supported by Azure, via the ARM API. This supersedes the Azure provider, which interacts with Azure using the Service Management API. The provider needs to be configured with a credentials file, or credentials needed to generate OAuth tokens for the ARM API.
+---
+
+# Azure Resource Manager Provider
+
+The Azure Resource Manager provider is used to interact with the many resources
+supported by Azure, via the ARM API. This supersedes the Azure provider, which
+interacts with Azure using the Service Management API. The provider needs to be
+configured with the credentials needed to generate OAuth tokens for the ARM API.
+
+Use the navigation to the left to read about the available resources.
+
+## Example Usage
+
+```
+# Configure the Azure Resource Manager Provider
+provider "azurerm" {
+ subscription_id = "..."
+ client_id = "..."
+ client_secret = "..."
+ tenant_id = "..."
+}
+
+# Create a resource group
+resource "azurerm_resource_group" "production" {
+ name = "production"
+ location = "West US"
+}
+
+# Create a virtual network in the web_servers resource group
+resource "azurerm_virtual_network" "network" {
+ name = "productionNetwork"
+ address_space = ["10.0.0.0/16"]
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.production.name}"
+
+ subnet {
+ name = "subnet1"
+ address_prefix = "10.0.1.0/24"
+ }
+
+ subnet {
+ name = "subnet2"
+ address_prefix = "10.0.2.0/24"
+ }
+
+ subnet {
+ name = "subnet3"
+ address_prefix = "10.0.3.0/24"
+ }
+}
+
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `subscription_id` - (Optional) The subscription ID to use. It can also
+ be sourced from the `ARM_SUBSCRIPTION_ID` environment variable.
+
+* `client_id` - (Optional) The client ID to use. It can also be sourced from
+ the `ARM_CLIENT_ID` environment variable.
+
+* `client_secret` - (Optional) The client secret to use. It can also be sourced from
+ the `ARM_CLIENT_SECRET` environment variable.
+
+* `tenant_id` - (Optional) The tenant ID to use. It can also be sourced from the
+ `ARM_TENANT_ID` environment variable.
+
+## Testing:
+
+Credentials must be provided via the `ARM_SUBSCRIPTION_ID`, `ARM_CLIENT_ID`,
+`ARM_CLIENT_SECRET` and `ARM_TENANT_ID` environment variables in order to run
+acceptance tests.
diff --git a/website/source/docs/providers/azurerm/r/resource_group.html.markdown b/website/source/docs/providers/azurerm/r/resource_group.html.markdown
new file mode 100644
index 0000000000..06d6dbb225
--- /dev/null
+++ b/website/source/docs/providers/azurerm/r/resource_group.html.markdown
@@ -0,0 +1,36 @@
+---
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_resource_group"
+sidebar_current: "docs-azurerm-resource-resource-group"
+description: |-
+ Creates a new resource group on Azure.
+---
+
+# azurerm\_resource\_group
+
+Creates a new resource group on Azure.
+
+## Example Usage
+
+```
+resource "azurerm_resource_group" "test" {
+ name = "testResourceGroup1"
+ location = "West US"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the resource group. Must be unique on your
+ Azure subscription.
+
+* `location` - (Required) The location where the resource group should be created.
+ For a list of all Azure locations, please consult [this link](http://azure.microsoft.com/en-us/regions/).
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The resource group ID.
diff --git a/website/source/docs/providers/azurerm/r/virtual_network.html.markdown b/website/source/docs/providers/azurerm/r/virtual_network.html.markdown
new file mode 100644
index 0000000000..164ffce4b3
--- /dev/null
+++ b/website/source/docs/providers/azurerm/r/virtual_network.html.markdown
@@ -0,0 +1,76 @@
+---
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_virtual_network"
+sidebar_current: "docs-azurerm-resource-virtual-network"
+description: |-
+ Creates a new virtual network including any configured subnets. Each subnet can optionally be configured with a security group to be associated with the subnet.
+---
+
+# azurerm\_virtual\_network
+
+Creates a new virtual network including any configured subnets. Each subnet can
+optionally be configured with a security group to be associated with the subnet.
+
+## Example Usage
+
+```
+resource "azurerm_virtual_network" "test" {
+ name = "virtualNetwork1"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ address_space = ["10.0.0.0/16"]
+ location = "West US"
+
+ subnet {
+ name = "subnet1"
+ address_prefix = "10.0.1.0/24"
+ }
+
+ subnet {
+ name = "subnet2"
+ address_prefix = "10.0.2.0/24"
+ }
+
+ subnet {
+ name = "subnet3"
+ address_prefix = "10.0.3.0/24"
+ }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the virtual network. Changing this forces a
+ new resource to be created.
+
+* `resource_group_name` - (Required) The name of the resource group in which to
+ create the virtual network.
+
+* `address_space` - (Required) The address space that is used by the virtual
+ network. You can supply more than one address space. Changing this forces
+ a new resource to be created.
+
+* `location` - (Required) The location/region where the virtual network is
+ created. Changing this forces a new resource to be created.
+
+* `dns_servers` - (Optional) List of names of DNS servers previously registered
+ on Azure.
+
+* `subnet` - (Required) Can be specified multiple times to define multiple
+ subnets. Each `subnet` block supports fields documented below.
+
+The `subnet` block supports:
+
+* `name` - (Required) The name of the subnet.
+
+* `address_prefix` - (Required) The address prefix to use for the subnet.
+
+* `security_group` - (Optional) The Network Security Group to associate with
+ the subnet.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The virtual network ID.
diff --git a/website/source/layouts/azurerm.erb b/website/source/layouts/azurerm.erb
new file mode 100644
index 0000000000..f52a1bce23
--- /dev/null
+++ b/website/source/layouts/azurerm.erb
@@ -0,0 +1,30 @@
+<% wrap_layout :inner do %>
+ <% content_for :sidebar do %>
+
+ <% end %>
+
+ <%= yield %>
+<% end %>
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb
index 3deb5be98f..e61f01b87f 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -130,7 +130,11 @@
>
- Azure
+ Azure (Service Management)
+
+
+ >
+ Azure (Resource Manager)
>
From 3365cd2231df788186ff61d45b3dd27ebb0c8d24 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 15 Dec 2015 18:49:24 -0500
Subject: [PATCH 251/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ade2d61b58..612c88dd8e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,7 @@ FEATURES:
* **New provider: `vcd` - VMware vCloud Director** [GH-3785]
* **New provider: `postgresql` - Create PostgreSQL databases and roles** [GH-3653]
* **New provider: `chef` - Create chef environments, roles, etc** [GH-3084]
+ * **New provider: `azurerm` - Preliminary support for Azure Resource Manager** [GH-4226]
* **New resource: `aws_autoscaling_schedule`** [GH-4256]
* **New resource: `google_pubsub_topic`** [GH-3671]
* **New resource: `google_pubsub_subscription`** [GH-3671]
From f353f5c62d780fd2224f651d691d6e4ab6b6dca1 Mon Sep 17 00:00:00 2001
From: Andrew Hodges
Date: Wed, 16 Dec 2015 09:57:58 -0500
Subject: [PATCH 252/664] Clarify the docs for Packet a bit.
---
.../docs/providers/packet/index.html.markdown | 7 +++----
.../docs/providers/packet/r/device.html.markdown | 16 ++++++++--------
.../providers/packet/r/project.html.markdown | 10 +++++-----
.../providers/packet/r/ssh_key.html.markdown | 4 ++--
4 files changed, 18 insertions(+), 19 deletions(-)
diff --git a/website/source/docs/providers/packet/index.html.markdown b/website/source/docs/providers/packet/index.html.markdown
index bbe9f5d1ea..5898c3c9a8 100644
--- a/website/source/docs/providers/packet/index.html.markdown
+++ b/website/source/docs/providers/packet/index.html.markdown
@@ -22,9 +22,9 @@ provider "packet" {
}
# Create a project
-resource "packet_project" "tf_project_1" {
+resource "packet_project" "cool_project" {
name = "My First Terraform Project"
- payment_method = "PAYMENT_METHOD_ID"
+ payment_method = "PAYMENT_METHOD_ID" # Only required for a non-default payment method
}
# Create a device and add it to tf_project_1
@@ -34,7 +34,7 @@ resource "packet_device" "web1" {
facility = "ewr1"
operating_system = "coreos_stable"
billing_cycle = "hourly"
- project_id = "${packet_project.tf_project_1.id}"
+ project_id = "${packet_project.cool_project.id}"
}
```
@@ -44,4 +44,3 @@ The following arguments are supported:
* `auth_token` - (Required) This is your Packet API Auth token. This can also be specified
with the `PACKET_AUTH_TOKEN` shell environment variable.
-
diff --git a/website/source/docs/providers/packet/r/device.html.markdown b/website/source/docs/providers/packet/r/device.html.markdown
index 6d57dcbb51..75a1f501d9 100644
--- a/website/source/docs/providers/packet/r/device.html.markdown
+++ b/website/source/docs/providers/packet/r/device.html.markdown
@@ -14,14 +14,14 @@ modify, and delete devices.
## Example Usage
```
-# Create a device and add it to tf_project_1
+# Create a device and add it to cool_project
resource "packet_device" "web1" {
hostname = "tf.coreos2"
plan = "baremetal_1"
facility = "ewr1"
operating_system = "coreos_stable"
billing_cycle = "hourly"
- project_id = "${packet_project.tf_project_1.id}"
+ project_id = "${packet_project.cool_project.id}"
}
```
@@ -33,7 +33,7 @@ The following arguments are supported:
* `project_id` - (Required) The id of the project in which to create the device
* `operating_system` - (Required) The operating system slug
* `facility` - (Required) The facility in which to create the device
-* `plan` - (Required) The config type slug
+* `plan` - (Required) The hardware config slug
* `billing_cycle` - (Required) monthly or hourly
* `user_data` (Optional) - A string of the desired User Data for the device.
@@ -43,13 +43,13 @@ The following attributes are exported:
* `id` - The ID of the device
* `hostname`- The hostname of the device
-* `project_id`- The Id of the project the device belonds to
-* `facility` - The facility the device is in
-* `plan` - The config type of the device
+* `project_id`- The ID of the project the device belongs to
+* `facility` - The facility the device is in
+* `plan` - The hardware config of the device
* `network` - The private and public v4 and v6 IPs assigned to the device
-* `locked` - Is the device locked
+* `locked` - Whether the device is locked
* `billing_cycle` - The billing cycle of the device (monthly or hourly)
* `operating_system` - The operating system running on the device
* `status` - The status of the device
* `created` - The timestamp for when the device was created
-* `updated` - The timestamp for the last time the device was udpated
+* `updated` - The timestamp for the last time the device was updated
diff --git a/website/source/docs/providers/packet/r/project.html.markdown b/website/source/docs/providers/packet/r/project.html.markdown
index c34b49c209..9b92e1c89a 100644
--- a/website/source/docs/providers/packet/r/project.html.markdown
+++ b/website/source/docs/providers/packet/r/project.html.markdown
@@ -25,16 +25,16 @@ resource "packet_project" "tf_project_1" {
The following arguments are supported:
-* `name` - (Required) The name of the Project in Packet.net
-* `payment_method` - (Required) The id of the payment method on file to use for services created
-on this project.
+* `name` - (Required) The name of the Project on Packet.net
+* `payment_method` - (Optional) The unique ID of the payment method on file to use for services created
+in this project. If not given, the project will use the default payment method for your user.
## Attributes Reference
The following attributes are exported:
* `id` - The unique ID of the project
-* `payment_method` - The id of the payment method on file to use for services created
-on this project.
+* `payment_method` - The unique ID of the payment method on file to use for services created
+in this project.
* `created` - The timestamp for when the Project was created
* `updated` - The timestamp for the last time the Project was updated
diff --git a/website/source/docs/providers/packet/r/ssh_key.html.markdown b/website/source/docs/providers/packet/r/ssh_key.html.markdown
index cb27aaa774..f3ca1e3c6b 100644
--- a/website/source/docs/providers/packet/r/ssh_key.html.markdown
+++ b/website/source/docs/providers/packet/r/ssh_key.html.markdown
@@ -9,7 +9,7 @@ description: |-
# packet\_ssh_key
Provides a Packet SSH key resource to allow you manage SSH
-keys on your account. All ssh keys on your account are loaded on
+keys on your account. All SSH keys on your account are loaded on
all new devices, they do not have to be explicitly declared on
device creation.
@@ -40,4 +40,4 @@ The following attributes are exported:
* `public_key` - The text of the public key
* `fingerprint` - The fingerprint of the SSH key
* `created` - The timestamp for when the SSH key was created
-* `updated` - The timestamp for the last time the SSH key was udpated
+* `updated` - The timestamp for the last time the SSH key was updated
From e2391fbafc947d7ba92150eae9c811e27651d818 Mon Sep 17 00:00:00 2001
From: Clint
Date: Wed, 16 Dec 2015 10:12:43 -0600
Subject: [PATCH 253/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 612c88dd8e..5fe9a8dbb1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -49,6 +49,7 @@ BUG FIXES:
* provider/aws: Fix issue force destroying a versioned S3 bucket [GH-4168]
* provider/aws: Update DB Replica to honor storage type [GH-4155]
* provider/aws: Fix issue creating AWS RDS replicas across regions [GH-4215]
+ * provider/aws: Refactor AWS Authentication chain to fix issue with authentication and IAM [GH-4254]
* provider/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region [GH-4236]
* provider/aws: Fix missing AMI issue with Launch Configurations [GH-4242]
* provider/aws: Opsworks stack SSH key is write-only [GH-4241]
From 57bcb49eded5b6b73804ce7d75dff97d5080a881 Mon Sep 17 00:00:00 2001
From: stack72
Date: Wed, 16 Dec 2015 16:00:36 +0100
Subject: [PATCH 254/664] Change the DB Subnet Group name to disallow UPPERCASE
 characters and throw a validation error when they are used
Add validation tests for the DBSubnetGroupName ValidateFunc
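A quick standalone sketch of the rule the new ValidateFunc enforces (the regex is copied from the diff below; the example names are made up for illustration):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The core of the new rule: DB Subnet Group names may only contain
	// lowercase alphanumerics, hyphens, underscores, periods, and spaces.
	nameRe := regexp.MustCompile(`^[ .0-9a-z-_]+$`)

	fmt.Println(nameRe.MatchString("my-subnet-group")) // true  -> no validation error
	fmt.Println(nameRe.MatchString("MySubnetGroup"))   // false -> validation error
}
```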
---
.../aws/resource_aws_db_subnet_group.go | 44 ++++++++++---------
.../aws/resource_aws_db_subnet_group_test.go | 34 +++++++++++++-
2 files changed, 56 insertions(+), 22 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_db_subnet_group.go b/builtin/providers/aws/resource_aws_db_subnet_group.go
index 0127cfd48a..483b24e91f 100644
--- a/builtin/providers/aws/resource_aws_db_subnet_group.go
+++ b/builtin/providers/aws/resource_aws_db_subnet_group.go
@@ -29,25 +29,10 @@ func resourceAwsDbSubnetGroup() *schema.Resource {
},
"name": &schema.Schema{
- Type: schema.TypeString,
- ForceNew: true,
- Required: true,
- ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
- value := v.(string)
- if !regexp.MustCompile(`^[ .0-9A-Za-z-_]+$`).MatchString(value) {
- errors = append(errors, fmt.Errorf(
- "only alphanumeric characters, hyphens, underscores, periods, and spaces allowed in %q", k))
- }
- if len(value) > 255 {
- errors = append(errors, fmt.Errorf(
- "%q cannot be longer than 255 characters", k))
- }
- if regexp.MustCompile(`(?i)^default$`).MatchString(value) {
- errors = append(errors, fmt.Errorf(
- "%q is not allowed as %q", "Default", k))
- }
- return
- },
+ Type: schema.TypeString,
+ ForceNew: true,
+ Required: true,
+ ValidateFunc: validateSubnetGroupName,
},
"description": &schema.Schema{
@@ -131,8 +116,8 @@ func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) erro
return fmt.Errorf("Unable to find DB Subnet Group: %#v", describeResp.DBSubnetGroups)
}
- d.Set("name", d.Id())
- d.Set("description", *subnetGroup.DBSubnetGroupDescription)
+ d.Set("name", subnetGroup.DBSubnetGroupName)
+ d.Set("description", subnetGroup.DBSubnetGroupDescription)
subnets := make([]string, 0, len(subnetGroup.Subnets))
for _, s := range subnetGroup.Subnets {
@@ -252,3 +237,20 @@ func buildRDSsubgrpARN(d *schema.ResourceData, meta interface{}) (string, error)
arn := fmt.Sprintf("arn:aws:rds:%s:%s:subgrp:%s", region, accountID, d.Id())
return arn, nil
}
+
+func validateSubnetGroupName(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if !regexp.MustCompile(`^[ .0-9a-z-_]+$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+			"only lowercase alphanumeric characters, hyphens, underscores, periods, and spaces allowed in %q", k))
+ }
+ if len(value) > 255 {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be longer than 255 characters", k))
+ }
+ if regexp.MustCompile(`(?i)^default$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "%q is not allowed as %q", "Default", k))
+ }
+ return
+}
diff --git a/builtin/providers/aws/resource_aws_db_subnet_group_test.go b/builtin/providers/aws/resource_aws_db_subnet_group_test.go
index d943294a97..b3049f035f 100644
--- a/builtin/providers/aws/resource_aws_db_subnet_group_test.go
+++ b/builtin/providers/aws/resource_aws_db_subnet_group_test.go
@@ -66,6 +66,38 @@ func TestAccAWSDBSubnetGroup_withUndocumentedCharacters(t *testing.T) {
})
}
+func TestResourceAWSDBSubnetGroupNameValidation(t *testing.T) {
+ cases := []struct {
+ Value string
+ ErrCount int
+ }{
+ {
+ Value: "tEsting",
+ ErrCount: 1,
+ },
+ {
+ Value: "testing?",
+ ErrCount: 1,
+ },
+ {
+ Value: "default",
+ ErrCount: 1,
+ },
+ {
+ Value: randomString(300),
+ ErrCount: 1,
+ },
+ }
+
+ for _, tc := range cases {
+ _, errors := validateSubnetGroupName(tc.Value, "aws_db_subnet_group")
+
+ if len(errors) != tc.ErrCount {
+ t.Fatalf("Expected the DB Subnet Group name to trigger a validation error")
+ }
+ }
+}
+
func testAccCheckDBSubnetGroupDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).rdsconn
@@ -149,7 +181,7 @@ resource "aws_subnet" "bar" {
}
resource "aws_db_subnet_group" "foo" {
- name = "FOO"
+ name = "foo"
description = "foo description"
subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
tags {
From 37340179e18eb94d550eb95334fbc0b16806c401 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Wed, 16 Dec 2015 11:37:41 -0500
Subject: [PATCH 255/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5fe9a8dbb1..5c8b723792 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ IMPROVEMENTS:
* provider/aws: Adding Tag support for DB Param Groups [GH-4259]
* provider/aws: Validate IOPs for EBS Volumes [GH-4146]
* provider/aws: DB Subnet group arn output [GH-4261]
+ * provider/aws: Validate `name` on `db_subnet_group` against AWS requirements [GH-4340]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting the restart policy on `docker_container` resources [GH-3761]
From b172d94381ac5fd4c3948fc523a343f2a74cd856 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Wed, 16 Dec 2015 11:07:15 -0600
Subject: [PATCH 256/664] provider/aws: Fix check destroy method for s3 tests
---
builtin/providers/aws/resource_aws_s3_bucket_test.go | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_s3_bucket_test.go b/builtin/providers/aws/resource_aws_s3_bucket_test.go
index 0026775c8a..f37ae882ac 100644
--- a/builtin/providers/aws/resource_aws_s3_bucket_test.go
+++ b/builtin/providers/aws/resource_aws_s3_bucket_test.go
@@ -14,6 +14,7 @@ import (
"github.com/hashicorp/terraform/terraform"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3"
)
@@ -265,6 +266,9 @@ func testAccCheckAWSS3BucketDestroy(s *terraform.State) error {
Bucket: aws.String(rs.Primary.ID),
})
if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoSuchBucket" {
+ return nil
+ }
return err
}
}
From 9441738f40fb396ee20ac1a8b4cd2c91d9b07cd9 Mon Sep 17 00:00:00 2001
From: Clint
Date: Wed, 16 Dec 2015 11:22:29 -0600
Subject: [PATCH 257/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5c8b723792..0309c3e60c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ IMPROVEMENTS:
* provider/aws: Adding Tag support for DB Param Groups [GH-4259]
* provider/aws: Validate IOPs for EBS Volumes [GH-4146]
* provider/aws: DB Subnet group arn output [GH-4261]
+ * provider/aws: Retry MalformedPolicy errors due to newly created principals in S3 Buckets [GH-4315]
* provider/aws: Validate `name` on `db_subnet_group` against AWS requirements [GH-4340]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
From 91045b3a51676de1643124bddae64ec405e68fc5 Mon Sep 17 00:00:00 2001
From: Clint
Date: Wed, 16 Dec 2015 11:43:30 -0600
Subject: [PATCH 258/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0309c3e60c..f95c53633e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ IMPROVEMENTS:
* provider/aws: Adding Tag support for DB Param Groups [GH-4259]
* provider/aws: Validate IOPs for EBS Volumes [GH-4146]
* provider/aws: DB Subnet group arn output [GH-4261]
+ * provider/aws: Allow changing private IPs for ENIs [GH-4307]
* provider/aws: Retry MalformedPolicy errors due to newly created principals in S3 Buckets [GH-4315]
* provider/aws: Validate `name` on `db_subnet_group` against AWS requirements [GH-4340]
* provider/cloudstack: performance improvements [GH-4150]
From d4e4fb6ea0a69955142a8d85319e1cf4157a70fa Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Wed, 16 Dec 2015 13:56:23 -0600
Subject: [PATCH 259/664] Reuse structure method for network interface ips
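The removed convertToAwsStringSlice is replaced by the provider's shared expandStringList helper. A minimal standalone sketch of that conversion, assuming the helper's usual shape in the AWS provider (the helper itself is not shown in this diff):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

// expandStringList converts a []interface{} of strings (as produced by a
// schema.Set or schema.List) into the []*string shape the AWS SDK expects.
// This is a sketch of the shared helper, not its actual source.
func expandStringList(configured []interface{}) []*string {
	vs := make([]*string, 0, len(configured))
	for _, v := range configured {
		vs = append(vs, aws.String(v.(string)))
	}
	return vs
}

func main() {
	ips := []interface{}{"10.0.0.10", "10.0.0.11"}
	for _, ip := range expandStringList(ips) {
		fmt.Println(*ip)
	}
}
```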
---
.../providers/aws/resource_aws_network_interface.go | 12 ++----------
1 file changed, 2 insertions(+), 10 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_network_interface.go b/builtin/providers/aws/resource_aws_network_interface.go
index 0b7ac4cfee..d7a3b16a56 100644
--- a/builtin/providers/aws/resource_aws_network_interface.go
+++ b/builtin/providers/aws/resource_aws_network_interface.go
@@ -199,14 +199,6 @@ func resourceAwsNetworkInterfaceDetach(oa *schema.Set, meta interface{}, eniId s
return nil
}
-func convertToAwsStringSlice(s []interface{}) []*string {
- var b []*string
- for _, i := range s {
- b = append(b, aws.String(i.(string)))
- }
- return b
-}
-
func resourceAwsNetworkInterfaceUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
d.Partial(true)
@@ -254,7 +246,7 @@ func resourceAwsNetworkInterfaceUpdate(d *schema.ResourceData, meta interface{})
if unassignIps.Len() != 0 {
input := &ec2.UnassignPrivateIpAddressesInput{
NetworkInterfaceId: aws.String(d.Id()),
- PrivateIpAddresses: convertToAwsStringSlice(unassignIps.List()),
+ PrivateIpAddresses: expandStringList(unassignIps.List()),
}
_, err := conn.UnassignPrivateIpAddresses(input)
if err != nil {
@@ -267,7 +259,7 @@ func resourceAwsNetworkInterfaceUpdate(d *schema.ResourceData, meta interface{})
if assignIps.Len() != 0 {
input := &ec2.AssignPrivateIpAddressesInput{
NetworkInterfaceId: aws.String(d.Id()),
- PrivateIpAddresses: convertToAwsStringSlice(assignIps.List()),
+ PrivateIpAddresses: expandStringList(assignIps.List()),
}
_, err := conn.AssignPrivateIpAddresses(input)
if err != nil {
From 4f5df717b4fcd7b0a1f7b748b2b1571e8e244c2e Mon Sep 17 00:00:00 2001
From: Harry Macey
Date: Wed, 16 Dec 2015 17:29:40 -0500
Subject: [PATCH 260/664] Fixes #4351
Adding empty string defaults for network interface and instance when reading `aws_eip` resource.
---
builtin/providers/aws/resource_aws_eip.go | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_eip.go b/builtin/providers/aws/resource_aws_eip.go
index 4b369ee606..8bc71c9a3d 100644
--- a/builtin/providers/aws/resource_aws_eip.go
+++ b/builtin/providers/aws/resource_aws_eip.go
@@ -146,9 +146,13 @@ func resourceAwsEipRead(d *schema.ResourceData, meta interface{}) error {
d.Set("association_id", address.AssociationId)
if address.InstanceId != nil {
d.Set("instance", address.InstanceId)
+ } else {
+ d.Set("instance", "")
}
if address.NetworkInterfaceId != nil {
d.Set("network_interface", address.NetworkInterfaceId)
+ } else {
+ d.Set("network_interface", "")
}
d.Set("private_ip", address.PrivateIpAddress)
d.Set("public_ip", address.PublicIp)
From 1a9eab2cbe177c7837d80c997bc17ea1d55df22b Mon Sep 17 00:00:00 2001
From: Scott Lowe
Date: Wed, 16 Dec 2015 16:36:00 -0700
Subject: [PATCH 261/664] Add section on referencing security groups by name
Added new section to end of Markdown file for OpenStack security groups,
recommending that security groups are referenced by the name attribute
instead of by the ID attribute.
---
.../openstack/r/compute_secgroup_v2.html.markdown | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown
index 49e1c3ebfb..e7d88ead76 100644
--- a/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown
+++ b/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown
@@ -99,3 +99,17 @@ rule {
```
A list of ICMP types and codes can be found [here](https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages).
+
+### Referencing Security Groups
+
+When referencing a security group in a configuration (for example, a configuration creates a new security group and then needs to apply it to an instance being created in the same configuration), it is currently recommended to reference the security group by name and not by ID, like this:
+
+```
+resource "openstack_compute_instance_v2" "test-server" {
+ name = "tf-test"
+ image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743"
+ flavor_id = "3"
+ key_pair = "my_key_pair_name"
+ security_groups = ["${openstack_compute_secgroup_v2.secgroup_1.name}"]
+}
+```
From 14b7559cd1a5cae107631d8b3adae45c4bfa41f7 Mon Sep 17 00:00:00 2001
From: stack72
Date: Thu, 17 Dec 2015 00:33:00 +0100
Subject: [PATCH 262/664] Fixing Gofmt errors
---
builtin/providers/chef/resource_data_bag_test.go | 2 +-
builtin/providers/google/resource_compute_instance.go | 2 +-
.../providers/google/resource_compute_instance_group_manager.go | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/builtin/providers/chef/resource_data_bag_test.go b/builtin/providers/chef/resource_data_bag_test.go
index 92b74e5df6..fd63451898 100644
--- a/builtin/providers/chef/resource_data_bag_test.go
+++ b/builtin/providers/chef/resource_data_bag_test.go
@@ -19,7 +19,7 @@ func TestAccDataBag_basic(t *testing.T) {
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDataBagConfig_basic,
- Check: testAccDataBagCheckExists("chef_data_bag.test", &dataBagName),
+ Check: testAccDataBagCheckExists("chef_data_bag.test", &dataBagName),
},
},
})
diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go
index 3359c4d649..8ca7664853 100644
--- a/builtin/providers/google/resource_compute_instance.go
+++ b/builtin/providers/google/resource_compute_instance.go
@@ -3,12 +3,12 @@ package google
import (
"fmt"
"log"
+ "strings"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
- "strings"
)
func stringHashcode(v interface{}) int {
diff --git a/builtin/providers/google/resource_compute_instance_group_manager.go b/builtin/providers/google/resource_compute_instance_group_manager.go
index 77b7143126..e8e6b33a54 100644
--- a/builtin/providers/google/resource_compute_instance_group_manager.go
+++ b/builtin/providers/google/resource_compute_instance_group_manager.go
@@ -243,7 +243,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte
// Wait for the operation to complete
err = computeOperationWaitZoneTime(config, op, d.Get("zone").(string),
- managedInstanceCount * 4, "Restarting InstanceGroupManagers instances")
+ managedInstanceCount*4, "Restarting InstanceGroupManagers instances")
if err != nil {
return err
}
From e8eb2e3573b0bbe3aa0ea8a32420b4edb9ac0cef Mon Sep 17 00:00:00 2001
From: stack72
Date: Thu, 17 Dec 2015 01:18:53 +0100
Subject: [PATCH 263/664] Changing the ingress structure to be required in DB
Security Group - this was marked as optional in the docs whereas the schema
has it as required
---
.../source/docs/providers/aws/r/db_security_group.html.markdown | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/docs/providers/aws/r/db_security_group.html.markdown b/website/source/docs/providers/aws/r/db_security_group.html.markdown
index 1c7f8183e8..72b969bee7 100644
--- a/website/source/docs/providers/aws/r/db_security_group.html.markdown
+++ b/website/source/docs/providers/aws/r/db_security_group.html.markdown
@@ -32,7 +32,7 @@ The following arguments are supported:
* `name` - (Required) The name of the DB security group.
* `description` - (Required) The description of the DB security group.
-* `ingress` - (Optional) A list of ingress rules.
+* `ingress` - (Required) A list of ingress rules.
* `tags` - (Optional) A mapping of tags to assign to the resource.
Ingress blocks support the following:
From a9d97708ee21d59701a3d5390496930f940bd4bd Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Wed, 16 Dec 2015 17:59:35 -0800
Subject: [PATCH 264/664] mysql provider and mysql_database resource.
Allows databases on pre-existing MySQL servers to be created and managed
by Terraform.
---
builtin/bins/provider-mysql/main.go | 12 ++
builtin/bins/provider-mysql/main_test.go | 1 +
builtin/providers/mysql/provider.go | 71 +++++++
builtin/providers/mysql/provider_test.go | 55 ++++++
builtin/providers/mysql/resource_database.go | 174 ++++++++++++++++++
.../providers/mysql/resource_database_test.go | 91 +++++++++
website/source/assets/stylesheets/_docs.scss | 1 +
.../docs/providers/mysql/index.html.markdown | 72 ++++++++
.../providers/mysql/r/database.html.markdown | 54 ++++++
website/source/layouts/docs.erb | 4 +
website/source/layouts/mysql.erb | 26 +++
11 files changed, 561 insertions(+)
create mode 100644 builtin/bins/provider-mysql/main.go
create mode 100644 builtin/bins/provider-mysql/main_test.go
create mode 100644 builtin/providers/mysql/provider.go
create mode 100644 builtin/providers/mysql/provider_test.go
create mode 100644 builtin/providers/mysql/resource_database.go
create mode 100644 builtin/providers/mysql/resource_database_test.go
create mode 100644 website/source/docs/providers/mysql/index.html.markdown
create mode 100644 website/source/docs/providers/mysql/r/database.html.markdown
create mode 100644 website/source/layouts/mysql.erb
diff --git a/builtin/bins/provider-mysql/main.go b/builtin/bins/provider-mysql/main.go
new file mode 100644
index 0000000000..0c21be953d
--- /dev/null
+++ b/builtin/bins/provider-mysql/main.go
@@ -0,0 +1,12 @@
+package main
+
+import (
+ "github.com/hashicorp/terraform/builtin/providers/mysql"
+ "github.com/hashicorp/terraform/plugin"
+)
+
+func main() {
+ plugin.Serve(&plugin.ServeOpts{
+ ProviderFunc: mysql.Provider,
+ })
+}
diff --git a/builtin/bins/provider-mysql/main_test.go b/builtin/bins/provider-mysql/main_test.go
new file mode 100644
index 0000000000..06ab7d0f9a
--- /dev/null
+++ b/builtin/bins/provider-mysql/main_test.go
@@ -0,0 +1 @@
+package main
diff --git a/builtin/providers/mysql/provider.go b/builtin/providers/mysql/provider.go
new file mode 100644
index 0000000000..3afd7db4cb
--- /dev/null
+++ b/builtin/providers/mysql/provider.go
@@ -0,0 +1,71 @@
+package mysql
+
+import (
+ "fmt"
+ "strings"
+
+ mysqlc "github.com/ziutek/mymysql/thrsafe"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func Provider() terraform.ResourceProvider {
+ return &schema.Provider{
+ Schema: map[string]*schema.Schema{
+ "endpoint": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: schema.EnvDefaultFunc("MYSQL_ENDPOINT", nil),
+ },
+
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: schema.EnvDefaultFunc("MYSQL_USERNAME", nil),
+ },
+
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ DefaultFunc: schema.EnvDefaultFunc("MYSQL_PASSWORD", nil),
+ },
+ },
+
+ ResourcesMap: map[string]*schema.Resource{
+ "mysql_database": resourceDatabase(),
+ },
+
+ ConfigureFunc: providerConfigure,
+ }
+}
+
+func providerConfigure(d *schema.ResourceData) (interface{}, error) {
+
+ var username = d.Get("username").(string)
+ var password = d.Get("password").(string)
+ var endpoint = d.Get("endpoint").(string)
+
+ proto := "tcp"
+ if endpoint[0] == '/' {
+ proto = "unix"
+ }
+
+ // mysqlc is the thread-safe implementation of mymysql, so we can
+ // safely re-use the same connection between multiple parallel
+ // operations.
+ conn := mysqlc.New(proto, "", endpoint, username, password)
+
+ err := conn.Connect()
+ if err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+var identQuoteReplacer = strings.NewReplacer("`", "``")
+
+func quoteIdentifier(in string) string {
+ return fmt.Sprintf("`%s`", identQuoteReplacer.Replace(in))
+}
diff --git a/builtin/providers/mysql/provider_test.go b/builtin/providers/mysql/provider_test.go
new file mode 100644
index 0000000000..824e2b2be2
--- /dev/null
+++ b/builtin/providers/mysql/provider_test.go
@@ -0,0 +1,55 @@
+package mysql
+
+import (
+ "os"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// To run these acceptance tests, you will need access to a MySQL server.
+// Amazon RDS is one way to get a MySQL server. If you use RDS, you can
+// use the root account credentials you specified when creating an RDS
+// instance to get the access necessary to run these tests. (the tests
+// assume full access to the server.)
+//
+// Set the MYSQL_ENDPOINT and MYSQL_USERNAME environment variables before
+// running the tests. If the given user has a password then you will also need
+// to set MYSQL_PASSWORD.
+//
+// The tests assume a reasonably-vanilla MySQL configuration. In particular,
+// they assume that the "utf8" character set is available and that
+// "utf8_bin" is a valid collation that isn't the default for that character
+// set.
+//
+// You can run the tests like this:
+// make testacc TEST=./builtin/providers/mysql
+
+var testAccProviders map[string]terraform.ResourceProvider
+var testAccProvider *schema.Provider
+
+func init() {
+ testAccProvider = Provider().(*schema.Provider)
+ testAccProviders = map[string]terraform.ResourceProvider{
+ "mysql": testAccProvider,
+ }
+}
+
+func TestProvider(t *testing.T) {
+ if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestProvider_impl(t *testing.T) {
+ var _ terraform.ResourceProvider = Provider()
+}
+
+func testAccPreCheck(t *testing.T) {
+ for _, name := range []string{"MYSQL_ENDPOINT", "MYSQL_USERNAME"} {
+ if v := os.Getenv(name); v == "" {
+ t.Fatal("MYSQL_ENDPOINT, MYSQL_USERNAME and optionally MYSQL_PASSWORD must be set for acceptance tests")
+ }
+ }
+}
diff --git a/builtin/providers/mysql/resource_database.go b/builtin/providers/mysql/resource_database.go
new file mode 100644
index 0000000000..4aa56e8104
--- /dev/null
+++ b/builtin/providers/mysql/resource_database.go
@@ -0,0 +1,174 @@
+package mysql
+
+import (
+ "fmt"
+ "log"
+ "strings"
+
+ mysqlc "github.com/ziutek/mymysql/mysql"
+
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+const defaultCharacterSetKeyword = "CHARACTER SET "
+const defaultCollateKeyword = "COLLATE "
+
+func resourceDatabase() *schema.Resource {
+ return &schema.Resource{
+ Create: CreateDatabase,
+ Update: UpdateDatabase,
+ Read: ReadDatabase,
+ Delete: DeleteDatabase,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "default_character_set": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "utf8",
+ },
+
+ "default_collation": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "utf8_general_ci",
+ },
+ },
+ }
+}
+
+func CreateDatabase(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(mysqlc.Conn)
+
+ stmtSQL := databaseConfigSQL("CREATE", d)
+ log.Println("Executing statement:", stmtSQL)
+
+ _, _, err := conn.Query(stmtSQL)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(d.Get("name").(string))
+
+ return nil
+}
+
+func UpdateDatabase(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(mysqlc.Conn)
+
+ stmtSQL := databaseConfigSQL("ALTER", d)
+ log.Println("Executing statement:", stmtSQL)
+
+ _, _, err := conn.Query(stmtSQL)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func ReadDatabase(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(mysqlc.Conn)
+
+ // This is kinda flimsy-feeling, since it depends on the formatting
+ // of the SHOW CREATE DATABASE output... but this data doesn't seem
+ // to be available any other way, so hopefully MySQL keeps this
+ // compatible in future releases.
+
+ name := d.Id()
+ stmtSQL := "SHOW CREATE DATABASE " + quoteIdentifier(name)
+
+ log.Println("Executing query:", stmtSQL)
+ rows, _, err := conn.Query(stmtSQL)
+ if err != nil {
+ if mysqlErr, ok := err.(*mysqlc.Error); ok {
+ if mysqlErr.Code == mysqlc.ER_BAD_DB_ERROR {
+ d.SetId("")
+ return nil
+ }
+ }
+ return err
+ }
+
+ row := rows[0]
+ createSQL := string(row[1].([]byte))
+
+ defaultCharset := extractIdentAfter(createSQL, defaultCharacterSetKeyword)
+ defaultCollation := extractIdentAfter(createSQL, defaultCollateKeyword)
+
+ if defaultCollation == "" && defaultCharset != "" {
+ // MySQL doesn't return the collation if it's the default one for
+ // the charset, so if we don't have a collation we need to go
+ // hunt for the default.
+ stmtSQL := "SHOW COLLATION WHERE `Charset` = '%s' AND `Default` = 'Yes'"
+ rows, _, err := conn.Query(stmtSQL, defaultCharset)
+ if err != nil {
+ return fmt.Errorf("Error getting default charset: %s", err)
+ }
+ if len(rows) == 0 {
+ return fmt.Errorf("Charset %s has no default collation", defaultCharset)
+ }
+ row := rows[0]
+ defaultCollation = string(row[0].([]byte))
+ }
+
+ d.Set("default_character_set", defaultCharset)
+ d.Set("default_collation", defaultCollation)
+
+ return nil
+}
+
+func DeleteDatabase(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(mysqlc.Conn)
+
+ name := d.Id()
+ stmtSQL := "DROP DATABASE " + quoteIdentifier(name)
+ log.Println("Executing statement:", stmtSQL)
+
+ _, _, err := conn.Query(stmtSQL)
+ if err == nil {
+ d.SetId("")
+ }
+ return err
+}
+
+func databaseConfigSQL(verb string, d *schema.ResourceData) string {
+ name := d.Get("name").(string)
+ defaultCharset := d.Get("default_character_set").(string)
+ defaultCollation := d.Get("default_collation").(string)
+
+ var defaultCharsetClause string
+ var defaultCollationClause string
+
+ if defaultCharset != "" {
+ defaultCharsetClause = defaultCharacterSetKeyword + quoteIdentifier(defaultCharset)
+ }
+ if defaultCollation != "" {
+ defaultCollationClause = defaultCollateKeyword + quoteIdentifier(defaultCollation)
+ }
+
+ return fmt.Sprintf(
+ "%s DATABASE %s %s %s",
+ verb,
+ quoteIdentifier(name),
+ defaultCharsetClause,
+ defaultCollationClause,
+ )
+}
+
+func extractIdentAfter(sql string, keyword string) string {
+ charsetIndex := strings.Index(sql, keyword)
+ if charsetIndex != -1 {
+ charsetIndex += len(keyword)
+ remain := sql[charsetIndex:]
+ spaceIndex := strings.IndexRune(remain, ' ')
+ return remain[:spaceIndex]
+ }
+
+ return ""
+}
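
As the comment in `ReadDatabase` acknowledges, the character set and collation are recovered by scanning the `SHOW CREATE DATABASE` output for the `CHARACTER SET ` and `COLLATE ` keywords. Below is a minimal standalone sketch of how `extractIdentAfter` behaves on a representative output string; the sample text and the `main` wrapper are illustrative and not part of the patch.

```
package main

import (
	"fmt"
	"strings"
)

// Copy of the helper from the patch above, reproduced so the sketch runs standalone.
func extractIdentAfter(sql string, keyword string) string {
	charsetIndex := strings.Index(sql, keyword)
	if charsetIndex != -1 {
		charsetIndex += len(keyword)
		remain := sql[charsetIndex:]
		spaceIndex := strings.IndexRune(remain, ' ')
		return remain[:spaceIndex]
	}

	return ""
}

func main() {
	// Representative SHOW CREATE DATABASE output; exact formatting varies by MySQL version.
	createSQL := "CREATE DATABASE `app` /*!40100 DEFAULT CHARACTER SET utf8 COLLATE utf8_bin */"

	fmt.Println(extractIdentAfter(createSQL, "CHARACTER SET ")) // utf8
	fmt.Println(extractIdentAfter(createSQL, "COLLATE "))       // utf8_bin
	fmt.Println(extractIdentAfter(createSQL, "ENCRYPTION "))    // "" (keyword absent)
}
```

Note that the helper assumes a space follows the extracted identifier; the `/*!40100 ... */` form shown here always provides one before the closing comment marker, but an identifier at the very end of the string would leave `spaceIndex` at -1 and make `remain[:spaceIndex]` panic.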
diff --git a/builtin/providers/mysql/resource_database_test.go b/builtin/providers/mysql/resource_database_test.go
new file mode 100644
index 0000000000..49c44256f9
--- /dev/null
+++ b/builtin/providers/mysql/resource_database_test.go
@@ -0,0 +1,91 @@
+package mysql
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ mysqlc "github.com/ziutek/mymysql/mysql"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccDatabase(t *testing.T) {
+ var dbName string
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccDatabaseCheckDestroy(dbName),
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccDatabaseConfig_basic,
+ Check: testAccDatabaseCheck(
+ "mysql_database.test", &dbName,
+ ),
+ },
+ },
+ })
+}
+
+func testAccDatabaseCheck(rn string, name *string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[rn]
+ if !ok {
+ return fmt.Errorf("resource not found: %s", rn)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("database id not set")
+ }
+
+ conn := testAccProvider.Meta().(mysqlc.Conn)
+ rows, _, err := conn.Query("SHOW CREATE DATABASE terraform_acceptance_test")
+ if err != nil {
+ return fmt.Errorf("error reading database: %s", err)
+ }
+ if len(rows) != 1 {
+ return fmt.Errorf("expected 1 row reading database but got %d", len(rows))
+ }
+
+ row := rows[0]
+ createSQL := string(row[1].([]byte))
+
+ if strings.Index(createSQL, "CHARACTER SET utf8") == -1 {
+ return fmt.Errorf("database default charset isn't utf8")
+ }
+ if strings.Index(createSQL, "COLLATE utf8_bin") == -1 {
+ return fmt.Errorf("database default collation isn't utf8_bin")
+ }
+
+ *name = rs.Primary.ID
+
+ return nil
+ }
+}
+
+func testAccDatabaseCheckDestroy(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ conn := testAccProvider.Meta().(mysqlc.Conn)
+
+ _, _, err := conn.Query("SHOW CREATE DATABASE terraform_acceptance_test")
+ if err == nil {
+ return fmt.Errorf("database still exists after destroy")
+ }
+ if mysqlErr, ok := err.(*mysqlc.Error); ok {
+ if mysqlErr.Code == mysqlc.ER_BAD_DB_ERROR {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("got unexpected error: %s", err)
+ }
+}
+
+const testAccDatabaseConfig_basic = `
+resource "mysql_database" "test" {
+ name = "terraform_acceptance_test"
+ default_character_set = "utf8"
+ default_collation = "utf8_bin"
+}
+`
diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss
index 49e9e164f8..0143966b4f 100755
--- a/website/source/assets/stylesheets/_docs.scss
+++ b/website/source/assets/stylesheets/_docs.scss
@@ -22,6 +22,7 @@ body.layout-dyn,
body.layout-google,
body.layout-heroku,
body.layout-mailgun,
+body.layout-mysql,
body.layout-openstack,
body.layout-packet,
body.layout-postgresql,
diff --git a/website/source/docs/providers/mysql/index.html.markdown b/website/source/docs/providers/mysql/index.html.markdown
new file mode 100644
index 0000000000..555c589e20
--- /dev/null
+++ b/website/source/docs/providers/mysql/index.html.markdown
@@ -0,0 +1,72 @@
+---
+layout: "mysql"
+page_title: "Provider: MySQL"
+sidebar_current: "docs-mysql-index"
+description: |-
+ A provider for MySQL Server.
+---
+
+# MySQL Provider
+
+[MySQL](http://www.mysql.com) is a relational database server. The MySQL
+provider exposes resources used to manage the configuration of resources
+in a MySQL server.
+
+Use the navigation to the left to read about the available resources.
+
+## Example Usage
+
+The following is a minimal example:
+
+```
+# Configure the MySQL provider
+provider "mysql" {
+ endpoint = "my-database.example.com:3306"
+ username = "app-user"
+ password = "app-password"
+}
+
+# Create a Database
+resource "mysql_database" "app" {
+ name = "my_awesome_app"
+}
+```
+
+This provider can be used in conjunction with other resources that create
+MySQL servers. For example, ``aws_db_instance`` is able to create MySQL
+servers in Amazon's RDS service.
+
+```
+# Create a database server
+resource "aws_db_instance" "default" {
+ engine = "mysql"
+ engine_version = "5.6.17"
+ instance_class = "db.t1.micro"
+ name = "initial_db"
+ username = "rootuser"
+ password = "rootpasswd"
+ # etc, etc; see aws_db_instance docs for more
+}
+
+# Configure the MySQL provider based on the outcome of
+# creating the aws_db_instance.
+provider "mysql" {
+ endpoint = "${aws_db_instance.default.endpoint}"
+ username = "${aws_db_instance.default.username}"
+ password = "${aws_db_instance.default.password}"
+}
+
+# Create a second database, in addition to the "initial_db" created
+# by the aws_db_instance resource above.
+resource "mysql_database" "app" {
+ name = "another_db"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `endpoint` - (Required) The address of the MySQL server to use. Most often a "hostname:port" pair, but may also be an absolute path to a Unix socket when the host OS is Unix-compatible.
+* `username` - (Required) Username to use to authenticate with the server.
+* `password` - (Optional) Password for the given user, if that user has a password.
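
The provider's `Configure` implementation is not part of the hunks shown here, so the following is only a sketch of how these three settings plausibly map onto the `mymysql` client that the resources consume via `meta.(mysqlc.Conn)`; the endpoint, credentials, and protocol handling are assumptions for illustration.

```
package main

import (
	"log"

	mysqlc "github.com/ziutek/mymysql/mysql"
	_ "github.com/ziutek/mymysql/native" // registers the wire-protocol implementation used by mysqlc.New
)

func main() {
	// endpoint, username and password as they would come from the provider block.
	endpoint := "my-database.example.com:3306"
	username := "app-user"
	password := "app-password"

	// mymysql takes the protocol, a local bind address (unused here), the
	// server address, and the credentials; no default database is selected.
	conn := mysqlc.New("tcp", "", endpoint, username, password)
	if err := conn.Connect(); err != nil {
		log.Fatalf("failed to connect to %s: %s", endpoint, err)
	}
	defer conn.Close()

	log.Printf("connected to %s as %s", endpoint, username)
}
```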
diff --git a/website/source/docs/providers/mysql/r/database.html.markdown b/website/source/docs/providers/mysql/r/database.html.markdown
new file mode 100644
index 0000000000..36459ab9e0
--- /dev/null
+++ b/website/source/docs/providers/mysql/r/database.html.markdown
@@ -0,0 +1,54 @@
+---
+layout: "mysql"
+page_title: "MySQL: mysql_database"
+sidebar_current: "docs-mysql-resource-database"
+description: |-
+ Creates and manages a database on a MySQL server.
+---
+
+# mysql\_database
+
+The ``mysql_database`` resource creates and manages a database on a MySQL
+server.
+
+~> **Caution:** The ``mysql_database`` resource can completely delete your
+database just as easily as it can create it. To avoid costly accidents,
+consider setting
+[``prevent_destroy``](/docs/configuration/resources.html#prevent_destroy)
+on your database resources as an extra safety measure.
+
+## Example Usage
+
+```
+resource "mysql_database" "app" {
+ name = "my_awesome_app"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the database. This must be unique within
+ a given MySQL server and may or may not be case-sensitive depending on
+ the operating system on which the MySQL server is running.
+
+* `default_character_set` - (Optional) The default character set to use when
+ a table is created without specifying an explicit character set. Defaults
+ to "utf8".
+
+* `default_collation` - (Optional) The default collation to use when a table
+ is created without specifying an explicit collation. Defaults to
+ ``utf8_general_ci``. Each character set has its own set of collations, so
+ changing the character set requires also changing the collation.
+
+Note that the defaults for character set and collation above do not respect
+any defaults set on the MySQL server, so that the configuration can be set
+appropriately even though Terraform cannot see the server-level defaults. If
+you wish to use the server's defaults you must consult the server's
+configuration and then set the ``default_character_set`` and
+``default_collation`` to match.
+
+## Attributes Reference
+
+No further attributes are exported.
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb
index e61f01b87f..73c8aad088 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -185,6 +185,10 @@
Mailgun
+ >
+ MySQL
+
+
>
OpenStack
diff --git a/website/source/layouts/mysql.erb b/website/source/layouts/mysql.erb
new file mode 100644
index 0000000000..ac21071971
--- /dev/null
+++ b/website/source/layouts/mysql.erb
@@ -0,0 +1,26 @@
+<% wrap_layout :inner do %>
+ <% content_for :sidebar do %>
+
+ <% end %>
+
+ <%= yield %>
+ <% end %>
From 99b3bcd28082e8ab1998844c8a231f750034ef24 Mon Sep 17 00:00:00 2001
From: Joe Topjian
Date: Wed, 16 Dec 2015 23:06:17 -0700
Subject: [PATCH 265/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8d2202e39c..13851f0c62 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -63,6 +63,7 @@ BUG FIXES:
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
* provider/vsphere: Create and attach additional disks before bootup [GH-4196]
+ * provider/openstack: Convert block_device from a Set to a List [GH-4288]
## 0.6.8 (December 2, 2015)
From 7f5e2b66ff5c5789cdcd06105a446d2d5c8ca85c Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Thu, 17 Dec 2015 15:20:36 +0100
Subject: [PATCH 266/664] aws: Treat INACTIVE ECS cluster as deleted
---
.../providers/aws/resource_aws_ecs_cluster.go | 43 ++++++++++++++++++-
.../aws/resource_aws_ecs_cluster_test.go | 12 +++---
2 files changed, 49 insertions(+), 6 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_ecs_cluster.go b/builtin/providers/aws/resource_aws_ecs_cluster.go
index f9e3a4abb9..df17ee58ce 100644
--- a/builtin/providers/aws/resource_aws_ecs_cluster.go
+++ b/builtin/providers/aws/resource_aws_ecs_cluster.go
@@ -1,6 +1,7 @@
package aws
import (
+ "fmt"
"log"
"time"
@@ -61,6 +62,13 @@ func resourceAwsEcsClusterRead(d *schema.ResourceData, meta interface{}) error {
for _, c := range out.Clusters {
if *c.ClusterName == clusterName {
+ // Status==INACTIVE means deleted cluster
+ if *c.Status == "INACTIVE" {
+ log.Printf("[DEBUG] Removing ECS cluster %q because it's INACTIVE", *c.ClusterArn)
+ d.SetId("")
+ return nil
+ }
+
d.SetId(*c.ClusterArn)
d.Set("name", c.ClusterName)
return nil
@@ -77,7 +85,7 @@ func resourceAwsEcsClusterDelete(d *schema.ResourceData, meta interface{}) error
log.Printf("[DEBUG] Deleting ECS cluster %s", d.Id())
- return resource.Retry(10*time.Minute, func() error {
+ err := resource.Retry(10*time.Minute, func() error {
out, err := conn.DeleteCluster(&ecs.DeleteClusterInput{
Cluster: aws.String(d.Id()),
})
@@ -104,4 +112,37 @@ func resourceAwsEcsClusterDelete(d *schema.ResourceData, meta interface{}) error
return resource.RetryError{Err: err}
})
+ if err != nil {
+ return err
+ }
+
+ clusterName := d.Get("name").(string)
+ err = resource.Retry(5*time.Minute, func() error {
+ log.Printf("[DEBUG] Checking if ECS Cluster %q is INACTIVE", d.Id())
+ out, err := conn.DescribeClusters(&ecs.DescribeClustersInput{
+ Clusters: []*string{aws.String(clusterName)},
+ })
+
+ for _, c := range out.Clusters {
+ if *c.ClusterName == clusterName {
+ if *c.Status == "INACTIVE" {
+ return nil
+ }
+
+ return fmt.Errorf("ECS Cluster %q is still %q", clusterName, *c.Status)
+ }
+ }
+
+ if err != nil {
+ return resource.RetryError{Err: err}
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ log.Printf("[DEBUG] ECS cluster %q deleted", d.Id())
+ return nil
}
diff --git a/builtin/providers/aws/resource_aws_ecs_cluster_test.go b/builtin/providers/aws/resource_aws_ecs_cluster_test.go
index 308085d1d1..42d84a2ce2 100644
--- a/builtin/providers/aws/resource_aws_ecs_cluster_test.go
+++ b/builtin/providers/aws/resource_aws_ecs_cluster_test.go
@@ -38,13 +38,15 @@ func testAccCheckAWSEcsClusterDestroy(s *terraform.State) error {
Clusters: []*string{aws.String(rs.Primary.ID)},
})
- if err == nil {
- if len(out.Clusters) != 0 {
- return fmt.Errorf("ECS cluster still exists:\n%#v", out.Clusters)
- }
+ if err != nil {
+ return err
}
- return err
+ for _, c := range out.Clusters {
+ if *c.ClusterArn == rs.Primary.ID && *c.Status != "INACTIVE" {
+ return fmt.Errorf("ECS cluster still exists:\n%s", c)
+ }
+ }
}
return nil
From a080447471afa28986ef7ed8df19c2a89e8e9710 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Thu, 17 Dec 2015 15:14:17 +0100
Subject: [PATCH 267/664] aws: Treat INACTIVE ECS TDs as deleted in acc tests
- related to https://github.com/hashicorp/terraform/pull/3924
---
.../resource_aws_ecs_task_definition_test.go | 20 ++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_ecs_task_definition_test.go b/builtin/providers/aws/resource_aws_ecs_task_definition_test.go
index dcc4ef8b31..9972bba7b9 100644
--- a/builtin/providers/aws/resource_aws_ecs_task_definition_test.go
+++ b/builtin/providers/aws/resource_aws_ecs_task_definition_test.go
@@ -82,17 +82,19 @@ func testAccCheckAWSEcsTaskDefinitionDestroy(s *terraform.State) error {
continue
}
- out, err := conn.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{
- TaskDefinition: aws.String(rs.Primary.ID),
- })
-
- if err == nil {
- if out.TaskDefinition != nil {
- return fmt.Errorf("ECS task definition still exists:\n%#v", *out.TaskDefinition)
- }
+ input := ecs.DescribeTaskDefinitionInput{
+ TaskDefinition: aws.String(rs.Primary.Attributes["arn"]),
}
- return err
+ out, err := conn.DescribeTaskDefinition(&input)
+
+ if err != nil {
+ return err
+ }
+
+ if out.TaskDefinition != nil && *out.TaskDefinition.Status != "INACTIVE" {
+ return fmt.Errorf("ECS task definition still exists:\n%#v", *out.TaskDefinition)
+ }
}
return nil
From 202b0aef1ba02f0beae945acc802adcd10073197 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Thu, 17 Dec 2015 17:01:31 +0100
Subject: [PATCH 268/664] provider/aws: Always use either body or URL for all
updates
- fixes #4332
---
.../providers/aws/resource_aws_cloudformation_stack.go | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_cloudformation_stack.go b/builtin/providers/aws/resource_aws_cloudformation_stack.go
index 1846a31054..86dd8bdb79 100644
--- a/builtin/providers/aws/resource_aws_cloudformation_stack.go
+++ b/builtin/providers/aws/resource_aws_cloudformation_stack.go
@@ -258,12 +258,14 @@ func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface
StackName: aws.String(d.Get("name").(string)),
}
- if d.HasChange("template_body") {
- input.TemplateBody = aws.String(normalizeJson(d.Get("template_body").(string)))
+ // Either TemplateBody or TemplateURL are required for each change
+ if v, ok := d.GetOk("template_body"); ok {
+ input.TemplateBody = aws.String(normalizeJson(v.(string)))
}
- if d.HasChange("template_url") {
- input.TemplateURL = aws.String(d.Get("template_url").(string))
+ if v, ok := d.GetOk("template_url"); ok {
+ input.TemplateURL = aws.String(v.(string))
}
+
if d.HasChange("capabilities") {
input.Capabilities = expandStringList(d.Get("capabilities").(*schema.Set).List())
}
From 11a6efa35867c00a658c28ad22b152b2f5f5df0f Mon Sep 17 00:00:00 2001
From: Rolo
Date: Thu, 17 Dec 2015 16:35:44 +0000
Subject: [PATCH 269/664] Use [] around depends_on value.
---
examples/aws-rds/main.tf | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/examples/aws-rds/main.tf b/examples/aws-rds/main.tf
index cb329ac18a..7577ede69b 100644
--- a/examples/aws-rds/main.tf
+++ b/examples/aws-rds/main.tf
@@ -1,5 +1,5 @@
resource "aws_db_instance" "default" {
- depends_on = "aws_security_group.default"
+ depends_on = ["aws_security_group.default"]
identifier = "${var.identifier}"
allocated_storage = "${var.storage}"
engine = "${var.engine}"
From f14cffedcf7049a99e5c1bd1b2809dc5caa94295 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Thu, 17 Dec 2015 17:40:07 +0100
Subject: [PATCH 270/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 13851f0c62..e1342e3a3a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -48,6 +48,7 @@ BUG FIXES:
* core: Fix issue which could cause fields that become empty to retain old values in the state [GH-3257]
* provider/docker: Fix an issue running with Docker Swarm by looking up containers by ID instead of name [GH-4148]
* provider/openstack: Better handling of load balancing resource state changes [GH-3926]
+ * provider/aws: Treat `INACTIVE` ECS cluster as deleted [GH-4364]
* provider/aws: Skip `source_security_group_id` determination logic for Classic ELBs [GH-4075]
* provider/aws: Fix issue destroy Route 53 zone/record if it no longer exists [GH-4198]
* provider/aws: Fix issue force destroying a versioned S3 bucket [GH-4168]
From f017d2d2d6823afdc8e3ba95a2fa88b57c292f1c Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Thu, 17 Dec 2015 17:58:11 +0100
Subject: [PATCH 271/664] aws: Treat CF stacks in DELETE_COMPLETE state as
deleted
---
.../providers/aws/resource_aws_cloudformation_stack.go | 10 ++++++++++
.../aws/resource_aws_cloudformation_stack_test.go | 9 ++++++---
2 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_cloudformation_stack.go b/builtin/providers/aws/resource_aws_cloudformation_stack.go
index 1846a31054..3486c6a5b6 100644
--- a/builtin/providers/aws/resource_aws_cloudformation_stack.go
+++ b/builtin/providers/aws/resource_aws_cloudformation_stack.go
@@ -190,8 +190,18 @@ func resourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{}
stacks := resp.Stacks
if len(stacks) < 1 {
+ log.Printf("[DEBUG] Removing CloudFormation stack %s as it's already gone", d.Id())
+ d.SetId("")
return nil
}
+ for _, s := range stacks {
+ if *s.StackId == d.Id() && *s.StackStatus == "DELETE_COMPLETE" {
+ log.Printf("[DEBUG] Removing CloudFormation stack %s"+
+ " as it has been already deleted", d.Id())
+ d.SetId("")
+ return nil
+ }
+ }
tInput := cloudformation.GetTemplateInput{
StackName: aws.String(stackName),
diff --git a/builtin/providers/aws/resource_aws_cloudformation_stack_test.go b/builtin/providers/aws/resource_aws_cloudformation_stack_test.go
index 0c99f8d54d..31b816d1ae 100644
--- a/builtin/providers/aws/resource_aws_cloudformation_stack_test.go
+++ b/builtin/providers/aws/resource_aws_cloudformation_stack_test.go
@@ -101,9 +101,12 @@ func testAccCheckAWSCloudFormationDestroy(s *terraform.State) error {
resp, err := conn.DescribeStacks(¶ms)
- if err == nil {
- if len(resp.Stacks) != 0 &&
- *resp.Stacks[0].StackId == rs.Primary.ID {
+ if err != nil {
+ return err
+ }
+
+ for _, s := range resp.Stacks {
+ if *s.StackId == rs.Primary.ID && *s.StackStatus != "DELETE_COMPLETE" {
return fmt.Errorf("CloudFormation stack still exists: %q", rs.Primary.ID)
}
}
From 4e408d1593903e94a1fd2d2718909f6aed752890 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Thu, 17 Dec 2015 17:58:43 +0100
Subject: [PATCH 272/664] provider/aws: CloudFormation - Add regression test
for #4332
---
.../resource_aws_cloudformation_stack_test.go | 65 +++++++++++++++++++
1 file changed, 65 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_cloudformation_stack_test.go b/builtin/providers/aws/resource_aws_cloudformation_stack_test.go
index 0c99f8d54d..19b6f6c52d 100644
--- a/builtin/providers/aws/resource_aws_cloudformation_stack_test.go
+++ b/builtin/providers/aws/resource_aws_cloudformation_stack_test.go
@@ -64,6 +64,31 @@ func TestAccAWSCloudFormation_allAttributes(t *testing.T) {
})
}
+// Regression for https://github.com/hashicorp/terraform/issues/4332
+func TestAccAWSCloudFormation_withParams(t *testing.T) {
+ var stack cloudformation.Stack
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSCloudFormationDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSCloudFormationConfig_withParams,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with_params", &stack),
+ ),
+ },
+ resource.TestStep{
+ Config: testAccAWSCloudFormationConfig_withParams_modified,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with_params", &stack),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckCloudFormationStackExists(n string, stack *cloudformation.Stack) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
@@ -225,3 +250,43 @@ resource "aws_sns_topic" "cf-updates" {
name = "tf-cf-notifications"
}
`
+
+var tpl_testAccAWSCloudFormationConfig_withParams = `
+resource "aws_cloudformation_stack" "with_params" {
+ name = "tf-stack-with-params"
+ parameters {
+ VpcCIDR = "%s"
+ }
+ template_body = <
Date: Thu, 17 Dec 2015 12:21:43 -0500
Subject: [PATCH 273/664] Make gofmt errors fail build and add `make fmt`
We may want to consider requiring `gofmt -s` compliance in future
builds, but for now we just use `gofmt`.
---
Makefile | 22 ++++++++++++++--------
scripts/build.sh | 2 ++
scripts/gofmtcheck.sh | 13 +++++++++++++
3 files changed, 29 insertions(+), 8 deletions(-)
create mode 100755 scripts/gofmtcheck.sh
diff --git a/Makefile b/Makefile
index 5ecb1ce010..dfb3010343 100644
--- a/Makefile
+++ b/Makefile
@@ -4,12 +4,12 @@ VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf
default: test
# bin generates the releaseable binaries for Terraform
-bin: generate
+bin: fmtcheck generate
@sh -c "'$(CURDIR)/scripts/build.sh'"
# dev creates binaries for testing Terraform locally. These are put
# into ./bin/ as well as $GOPATH/bin
-dev: generate
+dev: fmtcheck generate
@TF_DEV=1 sh -c "'$(CURDIR)/scripts/build.sh'"
quickdev: generate
@@ -18,22 +18,22 @@ quickdev: generate
# Shorthand for quickly building the core of Terraform. Note that some
# changes will require a rebuild of everything, in which case the dev
# target should be used.
-core-dev: generate
+core-dev: fmtcheck generate
go install github.com/hashicorp/terraform
# Shorthand for building and installing just one plugin for local testing.
# Run as (for example): make plugin-dev PLUGIN=provider-aws
-plugin-dev: generate
+plugin-dev: fmtcheck generate
go install github.com/hashicorp/terraform/builtin/bins/$(PLUGIN)
mv $(GOPATH)/bin/$(PLUGIN) $(GOPATH)/bin/terraform-$(PLUGIN)
# test runs the unit tests and vets the code
-test: generate
+test: fmtcheck generate
TF_ACC= go test $(TEST) $(TESTARGS) -timeout=30s -parallel=4
@$(MAKE) vet
# testacc runs acceptance tests
-testacc: generate
+testacc: fmtcheck generate
@if [ "$(TEST)" = "./..." ]; then \
echo "ERROR: Set TEST to a specific package. For example,"; \
echo " make testacc TEST=./builtin/providers/aws"; \
@@ -42,7 +42,7 @@ testacc: generate
TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 90m
# testrace runs the race checker
-testrace: generate
+testrace: fmtcheck generate
TF_ACC= go test -race $(TEST) $(TESTARGS)
# updatedeps installs all the dependencies that Terraform needs to run
@@ -84,4 +84,10 @@ vet:
generate:
go generate ./...
-.PHONY: bin default generate test updatedeps vet
+fmt:
+ gofmt -w .
+
+fmtcheck:
+ @sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'"
+
+.PHONY: bin default generate test updatedeps vet fmt fmtcheck
diff --git a/scripts/build.sh b/scripts/build.sh
index 2553f5b6f3..222e1879e4 100755
--- a/scripts/build.sh
+++ b/scripts/build.sh
@@ -18,6 +18,7 @@ GIT_DIRTY=$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)
XC_ARCH=${XC_ARCH:-"386 amd64 arm"}
XC_OS=${XC_OS:-linux darwin windows freebsd openbsd}
+
# Get dependencies unless running in quick mode
if [ "${TF_QUICKDEV}x" == "x" ]; then
echo "==> Getting dependencies..."
@@ -30,6 +31,7 @@ rm -f bin/*
rm -rf pkg/*
mkdir -p bin/
+
# If its dev mode, only build for ourself
if [ "${TF_DEV}x" != "x" ]; then
XC_OS=$(go env GOOS)
diff --git a/scripts/gofmtcheck.sh b/scripts/gofmtcheck.sh
new file mode 100755
index 0000000000..315571042a
--- /dev/null
+++ b/scripts/gofmtcheck.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+# Check gofmt
+echo "==> Checking that code complies with gofmt requirements..."
+gofmt_files=$(gofmt -l .)
+if [[ -n ${gofmt_files} ]]; then
+ echo 'gofmt needs running on the following files:'
+ echo "${gofmt_files}"
+ echo "You can use the command: \`make fmt\` to reformat code."
+ exit 1
+fi
+
+exit 0
From 8e538b68ec04d3f4cb918a7da836ba3d83577a38 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Thu, 17 Dec 2015 12:24:24 -0500
Subject: [PATCH 274/664] Fix errors with gofmt compliance
---
.../aws/resource_aws_db_instance_test.go | 4 +-
.../aws/resource_aws_dynamodb_table.go | 18 +++----
.../aws/resource_aws_security_group.go | 8 +--
.../aws/resource_aws_security_group_test.go | 4 +-
.../resource_digitalocean_record.go | 2 +-
.../vsphere/resource_vsphere_folder.go | 50 +++++++++----------
.../vsphere/resource_vsphere_folder_test.go | 39 +++++++--------
.../resource_vsphere_virtual_machine.go | 4 +-
.../resource_vsphere_virtual_machine_test.go | 14 ++----
9 files changed, 67 insertions(+), 76 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_db_instance_test.go b/builtin/providers/aws/resource_aws_db_instance_test.go
index 74ed455f72..fa1e9e733a 100644
--- a/builtin/providers/aws/resource_aws_db_instance_test.go
+++ b/builtin/providers/aws/resource_aws_db_instance_test.go
@@ -191,7 +191,7 @@ func testAccCheckAWSDBInstanceSnapshot(s *terraform.State) error {
} else {
if len(resp.DBInstances) != 0 &&
- *resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID {
+ *resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID {
return fmt.Errorf("DB Instance still exists")
}
}
@@ -245,7 +245,7 @@ func testAccCheckAWSDBInstanceNoSnapshot(s *terraform.State) error {
} else {
if len(resp.DBInstances) != 0 &&
- *resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID {
+ *resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID {
return fmt.Errorf("DB Instance still exists")
}
}
diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go
index addf368eff..c0a4f8c49e 100644
--- a/builtin/providers/aws/resource_aws_dynamodb_table.go
+++ b/builtin/providers/aws/resource_aws_dynamodb_table.go
@@ -160,12 +160,12 @@ func resourceAwsDynamoDbTable() *schema.Resource {
},
},
"stream_enabled": &schema.Schema{
- Type: schema.TypeBool,
+ Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"stream_view_type": &schema.Schema{
- Type: schema.TypeString,
+ Type: schema.TypeString,
Optional: true,
Computed: true,
StateFunc: func(v interface{}) string {
@@ -280,9 +280,9 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er
}
if _, ok := d.GetOk("stream_enabled"); ok {
-
+
req.StreamSpecification = &dynamodb.StreamSpecification{
- StreamEnabled: aws.Bool(d.Get("stream_enabled").(bool)),
+ StreamEnabled: aws.Bool(d.Get("stream_enabled").(bool)),
StreamViewType: aws.String(d.Get("stream_view_type").(string)),
}
@@ -372,7 +372,7 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
}
req.StreamSpecification = &dynamodb.StreamSpecification{
- StreamEnabled: aws.Bool(d.Get("stream_enabled").(bool)),
+ StreamEnabled: aws.Bool(d.Get("stream_enabled").(bool)),
StreamViewType: aws.String(d.Get("stream_view_type").(string)),
}
@@ -804,10 +804,10 @@ func waitForTableToBeActive(tableName string, meta interface{}) error {
func validateStreamViewType(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
- viewTypes := map[string]bool {
- "KEYS_ONLY": true,
- "NEW_IMAGE": true,
- "OLD_IMAGE": true,
+ viewTypes := map[string]bool{
+ "KEYS_ONLY": true,
+ "NEW_IMAGE": true,
+ "OLD_IMAGE": true,
"NEW_AND_OLD_IMAGES": true,
}
diff --git a/builtin/providers/aws/resource_aws_security_group.go b/builtin/providers/aws/resource_aws_security_group.go
index b0cabec2bc..67d4c356c0 100644
--- a/builtin/providers/aws/resource_aws_security_group.go
+++ b/builtin/providers/aws/resource_aws_security_group.go
@@ -24,10 +24,10 @@ func resourceAwsSecurityGroup() *schema.Resource {
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
- Type: schema.TypeString,
- Optional: true,
- Computed: true,
- ForceNew: true,
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ForceNew: true,
ConflictsWith: []string{"name_prefix"},
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
diff --git a/builtin/providers/aws/resource_aws_security_group_test.go b/builtin/providers/aws/resource_aws_security_group_test.go
index d5142c68ea..2ae9d283b2 100644
--- a/builtin/providers/aws/resource_aws_security_group_test.go
+++ b/builtin/providers/aws/resource_aws_security_group_test.go
@@ -46,7 +46,7 @@ func TestAccAWSSecurityGroup_basic(t *testing.T) {
})
}
-func TestAccAWSSecurityGroup_namePrefix( t *testing.T) {
+func TestAccAWSSecurityGroup_namePrefix(t *testing.T) {
var group ec2.SecurityGroup
resource.Test(t, resource.TestCase{
@@ -345,7 +345,7 @@ func testAccCheckAWSSecurityGroupDestroy(s *terraform.State) error {
}
func testAccCheckAWSSecurityGroupGeneratedNamePrefix(
-resource, prefix string) resource.TestCheckFunc {
+ resource, prefix string) resource.TestCheckFunc {
return func(s *terraform.State) error {
r, ok := s.RootModule().Resources[resource]
if !ok {
diff --git a/builtin/providers/digitalocean/resource_digitalocean_record.go b/builtin/providers/digitalocean/resource_digitalocean_record.go
index 1db6084bd3..5e8218c79c 100644
--- a/builtin/providers/digitalocean/resource_digitalocean_record.go
+++ b/builtin/providers/digitalocean/resource_digitalocean_record.go
@@ -183,7 +183,7 @@ func resourceDigitalOceanRecordDelete(d *schema.ResourceData, meta interface{})
log.Printf("[INFO] Deleting record: %s, %d", domain, id)
- resp, delErr := client.Domains.DeleteRecord(domain, id)
+ resp, delErr := client.Domains.DeleteRecord(domain, id)
if delErr != nil {
// If the record is somehow already destroyed, mark as
// successfully gone
diff --git a/builtin/providers/vsphere/resource_vsphere_folder.go b/builtin/providers/vsphere/resource_vsphere_folder.go
index 3ed4d52ad5..82289f3cfb 100644
--- a/builtin/providers/vsphere/resource_vsphere_folder.go
+++ b/builtin/providers/vsphere/resource_vsphere_folder.go
@@ -14,9 +14,9 @@ import (
)
type folder struct {
- datacenter string
- existingPath string
- path string
+ datacenter string
+ existingPath string
+ path string
}
func resourceVSphereFolder() *schema.Resource {
@@ -47,7 +47,7 @@ func resourceVSphereFolder() *schema.Resource {
}
func resourceVSphereFolderCreate(d *schema.ResourceData, meta interface{}) error {
-
+
client := meta.(*govmomi.Client)
f := folder{
@@ -67,7 +67,6 @@ func resourceVSphereFolderCreate(d *schema.ResourceData, meta interface{}) error
return resourceVSphereFolderRead(d, meta)
}
-
func createFolder(client *govmomi.Client, f *folder) error {
finder := find.NewFinder(client.Client, true)
@@ -96,41 +95,40 @@ func createFolder(client *govmomi.Client, f *folder) error {
subfolder, err := si.FindByInventoryPath(
context.TODO(), fmt.Sprintf("%v/vm/%v", f.datacenter, workingPath))
- if err != nil {
- return fmt.Errorf("error %s", err)
- } else if subfolder == nil {
- log.Printf("[DEBUG] folder not found; creating: %s", workingPath)
- folder, err = folder.CreateFolder(context.TODO(), pathPart)
+ if err != nil {
+ return fmt.Errorf("error %s", err)
+ } else if subfolder == nil {
+ log.Printf("[DEBUG] folder not found; creating: %s", workingPath)
+ folder, err = folder.CreateFolder(context.TODO(), pathPart)
if err != nil {
return fmt.Errorf("Failed to create folder at %s; %s", workingPath, err)
}
- } else {
- log.Printf("[DEBUG] folder already exists: %s", workingPath)
- f.existingPath = workingPath
- folder = subfolder.(*object.Folder)
- }
+ } else {
+ log.Printf("[DEBUG] folder already exists: %s", workingPath)
+ f.existingPath = workingPath
+ folder = subfolder.(*object.Folder)
+ }
}
return nil
}
-
func resourceVSphereFolderRead(d *schema.ResourceData, meta interface{}) error {
-
+
log.Printf("[DEBUG] reading folder: %#v", d)
client := meta.(*govmomi.Client)
-
+
dc, err := getDatacenter(client, d.Get("datacenter").(string))
if err != nil {
return err
}
-
+
finder := find.NewFinder(client.Client, true)
finder = finder.SetDatacenter(dc)
folder, err := object.NewSearchIndex(client.Client).FindByInventoryPath(
- context.TODO(), fmt.Sprintf("%v/vm/%v", d.Get("datacenter").(string),
- d.Get("path").(string)))
-
+ context.TODO(), fmt.Sprintf("%v/vm/%v", d.Get("datacenter").(string),
+ d.Get("path").(string)))
+
if err != nil {
return err
}
@@ -143,9 +141,9 @@ func resourceVSphereFolderRead(d *schema.ResourceData, meta interface{}) error {
}
func resourceVSphereFolderDelete(d *schema.ResourceData, meta interface{}) error {
-
+
f := folder{
- path: strings.TrimRight(d.Get("path").(string), "/"),
+ path: strings.TrimRight(d.Get("path").(string), "/"),
existingPath: d.Get("existing_path").(string),
}
@@ -156,7 +154,7 @@ func resourceVSphereFolderDelete(d *schema.ResourceData, meta interface{}) error
client := meta.(*govmomi.Client)
deleteFolder(client, &f)
-
+
d.SetId("")
return nil
}
@@ -175,7 +173,7 @@ func deleteFolder(client *govmomi.Client, f *folder) error {
folderRef, err := si.FindByInventoryPath(
context.TODO(), fmt.Sprintf("%v/vm/%v", f.datacenter, f.path))
-
+
if err != nil {
return fmt.Errorf("[ERROR] Could not locate folder %s: %v", f.path, err)
} else {
diff --git a/builtin/providers/vsphere/resource_vsphere_folder_test.go b/builtin/providers/vsphere/resource_vsphere_folder_test.go
index c8dd9828a4..dfd81bbcce 100644
--- a/builtin/providers/vsphere/resource_vsphere_folder_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_folder_test.go
@@ -4,7 +4,6 @@ import (
"fmt"
"os"
"testing"
-
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -21,7 +20,7 @@ func TestAccVSphereFolder_basic(t *testing.T) {
testMethod := "basic"
resourceName := "vsphere_folder." + testMethod
path := "tf_test_basic"
-
+
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@@ -47,7 +46,7 @@ func TestAccVSphereFolder_basic(t *testing.T) {
}
func TestAccVSphereFolder_nested(t *testing.T) {
-
+
var f folder
datacenter := os.Getenv("VSPHERE_DATACENTER")
testMethod := "nested"
@@ -79,7 +78,7 @@ func TestAccVSphereFolder_nested(t *testing.T) {
}
func TestAccVSphereFolder_dontDeleteExisting(t *testing.T) {
-
+
var f folder
datacenter := os.Getenv("VSPHERE_DATACENTER")
testMethod := "dontDeleteExisting"
@@ -88,17 +87,17 @@ func TestAccVSphereFolder_dontDeleteExisting(t *testing.T) {
path := existingPath + "/tf_nested/tf_test"
resource.Test(t, resource.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
- Providers: testAccProviders,
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
CheckDestroy: resource.ComposeTestCheckFunc(
assertVSphereFolderExists(datacenter, existingPath),
removeVSphereFolder(datacenter, existingPath, ""),
),
Steps: []resource.TestStep{
resource.TestStep{
- PreConfig: func() {
+ PreConfig: func() {
createVSphereFolder(datacenter, existingPath)
- },
+ },
Config: fmt.Sprintf(
testAccCheckVSphereFolderConfig,
testMethod,
@@ -171,7 +170,6 @@ func testAccCheckVSphereFolderExists(n string, f *folder) resource.TestCheckFunc
_, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["path"])
-
*f = folder{
path: rs.Primary.Attributes["path"],
}
@@ -206,7 +204,6 @@ func testAccCheckVSphereFolderExistingPathExists(n string, f *folder) resource.T
_, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["existing_path"])
-
*f = folder{
path: rs.Primary.Attributes["path"],
}
@@ -217,10 +214,10 @@ func testAccCheckVSphereFolderExistingPathExists(n string, f *folder) resource.T
func assertVSphereFolderExists(datacenter string, folder_name string) resource.TestCheckFunc {
- return func(s *terraform.State) error {
+ return func(s *terraform.State) error {
client := testAccProvider.Meta().(*govmomi.Client)
folder, err := object.NewSearchIndex(client.Client).FindByInventoryPath(
- context.TODO(), fmt.Sprintf("%v/vm/%v", datacenter, folder_name))
+ context.TODO(), fmt.Sprintf("%v/vm/%v", datacenter, folder_name))
if err != nil {
return fmt.Errorf("Error: %s", err)
} else if folder == nil {
@@ -232,16 +229,16 @@ func assertVSphereFolderExists(datacenter string, folder_name string) resource.T
}
func createVSphereFolder(datacenter string, folder_name string) error {
-
+
client := testAccProvider.Meta().(*govmomi.Client)
- f := folder{path: folder_name, datacenter: datacenter,}
+ f := folder{path: folder_name, datacenter: datacenter}
folder, err := object.NewSearchIndex(client.Client).FindByInventoryPath(
- context.TODO(), fmt.Sprintf("%v/vm/%v", datacenter, folder_name))
+ context.TODO(), fmt.Sprintf("%v/vm/%v", datacenter, folder_name))
if err != nil {
return fmt.Errorf("error %s", err)
- }
+ }
if folder == nil {
createFolder(client, &f)
@@ -253,16 +250,16 @@ func createVSphereFolder(datacenter string, folder_name string) error {
}
func removeVSphereFolder(datacenter string, folder_name string, existing_path string) resource.TestCheckFunc {
-
- f := folder{path: folder_name, datacenter: datacenter, existingPath: existing_path,}
- return func(s *terraform.State) error {
+ f := folder{path: folder_name, datacenter: datacenter, existingPath: existing_path}
+
+ return func(s *terraform.State) error {
client := testAccProvider.Meta().(*govmomi.Client)
// finder := find.NewFinder(client.Client, true)
folder, _ := object.NewSearchIndex(client.Client).FindByInventoryPath(
- context.TODO(), fmt.Sprintf("%v/vm/%v", datacenter, folder_name))
+ context.TODO(), fmt.Sprintf("%v/vm/%v", datacenter, folder_name))
if folder != nil {
deleteFolder(client, &f)
}
@@ -276,4 +273,4 @@ resource "vsphere_folder" "%s" {
path = "%s"
datacenter = "%s"
}
-`
\ No newline at end of file
+`
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
index 0ae6379119..12fdc9bd0c 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go
@@ -426,7 +426,7 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{
}
func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
-
+
log.Printf("[DEBUG] reading virtual machine: %#v", d)
client := meta.(*govmomi.Client)
dc, err := getDatacenter(client, d.Get("datacenter").(string))
@@ -1021,7 +1021,7 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
if err != nil {
return err
}
-
+
log.Printf("[DEBUG] folder: %#v", vm.folder)
folder := dcFolders.VmFolder
if len(vm.folder) > 0 {
diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
index 5d239ac5ed..97973efb52 100644
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go
@@ -216,8 +216,8 @@ func TestAccVSphereVirtualMachine_createInExistingFolder(t *testing.T) {
label := os.Getenv("VSPHERE_NETWORK_LABEL_DHCP")
resource.Test(t, resource.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
- Providers: testAccProviders,
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
CheckDestroy: resource.ComposeTestCheckFunc(
testAccCheckVSphereVirtualMachineDestroy,
removeVSphereFolder(datacenter, folder, ""),
@@ -283,8 +283,8 @@ func TestAccVSphereVirtualMachine_createWithFolder(t *testing.T) {
label := os.Getenv("VSPHERE_NETWORK_LABEL_DHCP")
resource.Test(t, resource.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
- Providers: testAccProviders,
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
CheckDestroy: resource.ComposeTestCheckFunc(
testAccCheckVSphereVirtualMachineDestroy,
testAccCheckVSphereFolderDestroy,
@@ -344,7 +344,6 @@ func testAccCheckVSphereVirtualMachineDestroy(s *terraform.State) error {
return fmt.Errorf("error %s", err)
}
-
folder := dcFolders.VmFolder
if len(rs.Primary.Attributes["folder"]) > 0 {
si := object.NewSearchIndex(client.Client)
@@ -370,7 +369,6 @@ func testAccCheckVSphereVirtualMachineDestroy(s *terraform.State) error {
func testAccCheckVSphereVirtualMachineExistsHasCustomConfig(n string, vm *virtualMachine) resource.TestCheckFunc {
return func(s *terraform.State) error {
-
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
@@ -393,7 +391,6 @@ func testAccCheckVSphereVirtualMachineExistsHasCustomConfig(n string, vm *virtua
return fmt.Errorf("error %s", err)
}
-
_, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["name"])
if err != nil {
return fmt.Errorf("error %s", err)
@@ -493,7 +490,6 @@ func testAccCheckVSphereVirtualMachineExists(n string, vm *virtualMachine) resou
_, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), folder, rs.Primary.Attributes["name"])
-
*vm = virtualMachine{
name: rs.Primary.ID,
}
@@ -598,4 +594,4 @@ resource "vsphere_virtual_machine" "with_folder" {
template = "%s"
}
}
-`
\ No newline at end of file
+`
From 11002435366cce9b45a1d5325ab0b9800c51ca46 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Thu, 17 Dec 2015 19:15:45 -0600
Subject: [PATCH 275/664] state/remote/atlas: switch to retryablehttp
The retryablehttp package implements basic retries w/ exponential
backoff, which helps the remote state push recover in cases of
connectivity blips or transient errors.
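
For context, a minimal sketch (not part of the patch) of the `go-retryablehttp` usage pattern the client is being switched to; the URL and the `RetryMax` value are illustrative.

```
package main

import (
	"log"

	retryablehttp "github.com/hashicorp/go-retryablehttp"
)

func main() {
	// NewClient returns a client preconfigured to retry failed requests
	// with exponential backoff between attempts.
	client := retryablehttp.NewClient()
	client.RetryMax = 5

	// retryablehttp.NewRequest mirrors http.NewRequest.
	req, err := retryablehttp.NewRequest("GET", "https://example.com/terraform/state", nil)
	if err != nil {
		log.Fatalf("Failed to make HTTP request: %s", err)
	}

	// Do only returns an error once all retries are exhausted, so transient
	// connectivity blips are absorbed here rather than surfaced to the caller.
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	log.Println("status:", resp.Status)
}
```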
---
state/remote/atlas.go | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/state/remote/atlas.go b/state/remote/atlas.go
index f33f407cec..82b57d6c57 100644
--- a/state/remote/atlas.go
+++ b/state/remote/atlas.go
@@ -13,7 +13,7 @@ import (
"path"
"strings"
- "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/go-retryablehttp"
"github.com/hashicorp/terraform/terraform"
)
@@ -77,14 +77,14 @@ type AtlasClient struct {
Name string
AccessToken string
RunId string
- HTTPClient *http.Client
+ HTTPClient *retryablehttp.Client
conflictHandlingAttempted bool
}
func (c *AtlasClient) Get() (*Payload, error) {
// Make the HTTP request
- req, err := http.NewRequest("GET", c.url().String(), nil)
+ req, err := retryablehttp.NewRequest("GET", c.url().String(), nil)
if err != nil {
return nil, fmt.Errorf("Failed to make HTTP request: %v", err)
}
@@ -158,7 +158,7 @@ func (c *AtlasClient) Put(state []byte) error {
b64 := base64.StdEncoding.EncodeToString(hash[:])
// Make the HTTP client and request
- req, err := http.NewRequest("PUT", base.String(), bytes.NewReader(state))
+ req, err := retryablehttp.NewRequest("PUT", base.String(), bytes.NewReader(state))
if err != nil {
return fmt.Errorf("Failed to make HTTP request: %v", err)
}
@@ -191,7 +191,7 @@ func (c *AtlasClient) Put(state []byte) error {
func (c *AtlasClient) Delete() error {
// Make the HTTP request
- req, err := http.NewRequest("DELETE", c.url().String(), nil)
+ req, err := retryablehttp.NewRequest("DELETE", c.url().String(), nil)
if err != nil {
return fmt.Errorf("Failed to make HTTP request: %v", err)
}
@@ -249,11 +249,11 @@ func (c *AtlasClient) url() *url.URL {
}
}
-func (c *AtlasClient) http() *http.Client {
+func (c *AtlasClient) http() *retryablehttp.Client {
if c.HTTPClient != nil {
return c.HTTPClient
}
- return cleanhttp.DefaultClient()
+ return retryablehttp.NewClient()
}
// Atlas returns an HTTP 409 - Conflict if the pushed state reports the same
From 3adda8241552e81384cb16f454c70571b2ca484e Mon Sep 17 00:00:00 2001
From: Clint
Date: Fri, 18 Dec 2015 10:56:19 -0600
Subject: [PATCH 276/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e1342e3a3a..afaa919204 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -54,6 +54,7 @@ BUG FIXES:
* provider/aws: Fix issue force destroying a versioned S3 bucket [GH-4168]
* provider/aws: Update DB Replica to honor storage type [GH-4155]
* provider/aws: Fix issue creating AWS RDS replicas across regions [GH-4215]
+ * provider/aws: Fix issue with iam_profile in aws_instance when a path is specified [GH-3663]
* provider/aws: Refactor AWS Authentication chain to fix issue with authentication and IAM [GH-4254]
* provider/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region [GH-4236]
* provider/aws: Fix missing AMI issue with Launch Configurations [GH-4242]
From a5efa4a1faf9564267afaf0eda04129f0cdb5097 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Fri, 18 Dec 2015 11:22:21 -0600
Subject: [PATCH 277/664] Better document RDS apply immediately
---
.../providers/aws/r/db_instance.html.markdown | 17 ++++++++++++++++-
.../providers/aws/r/rds_cluster.html.markdown | 11 +++++++++++
2 files changed, 27 insertions(+), 1 deletion(-)
diff --git a/website/source/docs/providers/aws/r/db_instance.html.markdown b/website/source/docs/providers/aws/r/db_instance.html.markdown
index 2ff282d105..4efed4ac6c 100644
--- a/website/source/docs/providers/aws/r/db_instance.html.markdown
+++ b/website/source/docs/providers/aws/r/db_instance.html.markdown
@@ -8,7 +8,21 @@ description: |-
# aws\_db\_instance
-Provides an RDS instance resource.
+Provides an RDS instance resource. A DB instance is an isolated database
+environment in the cloud. A DB instance can contain multiple user-created
+databases.
+
+Changes to a DB instance can occur when you manually change a
+parameter, such as `allocated_storage`, and are reflected in the next maintenance
+window. Because of this, Terraform may report a difference in its planning
+phase because a modification has not yet taken place. You can use the
+`apply_immediately` flag to instruct the service to apply the change immediately
+(see documentation below).
+
+~> **Note:** using `apply_immediately` can result in a
+brief downtime as the server reboots. See the AWS Docs on [RDS Maintenance][2]
+for more information.
+
## Example Usage
@@ -114,3 +128,4 @@ The following attributes are exported:
* `storage_encrypted` - Specifies whether the DB instance is encrypted
[1]: http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Replication.html
+[2]: http://docs.aws.amazon.com/fr_fr/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html
diff --git a/website/source/docs/providers/aws/r/rds_cluster.html.markdown b/website/source/docs/providers/aws/r/rds_cluster.html.markdown
index c60e6ef294..a9bd4c1a7c 100644
--- a/website/source/docs/providers/aws/r/rds_cluster.html.markdown
+++ b/website/source/docs/providers/aws/r/rds_cluster.html.markdown
@@ -15,6 +15,17 @@ database engine.
For more information on Amazon Aurora, see [Aurora on Amazon RDS][2] in the Amazon RDS User Guide.
+Changes to an RDS Cluster can occur when you manually change a
+parameter, such as `port`, and are reflected in the next maintenance
+window. Because of this, Terraform may report a difference in its planning
+phase because a modification has not yet taken place. You can use the
+`apply_immediately` flag to instruct the service to apply the change immediately
+(see documentation below).
+
+~> **Note:** using `apply_immediately` can result in a
+brief downtime as the server reboots. See the AWS Docs on [RDS Maintenance][2]
+for more information.
+
## Example Usage
```
From 93b63cc42d44efa1c58425aef0eaf7374f22d845 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Fri, 18 Dec 2015 11:43:33 -0600
Subject: [PATCH 278/664] document for ElastiCache cluster, and fix link
---
.../aws/r/elasticache_cluster.html.markdown | 12 ++++++++++++
.../docs/providers/aws/r/rds_cluster.html.markdown | 3 ++-
2 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown
index 3f511f84d6..707487c9eb 100644
--- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown
+++ b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown
@@ -10,6 +10,17 @@ description: |-
Provides an ElastiCache Cluster resource.
+Changes to a Cache Cluster can occur when you manually change a
+parameter, such as `node_type`, and are reflected in the next maintenance
+window. Because of this, Terraform may report a difference in its planning
+phase because a modification has not yet taken place. You can use the
+`apply_immediately` flag to instruct the service to apply the change immediately
+(see documentation below).
+
+~> **Note:** using `apply_immediately` can result in a
+brief downtime as the server reboots. See the AWS Docs on
+[Modifying an ElastiCache Cache Cluster][2] for more information.
+
## Example Usage
```
@@ -101,3 +112,4 @@ The following attributes are exported:
* `configuration_endpoint` - (Memcached only) The configuration endpoint to allow host discovery
[1]: http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheCluster.html
+[2]: http://docs.aws.amazon.com/fr_fr/AmazonElastiCache/latest/UserGuide/Clusters.Modify.html
diff --git a/website/source/docs/providers/aws/r/rds_cluster.html.markdown b/website/source/docs/providers/aws/r/rds_cluster.html.markdown
index a9bd4c1a7c..832dd31013 100644
--- a/website/source/docs/providers/aws/r/rds_cluster.html.markdown
+++ b/website/source/docs/providers/aws/r/rds_cluster.html.markdown
@@ -23,7 +23,7 @@ phase because a modification has not yet taken place. You can use the
(see documentation below).
~> **Note:** using `apply_immediately` can result in a
-brief downtime as the server reboots. See the AWS Docs on [RDS Maintenance][2]
+brief downtime as the server reboots. See the AWS Docs on [RDS Maintenance][4]
for more information.
## Example Usage
@@ -104,3 +104,4 @@ The following attributes are exported:
[2]: http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html
[3]: /docs/providers/aws/r/rds_cluster_instance.html
+[4]: http://docs.aws.amazon.com/fr_fr/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html
From 5d910691482efb11b2db572146914b410399563b Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Fri, 18 Dec 2015 09:45:01 -0800
Subject: [PATCH 279/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index afaa919204..6d945112f0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -64,6 +64,7 @@ BUG FIXES:
* provider/azure: Update for [breaking change to upstream client library](https://github.com/Azure/azure-sdk-for-go/commit/68d50cb53a73edfeb7f17f5e86cdc8eb359a9528). [GH-4300]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
+ * provider/rundeck: Tolerate Rundeck server not returning project name when reading a job [GH-4301]
* provider/vsphere: Create and attach additional disks before bootup [GH-4196]
* provider/openstack: Convert block_device from a Set to a List [GH-4288]
From 41f9ebc66715c53440e3102891acaaa0ff567741 Mon Sep 17 00:00:00 2001
From: Jesse Szwedko
Date: Fri, 23 Oct 2015 20:09:34 +0000
Subject: [PATCH 280/664] Add support for unary operators + and -
This adds support to the configuration interpolation syntax for + and -
as unary operators, specifically to represent negative numbers.
---
config/lang/ast/unary_arithmetic.go | 42 +++++++
config/lang/builtins.go | 42 +++++++
config/lang/check_types.go | 45 ++++++++
config/lang/eval_test.go | 54 +++++++++
config/lang/lang.y | 8 ++
config/lang/lex_test.go | 23 ++++
config/lang/y.go | 67 ++++++-----
config/lang/y.output | 172 ++++++++++++++++------------
8 files changed, 354 insertions(+), 99 deletions(-)
create mode 100644 config/lang/ast/unary_arithmetic.go
diff --git a/config/lang/ast/unary_arithmetic.go b/config/lang/ast/unary_arithmetic.go
new file mode 100644
index 0000000000..d6b65b3652
--- /dev/null
+++ b/config/lang/ast/unary_arithmetic.go
@@ -0,0 +1,42 @@
+package ast
+
+import (
+ "fmt"
+)
+
+// UnaryArithmetic represents a node where the result is arithmetic of
+// one operand
+type UnaryArithmetic struct {
+ Op ArithmeticOp
+ Expr Node
+ Posx Pos
+}
+
+func (n *UnaryArithmetic) Accept(v Visitor) Node {
+ n.Expr = n.Expr.Accept(v)
+
+ return v(n)
+}
+
+func (n *UnaryArithmetic) Pos() Pos {
+ return n.Posx
+}
+
+func (n *UnaryArithmetic) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *UnaryArithmetic) String() string {
+ var sign rune
+ switch n.Op {
+ case ArithmeticOpAdd:
+ sign = '+'
+ case ArithmeticOpSub:
+ sign = '-'
+ }
+ return fmt.Sprintf("%c%s", sign, n.Expr)
+}
+
+func (n *UnaryArithmetic) Type(Scope) (Type, error) {
+ return TypeInt, nil
+}
diff --git a/config/lang/builtins.go b/config/lang/builtins.go
index bf918c9c75..457a5ef372 100644
--- a/config/lang/builtins.go
+++ b/config/lang/builtins.go
@@ -24,11 +24,53 @@ func registerBuiltins(scope *ast.BasicScope) *ast.BasicScope {
scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt()
// Math operations
+ scope.FuncMap["__builtin_UnaryIntMath"] = builtinUnaryIntMath()
+ scope.FuncMap["__builtin_UnaryFloatMath"] = builtinUnaryFloatMath()
scope.FuncMap["__builtin_IntMath"] = builtinIntMath()
scope.FuncMap["__builtin_FloatMath"] = builtinFloatMath()
return scope
}
+func builtinUnaryIntMath() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt},
+ Variadic: false,
+ ReturnType: ast.TypeInt,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ result := args[1].(int)
+ switch op {
+ case ast.ArithmeticOpAdd:
+ result = result
+ case ast.ArithmeticOpSub:
+ result = -result
+ }
+
+ return result, nil
+ },
+ }
+}
+
+func builtinUnaryFloatMath() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeFloat},
+ Variadic: false,
+ ReturnType: ast.TypeFloat,
+ Callback: func(args []interface{}) (interface{}, error) {
+ op := args[0].(ast.ArithmeticOp)
+ result := args[1].(float64)
+ switch op {
+ case ast.ArithmeticOpAdd:
+ result = result
+ case ast.ArithmeticOpSub:
+ result = -result
+ }
+
+ return result, nil
+ },
+ }
+}
+
func builtinFloatMath() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeInt},
diff --git a/config/lang/check_types.go b/config/lang/check_types.go
index 4fbcd731ad..0ff6ac93ba 100644
--- a/config/lang/check_types.go
+++ b/config/lang/check_types.go
@@ -55,6 +55,9 @@ func (v *TypeCheck) visit(raw ast.Node) ast.Node {
var result ast.Node
var err error
switch n := raw.(type) {
+ case *ast.UnaryArithmetic:
+ tc := &typeCheckUnaryArithmetic{n}
+ result, err = tc.TypeCheck(v)
case *ast.Arithmetic:
tc := &typeCheckArithmetic{n}
result, err = tc.TypeCheck(v)
@@ -89,6 +92,48 @@ func (v *TypeCheck) visit(raw ast.Node) ast.Node {
return result
}
+type typeCheckUnaryArithmetic struct {
+ n *ast.UnaryArithmetic
+}
+
+func (tc *typeCheckUnaryArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ // Only support + or - as unary op
+ if tc.n.Op != ast.ArithmeticOpAdd && tc.n.Op != ast.ArithmeticOpSub {
+ fmt.Printf("%+v\n", tc.n.Op)
+ return nil, fmt.Errorf("only + or - supported as unary operator")
+ }
+ expr := v.StackPop()
+
+ mathFunc := "__builtin_UnaryIntMath"
+ mathType := ast.TypeInt
+ switch expr {
+ case ast.TypeInt:
+ mathFunc = "__builtin_UnaryIntMath"
+ mathType = expr
+ case ast.TypeFloat:
+ mathFunc = "__builtin_UnaryFloatMath"
+ mathType = expr
+ }
+
+ // Return type
+ v.StackPush(mathType)
+
+ args := make([]ast.Node, 2)
+ args[0] = &ast.LiteralNode{
+ Value: tc.n.Op,
+ Typex: ast.TypeInt,
+ Posx: tc.n.Pos(),
+ }
+ args[1] = tc.n.Expr
+ // Replace our node with a call to the proper function. This isn't
+ // type checked but we already verified types.
+ return &ast.Call{
+ Func: mathFunc,
+ Args: args,
+ Posx: tc.n.Pos(),
+ }, nil
+}
+
type typeCheckArithmetic struct {
n *ast.Arithmetic
}
diff --git a/config/lang/eval_test.go b/config/lang/eval_test.go
index 122f44d1f4..63c7ce984b 100644
--- a/config/lang/eval_test.go
+++ b/config/lang/eval_test.go
@@ -251,6 +251,60 @@ func TestEval(t *testing.T) {
"foo 43",
ast.TypeString,
},
+
+ {
+ "foo ${-46}",
+ nil,
+ false,
+ "foo -46",
+ ast.TypeString,
+ },
+
+ {
+ "foo ${-46 + 5}",
+ nil,
+ false,
+ "foo -41",
+ ast.TypeString,
+ },
+
+ {
+ "foo ${46 + -5}",
+ nil,
+ false,
+ "foo 41",
+ ast.TypeString,
+ },
+
+ {
+ "foo ${-bar}",
+ &ast.BasicScope{
+ VarMap: map[string]ast.Variable{
+ "bar": ast.Variable{
+ Value: 41,
+ Type: ast.TypeInt,
+ },
+ },
+ },
+ false,
+ "foo -41",
+ ast.TypeString,
+ },
+
+ {
+ "foo ${5 + -bar}",
+ &ast.BasicScope{
+ VarMap: map[string]ast.Variable{
+ "bar": ast.Variable{
+ Value: 41,
+ Type: ast.TypeInt,
+ },
+ },
+ },
+ false,
+ "foo -36",
+ ast.TypeString,
+ },
}
for _, tc := range cases {
diff --git a/config/lang/lang.y b/config/lang/lang.y
index c531860e51..f55f7bf982 100644
--- a/config/lang/lang.y
+++ b/config/lang/lang.y
@@ -130,6 +130,14 @@ expr:
Posx: $1.Pos(),
}
}
+| ARITH_OP expr
+ {
+ $$ = &ast.UnaryArithmetic{
+ Op: $1.Value.(ast.ArithmeticOp),
+ Expr: $2,
+ Posx: $1.Pos,
+ }
+ }
| IDENTIFIER
{
$$ = &ast.VariableAccess{Name: $1.Value.(string), Posx: $1.Pos}
diff --git a/config/lang/lex_test.go b/config/lang/lex_test.go
index 5341e594a6..572aa0f532 100644
--- a/config/lang/lex_test.go
+++ b/config/lang/lex_test.go
@@ -63,6 +63,20 @@ func TestLex(t *testing.T) {
PROGRAM_BRACKET_RIGHT, lexEOF},
},
+ {
+ "${bar(-42)}",
+ []int{PROGRAM_BRACKET_LEFT,
+ IDENTIFIER, PAREN_LEFT, ARITH_OP, INTEGER, PAREN_RIGHT,
+ PROGRAM_BRACKET_RIGHT, lexEOF},
+ },
+
+ {
+ "${bar(-42.0)}",
+ []int{PROGRAM_BRACKET_LEFT,
+ IDENTIFIER, PAREN_LEFT, ARITH_OP, FLOAT, PAREN_RIGHT,
+ PROGRAM_BRACKET_RIGHT, lexEOF},
+ },
+
{
"${bar(42+1)}",
[]int{PROGRAM_BRACKET_LEFT,
@@ -72,6 +86,15 @@ func TestLex(t *testing.T) {
PROGRAM_BRACKET_RIGHT, lexEOF},
},
+ {
+ "${bar(42+-1)}",
+ []int{PROGRAM_BRACKET_LEFT,
+ IDENTIFIER, PAREN_LEFT,
+ INTEGER, ARITH_OP, ARITH_OP, INTEGER,
+ PAREN_RIGHT,
+ PROGRAM_BRACKET_RIGHT, lexEOF},
+ },
+
{
"${bar(3.14159)}",
[]int{PROGRAM_BRACKET_LEFT,
diff --git a/config/lang/y.go b/config/lang/y.go
index fd0693f151..faffd55d31 100644
--- a/config/lang/y.go
+++ b/config/lang/y.go
@@ -53,7 +53,7 @@ const parserEofCode = 1
const parserErrCode = 2
const parserMaxDepth = 200
-//line lang.y:165
+//line lang.y:173
//line yacctab:1
var parserExca = [...]int{
@@ -62,51 +62,52 @@ var parserExca = [...]int{
-2, 0,
}
-const parserNprod = 19
+const parserNprod = 20
const parserPrivate = 57344
var parserTokenNames []string
var parserStates []string
-const parserLast = 30
+const parserLast = 34
var parserAct = [...]int{
- 9, 20, 16, 16, 7, 7, 3, 18, 10, 8,
- 1, 17, 14, 12, 13, 6, 6, 19, 8, 22,
- 15, 23, 24, 11, 2, 25, 16, 21, 4, 5,
+ 9, 7, 3, 16, 22, 8, 17, 17, 20, 17,
+ 1, 18, 6, 23, 8, 19, 25, 26, 21, 11,
+ 2, 24, 7, 4, 5, 0, 10, 27, 0, 14,
+ 15, 12, 13, 6,
}
var parserPact = [...]int{
- 1, -1000, 1, -1000, -1000, -1000, -1000, 0, -1000, 15,
- 0, 1, -1000, -1000, -1, -1000, 0, -8, 0, -1000,
- -1000, 12, -9, -1000, 0, -9,
+ -3, -1000, -3, -1000, -1000, -1000, -1000, 18, -1000, -2,
+ 18, -3, -1000, -1000, 18, 0, -1000, 18, -5, -1000,
+ 18, -1000, -1000, 7, -4, -1000, 18, -4,
}
var parserPgo = [...]int{
- 0, 0, 29, 28, 23, 6, 27, 10,
+ 0, 0, 24, 23, 19, 2, 13, 10,
}
var parserR1 = [...]int{
0, 7, 7, 4, 4, 5, 5, 2, 1, 1,
- 1, 1, 1, 1, 1, 6, 6, 6, 3,
+ 1, 1, 1, 1, 1, 1, 6, 6, 6, 3,
}
var parserR2 = [...]int{
0, 0, 1, 1, 2, 1, 1, 3, 3, 1,
- 1, 1, 3, 1, 4, 0, 3, 1, 1,
+ 1, 1, 3, 2, 1, 4, 0, 3, 1, 1,
}
var parserChk = [...]int{
-1000, -7, -4, -5, -3, -2, 15, 4, -5, -1,
- 8, -4, 13, 14, 12, 5, 11, -1, 8, -1,
- 9, -6, -1, 9, 10, -1,
+ 8, -4, 13, 14, 11, 12, 5, 11, -1, -1,
+ 8, -1, 9, -6, -1, 9, 10, -1,
}
var parserDef = [...]int{
- 1, -2, 2, 3, 5, 6, 18, 0, 4, 0,
- 0, 9, 10, 11, 13, 7, 0, 0, 15, 12,
- 8, 0, 17, 14, 0, 16,
+ 1, -2, 2, 3, 5, 6, 19, 0, 4, 0,
+ 0, 9, 10, 11, 0, 14, 7, 0, 0, 13,
+ 16, 12, 8, 0, 18, 15, 0, 17,
}
var parserTok1 = [...]int{
@@ -577,38 +578,48 @@ parserdefault:
}
}
case 13:
- parserDollar = parserS[parserpt-1 : parserpt+1]
+ parserDollar = parserS[parserpt-2 : parserpt+1]
//line lang.y:134
+ {
+ parserVAL.node = &ast.UnaryArithmetic{
+ Op: parserDollar[1].token.Value.(ast.ArithmeticOp),
+ Expr: parserDollar[2].node,
+ Posx: parserDollar[1].token.Pos,
+ }
+ }
+ case 14:
+ parserDollar = parserS[parserpt-1 : parserpt+1]
+ //line lang.y:142
{
parserVAL.node = &ast.VariableAccess{Name: parserDollar[1].token.Value.(string), Posx: parserDollar[1].token.Pos}
}
- case 14:
+ case 15:
parserDollar = parserS[parserpt-4 : parserpt+1]
- //line lang.y:138
+ //line lang.y:146
{
parserVAL.node = &ast.Call{Func: parserDollar[1].token.Value.(string), Args: parserDollar[3].nodeList, Posx: parserDollar[1].token.Pos}
}
- case 15:
+ case 16:
parserDollar = parserS[parserpt-0 : parserpt+1]
- //line lang.y:143
+ //line lang.y:151
{
parserVAL.nodeList = nil
}
- case 16:
+ case 17:
parserDollar = parserS[parserpt-3 : parserpt+1]
- //line lang.y:147
+ //line lang.y:155
{
parserVAL.nodeList = append(parserDollar[1].nodeList, parserDollar[3].node)
}
- case 17:
+ case 18:
parserDollar = parserS[parserpt-1 : parserpt+1]
- //line lang.y:151
+ //line lang.y:159
{
parserVAL.nodeList = append(parserVAL.nodeList, parserDollar[1].node)
}
- case 18:
+ case 19:
parserDollar = parserS[parserpt-1 : parserpt+1]
- //line lang.y:157
+ //line lang.y:165
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(string),
diff --git a/config/lang/y.output b/config/lang/y.output
index 17352390dd..998d2673cc 100644
--- a/config/lang/y.output
+++ b/config/lang/y.output
@@ -51,9 +51,9 @@ state 5
state 6
- literal: STRING. (18)
+ literal: STRING. (19)
- . reduce 18 (src line 155)
+ . reduce 19 (src line 163)
state 7
@@ -61,7 +61,8 @@ state 7
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
- IDENTIFIER shift 14
+ ARITH_OP shift 14
+ IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
@@ -83,8 +84,8 @@ state 9
interpolation: PROGRAM_BRACKET_LEFT expr.PROGRAM_BRACKET_RIGHT
expr: expr.ARITH_OP expr
- PROGRAM_BRACKET_RIGHT shift 15
- ARITH_OP shift 16
+ PROGRAM_BRACKET_RIGHT shift 16
+ ARITH_OP shift 17
. error
@@ -93,13 +94,14 @@ state 10
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
- IDENTIFIER shift 14
+ ARITH_OP shift 14
+ IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
- expr goto 17
+ expr goto 18
interpolation goto 5
literal goto 4
literalModeTop goto 11
@@ -130,25 +132,12 @@ state 13
state 14
- expr: IDENTIFIER. (13)
- expr: IDENTIFIER.PAREN_LEFT args PAREN_RIGHT
-
- PAREN_LEFT shift 18
- . reduce 13 (src line 133)
-
-
-state 15
- interpolation: PROGRAM_BRACKET_LEFT expr PROGRAM_BRACKET_RIGHT. (7)
-
- . reduce 7 (src line 94)
-
-
-state 16
- expr: expr ARITH_OP.expr
+ expr: ARITH_OP.expr
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
- IDENTIFIER shift 14
+ ARITH_OP shift 14
+ IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
@@ -160,104 +149,145 @@ state 16
literalModeTop goto 11
literalModeValue goto 3
+state 15
+ expr: IDENTIFIER. (14)
+ expr: IDENTIFIER.PAREN_LEFT args PAREN_RIGHT
+
+ PAREN_LEFT shift 20
+ . reduce 14 (src line 141)
+
+
+state 16
+ interpolation: PROGRAM_BRACKET_LEFT expr PROGRAM_BRACKET_RIGHT. (7)
+
+ . reduce 7 (src line 94)
+
+
state 17
- expr: PAREN_LEFT expr.PAREN_RIGHT
- expr: expr.ARITH_OP expr
-
- PAREN_RIGHT shift 20
- ARITH_OP shift 16
- . error
-
-
-state 18
- expr: IDENTIFIER PAREN_LEFT.args PAREN_RIGHT
- args: . (15)
+ expr: expr ARITH_OP.expr
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
- IDENTIFIER shift 14
+ ARITH_OP shift 14
+ IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
- . reduce 15 (src line 142)
+ . error
- expr goto 22
+ expr goto 21
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
- args goto 21
+
+state 18
+ expr: PAREN_LEFT expr.PAREN_RIGHT
+ expr: expr.ARITH_OP expr
+
+ PAREN_RIGHT shift 22
+ ARITH_OP shift 17
+ . error
+
state 19
+ expr: expr.ARITH_OP expr
+ expr: ARITH_OP expr. (13)
+
+ . reduce 13 (src line 133)
+
+
+state 20
+ expr: IDENTIFIER PAREN_LEFT.args PAREN_RIGHT
+ args: . (16)
+
+ PROGRAM_BRACKET_LEFT shift 7
+ PAREN_LEFT shift 10
+ ARITH_OP shift 14
+ IDENTIFIER shift 15
+ INTEGER shift 12
+ FLOAT shift 13
+ STRING shift 6
+ . reduce 16 (src line 150)
+
+ expr goto 24
+ interpolation goto 5
+ literal goto 4
+ literalModeTop goto 11
+ literalModeValue goto 3
+ args goto 23
+
+state 21
expr: expr.ARITH_OP expr
expr: expr ARITH_OP expr. (12)
. reduce 12 (src line 125)
-state 20
+state 22
expr: PAREN_LEFT expr PAREN_RIGHT. (8)
. reduce 8 (src line 100)
-state 21
+state 23
expr: IDENTIFIER PAREN_LEFT args.PAREN_RIGHT
args: args.COMMA expr
- PAREN_RIGHT shift 23
- COMMA shift 24
+ PAREN_RIGHT shift 25
+ COMMA shift 26
. error
-state 22
- expr: expr.ARITH_OP expr
- args: expr. (17)
-
- ARITH_OP shift 16
- . reduce 17 (src line 150)
-
-
-state 23
- expr: IDENTIFIER PAREN_LEFT args PAREN_RIGHT. (14)
-
- . reduce 14 (src line 137)
-
-
state 24
+ expr: expr.ARITH_OP expr
+ args: expr. (18)
+
+ ARITH_OP shift 17
+ . reduce 18 (src line 158)
+
+
+state 25
+ expr: IDENTIFIER PAREN_LEFT args PAREN_RIGHT. (15)
+
+ . reduce 15 (src line 145)
+
+
+state 26
args: args COMMA.expr
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
- IDENTIFIER shift 14
+ ARITH_OP shift 14
+ IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
- expr goto 25
+ expr goto 27
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
-state 25
+state 27
expr: expr.ARITH_OP expr
- args: args COMMA expr. (16)
+ args: args COMMA expr. (17)
- ARITH_OP shift 16
- . reduce 16 (src line 146)
+ ARITH_OP shift 17
+ . reduce 17 (src line 154)
15 terminals, 8 nonterminals
-19 grammar rules, 26/2000 states
+20 grammar rules, 28/2000 states
0 shift/reduce, 0 reduce/reduce conflicts reported
57 working sets used
-memory: parser 35/30000
-21 extra closures
-45 shift entries, 1 exceptions
-14 goto entries
-23 entries saved by goto default
-Optimizer space used: output 30/30000
-30 table entries, 0 zero
-maximum spread: 15, maximum offset: 24
+memory: parser 40/30000
+23 extra closures
+57 shift entries, 1 exceptions
+15 goto entries
+27 entries saved by goto default
+Optimizer space used: output 34/30000
+34 table entries, 2 zero
+maximum spread: 15, maximum offset: 26
From 82fe67f7fc4f7e86fc0235d74db3e69fece4807c Mon Sep 17 00:00:00 2001
From: Jesse Szwedko
Date: Fri, 18 Dec 2015 17:50:31 +0000
Subject: [PATCH 281/664] Add support for creating Managed Microsoft Active
Directory in AWS
This action is almost exactly the same as creating a SimpleAD directory,
so we reuse this resource and allow the user to specify the type when
creating the directory (the size is ignored when the type is MicrosoftAD).
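As a hedged sketch of what this enables (mirroring the acceptance test
configuration added below; resource and network names are hypothetical):

```
resource "aws_directory_service_directory" "ad" {
  name     = "corp.example.com"
  password = "SuperSecretPassw0rd"
  type     = "MicrosoftAD"   # size is ignored for this type

  vpc_settings {
    vpc_id     = "${aws_vpc.main.id}"
    subnet_ids = ["${aws_subnet.a.id}", "${aws_subnet.b.id}"]
  }
}
```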
---
...esource_aws_directory_service_directory.go | 118 ++++++++++++++----
...ce_aws_directory_service_directory_test.go | 44 +++++++
.../directory_service_directory.html.markdown | 6 +-
3 files changed, 139 insertions(+), 29 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_directory_service_directory.go b/builtin/providers/aws/resource_aws_directory_service_directory.go
index 1fdb9491ee..3eb3d941dd 100644
--- a/builtin/providers/aws/resource_aws_directory_service_directory.go
+++ b/builtin/providers/aws/resource_aws_directory_service_directory.go
@@ -32,7 +32,7 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource {
},
"size": &schema.Schema{
Type: schema.TypeString,
- Required: true,
+ Optional: true,
ForceNew: true,
},
"alias": &schema.Schema{
@@ -89,14 +89,41 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource {
},
"type": &schema.Schema{
Type: schema.TypeString,
- Computed: true,
+ Optional: true,
+ Default: "SimpleAD",
+ ForceNew: true,
},
},
}
}
-func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta interface{}) error {
- dsconn := meta.(*AWSClient).dsconn
+func buildVpcSettings(d *schema.ResourceData) (vpcSettings *directoryservice.DirectoryVpcSettings, err error) {
+ if v, ok := d.GetOk("vpc_settings"); ok {
+ settings := v.([]interface{})
+
+ if len(settings) > 1 {
+ return nil, fmt.Errorf("Only a single vpc_settings block is expected")
+ } else if len(settings) == 1 {
+ s := settings[0].(map[string]interface{})
+ var subnetIds []*string
+ for _, id := range s["subnet_ids"].(*schema.Set).List() {
+ subnetIds = append(subnetIds, aws.String(id.(string)))
+ }
+
+ vpcSettings = &directoryservice.DirectoryVpcSettings{
+ SubnetIds: subnetIds,
+ VpcId: aws.String(s["vpc_id"].(string)),
+ }
+ }
+ }
+
+ return vpcSettings, nil
+}
+
+func createSimpleDirectoryService(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) (directoryId string, err error) {
+ if _, ok := d.GetOk("size"); !ok {
+ return "", fmt.Errorf("size is required for type = SimpleAD")
+ }
input := directoryservice.CreateDirectoryInput{
Name: aws.String(d.Get("name").(string)),
@@ -111,33 +138,70 @@ func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta int
input.ShortName = aws.String(v.(string))
}
- if v, ok := d.GetOk("vpc_settings"); ok {
- settings := v.([]interface{})
-
- if len(settings) > 1 {
- return fmt.Errorf("Only a single vpc_settings block is expected")
- } else if len(settings) == 1 {
- s := settings[0].(map[string]interface{})
- var subnetIds []*string
- for _, id := range s["subnet_ids"].(*schema.Set).List() {
- subnetIds = append(subnetIds, aws.String(id.(string)))
- }
-
- vpcSettings := directoryservice.DirectoryVpcSettings{
- SubnetIds: subnetIds,
- VpcId: aws.String(s["vpc_id"].(string)),
- }
- input.VpcSettings = &vpcSettings
- }
+ input.VpcSettings, err = buildVpcSettings(d)
+ if err != nil {
+ return "", err
}
- log.Printf("[DEBUG] Creating Directory Service: %s", input)
+ log.Printf("[DEBUG] Creating Simple Directory Service: %s", input)
out, err := dsconn.CreateDirectory(&input)
+ if err != nil {
+ return "", err
+ }
+ log.Printf("[DEBUG] Simple Directory Service created: %s", out)
+
+ return *out.DirectoryId, nil
+}
+
+func createActiveDirectoryService(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) (directoryId string, err error) {
+ input := directoryservice.CreateMicrosoftADInput{
+ Name: aws.String(d.Get("name").(string)),
+ Password: aws.String(d.Get("password").(string)),
+ }
+
+ if v, ok := d.GetOk("description"); ok {
+ input.Description = aws.String(v.(string))
+ }
+ if v, ok := d.GetOk("short_name"); ok {
+ input.ShortName = aws.String(v.(string))
+ }
+
+ input.VpcSettings, err = buildVpcSettings(d)
+ if err != nil {
+ return "", err
+ }
+
+ log.Printf("[DEBUG] Creating Microsoft AD Directory Service: %s", input)
+ out, err := dsconn.CreateMicrosoftAD(&input)
+ if err != nil {
+ return "", err
+ }
+ log.Printf("[DEBUG] Microsoft AD Directory Service created: %s", out)
+
+ return *out.DirectoryId, nil
+}
+
+func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta interface{}) error {
+ dsconn := meta.(*AWSClient).dsconn
+
+ var (
+ directoryId string
+ err error
+ )
+
+ switch d.Get("type").(string) {
+ case "SimpleAD":
+ directoryId, err = createSimpleDirectoryService(dsconn, d)
+ case "MicrosoftAD":
+ directoryId, err = createActiveDirectoryService(dsconn, d)
+ default:
+ return fmt.Errorf("Unsupported directory type: %s", d.Get("type"))
+ }
if err != nil {
return err
}
- log.Printf("[DEBUG] Directory Service created: %s", out)
- d.SetId(*out.DirectoryId)
+
+ d.SetId(directoryId)
// Wait for creation
log.Printf("[DEBUG] Waiting for DS (%q) to become available", d.Id())
@@ -238,7 +302,9 @@ func resourceAwsDirectoryServiceDirectoryRead(d *schema.ResourceData, meta inter
if dir.ShortName != nil {
d.Set("short_name", *dir.ShortName)
}
- d.Set("size", *dir.Size)
+ if dir.Size != nil {
+ d.Set("size", *dir.Size)
+ }
d.Set("type", *dir.Type)
d.Set("vpc_settings", flattenDSVpcSettings(dir.VpcSettings))
d.Set("enable_sso", *dir.SsoEnabled)
diff --git a/builtin/providers/aws/resource_aws_directory_service_directory_test.go b/builtin/providers/aws/resource_aws_directory_service_directory_test.go
index b10174bdb0..0c71996d93 100644
--- a/builtin/providers/aws/resource_aws_directory_service_directory_test.go
+++ b/builtin/providers/aws/resource_aws_directory_service_directory_test.go
@@ -27,6 +27,22 @@ func TestAccAWSDirectoryServiceDirectory_basic(t *testing.T) {
})
}
+func TestAccAWSDirectoryServiceDirectory_microsoft(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckDirectoryServiceDirectoryDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccDirectoryServiceDirectoryConfig_microsoft,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar"),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAWSDirectoryServiceDirectory_withAliasAndSso(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -192,6 +208,34 @@ resource "aws_subnet" "bar" {
}
`
+const testAccDirectoryServiceDirectoryConfig_microsoft = `
+resource "aws_directory_service_directory" "bar" {
+ name = "corp.notexample.com"
+ password = "SuperSecretPassw0rd"
+ type = "MicrosoftAD"
+
+ vpc_settings {
+ vpc_id = "${aws_vpc.main.id}"
+ subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
+ }
+}
+
+resource "aws_vpc" "main" {
+ cidr_block = "10.0.0.0/16"
+}
+
+resource "aws_subnet" "foo" {
+ vpc_id = "${aws_vpc.main.id}"
+ availability_zone = "us-west-2a"
+ cidr_block = "10.0.1.0/24"
+}
+resource "aws_subnet" "bar" {
+ vpc_id = "${aws_vpc.main.id}"
+ availability_zone = "us-west-2b"
+ cidr_block = "10.0.2.0/24"
+}
+`
+
var randomInteger = genRandInt()
var testAccDirectoryServiceDirectoryConfig_withAlias = fmt.Sprintf(`
resource "aws_directory_service_directory" "bar_a" {
diff --git a/website/source/docs/providers/aws/r/directory_service_directory.html.markdown b/website/source/docs/providers/aws/r/directory_service_directory.html.markdown
index 04049ee553..7a8854487d 100644
--- a/website/source/docs/providers/aws/r/directory_service_directory.html.markdown
+++ b/website/source/docs/providers/aws/r/directory_service_directory.html.markdown
@@ -8,7 +8,7 @@ description: |-
# aws\_directory\_service\_directory
-Provides a directory in AWS Directory Service.
+Provides a Simple or Managed Microsoft directory in AWS Directory Service.
## Example Usage
@@ -46,12 +46,13 @@ The following arguments are supported:
* `name` - (Required) The fully qualified name for the directory, such as `corp.example.com`
* `password` - (Required) The password for the directory administrator.
-* `size` - (Required) The size of the directory (`Small` or `Large` are accepted values).
+* `size` - (Required) The size of the directory (`Small` or `Large` are accepted values). Only used when `type` is `SimpleAD`.
* `vpc_settings` - (Required) VPC related information about the directory. Fields documented below.
* `alias` - (Optional) The alias for the directory (must be unique amongst all aliases in AWS). Required for `enable_sso`.
* `description` - (Optional) A textual description for the directory.
* `short_name` - (Optional) The short name of the directory, such as `CORP`.
* `enable_sso` - (Optional) Whether to enable single-sign on for the directory. Requires `alias`. Defaults to `false`.
+* `type` (Optional) - The directory type (`SimpleAD` or `MicrosoftAD` are accepted values). Defaults to `SimpleAD`.
**vpc\_settings** supports the following:
@@ -65,4 +66,3 @@ The following attributes are exported:
* `id` - The directory identifier.
* `access_url` - The access URL for the directory, such as `http://alias.awsapps.com`.
* `dns_ip_addresses` - A list of IP addresses of the DNS servers for the directory.
-* `type` - The directory type.
From 0bdf249f2ce5a51a853ac07c1039ba90ff378cd8 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Fri, 18 Dec 2015 02:20:13 -0500
Subject: [PATCH 282/664] provider/aws: Add aws_nat_gateway Resource
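For context, a minimal sketch of how the new resource and the `nat_gateway_id`
route target added in this patch could be wired together (resource names are
hypothetical, adapted from the acceptance test configuration below):

```
resource "aws_eip" "nat" {
  vpc = true
}

resource "aws_nat_gateway" "gw" {
  allocation_id = "${aws_eip.nat.id}"
  subnet_id     = "${aws_subnet.public.id}"
}

# The aws_route resource now accepts nat_gateway_id as a target
resource "aws_route" "private_default" {
  route_table_id         = "${aws_route_table.private.id}"
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = "${aws_nat_gateway.gw.id}"
}
```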
---
builtin/providers/aws/provider.go | 1 +
.../providers/aws/resource_aws_nat_gateway.go | 181 ++++++++++++++++++
.../aws/resource_aws_nat_gateway_test.go | 154 +++++++++++++++
builtin/providers/aws/resource_aws_route.go | 22 ++-
.../providers/aws/resource_aws_route_table.go | 17 +-
.../providers/aws/r/nat_gateway.html.markdown | 51 +++++
.../docs/providers/aws/r/route.html.markdown | 9 +-
.../providers/aws/r/route_table.html.markdown | 7 +-
website/source/layouts/aws.erb | 4 +
9 files changed, 438 insertions(+), 8 deletions(-)
create mode 100644 builtin/providers/aws/resource_aws_nat_gateway.go
create mode 100644 builtin/providers/aws/resource_aws_nat_gateway_test.go
create mode 100644 website/source/docs/providers/aws/r/nat_gateway.html.markdown
diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index 2edb94b066..c13934c562 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -151,6 +151,7 @@ func Provider() terraform.ResourceProvider {
"aws_launch_configuration": resourceAwsLaunchConfiguration(),
"aws_lb_cookie_stickiness_policy": resourceAwsLBCookieStickinessPolicy(),
"aws_main_route_table_association": resourceAwsMainRouteTableAssociation(),
+ "aws_nat_gateway": resourceAwsNatGateway(),
"aws_network_acl": resourceAwsNetworkAcl(),
"aws_network_interface": resourceAwsNetworkInterface(),
"aws_opsworks_stack": resourceAwsOpsworksStack(),
diff --git a/builtin/providers/aws/resource_aws_nat_gateway.go b/builtin/providers/aws/resource_aws_nat_gateway.go
new file mode 100644
index 0000000000..8d2265cddb
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_nat_gateway.go
@@ -0,0 +1,181 @@
+package aws
+
+import (
+ "fmt"
+ "log"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsNatGateway() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsNatGatewayCreate,
+ Read: resourceAwsNatGatewayRead,
+ Delete: resourceAwsNatGatewayDelete,
+
+ Schema: map[string]*schema.Schema{
+ "allocation_id": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "subnet_id": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "network_interface_id": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+
+ "private_ip": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+
+ "public_ip": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func resourceAwsNatGatewayCreate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).ec2conn
+
+ // Create the NAT Gateway
+ createOpts := &ec2.CreateNatGatewayInput{
+ AllocationId: aws.String(d.Get("allocation_id").(string)),
+ SubnetId: aws.String(d.Get("subnet_id").(string)),
+ }
+
+ log.Printf("[DEBUG] Create NAT Gateway: %s", *createOpts)
+ natResp, err := conn.CreateNatGateway(createOpts)
+ if err != nil {
+ return fmt.Errorf("Error creating NAT Gateway: %s", err)
+ }
+
+ // Get the ID and store it
+ ng := natResp.NatGateway
+ d.SetId(*ng.NatGatewayId)
+ log.Printf("[INFO] NAT Gateway ID: %s", d.Id())
+
+ // Wait for the NAT Gateway to become available
+ log.Printf("[DEBUG] Waiting for NAT Gateway (%s) to become available", d.Id())
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"pending"},
+ Target: "available",
+ Refresh: NGStateRefreshFunc(conn, d.Id()),
+ Timeout: 10 * time.Minute,
+ }
+
+ if _, err := stateConf.WaitForState(); err != nil {
+ return fmt.Errorf("Error waiting for NAT Gateway (%s) to become available: %s", d.Id(), err)
+ }
+
+ // Update our attributes and return
+ return resourceAwsNatGatewayRead(d, meta)
+}
+
+func resourceAwsNatGatewayRead(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).ec2conn
+
+ // Refresh the NAT Gateway state
+ ngRaw, state, err := NGStateRefreshFunc(conn, d.Id())()
+ if err != nil {
+ return err
+ }
+ if ngRaw == nil || strings.ToLower(state) == "deleted" {
+ log.Printf("[INFO] Removing %s from Terraform state as it is not found or in the deleted state.", d.Id())
+ d.SetId("")
+ return nil
+ }
+
+ // Set NAT Gateway attributes
+ ng := ngRaw.(*ec2.NatGateway)
+ address := ng.NatGatewayAddresses[0]
+ d.Set("network_interface_id", address.NetworkInterfaceId)
+ d.Set("private_ip", address.PrivateIp)
+ d.Set("public_ip", address.PublicIp)
+
+ return nil
+}
+
+func resourceAwsNatGatewayDelete(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).ec2conn
+ deleteOpts := &ec2.DeleteNatGatewayInput{
+ NatGatewayId: aws.String(d.Id()),
+ }
+ log.Printf("[INFO] Deleting NAT Gateway: %s", d.Id())
+
+ _, err := conn.DeleteNatGateway(deleteOpts)
+ if err != nil {
+ ec2err, ok := err.(awserr.Error)
+ if !ok {
+ return err
+ }
+
+ if ec2err.Code() == "NatGatewayNotFound" {
+ return nil
+ }
+
+ return err
+ }
+
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"deleting"},
+ Target: "deleted",
+ Refresh: NGStateRefreshFunc(conn, d.Id()),
+ Timeout: 30 * time.Minute,
+ Delay: 10 * time.Second,
+ MinTimeout: 10 * time.Second,
+ }
+
+ _, stateErr := stateConf.WaitForState()
+ if stateErr != nil {
+ return fmt.Errorf("Error waiting for NAT Gateway (%s) to delete: %s", d.Id(), err)
+ }
+
+ return nil
+}
+
+// NGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
+// a NAT Gateway.
+func NGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ opts := &ec2.DescribeNatGatewaysInput{
+ NatGatewayIds: []*string{aws.String(id)},
+ }
+ resp, err := conn.DescribeNatGateways(opts)
+ if err != nil {
+ if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "NatGatewayNotFound" {
+ resp = nil
+ } else {
+ log.Printf("Error on NGStateRefresh: %s", err)
+ return nil, "", err
+ }
+ }
+
+ if resp == nil {
+ // Sometimes AWS just has consistency issues and doesn't see
+ // our instance yet. Return an empty state.
+ return nil, "", nil
+ }
+
+ ng := resp.NatGateways[0]
+ return ng, *ng.State, nil
+ }
+}
diff --git a/builtin/providers/aws/resource_aws_nat_gateway_test.go b/builtin/providers/aws/resource_aws_nat_gateway_test.go
new file mode 100644
index 0000000000..40b6f77c29
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_nat_gateway_test.go
@@ -0,0 +1,154 @@
+package aws
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSNatGateway_basic(t *testing.T) {
+ var natGateway ec2.NatGateway
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckNatGatewayDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccNatGatewayConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckNatGatewayExists("aws_nat_gateway.gateway", &natGateway),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckNatGatewayDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).ec2conn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_nat_gateway" {
+ continue
+ }
+
+ // Try to find the resource
+ resp, err := conn.DescribeNatGateways(&ec2.DescribeNatGatewaysInput{
+ NatGatewayIds: []*string{aws.String(rs.Primary.ID)},
+ })
+ if err == nil {
+ if len(resp.NatGateways) > 0 && strings.ToLower(*resp.NatGateways[0].State) != "deleted" {
+ return fmt.Errorf("still exists")
+ }
+
+ return nil
+ }
+
+ // Verify the error is what we want
+ ec2err, ok := err.(awserr.Error)
+ if !ok {
+ return err
+ }
+ if ec2err.Code() != "NatGatewayNotFound" {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func testAccCheckNatGatewayExists(n string, ng *ec2.NatGateway) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No ID is set")
+ }
+
+ conn := testAccProvider.Meta().(*AWSClient).ec2conn
+ resp, err := conn.DescribeNatGateways(&ec2.DescribeNatGatewaysInput{
+ NatGatewayIds: []*string{aws.String(rs.Primary.ID)},
+ })
+ if err != nil {
+ return err
+ }
+ if len(resp.NatGateways) == 0 {
+ return fmt.Errorf("NatGateway not found")
+ }
+
+ *ng = *resp.NatGateways[0]
+
+ return nil
+ }
+}
+
+const testAccNatGatewayConfig = `
+resource "aws_vpc" "vpc" {
+ cidr_block = "10.0.0.0/16"
+}
+
+resource "aws_subnet" "private" {
+ vpc_id = "${aws_vpc.vpc.id}"
+ cidr_block = "10.0.1.0/24"
+ map_public_ip_on_launch = false
+}
+
+resource "aws_subnet" "public" {
+ vpc_id = "${aws_vpc.vpc.id}"
+ cidr_block = "10.0.2.0/24"
+ map_public_ip_on_launch = true
+}
+
+resource "aws_internet_gateway" "gw" {
+ vpc_id = "${aws_vpc.vpc.id}"
+}
+
+resource "aws_eip" "nat_gateway" {
+ vpc = true
+}
+
+// Actual SUT
+resource "aws_nat_gateway" "gateway" {
+ allocation_id = "${aws_eip.nat_gateway.id}"
+ subnet_id = "${aws_subnet.public.id}"
+
+ depends_on = ["aws_internet_gateway.gw"]
+}
+
+resource "aws_route_table" "private" {
+ vpc_id = "${aws_vpc.vpc.id}"
+
+ route {
+ cidr_block = "0.0.0.0/0"
+ nat_gateway_id = "${aws_nat_gateway.gateway.id}"
+ }
+}
+
+resource "aws_route_table_association" "private" {
+ subnet_id = "${aws_subnet.private.id}"
+ route_table_id = "${aws_route_table.private.id}"
+}
+
+resource "aws_route_table" "public" {
+ vpc_id = "${aws_vpc.vpc.id}"
+
+ route {
+ cidr_block = "0.0.0.0/0"
+ gateway_id = "${aws_internet_gateway.gw.id}"
+ }
+}
+
+resource "aws_route_table_association" "public" {
+ subnet_id = "${aws_subnet.public.id}"
+ route_table_id = "${aws_route_table.public.id}"
+}
+`
diff --git a/builtin/providers/aws/resource_aws_route.go b/builtin/providers/aws/resource_aws_route.go
index 3d6f5d25bb..6832f87033 100644
--- a/builtin/providers/aws/resource_aws_route.go
+++ b/builtin/providers/aws/resource_aws_route.go
@@ -13,7 +13,7 @@ import (
// How long to sleep if a limit-exceeded event happens
var routeTargetValidationError = errors.New("Error: more than 1 target specified. Only 1 of gateway_id" +
- "instance_id, network_interface_id, route_table_id or" +
+ "nat_gateway_id, instance_id, network_interface_id, route_table_id or" +
"vpc_peering_connection_id is allowed.")
// AWS Route resource Schema declaration
@@ -42,6 +42,11 @@ func resourceAwsRoute() *schema.Resource {
Optional: true,
},
+ "nat_gateway_id": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+
"instance_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
@@ -86,6 +91,7 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error {
var setTarget string
allowedTargets := []string{
"gateway_id",
+ "nat_gateway_id",
"instance_id",
"network_interface_id",
"vpc_peering_connection_id",
@@ -112,6 +118,12 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error {
DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
GatewayId: aws.String(d.Get("gateway_id").(string)),
}
+ case "nat_gateway_id":
+ createOpts = &ec2.CreateRouteInput{
+ RouteTableId: aws.String(d.Get("route_table_id").(string)),
+ DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
+ NatGatewayId: aws.String(d.Get("nat_gateway_id").(string)),
+ }
case "instance_id":
createOpts = &ec2.CreateRouteInput{
RouteTableId: aws.String(d.Get("route_table_id").(string)),
@@ -160,6 +172,7 @@ func resourceAwsRouteRead(d *schema.ResourceData, meta interface{}) error {
d.Set("destination_prefix_list_id", route.DestinationPrefixListId)
d.Set("gateway_id", route.GatewayId)
+ d.Set("nat_gateway_id", route.NatGatewayId)
d.Set("instance_id", route.InstanceId)
d.Set("instance_owner_id", route.InstanceOwnerId)
d.Set("network_interface_id", route.NetworkInterfaceId)
@@ -176,6 +189,7 @@ func resourceAwsRouteUpdate(d *schema.ResourceData, meta interface{}) error {
var setTarget string
allowedTargets := []string{
"gateway_id",
+ "nat_gateway_id",
"instance_id",
"network_interface_id",
"vpc_peering_connection_id",
@@ -202,6 +216,12 @@ func resourceAwsRouteUpdate(d *schema.ResourceData, meta interface{}) error {
DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
GatewayId: aws.String(d.Get("gateway_id").(string)),
}
+ case "nat_gateway_id":
+ replaceOpts = &ec2.ReplaceRouteInput{
+ RouteTableId: aws.String(d.Get("route_table_id").(string)),
+ DestinationCidrBlock: aws.String(d.Get("destination_cidr_block").(string)),
+ NatGatewayId: aws.String(d.Get("nat_gateway_id").(string)),
+ }
case "instance_id":
replaceOpts = &ec2.ReplaceRouteInput{
RouteTableId: aws.String(d.Get("route_table_id").(string)),
diff --git a/builtin/providers/aws/resource_aws_route_table.go b/builtin/providers/aws/resource_aws_route_table.go
index 38e95363e5..752b771fef 100644
--- a/builtin/providers/aws/resource_aws_route_table.go
+++ b/builtin/providers/aws/resource_aws_route_table.go
@@ -60,6 +60,11 @@ func resourceAwsRouteTable() *schema.Resource {
Optional: true,
},
+ "nat_gateway_id": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+
"vpc_peering_connection_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
@@ -163,6 +168,9 @@ func resourceAwsRouteTableRead(d *schema.ResourceData, meta interface{}) error {
if r.GatewayId != nil {
m["gateway_id"] = *r.GatewayId
}
+ if r.NatGatewayId != nil {
+ m["nat_gateway_id"] = *r.NatGatewayId
+ }
if r.InstanceId != nil {
m["instance_id"] = *r.InstanceId
}
@@ -282,6 +290,7 @@ func resourceAwsRouteTableUpdate(d *schema.ResourceData, meta interface{}) error
RouteTableId: aws.String(d.Id()),
DestinationCidrBlock: aws.String(m["cidr_block"].(string)),
GatewayId: aws.String(m["gateway_id"].(string)),
+ NatGatewayId: aws.String(m["nat_gateway_id"].(string)),
InstanceId: aws.String(m["instance_id"].(string)),
VpcPeeringConnectionId: aws.String(m["vpc_peering_connection_id"].(string)),
NetworkInterfaceId: aws.String(m["network_interface_id"].(string)),
@@ -385,6 +394,12 @@ func resourceAwsRouteTableHash(v interface{}) int {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
+ natGatewaySet := false
+ if v, ok := m["nat_gateway_id"]; ok {
+ natGatewaySet = v.(string) != ""
+ buf.WriteString(fmt.Sprintf("%s-", v.(string)))
+ }
+
instanceSet := false
if v, ok := m["instance_id"]; ok {
instanceSet = v.(string) != ""
@@ -395,7 +410,7 @@ func resourceAwsRouteTableHash(v interface{}) int {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
- if v, ok := m["network_interface_id"]; ok && !instanceSet {
+ if v, ok := m["network_interface_id"]; ok && !(instanceSet || natGatewaySet) {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
diff --git a/website/source/docs/providers/aws/r/nat_gateway.html.markdown b/website/source/docs/providers/aws/r/nat_gateway.html.markdown
new file mode 100644
index 0000000000..5f831c043d
--- /dev/null
+++ b/website/source/docs/providers/aws/r/nat_gateway.html.markdown
@@ -0,0 +1,51 @@
+---
+layout: "aws"
+page_title: "AWS: aws_nat_gateway"
+sidebar_current: "docs-aws-resource-nat-gateway"
+description: |-
+ Provides a resource to create a VPC NAT Gateway.
+---
+
+# aws\_nat\_gateway
+
+Provides a resource to create a VPC NAT Gateway.
+
+## Example Usage
+
+```
+resource "aws_nat_gateway" "gw" {
+ allocation_id = "${aws_eip.nat.id}"
+ subnet_id = "${aws_subnet.public.id}"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `allocation_id` - (Required) The Allocation ID of the Elastic IP address for the gateway.
+* `subnet_id` - (Required) The Subnet ID of the subnet in which to place the gateway.
+
+-> **Note:** It's recommended to declare that the NAT Gateway depends on the Internet Gateway for the VPC in which the NAT Gateway's subnet is located. For example:
+
+ resource "aws_internet_gateway" "gw" {
+ vpc_id = "${aws_vpc.main.id}"
+ }
+
+ resource "aws_nat_gateway" "gw" {
+ //other arguments
+
+ depends_on = ["aws_internet_gateway.gw"]
+ }
+
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The ID of the NAT Gateway.
+* `allocation_id` - The Allocation ID of the Elastic IP address for the gateway.
+* `subnet_id` - The Subnet ID of the subnet in which the NAT gateway is placed.
+* `network_interface_id` - The ENI ID of the network interface created by the NAT gateway.
+* `private_ip` - The private IP address of the NAT Gateway.
+* `public_ip` - The public IP address of the NAT Gateway.
diff --git a/website/source/docs/providers/aws/r/route.html.markdown b/website/source/docs/providers/aws/r/route.html.markdown
index 3606555e6f..299e526fd1 100644
--- a/website/source/docs/providers/aws/r/route.html.markdown
+++ b/website/source/docs/providers/aws/r/route.html.markdown
@@ -35,12 +35,14 @@ The following arguments are supported:
* `destination_cidr_block` - (Required) The destination CIDR block.
* `vpc_peering_connection_id` - (Optional) An ID of a VPC peering connection.
* `gateway_id` - (Optional) An ID of a VPC internet gateway or a virtual private gateway.
+* `nat_gateway_id` - (Optional) An ID of a VPC NAT gateway.
* `instance_id` - (Optional) An ID of a NAT instance.
* `network_interface_id` - (Optional) An ID of a network interface.
-Each route must contain either a `gateway_id`, an `instance_id` or a `vpc_peering_connection_id`
-or a `network_interface_id`. Note that the default route, mapping the VPC's CIDR block to "local",
-is created implicitly and cannot be specified.
+Each route must contain either a `gateway_id`, a `nat_gateway_id`, an
+`instance_id` or a `vpc_peering_connection_id` or a `network_interface_id`.
+Note that the default route, mapping the VPC's CIDR block to "local", is
+created implicitly and cannot be specified.
## Attributes Reference
@@ -53,5 +55,6 @@ will be exported as an attribute once the resource is created.
* `destination_cidr_block` - The destination CIDR block.
* `vpc_peering_connection_id` - An ID of a VPC peering connection.
* `gateway_id` - An ID of a VPC internet gateway or a virtual private gateway.
+* `nat_gateway_id` - An ID of a VPC NAT gateway.
* `instance_id` - An ID of a NAT instance.
* `network_interface_id` - An ID of a network interface.
diff --git a/website/source/docs/providers/aws/r/route_table.html.markdown b/website/source/docs/providers/aws/r/route_table.html.markdown
index e751b71933..0b9c036c1c 100644
--- a/website/source/docs/providers/aws/r/route_table.html.markdown
+++ b/website/source/docs/providers/aws/r/route_table.html.markdown
@@ -45,13 +45,14 @@ Each route supports the following:
* `cidr_block` - (Required) The CIDR block of the route.
* `gateway_id` - (Optional) The Internet Gateway ID.
+* `nat_gateway_id` - (Optional) The NAT Gateway ID.
* `instance_id` - (Optional) The EC2 instance ID.
* `vpc_peering_connection_id` - (Optional) The VPC Peering ID.
* `network_interface_id` - (Optional) The ID of the elastic network interface (eni) to use.
-Each route must contain either a `gateway_id`, an `instance_id` or a `vpc_peering_connection_id`
-or a `network_interface_id`. Note that the default route, mapping the VPC's CIDR block to "local",
-is created implicitly and cannot be specified.
+Each route must contain either a `gateway_id`, an `instance_id`, a `nat_gateway_id`, a
+`vpc_peering_connection_id` or a `network_interface_id`. Note that the default route, mapping
+the VPC's CIDR block to "local", is created implicitly and cannot be specified.
## Attributes Reference
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb
index c2df5bbf5c..56feb49709 100644
--- a/website/source/layouts/aws.erb
+++ b/website/source/layouts/aws.erb
@@ -530,6 +530,10 @@
aws_main_route_table_association
+ >
+ aws_nat_gateway
+
+
>
aws_network_acl
From 6bf1011df4f4325d477747badd2236dfd3768224 Mon Sep 17 00:00:00 2001
From: Jesse Szwedko
Date: Fri, 18 Dec 2015 19:56:58 +0000
Subject: [PATCH 283/664] Validate type earlier for
aws_directory_service_directory
Also DRY it up a little
---
...esource_aws_directory_service_directory.go | 32 ++++++++++++-------
1 file changed, 21 insertions(+), 11 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_directory_service_directory.go b/builtin/providers/aws/resource_aws_directory_service_directory.go
index 3eb3d941dd..33c31957fb 100644
--- a/builtin/providers/aws/resource_aws_directory_service_directory.go
+++ b/builtin/providers/aws/resource_aws_directory_service_directory.go
@@ -12,6 +12,11 @@ import (
"github.com/hashicorp/terraform/helper/resource"
)
+var directoryCreationFuncs = map[string]func(*directoryservice.DirectoryService, *schema.ResourceData) (string, error){
+ "SimpleAD": createSimpleDirectoryService,
+ "MicrosoftAD": createActiveDirectoryService,
+}
+
func resourceAwsDirectoryServiceDirectory() *schema.Resource {
return &schema.Resource{
Create: resourceAwsDirectoryServiceDirectoryCreate,
@@ -92,6 +97,17 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource {
Optional: true,
Default: "SimpleAD",
ForceNew: true,
+ ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+ validTypes := []string{"SimpleAD", "MicrosoftAD"}
+ value := v.(string)
+ for validType, _ := range directoryCreationFuncs {
+ if validType == value {
+ return
+ }
+ }
+ es = append(es, fmt.Errorf("%q must be one of %q", k, validTypes))
+ return
+ },
},
},
}
@@ -184,19 +200,13 @@ func createActiveDirectoryService(dsconn *directoryservice.DirectoryService, d *
func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta interface{}) error {
dsconn := meta.(*AWSClient).dsconn
- var (
- directoryId string
- err error
- )
-
- switch d.Get("type").(string) {
- case "SimpleAD":
- directoryId, err = createSimpleDirectoryService(dsconn, d)
- case "MicrosoftAD":
- directoryId, err = createActiveDirectoryService(dsconn, d)
- default:
+ creationFunc, ok := directoryCreationFuncs[d.Get("type").(string)]
+ if !ok {
+ // Shouldn't happen as this is validated above
return fmt.Errorf("Unsupported directory type: %s", d.Get("type"))
}
+
+ directoryId, err := creationFunc(dsconn, d)
if err != nil {
return err
}
From 2d063818242210334194c31c50816b421abca87c Mon Sep 17 00:00:00 2001
From: Jesse Szwedko
Date: Fri, 18 Dec 2015 20:07:34 +0000
Subject: [PATCH 284/664] Increase aws_directory_service_directory timeouts
According to the AWS docs, creating a Microsoft AD directory can take up
to 25 minutes.
---
.../providers/aws/resource_aws_directory_service_directory.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_directory_service_directory.go b/builtin/providers/aws/resource_aws_directory_service_directory.go
index 33c31957fb..c22d972f7b 100644
--- a/builtin/providers/aws/resource_aws_directory_service_directory.go
+++ b/builtin/providers/aws/resource_aws_directory_service_directory.go
@@ -232,7 +232,7 @@ func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta int
d.Id(), *ds.Stage)
return ds, *ds.Stage, nil
},
- Timeout: 10 * time.Minute,
+ Timeout: 30 * time.Minute,
}
if _, err := stateConf.WaitForState(); err != nil {
return fmt.Errorf(
@@ -355,7 +355,7 @@ func resourceAwsDirectoryServiceDirectoryDelete(d *schema.ResourceData, meta int
d.Id(), *ds.Stage)
return ds, *ds.Stage, nil
},
- Timeout: 10 * time.Minute,
+ Timeout: 30 * time.Minute,
}
if _, err := stateConf.WaitForState(); err != nil {
return fmt.Errorf(
From 3990853ae225156cb37ff96c2bf3c8278d40aaa6 Mon Sep 17 00:00:00 2001
From: Clint
Date: Fri, 18 Dec 2015 14:10:28 -0600
Subject: [PATCH 285/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6d945112f0..4ba2f312dc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,6 +23,7 @@ IMPROVEMENTS:
* provider/aws: Add support for `skip_final_snapshot` to `aws_db_instance` [GH-3853]
* provider/aws: Adding support for Tags to DB SecurityGroup [GH-4260]
* provider/aws: Adding Tag support for DB Param Groups [GH-4259]
+ * provider/aws: Fix issue with updated route ids for VPC Endpoints [GH-4264]
* provider/aws: Validate IOPs for EBS Volumes [GH-4146]
* provider/aws: DB Subnet group arn output [GH-4261]
* provider/aws: Allow changing private IPs for ENIs [GH-4307]
From 3b21fbc1bc5059ed7485ac3211f1fbffb87ce1a5 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Fri, 18 Dec 2015 14:18:30 -0600
Subject: [PATCH 286/664] provider/aws: Update VPC Endpoint to correctly set
route table ids, and read after update
---
builtin/providers/aws/resource_aws_vpc_endpoint.go | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_vpc_endpoint.go b/builtin/providers/aws/resource_aws_vpc_endpoint.go
index c35e5f9489..1b971c64df 100644
--- a/builtin/providers/aws/resource_aws_vpc_endpoint.go
+++ b/builtin/providers/aws/resource_aws_vpc_endpoint.go
@@ -103,7 +103,9 @@ func resourceAwsVPCEndpointRead(d *schema.ResourceData, meta interface{}) error
d.Set("vpc_id", vpce.VpcId)
d.Set("policy", normalizeJson(*vpce.PolicyDocument))
d.Set("service_name", vpce.ServiceName)
- d.Set("route_table_ids", vpce.RouteTableIds)
+ if err := d.Set("route_table_ids", aws.StringValueSlice(vpce.RouteTableIds)); err != nil {
+ return err
+ }
return nil
}
@@ -142,7 +144,7 @@ func resourceAwsVPCEndpointUpdate(d *schema.ResourceData, meta interface{}) erro
}
log.Printf("[DEBUG] VPC Endpoint %q updated", input.VpcEndpointId)
- return nil
+ return resourceAwsVPCEndpointRead(d, meta)
}
func resourceAwsVPCEndpointDelete(d *schema.ResourceData, meta interface{}) error {
From 0eb1f82a1fdbb389ba1e971d5d92ebf7ea0f7876 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Fri, 18 Dec 2015 14:54:01 -0600
Subject: [PATCH 287/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4ba2f312dc..abc025cf18 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@ IMPROVEMENTS:
* core: Change set internals for performance improvements [GH-3992]
* core: Support HTTP basic auth in consul remote state [GH-4166]
* core: Improve error message on resource arity mismatch [GH-4244]
+ * core: Add support for unary operators + and - to the interpolation syntax [GH-3621]
* provider/aws: Add `placement_group` as an option for `aws_autoscaling_group` [GH-3704]
* provider/aws: Add support for DynamoDB Table StreamSpecifications [GH-4208]
* provider/aws: Add `name_prefix` to Security Groups [GH-4167]
From 0944c24fe6214a785a7b199c6962dd1f74b46540 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Fri, 18 Dec 2015 16:02:09 -0500
Subject: [PATCH 288/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index abc025cf18..aa043189d1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@ FEATURES:
* **New provider: `azurerm` - Preliminary support for Azure Resource Manager** [GH-4226]
* **New provider: `mysql` - Create MySQL databases** [GH-3122]
* **New resource: `aws_autoscaling_schedule`** [GH-4256]
+ * **New resource: `aws_nat_gateway`** [GH-4381]
* **New resource: `google_pubsub_topic`** [GH-3671]
* **New resource: `google_pubsub_subscription`** [GH-3671]
* **New resource: `tls_locally_signed_cert`** [GH-3930]
From 48bfd672969c74b6fa0d28492e099970ed179343 Mon Sep 17 00:00:00 2001
From: Jesse Szwedko
Date: Fri, 18 Dec 2015 21:42:54 +0000
Subject: [PATCH 289/664] Add support for creating connectors to
aws_directory_service_directory
This adds support for creating AD Connectors. The process is very similar
to creating Microsoft AD and Simple AD directories, so we reuse the resource.
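A minimal sketch of the connector usage this enables, following the acceptance
test configuration added below (names, IPs, and the referenced network
resources are placeholders):

```
resource "aws_directory_service_directory" "connector" {
  name     = "corp.example.com"
  password = "SuperSecretPassw0rd"
  size     = "Small"
  type     = "ADConnector"

  connect_settings {
    customer_dns_ips  = ["10.0.0.2"]
    customer_username = "Administrator"
    vpc_id            = "${aws_vpc.main.id}"
    subnet_ids        = ["${aws_subnet.a.id}", "${aws_subnet.b.id}"]
  }
}
```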
---
...esource_aws_directory_service_directory.go | 115 +++++++++++++++++-
...ce_aws_directory_service_directory_test.go | 58 +++++++++
builtin/providers/aws/structure.go | 22 ++++
.../directory_service_directory.html.markdown | 16 ++-
4 files changed, 204 insertions(+), 7 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_directory_service_directory.go b/builtin/providers/aws/resource_aws_directory_service_directory.go
index c22d972f7b..b56ca5d105 100644
--- a/builtin/providers/aws/resource_aws_directory_service_directory.go
+++ b/builtin/providers/aws/resource_aws_directory_service_directory.go
@@ -15,6 +15,7 @@ import (
var directoryCreationFuncs = map[string]func(*directoryservice.DirectoryService, *schema.ResourceData) (string, error){
"SimpleAD": createSimpleDirectoryService,
"MicrosoftAD": createActiveDirectoryService,
+ "ADConnector": createDirectoryConnector,
}
func resourceAwsDirectoryServiceDirectory() *schema.Resource {
@@ -59,7 +60,8 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource {
},
"vpc_settings": &schema.Schema{
Type: schema.TypeList,
- Required: true,
+ Optional: true,
+ ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"subnet_ids": &schema.Schema{
@@ -77,6 +79,39 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource {
},
},
},
+ "connect_settings": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "customer_username": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "customer_dns_ips": &schema.Schema{
+ Type: schema.TypeSet,
+ Required: true,
+ ForceNew: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ },
+ "subnet_ids": &schema.Schema{
+ Type: schema.TypeSet,
+ Required: true,
+ ForceNew: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ },
+ "vpc_id": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ },
+ },
+ },
"enable_sso": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
@@ -114,7 +149,9 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource {
}
func buildVpcSettings(d *schema.ResourceData) (vpcSettings *directoryservice.DirectoryVpcSettings, err error) {
- if v, ok := d.GetOk("vpc_settings"); ok {
+ if v, ok := d.GetOk("vpc_settings"); !ok {
+ return nil, fmt.Errorf("vpc_settings is required for type = SimpleAD or MicrosoftAD")
+ } else {
settings := v.([]interface{})
if len(settings) > 1 {
@@ -136,6 +173,72 @@ func buildVpcSettings(d *schema.ResourceData) (vpcSettings *directoryservice.Dir
return vpcSettings, nil
}
+func buildConnectSettings(d *schema.ResourceData) (connectSettings *directoryservice.DirectoryConnectSettings, err error) {
+ if v, ok := d.GetOk("connect_settings"); !ok {
+ return nil, fmt.Errorf("connect_settings is required for type = ADConnector")
+ } else {
+ settings := v.([]interface{})
+
+ if len(settings) > 1 {
+ return nil, fmt.Errorf("Only a single connect_settings block is expected")
+ } else if len(settings) == 1 {
+ s := settings[0].(map[string]interface{})
+
+ var subnetIds []*string
+ for _, id := range s["subnet_ids"].(*schema.Set).List() {
+ subnetIds = append(subnetIds, aws.String(id.(string)))
+ }
+
+ var customerDnsIps []*string
+ for _, id := range s["customer_dns_ips"].(*schema.Set).List() {
+ customerDnsIps = append(customerDnsIps, aws.String(id.(string)))
+ }
+
+ connectSettings = &directoryservice.DirectoryConnectSettings{
+ CustomerDnsIps: customerDnsIps,
+ CustomerUserName: aws.String(s["customer_username"].(string)),
+ SubnetIds: subnetIds,
+ VpcId: aws.String(s["vpc_id"].(string)),
+ }
+ }
+ }
+
+ return connectSettings, nil
+}
+
+func createDirectoryConnector(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) (directoryId string, err error) {
+ if _, ok := d.GetOk("size"); !ok {
+ return "", fmt.Errorf("size is required for type = ADConnector")
+ }
+
+ input := directoryservice.ConnectDirectoryInput{
+ Name: aws.String(d.Get("name").(string)),
+ Password: aws.String(d.Get("password").(string)),
+ Size: aws.String(d.Get("size").(string)),
+ }
+
+ if v, ok := d.GetOk("description"); ok {
+ input.Description = aws.String(v.(string))
+ }
+ if v, ok := d.GetOk("short_name"); ok {
+ input.ShortName = aws.String(v.(string))
+ }
+
+ input.ConnectSettings, err = buildConnectSettings(d)
+ if err != nil {
+ return "", err
+ }
+
+ log.Printf("[DEBUG] Creating Directory Connector: %s", input)
+ out, err := dsconn.ConnectDirectory(&input)
+ if err != nil {
+ return "", err
+ }
+ log.Printf("[DEBUG] Directory Connector created: %s", out)
+
+ return *out.DirectoryId, nil
+}
+
func createSimpleDirectoryService(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) (directoryId string, err error) {
if _, ok := d.GetOk("size"); !ok {
return "", fmt.Errorf("size is required for type = SimpleAD")
@@ -307,7 +410,12 @@ func resourceAwsDirectoryServiceDirectoryRead(d *schema.ResourceData, meta inter
if dir.Description != nil {
d.Set("description", *dir.Description)
}
- d.Set("dns_ip_addresses", schema.NewSet(schema.HashString, flattenStringList(dir.DnsIpAddrs)))
+
+ if *dir.Type == "ADConnector" {
+ d.Set("dns_ip_addresses", schema.NewSet(schema.HashString, flattenStringList(dir.ConnectSettings.ConnectIps)))
+ } else {
+ d.Set("dns_ip_addresses", schema.NewSet(schema.HashString, flattenStringList(dir.DnsIpAddrs)))
+ }
d.Set("name", *dir.Name)
if dir.ShortName != nil {
d.Set("short_name", *dir.ShortName)
@@ -317,6 +425,7 @@ func resourceAwsDirectoryServiceDirectoryRead(d *schema.ResourceData, meta inter
}
d.Set("type", *dir.Type)
d.Set("vpc_settings", flattenDSVpcSettings(dir.VpcSettings))
+ d.Set("connect_settings", flattenDSConnectSettings(dir.DnsIpAddrs, dir.ConnectSettings))
d.Set("enable_sso", *dir.SsoEnabled)
return nil
diff --git a/builtin/providers/aws/resource_aws_directory_service_directory_test.go b/builtin/providers/aws/resource_aws_directory_service_directory_test.go
index 0c71996d93..31848a4897 100644
--- a/builtin/providers/aws/resource_aws_directory_service_directory_test.go
+++ b/builtin/providers/aws/resource_aws_directory_service_directory_test.go
@@ -43,6 +43,22 @@ func TestAccAWSDirectoryServiceDirectory_microsoft(t *testing.T) {
})
}
+func TestAccAWSDirectoryServiceDirectory_connector(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckDirectoryServiceDirectoryDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccDirectoryServiceDirectoryConfig_connector,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckServiceDirectoryExists("aws_directory_service_directory.connector"),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAWSDirectoryServiceDirectory_withAliasAndSso(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -208,6 +224,48 @@ resource "aws_subnet" "bar" {
}
`
+const testAccDirectoryServiceDirectoryConfig_connector = `
+resource "aws_directory_service_directory" "bar" {
+ name = "corp.notexample.com"
+ password = "SuperSecretPassw0rd"
+ size = "Small"
+
+ vpc_settings {
+ vpc_id = "${aws_vpc.main.id}"
+ subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
+ }
+}
+
+resource "aws_directory_service_directory" "connector" {
+ name = "corp.notexample.com"
+ password = "SuperSecretPassw0rd"
+ size = "Small"
+ type = "ADConnector"
+
+ connect_settings {
+ customer_dns_ips = ["${aws_directory_service_directory.bar.dns_ip_addresses}"]
+ customer_username = "Administrator"
+ vpc_id = "${aws_vpc.main.id}"
+ subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
+ }
+}
+
+resource "aws_vpc" "main" {
+ cidr_block = "10.0.0.0/16"
+}
+
+resource "aws_subnet" "foo" {
+ vpc_id = "${aws_vpc.main.id}"
+ availability_zone = "us-west-2a"
+ cidr_block = "10.0.1.0/24"
+}
+resource "aws_subnet" "bar" {
+ vpc_id = "${aws_vpc.main.id}"
+ availability_zone = "us-west-2b"
+ cidr_block = "10.0.2.0/24"
+}
+`
+
const testAccDirectoryServiceDirectoryConfig_microsoft = `
resource "aws_directory_service_directory" "bar" {
name = "corp.notexample.com"
diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go
index 748ecc88be..1bcca71696 100644
--- a/builtin/providers/aws/structure.go
+++ b/builtin/providers/aws/structure.go
@@ -651,6 +651,28 @@ func flattenDSVpcSettings(
s *directoryservice.DirectoryVpcSettingsDescription) []map[string]interface{} {
settings := make(map[string]interface{}, 0)
+ if s == nil {
+ return nil
+ }
+
+ settings["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SubnetIds))
+ settings["vpc_id"] = *s.VpcId
+
+ return []map[string]interface{}{settings}
+}
+
+func flattenDSConnectSettings(
+ customerDnsIps []*string,
+ s *directoryservice.DirectoryConnectSettingsDescription) []map[string]interface{} {
+ if s == nil {
+ return nil
+ }
+
+ settings := make(map[string]interface{}, 0)
+
+ settings["customer_dns_ips"] = schema.NewSet(schema.HashString, flattenStringList(customerDnsIps))
+ settings["connect_ips"] = schema.NewSet(schema.HashString, flattenStringList(s.ConnectIps))
+ settings["customer_username"] = *s.CustomerUserName
settings["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SubnetIds))
settings["vpc_id"] = *s.VpcId
diff --git a/website/source/docs/providers/aws/r/directory_service_directory.html.markdown b/website/source/docs/providers/aws/r/directory_service_directory.html.markdown
index 7a8854487d..83f07649b1 100644
--- a/website/source/docs/providers/aws/r/directory_service_directory.html.markdown
+++ b/website/source/docs/providers/aws/r/directory_service_directory.html.markdown
@@ -45,9 +45,10 @@ resource "aws_subnet" "bar" {
The following arguments are supported:
* `name` - (Required) The fully qualified name for the directory, such as `corp.example.com`
-* `password` - (Required) The password for the directory administrator.
-* `size` - (Required) The size of the directory (`Small` or `Large` are accepted values). Only used when `type` is `SimpleAD`.
-* `vpc_settings` - (Required) VPC related information about the directory. Fields documented below.
+* `password` - (Required) The password for the directory administrator or connector user.
+* `size` - (Required for `SimpleAD` and `ADConnector`) The size of the directory (`Small` or `Large` are accepted values).
+* `vpc_settings` - (Required for `SimpleAD` and `MicrosoftAD`) VPC related information about the directory. Fields documented below.
+* `connect_settings` - (Required for `ADConnector`) Connector related information about the directory. Fields documented below.
* `alias` - (Optional) The alias for the directory (must be unique amongst all aliases in AWS). Required for `enable_sso`.
* `description` - (Optional) A textual description for the directory.
* `short_name` - (Optional) The short name of the directory, such as `CORP`.
@@ -59,10 +60,17 @@ The following arguments are supported:
* `subnet_ids` - (Required) The identifiers of the subnets for the directory servers (min. 2 subnets in 2 different AZs).
* `vpc_id` - (Required) The identifier of the VPC that the directory is in.
+**connect\_settings** supports the following:
+
+* `customer_username` - (Required) The username corresponding to the password provided.
+* `customer_dns_ips` - (Required) The DNS IP addresses of the domain to connect to.
+* `subnet_ids` - (Required) The identifiers of the subnets for the directory servers (min. 2 subnets in 2 different AZs).
+* `vpc_id` - (Required) The identifier of the VPC that the directory is in.
+
## Attributes Reference
The following attributes are exported:
* `id` - The directory identifier.
* `access_url` - The access URL for the directory, such as `http://alias.awsapps.com`.
-* `dns_ip_addresses` - A list of IP addresses of the DNS servers for the directory.
+* `dns_ip_addresses` - A list of IP addresses of the DNS servers for the directory or connector.
From a3cc4a2670bc6dfd5a626721e1b1d02ce0219cd4 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Fri, 18 Dec 2015 17:34:15 -0500
Subject: [PATCH 290/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index aa043189d1..f20ac1f1ad 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -64,6 +64,7 @@ BUG FIXES:
* provider/aws: Opsworks stack SSH key is write-only [GH-4241]
* provider/aws: Fix issue with ElasticSearch Domain `access_policies` always appear changed [GH-4245]
* provider/aws: Fix issue with nil parameter group value causing panic in `aws_db_parameter_group` [GH-4318]
+ * provider/aws: Fix issue with Elastic IPs not recognizing when they have been unassigned manually [GH-4387]
* provider/azure: Update for [breaking change to upstream client library](https://github.com/Azure/azure-sdk-for-go/commit/68d50cb53a73edfeb7f17f5e86cdc8eb359a9528). [GH-4300]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
From 2df8d7d9b0c2d481bfc82963844c7ea983870e38 Mon Sep 17 00:00:00 2001
From: stack72
Date: Fri, 11 Dec 2015 18:41:33 +0000
Subject: [PATCH 291/664] Initial Scaffolding of the AWS Network ACL Entry
resource
---
builtin/providers/aws/network_acl_entry.go | 9 +
builtin/providers/aws/provider.go | 1 +
.../providers/aws/resource_aws_network_acl.go | 152 ++++++------
.../aws/resource_aws_network_acl_rule.go | 229 ++++++++++++++++++
.../aws/resource_aws_network_acl_rule_test.go | 110 +++++++++
5 files changed, 427 insertions(+), 74 deletions(-)
create mode 100644 builtin/providers/aws/resource_aws_network_acl_rule.go
create mode 100644 builtin/providers/aws/resource_aws_network_acl_rule_test.go
diff --git a/builtin/providers/aws/network_acl_entry.go b/builtin/providers/aws/network_acl_entry.go
index 22b909bceb..5a09746d64 100644
--- a/builtin/providers/aws/network_acl_entry.go
+++ b/builtin/providers/aws/network_acl_entry.go
@@ -69,6 +69,15 @@ func flattenNetworkAclEntries(list []*ec2.NetworkAclEntry) []map[string]interfac
}
+func protocolStrings(protocolIntegers map[string]int) map[int]string {
+ protocolStrings := make(map[int]string, len(protocolIntegers))
+ for k, v := range protocolIntegers {
+ protocolStrings[v] = k
+ }
+
+ return protocolStrings
+}
+
func protocolIntegers() map[string]int {
var protocolIntegers = make(map[string]int)
protocolIntegers = map[string]int{
diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index c13934c562..6b0c8db2e7 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -153,6 +153,7 @@ func Provider() terraform.ResourceProvider {
"aws_main_route_table_association": resourceAwsMainRouteTableAssociation(),
"aws_nat_gateway": resourceAwsNatGateway(),
"aws_network_acl": resourceAwsNetworkAcl(),
+ "aws_network_acl_rule": resourceAwsNetworkAclRule(),
"aws_network_interface": resourceAwsNetworkInterface(),
"aws_opsworks_stack": resourceAwsOpsworksStack(),
"aws_opsworks_java_app_layer": resourceAwsOpsworksJavaAppLayer(),
diff --git a/builtin/providers/aws/resource_aws_network_acl.go b/builtin/providers/aws/resource_aws_network_acl.go
index 20144f7325..97916f9f09 100644
--- a/builtin/providers/aws/resource_aws_network_acl.go
+++ b/builtin/providers/aws/resource_aws_network_acl.go
@@ -50,6 +50,7 @@ func resourceAwsNetworkAcl() *schema.Resource {
Type: schema.TypeSet,
Required: false,
Optional: true,
+ Computed: false,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"from_port": &schema.Schema{
@@ -92,6 +93,7 @@ func resourceAwsNetworkAcl() *schema.Resource {
Type: schema.TypeSet,
Required: false,
Optional: true,
+ Computed: false,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"from_port": &schema.Schema{
@@ -316,87 +318,89 @@ func resourceAwsNetworkAclUpdate(d *schema.ResourceData, meta interface{}) error
func updateNetworkAclEntries(d *schema.ResourceData, entryType string, conn *ec2.EC2) error {
- o, n := d.GetChange(entryType)
+ if d.HasChange(entryType) {
+ o, n := d.GetChange(entryType)
- if o == nil {
- o = new(schema.Set)
- }
- if n == nil {
- n = new(schema.Set)
- }
-
- os := o.(*schema.Set)
- ns := n.(*schema.Set)
-
- toBeDeleted, err := expandNetworkAclEntries(os.Difference(ns).List(), entryType)
- if err != nil {
- return err
- }
- for _, remove := range toBeDeleted {
-
- // AWS includes default rules with all network ACLs that can be
- // neither modified nor destroyed. They have a custom rule
- // number that is out of bounds for any other rule. If we
- // encounter it, just continue. There's no work to be done.
- if *remove.RuleNumber == 32767 {
- continue
+ if o == nil {
+ o = new(schema.Set)
+ }
+ if n == nil {
+ n = new(schema.Set)
}
- // Delete old Acl
- _, err := conn.DeleteNetworkAclEntry(&ec2.DeleteNetworkAclEntryInput{
- NetworkAclId: aws.String(d.Id()),
- RuleNumber: remove.RuleNumber,
- Egress: remove.Egress,
- })
+ os := o.(*schema.Set)
+ ns := n.(*schema.Set)
+
+ toBeDeleted, err := expandNetworkAclEntries(os.Difference(ns).List(), entryType)
if err != nil {
- return fmt.Errorf("Error deleting %s entry: %s", entryType, err)
- }
- }
-
- toBeCreated, err := expandNetworkAclEntries(ns.Difference(os).List(), entryType)
- if err != nil {
- return err
- }
- for _, add := range toBeCreated {
- // Protocol -1 rules don't store ports in AWS. Thus, they'll always
- // hash differently when being read out of the API. Force the user
- // to set from_port and to_port to 0 for these rules, to keep the
- // hashing consistent.
- if *add.Protocol == "-1" {
- to := *add.PortRange.To
- from := *add.PortRange.From
- expected := &expectedPortPair{
- to_port: 0,
- from_port: 0,
- }
- if ok := validatePorts(to, from, *expected); !ok {
- return fmt.Errorf(
- "to_port (%d) and from_port (%d) must both be 0 to use the the 'all' \"-1\" protocol!",
- to, from)
- }
- }
-
- // AWS mutates the CIDR block into a network implied by the IP and
- // mask provided. This results in hashing inconsistencies between
- // the local config file and the state returned by the API. Error
- // if the user provides a CIDR block with an inappropriate mask
- if err := validateCIDRBlock(*add.CidrBlock); err != nil {
return err
}
+ for _, remove := range toBeDeleted {
- // Add new Acl entry
- _, connErr := conn.CreateNetworkAclEntry(&ec2.CreateNetworkAclEntryInput{
- NetworkAclId: aws.String(d.Id()),
- CidrBlock: add.CidrBlock,
- Egress: add.Egress,
- PortRange: add.PortRange,
- Protocol: add.Protocol,
- RuleAction: add.RuleAction,
- RuleNumber: add.RuleNumber,
- IcmpTypeCode: add.IcmpTypeCode,
- })
- if connErr != nil {
- return fmt.Errorf("Error creating %s entry: %s", entryType, connErr)
+ // AWS includes default rules with all network ACLs that can be
+ // neither modified nor destroyed. They have a custom rule
+ // number that is out of bounds for any other rule. If we
+ // encounter it, just continue. There's no work to be done.
+ if *remove.RuleNumber == 32767 {
+ continue
+ }
+
+ // Delete old Acl
+ _, err := conn.DeleteNetworkAclEntry(&ec2.DeleteNetworkAclEntryInput{
+ NetworkAclId: aws.String(d.Id()),
+ RuleNumber: remove.RuleNumber,
+ Egress: remove.Egress,
+ })
+ if err != nil {
+ return fmt.Errorf("Error deleting %s entry: %s", entryType, err)
+ }
+ }
+
+ toBeCreated, err := expandNetworkAclEntries(ns.Difference(os).List(), entryType)
+ if err != nil {
+ return err
+ }
+ for _, add := range toBeCreated {
+ // Protocol -1 rules don't store ports in AWS. Thus, they'll always
+ // hash differently when being read out of the API. Force the user
+ // to set from_port and to_port to 0 for these rules, to keep the
+ // hashing consistent.
+ if *add.Protocol == "-1" {
+ to := *add.PortRange.To
+ from := *add.PortRange.From
+ expected := &expectedPortPair{
+ to_port: 0,
+ from_port: 0,
+ }
+ if ok := validatePorts(to, from, *expected); !ok {
+ return fmt.Errorf(
+ "to_port (%d) and from_port (%d) must both be 0 to use the the 'all' \"-1\" protocol!",
+ to, from)
+ }
+ }
+
+ // AWS mutates the CIDR block into a network implied by the IP and
+ // mask provided. This results in hashing inconsistencies between
+ // the local config file and the state returned by the API. Error
+ // if the user provides a CIDR block with an inappropriate mask
+ if err := validateCIDRBlock(*add.CidrBlock); err != nil {
+ return err
+ }
+
+ // Add new Acl entry
+ _, connErr := conn.CreateNetworkAclEntry(&ec2.CreateNetworkAclEntryInput{
+ NetworkAclId: aws.String(d.Id()),
+ CidrBlock: add.CidrBlock,
+ Egress: add.Egress,
+ PortRange: add.PortRange,
+ Protocol: add.Protocol,
+ RuleAction: add.RuleAction,
+ RuleNumber: add.RuleNumber,
+ IcmpTypeCode: add.IcmpTypeCode,
+ })
+ if connErr != nil {
+ return fmt.Errorf("Error creating %s entry: %s", entryType, connErr)
+ }
}
}
return nil
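
The refactored updateNetworkAclEntries only does work when d.HasChange reports a diff, then treats the old and new entries as sets: entries present only in the old set are deleted, entries present only in the new set are created, and AWS's immutable default rule (number 32767) is skipped. A dependency-free sketch of that set-difference pattern, with bare rule numbers standing in for full entries:

```go
package main

import "fmt"

// difference returns the rule numbers present in a but not in b.
func difference(a, b map[int]bool) []int {
	var out []int
	for n := range a {
		if !b[n] {
			out = append(out, n)
		}
	}
	return out
}

func main() {
	oldSet := map[int]bool{100: true, 32767: true}
	newSet := map[int]bool{300: true, 32767: true}

	// Old-only entries are deleted, skipping AWS's immutable default rule.
	for _, n := range difference(oldSet, newSet) {
		if n == 32767 {
			continue
		}
		fmt.Println("delete rule", n) // delete rule 100
	}
	// New-only entries are created.
	for _, n := range difference(newSet, oldSet) {
		fmt.Println("create rule", n) // create rule 300
	}
}
```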
diff --git a/builtin/providers/aws/resource_aws_network_acl_rule.go b/builtin/providers/aws/resource_aws_network_acl_rule.go
new file mode 100644
index 0000000000..363ef0912a
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_network_acl_rule.go
@@ -0,0 +1,229 @@
+package aws
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsNetworkAclRule() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsNetworkAclRuleCreate,
+ Read: resourceAwsNetworkAclRuleRead,
+ Delete: resourceAwsNetworkAclRuleDelete,
+
+ Schema: map[string]*schema.Schema{
+ "network_acl_id": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "rule_number": &schema.Schema{
+ Type: schema.TypeInt,
+ Required: true,
+ ForceNew: true,
+ },
+ "egress": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ Default: false,
+ },
+ "protocol": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "rule_action": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "cidr_block": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "from_port": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "to_port": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "icmp_type": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "icmp_code": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).ec2conn
+
+ protocol := d.Get("protocol").(string)
+ p, protocolErr := strconv.Atoi(protocol)
+ if protocolErr != nil {
+ var ok bool
+ p, ok = protocolIntegers()[protocol]
+ if !ok {
+ return fmt.Errorf("Invalid Protocol %s for rule %#v", protocol, d.Get("rule_number").(int))
+ }
+ }
+ log.Printf("[INFO] Transformed Protocol %s into %d", protocol, p)
+
+ params := &ec2.CreateNetworkAclEntryInput{
+ NetworkAclId: aws.String(d.Get("network_acl_id").(string)),
+ Egress: aws.Bool(d.Get("egress").(bool)),
+ RuleNumber: aws.Int64(int64(d.Get("rule_number").(int))),
+ Protocol: aws.String(strconv.Itoa(p)),
+ CidrBlock: aws.String(d.Get("cidr_block").(string)),
+ RuleAction: aws.String(d.Get("rule_action").(string)),
+ PortRange: &ec2.PortRange{
+ From: aws.Int64(int64(d.Get("from_port").(int))),
+ To: aws.Int64(int64(d.Get("to_port").(int))),
+ },
+ }
+
+ // Specify additional required fields for ICMP
+ if p == 1 {
+ params.IcmpTypeCode = &ec2.IcmpTypeCode{}
+ if v, ok := d.GetOk("icmp_code"); ok {
+ params.IcmpTypeCode.Code = aws.Int64(int64(v.(int)))
+ }
+ if v, ok := d.GetOk("icmp_type"); ok {
+ params.IcmpTypeCode.Type = aws.Int64(int64(v.(int)))
+ }
+ }
+
+ log.Printf("[INFO] Creating Network Acl Rule: %d (%s)", d.Get("rule_number").(int), d.Get("egress").(bool))
+ _, err := conn.CreateNetworkAclEntry(params)
+ if err != nil {
+ return fmt.Errorf("Error Creating Network Acl Rule: %s", err.Error())
+ }
+ d.SetId(networkAclIdRuleNumberEgressHash(d.Get("network_acl_id").(string), d.Get("rule_number").(int), d.Get("egress").(bool), d.Get("protocol").(string)))
+ return resourceAwsNetworkAclRuleRead(d, meta)
+}
+
+func resourceAwsNetworkAclRuleRead(d *schema.ResourceData, meta interface{}) error {
+ resp, err := findNetworkAclRule(d, meta)
+ if err != nil {
+ return err
+ }
+
+ d.Set("rule_number", resp.RuleNumber)
+ d.Set("cidr_block", resp.CidrBlock)
+ d.Set("egress", resp.Egress)
+ if resp.IcmpTypeCode != nil {
+ d.Set("icmp_code", resp.IcmpTypeCode.Code)
+ d.Set("icmp_type", resp.IcmpTypeCode.Type)
+ }
+ if resp.PortRange != nil {
+ d.Set("from_port", resp.PortRange.From)
+ d.Set("to_port", resp.PortRange.To)
+ }
+
+ d.Set("rule_action", resp.RuleAction)
+
+ p, protocolErr := strconv.Atoi(*resp.Protocol)
+ log.Printf("[INFO] Converting the protocol %v", p)
+ if protocolErr == nil {
+ var ok bool
+ protocol, ok := protocolStrings(protocolIntegers())[p]
+ if !ok {
+ return fmt.Errorf("Invalid Protocol %s for rule %#v", *resp.Protocol, d.Get("rule_number").(int))
+ }
+ log.Printf("[INFO] Transformed Protocol %s back into %s", *resp.Protocol, protocol)
+ d.Set("protocol", protocol)
+ }
+
+ return nil
+}
+
+func resourceAwsNetworkAclRuleDelete(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).ec2conn
+
+ params := &ec2.DeleteNetworkAclEntryInput{
+ NetworkAclId: aws.String(d.Get("network_acl_id").(string)),
+ RuleNumber: aws.Int64(int64(d.Get("rule_number").(int))),
+ Egress: aws.Bool(d.Get("egress").(bool)),
+ }
+
+ log.Printf("[INFO] Deleting Network Acl Rule: %s", d.Id())
+ _, err := conn.DeleteNetworkAclEntry(params)
+ if err != nil {
+ return fmt.Errorf("Error Deleting Network Acl Rule: %s", err.Error())
+ }
+
+ return nil
+}
+
+func findNetworkAclRule(d *schema.ResourceData, meta interface{}) (*ec2.NetworkAclEntry, error) {
+ conn := meta.(*AWSClient).ec2conn
+
+ filters := make([]*ec2.Filter, 0, 2)
+ ruleNumberFilter := &ec2.Filter{
+ Name: aws.String("entry.rule-number"),
+ Values: []*string{aws.String(fmt.Sprintf("%v", d.Get("rule_number").(int)))},
+ }
+ filters = append(filters, ruleNumberFilter)
+ egressFilter := &ec2.Filter{
+ Name: aws.String("entry.egress"),
+ Values: []*string{aws.String(fmt.Sprintf("%v", d.Get("egress").(bool)))},
+ }
+ filters = append(filters, egressFilter)
+ params := &ec2.DescribeNetworkAclsInput{
+ NetworkAclIds: []*string{aws.String(d.Get("network_acl_id").(string))},
+ Filters: filters,
+ }
+
+ log.Printf("[INFO] Describing Network Acl: %s", d.Get("network_acl_id").(string))
+ log.Printf("[INFO] Describing Network Acl with the Filters %#v", params)
+ resp, err := conn.DescribeNetworkAcls(params)
+ if err != nil {
+ return nil, fmt.Errorf("Error Finding Network Acl Rule %d: %s", d.Get("rule_number").(int), err.Error())
+ }
+
+ if resp == nil || len(resp.NetworkAcls) != 1 || resp.NetworkAcls[0] == nil {
+ return nil, fmt.Errorf(
+ "Expected to find one Network ACL, got: %#v",
+ resp.NetworkAcls)
+ }
+ networkAcl := resp.NetworkAcls[0]
+ if networkAcl.Entries != nil {
+ for _, i := range networkAcl.Entries {
+ if *i.RuleNumber == int64(d.Get("rule_number").(int)) && *i.Egress == d.Get("egress").(bool) {
+ return i, nil
+ }
+ }
+ }
+ return nil, fmt.Errorf(
+ "Expected the Network ACL to have Entries, got: %#v",
+ networkAcl)
+
+}
+
+func networkAclIdRuleNumberEgressHash(networkAclId string, ruleNumber int, egress bool, protocol string) string {
+ var buf bytes.Buffer
+ buf.WriteString(fmt.Sprintf("%s-", networkAclId))
+ buf.WriteString(fmt.Sprintf("%d-", ruleNumber))
+ buf.WriteString(fmt.Sprintf("%t-", egress))
+ buf.WriteString(fmt.Sprintf("%s-", protocol))
+ return fmt.Sprintf("nacl-%d", hashcode.String(buf.String()))
+}
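
resourceAwsNetworkAclRuleCreate accepts the protocol either as a keyword or as a number, falling back to protocolIntegers when strconv.Atoi fails; on read, the numeric string AWS returns is mapped back to a keyword with protocolStrings(protocolIntegers()). A self-contained sketch of that round trip — the maps below are truncated stand-ins for the table in network_acl_entry.go, not the provider's full list:

```go
package main

import (
	"fmt"
	"strconv"
)

// Truncated stand-ins for the helpers in network_acl_entry.go.
func protocolIntegers() map[string]int {
	return map[string]int{"icmp": 1, "tcp": 6, "udp": 17, "all": -1}
}

func protocolStrings(ints map[string]int) map[int]string {
	out := make(map[int]string, len(ints))
	for name, num := range ints {
		out[num] = name
	}
	return out
}

func main() {
	// Create path: accept either a keyword or a number from the config.
	p, err := strconv.Atoi("tcp")
	if err != nil {
		p = protocolIntegers()["tcp"] // 6
	}

	// Read path: AWS returns the protocol as a numeric string, e.g. "6".
	n, _ := strconv.Atoi("6")
	fmt.Println(p, protocolStrings(protocolIntegers())[n]) // 6 tcp
}
```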
diff --git a/builtin/providers/aws/resource_aws_network_acl_rule_test.go b/builtin/providers/aws/resource_aws_network_acl_rule_test.go
new file mode 100644
index 0000000000..d55779d64c
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_network_acl_rule_test.go
@@ -0,0 +1,110 @@
+package aws
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSNetworkAclRule_basic(t *testing.T) {
+ var networkAcl ec2.NetworkAcl
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSNetworkAclRuleDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSNetworkAclRuleBasicConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.bar", &networkAcl),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckAWSNetworkAclRuleDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).ec2conn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_network_acl_rule" {
+ continue
+ }
+
+ rule_number := rs.Primary.Attributes["rule_number"].(int)
+ egress := rs.Primary.Attributes["egress"].(bool)
+
+ req := &ec2.DescribeNetworkAclsInput{
+ NetworkAclIds: []*string{aws.String(rs.Primary.ID)},
+ }
+ resp, err := conn.DescribeNetworkAcls(req)
+ if err == nil {
+ if len(resp.NetworkAcls) > 0 && *resp.NetworkAcls[0].NetworkAclId == rs.Primary.ID {
+ networkAcl := resp.NetworkAcls[0]
+ if networkAcl.Entries != nil {
+ for _, i := range networkAcl.Entries {
+ if *i.RuleNumber == int64(rule_number) && *i.Egress == egress {
+ return fmt.Errorf("Network ACL Rule (%s) still exists.", rs.Primary.ID)
+ }
+ }
+ }
+ }
+ }
+
+ ec2err, ok := err.(awserr.Error)
+ if !ok {
+ return err
+ }
+ // Confirm error code is what we want
+ if ec2err.Code() != "InvalidNetworkAclEntry.NotFound" {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func testAccCheckAWSNetworkAclRuleExists(n string, networkAcl *ec2.NetworkAcl) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No Security Group is set")
+ }
+
+ return nil
+ }
+
+ return nil
+}
+
+const testAccAWSNetworkAclRuleBasicConfig = `
+provider "aws" {
+ region = "us-east-1"
+}
+resource "aws_vpc" "foo" {
+ cidr_block = "10.3.0.0/16"
+}
+resource "aws_network_acl" "bar" {
+ vpc_id = "${aws_vpc.foo.id}"
+}
+resource "aws_network_acl_rule" "bar" {
+ network_acl_id = "${aws_network_acl.bar.id}"
+ rule_number = 200
+ egress = false
+ protocol = "tcp"
+ rule_action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 22
+ to_port = 22
+}
+`
From 96ad95abe99da46b0042df8123b28b29967aaf79 Mon Sep 17 00:00:00 2001
From: stack72
Date: Fri, 18 Dec 2015 16:57:17 +0000
Subject: [PATCH 292/664] Adds an acceptance test for the AWS Network ACL Rules
---
.../aws/resource_aws_network_acl_rule_test.go | 35 +++++++++++++------
1 file changed, 25 insertions(+), 10 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_network_acl_rule_test.go b/builtin/providers/aws/resource_aws_network_acl_rule_test.go
index d55779d64c..10add5d3cb 100644
--- a/builtin/providers/aws/resource_aws_network_acl_rule_test.go
+++ b/builtin/providers/aws/resource_aws_network_acl_rule_test.go
@@ -37,9 +37,6 @@ func testAccCheckAWSNetworkAclRuleDestroy(s *terraform.State) error {
continue
}
- rule_number := rs.Primary.Attributes["rule_number"].(int)
- egress := rs.Primary.Attributes["egress"].(bool)
-
req := &ec2.DescribeNetworkAclsInput{
NetworkAclIds: []*string{aws.String(rs.Primary.ID)},
}
@@ -48,11 +45,7 @@ func testAccCheckAWSNetworkAclRuleDestroy(s *terraform.State) error {
if len(resp.NetworkAcls) > 0 && *resp.NetworkAcls[0].NetworkAclId == rs.Primary.ID {
networkAcl := resp.NetworkAcls[0]
if networkAcl.Entries != nil {
- for _, i := range networkAcl.Entries {
- if *i.RuleNumber == int64(rule_number) && *i.Egress == egress {
- return fmt.Errorf("Network ACL Rule (%s) still exists.", rs.Primary.ID)
- }
- }
+ return fmt.Errorf("Network ACL Entries still exist")
}
}
}
@@ -61,7 +54,6 @@ func testAccCheckAWSNetworkAclRuleDestroy(s *terraform.State) error {
if !ok {
return err
}
- // Confirm error code is what we want
if ec2err.Code() != "InvalidNetworkAclEntry.NotFound" {
return err
}
@@ -71,6 +63,8 @@ func testAccCheckAWSNetworkAclRuleDestroy(s *terraform.State) error {
}
func testAccCheckAWSNetworkAclRuleExists(n string, networkAcl *ec2.NetworkAcl) resource.TestCheckFunc {
+ conn := testAccProvider.Meta().(*AWSClient).ec2conn
+
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
@@ -78,7 +72,28 @@ func testAccCheckAWSNetworkAclRuleExists(n string, networkAcl *ec2.NetworkAcl) r
}
if rs.Primary.ID == "" {
- return fmt.Errorf("No Security Group is set")
+ return fmt.Errorf("No Network ACL Id is set")
+ }
+
+ req := &ec2.DescribeNetworkAclsInput{
+ NetworkAclIds: []*string{aws.String(rs.Primary.ID)},
+ }
+ resp, err := conn.DescribeNetworkAcls(req)
+ if err == nil {
+ if len(resp.NetworkAcls) > 0 && *resp.NetworkAcls[0].NetworkAclId == rs.Primary.ID {
+ networkAcl := resp.NetworkAcls[0]
+ if networkAcl.Entries == nil {
+ return fmt.Errorf("No Network ACL Entries exist")
+ }
+ }
+ }
+
+ ec2err, ok := err.(awserr.Error)
+ if !ok {
+ return err
+ }
+ if ec2err.Code() != "InvalidNetworkAclEntry.NotFound" {
+ return err
}
return nil
From d932d996798d1856906213a84bab82f67ac505e5 Mon Sep 17 00:00:00 2001
From: stack72
Date: Fri, 18 Dec 2015 17:21:11 +0000
Subject: [PATCH 293/664] Adds documentation for the AWS Network ACL Rule
resource
---
.../aws/resource_aws_network_acl_rule.go | 2 +-
.../aws/resource_aws_network_acl_rule_test.go | 4 +-
.../aws/r/network_acl_rule.html.markdown | 53 +++++++++++++++++++
website/source/layouts/aws.erb | 4 ++
4 files changed, 60 insertions(+), 3 deletions(-)
create mode 100644 website/source/docs/providers/aws/r/network_acl_rule.html.markdown
diff --git a/builtin/providers/aws/resource_aws_network_acl_rule.go b/builtin/providers/aws/resource_aws_network_acl_rule.go
index 363ef0912a..ec6e153178 100644
--- a/builtin/providers/aws/resource_aws_network_acl_rule.go
+++ b/builtin/providers/aws/resource_aws_network_acl_rule.go
@@ -112,7 +112,7 @@ func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) e
}
}
- log.Printf("[INFO] Creating Network Acl Rule: %d (%s)", d.Get("rule_number").(int), d.Get("egress").(bool))
+ log.Printf("[INFO] Creating Network Acl Rule: %d (%t)", d.Get("rule_number").(int), d.Get("egress").(bool))
_, err := conn.CreateNetworkAclEntry(params)
if err != nil {
return fmt.Errorf("Error Creating Network Acl Rule: %s", err.Error())
diff --git a/builtin/providers/aws/resource_aws_network_acl_rule_test.go b/builtin/providers/aws/resource_aws_network_acl_rule_test.go
index 10add5d3cb..98767cb573 100644
--- a/builtin/providers/aws/resource_aws_network_acl_rule_test.go
+++ b/builtin/providers/aws/resource_aws_network_acl_rule_test.go
@@ -30,9 +30,9 @@ func TestAccAWSNetworkAclRule_basic(t *testing.T) {
}
func testAccCheckAWSNetworkAclRuleDestroy(s *terraform.State) error {
- conn := testAccProvider.Meta().(*AWSClient).ec2conn
for _, rs := range s.RootModule().Resources {
+ conn := testAccProvider.Meta().(*AWSClient).ec2conn
if rs.Type != "aws_network_acl_rule" {
continue
}
@@ -63,9 +63,9 @@ func testAccCheckAWSNetworkAclRuleDestroy(s *terraform.State) error {
}
func testAccCheckAWSNetworkAclRuleExists(n string, networkAcl *ec2.NetworkAcl) resource.TestCheckFunc {
- conn := testAccProvider.Meta().(*AWSClient).ec2conn
return func(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).ec2conn
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
diff --git a/website/source/docs/providers/aws/r/network_acl_rule.html.markdown b/website/source/docs/providers/aws/r/network_acl_rule.html.markdown
new file mode 100644
index 0000000000..e5766756fe
--- /dev/null
+++ b/website/source/docs/providers/aws/r/network_acl_rule.html.markdown
@@ -0,0 +1,53 @@
+---
+layout: "aws"
+page_title: "AWS: aws_network_acl_rule"
+sidebar_current: "docs-aws-resource-network-acl-rule"
+description: |-
+ Provides a network ACL Rule resource.
+---
+
+# aws\_network\_acl\_rule
+
+Creates an entry (a rule) in a network ACL with the specified rule number.
+
+## Example Usage
+
+```
+resource "aws_network_acl" "bar" {
+ vpc_id = "${aws_vpc.foo.id}"
+}
+resource "aws_network_acl_rule" "bar" {
+ network_acl_id = "${aws_network_acl.bar.id}"
+ rule_number = 200
+ egress = false
+ protocol = "tcp"
+ rule_action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 22
+ to_port = 22
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `network_acl_id` - (Required) The ID of the network ACL.
+* `rule_number` - (Required) The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.
+* `egress` - (Optional, bool) Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet). Default `false`.
+* `protocol` - (Required) The protocol. A value of -1 means all protocols.
+* `rule_action` - (Required) Indicates whether to allow or deny the traffic that matches the rule. Accepted values: `allow` | `deny`
+* `cidr_block` - (Required) The network range to allow or deny, in CIDR notation (for example, 172.16.0.0/24).
+* `from_port` - (Optional) The from port to match.
+* `to_port` - (Optional) The to port to match.
+* `icmp_type` - (Optional) ICMP protocol: The ICMP type. Required if specifying ICMP for the protocol, e.g. `-1`.
+* `icmp_code` - (Optional) ICMP protocol: The ICMP code. Required if specifying ICMP for the protocol, e.g. `-1`.
+
+~> Note: For more information on ICMP types and codes, see here: http://www.nthelp.com/icmp.html
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The ID of the network ACL Rule
+
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb
index 56feb49709..5a12b432ee 100644
--- a/website/source/layouts/aws.erb
+++ b/website/source/layouts/aws.erb
@@ -538,6 +538,10 @@
aws_network_acl
+ >
+ aws_network_acl_rule
+
+
>
aws_network_interface
From 908403b8a8a1b87c0a4e84eba07c11b025ba9911 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Fri, 18 Dec 2015 18:59:58 -0500
Subject: [PATCH 294/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f20ac1f1ad..ff49d28de0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@ FEATURES:
* **New provider: `mysql` - Create MySQL databases** [GH-3122]
* **New resource: `aws_autoscaling_schedule`** [GH-4256]
* **New resource: `aws_nat_gateway`** [GH-4381]
+ * **New resource: `aws_network_acl_rule`** [GH-4286]
* **New resource: `google_pubsub_topic`** [GH-3671]
* **New resource: `google_pubsub_subscription`** [GH-3671]
* **New resource: `tls_locally_signed_cert`** [GH-3930]
From 244a75504e665dced50b7d5a01bb9da73fe4bbbe Mon Sep 17 00:00:00 2001
From: stack72
Date: Sat, 19 Dec 2015 16:42:10 +0000
Subject: [PATCH 295/664] More gofmt errors I'm afraid
---
builtin/providers/aws/resource_aws_db_instance_test.go | 3 ++-
builtin/providers/aws/resource_aws_dynamodb_table.go | 2 +-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_db_instance_test.go b/builtin/providers/aws/resource_aws_db_instance_test.go
index fa1e9e733a..17e1922718 100644
--- a/builtin/providers/aws/resource_aws_db_instance_test.go
+++ b/builtin/providers/aws/resource_aws_db_instance_test.go
@@ -2,6 +2,8 @@ package aws
import (
"fmt"
+ "log"
+
"math/rand"
"testing"
"time"
@@ -12,7 +14,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/rds"
- "log"
)
func TestAccAWSDBInstance_basic(t *testing.T) {
diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go
index c0a4f8c49e..0606cde2e8 100644
--- a/builtin/providers/aws/resource_aws_dynamodb_table.go
+++ b/builtin/providers/aws/resource_aws_dynamodb_table.go
@@ -4,6 +4,7 @@ import (
"bytes"
"fmt"
"log"
+ "strings"
"time"
"github.com/hashicorp/terraform/helper/schema"
@@ -12,7 +13,6 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/hashicorp/terraform/helper/hashcode"
- "strings"
)
// Number of times to retry if a throttling-related exception occurs
From ec0582d5b4f0441d46d90813318ba87f7dbb76b6 Mon Sep 17 00:00:00 2001
From: captainill
Date: Sat, 19 Dec 2015 21:33:48 -0800
Subject: [PATCH 296/664] capital C in by hashicorp lockup
---
.../source/assets/stylesheets/_header.scss | 8 +----
.../hashicorp-shared/_hashicorp-header.scss | 22 ++++----------
.../source/layouts/svg/_svg-by-hashicorp.erb | 29 +++++++++----------
3 files changed, 21 insertions(+), 38 deletions(-)
diff --git a/website/source/assets/stylesheets/_header.scss b/website/source/assets/stylesheets/_header.scss
index 68e50f3683..5b2980ebb8 100755
--- a/website/source/assets/stylesheets/_header.scss
+++ b/website/source/assets/stylesheets/_header.scss
@@ -28,7 +28,7 @@ body.page-sub{
.by-hashicorp{
&:hover{
svg{
- line{
+ .svg-bg-line{
opacity: .4;
}
}
@@ -41,12 +41,6 @@ body.page-sub{
ul.navbar-nav{
li {
- // &:hover{
- // svg path{
- // fill: $purple;
- // }
- // }
-
svg path{
fill: $white;
}
diff --git a/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-header.scss b/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-header.scss
index e9bbe501e7..5bd12805eb 100755
--- a/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-header.scss
+++ b/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-header.scss
@@ -171,12 +171,10 @@
font-weight: 300;
svg{
path,
- polygon{
+ polygon,
+ rect{
fill: white;
}
- line{
- stroke: white;
- }
}
&:focus,
@@ -212,15 +210,13 @@
path,
polygon{
- fill: black;
@include transition(all 300ms ease-in);
&:hover{
@include transition(all 300ms ease-in);
}
}
- line{
- stroke: black;
+ .svg-bg-line{
@include transition(all 300ms ease-in);
&:hover{
@@ -243,12 +239,10 @@
color: white;
svg{
path,
+ rect,
polygon{
fill: white;
}
- line{
- stroke: white;
- }
}
}
@@ -257,12 +251,9 @@
}
&:hover{
- text-decoration: none;
svg{
- &.svg-by{
- line{
- stroke: $purple;
- }
+ .svg-bg-line{
+ fill: $purple;
}
}
}
@@ -295,7 +286,6 @@
path,
line{
- fill: $black;
@include transition(all 300ms ease-in);
&:hover{
diff --git a/website/source/layouts/svg/_svg-by-hashicorp.erb b/website/source/layouts/svg/_svg-by-hashicorp.erb
index d89929590e..607d16b1e1 100644
--- a/website/source/layouts/svg/_svg-by-hashicorp.erb
+++ b/website/source/layouts/svg/_svg-by-hashicorp.erb
@@ -1,18 +1,17 @@
-
-
+
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
From e86104fc3581b2405625589da8e4fdc59bbd6d35 Mon Sep 17 00:00:00 2001
From: captainill
Date: Sun, 20 Dec 2015 11:49:02 -0800
Subject: [PATCH 297/664] cleanup footer hashicorp logo
---
website/source/assets/stylesheets/_footer.scss | 11 +++++++++++
.../hashicorp-shared/_hashicorp-header.scss | 10 +++++++++-
2 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/website/source/assets/stylesheets/_footer.scss b/website/source/assets/stylesheets/_footer.scss
index 3c2c08e4fd..2bf21204f0 100644
--- a/website/source/assets/stylesheets/_footer.scss
+++ b/website/source/assets/stylesheets/_footer.scss
@@ -2,6 +2,17 @@ body.page-sub{
#footer{
padding: 40px 0;
margin-top: 0;
+
+ .hashicorp-project{
+ margin-top: 24px;
+ &:hover{
+ svg{
+ .svg-bg-line{
+ opacity: .4;
+ }
+ }
+ }
+ }
}
}
diff --git a/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-header.scss b/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-header.scss
index 5bd12805eb..699a2d073d 100755
--- a/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-header.scss
+++ b/website/source/assets/stylesheets/hashicorp-shared/_hashicorp-header.scss
@@ -246,7 +246,8 @@
}
}
- &:focus{
+ &:focus,
+ &:hover{
text-decoration: none;
}
@@ -288,6 +289,13 @@
line{
@include transition(all 300ms ease-in);
+ &:hover{
+ @include transition(all 300ms ease-in);
+ }
+ }
+ .svg-bg-line{
+ @include transition(all 300ms ease-in);
+
&:hover{
@include transition(all 300ms ease-in);
}
From 85afc7d614dbd7d12f2d3d9cdeaa145e486d6dfa Mon Sep 17 00:00:00 2001
From: stack72
Date: Wed, 11 Nov 2015 20:51:46 +0000
Subject: [PATCH 298/664] Initial creation of the work for AWS Redshift Support
Finalising the schema and acceptance tests for the Redshift Security Groups
---
builtin/providers/aws/config.go | 6 +
builtin/providers/aws/provider.go | 1 +
.../resource_aws_redshift_security_group.go | 320 ++++++++++++++++++
...source_aws_redshift_security_group_test.go | 205 +++++++++++
4 files changed, 532 insertions(+)
create mode 100644 builtin/providers/aws/resource_aws_redshift_security_group.go
create mode 100644 builtin/providers/aws/resource_aws_redshift_security_group_test.go
diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go
index e3e2243f1c..d98488c1f0 100644
--- a/builtin/providers/aws/config.go
+++ b/builtin/providers/aws/config.go
@@ -39,6 +39,7 @@ import (
"github.com/aws/aws-sdk-go/service/lambda"
"github.com/aws/aws-sdk-go/service/opsworks"
"github.com/aws/aws-sdk-go/service/rds"
+ "github.com/aws/aws-sdk-go/service/redshift"
"github.com/aws/aws-sdk-go/service/route53"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/sns"
@@ -75,6 +76,7 @@ type AWSClient struct {
s3conn *s3.S3
sqsconn *sqs.SQS
snsconn *sns.SNS
+ redshiftconn *redshift.Redshift
r53conn *route53.Route53
region string
rdsconn *rds.RDS
@@ -233,6 +235,10 @@ func (c *Config) Client() (interface{}, error) {
log.Println("[INFO] Initializing CodeCommit SDK connection")
client.codecommitconn = codecommit.New(usEast1Sess)
+
+ log.Println("[INFO] Initializing Redshift SDK connection")
+ client.redshiftconn = redshift.New(sess)
+
}
if len(errs) > 0 {
diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index 6b0c8db2e7..45912f6cb6 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -170,6 +170,7 @@ func Provider() terraform.ResourceProvider {
"aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(),
"aws_rds_cluster": resourceAwsRDSCluster(),
"aws_rds_cluster_instance": resourceAwsRDSClusterInstance(),
+ "aws_redshift_security_group": resourceAwsRedshiftSecurityGroup(),
"aws_route53_delegation_set": resourceAwsRoute53DelegationSet(),
"aws_route53_record": resourceAwsRoute53Record(),
"aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(),
diff --git a/builtin/providers/aws/resource_aws_redshift_security_group.go b/builtin/providers/aws/resource_aws_redshift_security_group.go
new file mode 100644
index 0000000000..9f2520d15f
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_redshift_security_group.go
@@ -0,0 +1,320 @@
+package aws
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "regexp"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/redshift"
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsRedshiftSecurityGroup() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsRedshiftSecurityGroupCreate,
+ Read: resourceAwsRedshiftSecurityGroupRead,
+ Delete: resourceAwsRedshiftSecurityGroupDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ ValidateFunc: validateRedshiftSecurityGroupName,
+ },
+
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "ingress": &schema.Schema{
+ Type: schema.TypeSet,
+ Required: true,
+ ForceNew: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cidr": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+
+ "security_group_name": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+
+ "security_group_owner_id": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+ },
+ },
+ Set: resourceAwsRedshiftSecurityGroupIngressHash,
+ },
+
+ "tags": &schema.Schema{
+ Type: schema.TypeMap,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceAwsRedshiftSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).redshiftconn
+
+ var err error
+ var errs []error
+
+ name := d.Get("name").(string)
+ desc := d.Get("description").(string)
+ tags := tagsFromMapRedshift(d.Get("tags").(map[string]interface{}))
+ sgInput := &redshift.CreateClusterSecurityGroupInput{
+ ClusterSecurityGroupName: aws.String(name),
+ Description: aws.String(desc),
+ Tags: tags,
+ }
+ log.Printf("[DEBUG] Redshift security group create: name: %s, description: %s", name, desc)
+ _, err = conn.CreateClusterSecurityGroup(sgInput)
+ if err != nil {
+ return fmt.Errorf("Error creating RedshiftSecurityGroup: %s", err)
+ }
+
+ d.SetId(d.Get("name").(string))
+
+ log.Printf("[INFO] Redshift Security Group ID: %s", d.Id())
+ sg, err := resourceAwsRedshiftSecurityGroupRetrieve(d, meta)
+ if err != nil {
+ return err
+ }
+
+ ingresses := d.Get("ingress").(*schema.Set)
+ for _, ing := range ingresses.List() {
+ err := resourceAwsRedshiftSecurityGroupAuthorizeRule(ing, *sg.ClusterSecurityGroupName, conn)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ if len(errs) > 0 {
+ return &multierror.Error{Errors: errs}
+ }
+
+ log.Println("[INFO] Waiting for Redshift Security Group Ingress Authorizations to be authorized")
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"authorizing"},
+ Target: "authorized",
+ Refresh: resourceAwsRedshiftSecurityGroupStateRefreshFunc(d, meta),
+ Timeout: 10 * time.Minute,
+ }
+
+ _, err = stateConf.WaitForState()
+ if err != nil {
+ return err
+ }
+
+ return resourceAwsRedshiftSecurityGroupRead(d, meta)
+}
+
+func resourceAwsRedshiftSecurityGroupRead(d *schema.ResourceData, meta interface{}) error {
+ sg, err := resourceAwsRedshiftSecurityGroupRetrieve(d, meta)
+ if err != nil {
+ return err
+ }
+
+ rules := &schema.Set{
+ F: resourceAwsRedshiftSecurityGroupIngressHash,
+ }
+
+ for _, v := range sg.IPRanges {
+ rule := map[string]interface{}{"cidr": *v.CIDRIP}
+ rules.Add(rule)
+ }
+
+ for _, g := range sg.EC2SecurityGroups {
+ rule := map[string]interface{}{
+ "security_group_name": *g.EC2SecurityGroupName,
+ "security_group_owner_id": *g.EC2SecurityGroupOwnerId,
+ }
+ rules.Add(rule)
+ }
+
+ d.Set("ingress", rules)
+ d.Set("name", *sg.ClusterSecurityGroupName)
+ d.Set("description", *sg.Description)
+
+ return nil
+}
+
+func resourceAwsRedshiftSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).redshiftconn
+
+ log.Printf("[DEBUG] Redshift Security Group destroy: %v", d.Id())
+ opts := redshift.DeleteClusterSecurityGroupInput{
+ ClusterSecurityGroupName: aws.String(d.Id()),
+ }
+
+ log.Printf("[DEBUG] Redshift Security Group destroy configuration: %v", opts)
+ _, err := conn.DeleteClusterSecurityGroup(&opts)
+
+ if err != nil {
+ newerr, ok := err.(awserr.Error)
+ if ok && newerr.Code() == "InvalidRedshiftSecurityGroup.NotFound" {
+ return nil
+ }
+ return err
+ }
+
+ return nil
+}
+
+func resourceAwsRedshiftSecurityGroupRetrieve(d *schema.ResourceData, meta interface{}) (*redshift.ClusterSecurityGroup, error) {
+ conn := meta.(*AWSClient).redshiftconn
+
+ opts := redshift.DescribeClusterSecurityGroupsInput{
+ ClusterSecurityGroupName: aws.String(d.Id()),
+ }
+
+ log.Printf("[DEBUG] Redshift Security Group describe configuration: %#v", opts)
+
+ resp, err := conn.DescribeClusterSecurityGroups(&opts)
+
+ if err != nil {
+ return nil, fmt.Errorf("Error retrieving Redshift Security Groups: %s", err)
+ }
+
+ if len(resp.ClusterSecurityGroups) != 1 ||
+ *resp.ClusterSecurityGroups[0].ClusterSecurityGroupName != d.Id() {
+ return nil, fmt.Errorf("Unable to find Redshift Security Group: %#v", resp.ClusterSecurityGroups)
+ }
+
+ return resp.ClusterSecurityGroups[0], nil
+}
+
+func tagsFromMapRedshift(m map[string]interface{}) []*redshift.Tag {
+ result := make([]*redshift.Tag, 0, len(m))
+ for k, v := range m {
+ result = append(result, &redshift.Tag{
+ Key: aws.String(k),
+ Value: aws.String(v.(string)),
+ })
+ }
+
+ return result
+}
+
+func tagsToMapRedshift(ts []*redshift.Tag) map[string]string {
+ result := make(map[string]string)
+ for _, t := range ts {
+ result[*t.Key] = *t.Value
+ }
+
+ return result
+}
+
+func validateRedshiftSecurityGroupName(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if value == "default" {
+ errors = append(errors, fmt.Errorf("the Redshift Security Group name cannot be %q", value))
+ }
+ if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "only lowercase alphanumeric characters and hyphens allowed in %q: %q",
+ k, value))
+ }
+ if len(value) > 255 {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be longer than 32 characters: %q", k, value))
+ }
+ return
+
+}
+
+func resourceAwsRedshiftSecurityGroupIngressHash(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+
+ if v, ok := m["cidr"]; ok {
+ buf.WriteString(fmt.Sprintf("%s-", v.(string)))
+ }
+
+ if v, ok := m["security_group_name"]; ok {
+ buf.WriteString(fmt.Sprintf("%s-", v.(string)))
+ }
+
+ if v, ok := m["security_group_owner_id"]; ok {
+ buf.WriteString(fmt.Sprintf("%s-", v.(string)))
+ }
+
+ return hashcode.String(buf.String())
+}
+
+func resourceAwsRedshiftSecurityGroupAuthorizeRule(ingress interface{}, redshiftSecurityGroupName string, conn *redshift.Redshift) error {
+ ing := ingress.(map[string]interface{})
+
+ opts := redshift.AuthorizeClusterSecurityGroupIngressInput{
+ ClusterSecurityGroupName: aws.String(redshiftSecurityGroupName),
+ }
+
+ if attr, ok := ing["cidr"]; ok && attr != "" {
+ opts.CIDRIP = aws.String(attr.(string))
+ }
+
+ if attr, ok := ing["security_group_name"]; ok && attr != "" {
+ opts.EC2SecurityGroupName = aws.String(attr.(string))
+ }
+
+ if attr, ok := ing["security_group_owner_id"]; ok && attr != "" {
+ opts.EC2SecurityGroupOwnerId = aws.String(attr.(string))
+ }
+
+ log.Printf("[DEBUG] Authorize ingress rule configuration: %#v", opts)
+ _, err := conn.AuthorizeClusterSecurityGroupIngress(&opts)
+
+ if err != nil {
+ return fmt.Errorf("Error authorizing security group ingress: %s", err)
+ }
+
+ return nil
+}
+
+func resourceAwsRedshiftSecurityGroupStateRefreshFunc(
+ d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ v, err := resourceAwsRedshiftSecurityGroupRetrieve(d, meta)
+
+ if err != nil {
+ log.Printf("Error on retrieving Redshift Security Group when waiting: %s", err)
+ return nil, "", err
+ }
+
+ statuses := make([]string, 0, len(v.EC2SecurityGroups)+len(v.IPRanges))
+ for _, ec2g := range v.EC2SecurityGroups {
+ statuses = append(statuses, *ec2g.Status)
+ }
+ for _, ips := range v.IPRanges {
+ statuses = append(statuses, *ips.Status)
+ }
+
+ for _, stat := range statuses {
+ // Not done
+ if stat != "authorized" {
+ return nil, "authorizing", nil
+ }
+ }
+
+ return v, "authorized", nil
+ }
+}
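
resourceAwsRedshiftSecurityGroupStateRefreshFunc reports "authorizing" until every EC2 security group and CIDR ingress shows an "authorized" status, and the create function blocks on that transition through resource.StateChangeConf. A dependency-free sketch of the same polling idea — an assumption-level illustration of the pattern, not the helper library's actual implementation:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForAuthorized keeps polling until every status is "authorized",
// mirroring the Pending "authorizing" -> Target "authorized" transition
// the create function waits for.
func waitForAuthorized(refresh func() []string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		done := true
		for _, status := range refresh() {
			if status != "authorized" {
				done = false
				break
			}
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for ingress authorizations")
		}
		time.Sleep(2 * time.Second)
	}
}

func main() {
	calls := 0
	// Fake refresh: reports a pending authorization twice, then all done.
	refresh := func() []string {
		calls++
		if calls < 3 {
			return []string{"authorized", "authorizing"}
		}
		return []string{"authorized", "authorized"}
	}
	fmt.Println(waitForAuthorized(refresh, time.Minute)) // <nil>
}
```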
diff --git a/builtin/providers/aws/resource_aws_redshift_security_group_test.go b/builtin/providers/aws/resource_aws_redshift_security_group_test.go
new file mode 100644
index 0000000000..8b61379751
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_redshift_security_group_test.go
@@ -0,0 +1,205 @@
+package aws
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/redshift"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSRedshiftSecurityGroup_ingressCidr(t *testing.T) {
+ var v redshift.ClusterSecurityGroup
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSRedshiftSecurityGroupDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSRedshiftSecurityGroupConfig_ingressCidr,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_security_group.bar", "name", "redshift-sg-terraform"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_security_group.bar", "description", "this is a description"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_security_group.bar", "ingress.2735652665.cidr", "10.0.0.1/24"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_security_group.bar", "ingress.#", "1"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAWSRedshiftSecurityGroup_ingressSecurityGroup(t *testing.T) {
+ var v redshift.ClusterSecurityGroup
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSRedshiftSecurityGroupDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSRedshiftSecurityGroupConfig_ingressSgId,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_security_group.bar", "name", "redshift-sg-terraform"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_security_group.bar", "description", "this is a description"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_security_group.bar", "ingress.#", "1"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_security_group.bar", "ingress.220863.security_group_name", "terraform_redshift_acceptance_test"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckAWSRedshiftSecurityGroupExists(n string, v *redshift.ClusterSecurityGroup) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No Redshift Security Group ID is set")
+ }
+
+ conn := testAccProvider.Meta().(*AWSClient).redshiftconn
+
+ opts := redshift.DescribeClusterSecurityGroupsInput{
+ ClusterSecurityGroupName: aws.String(rs.Primary.ID),
+ }
+
+ resp, err := conn.DescribeClusterSecurityGroups(&opts)
+
+ if err != nil {
+ return err
+ }
+
+ if len(resp.ClusterSecurityGroups) != 1 ||
+ *resp.ClusterSecurityGroups[0].ClusterSecurityGroupName != rs.Primary.ID {
+ return fmt.Errorf("Redshift Security Group not found")
+ }
+
+ *v = *resp.ClusterSecurityGroups[0]
+
+ return nil
+ }
+}
+
+func testAccCheckAWSRedshiftSecurityGroupDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).redshiftconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_redshift_security_group" {
+ continue
+ }
+
+ // Try to find the Group
+ resp, err := conn.DescribeClusterSecurityGroups(
+ &redshift.DescribeClusterSecurityGroupsInput{
+ ClusterSecurityGroupName: aws.String(rs.Primary.ID),
+ })
+
+ if err == nil {
+ if len(resp.ClusterSecurityGroups) != 0 &&
+ *resp.ClusterSecurityGroups[0].ClusterSecurityGroupName == rs.Primary.ID {
+ return fmt.Errorf("Redshift Security Group still exists")
+ }
+ }
+
+ // Verify the error
+ newerr, ok := err.(awserr.Error)
+ if !ok {
+ return err
+ }
+ if newerr.Code() != "InvalidRedshiftSecurityGroup.NotFound" {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func TestResourceAWSRedshiftSecurityGroupName_validation(t *testing.T) {
+ cases := []struct {
+ Value string
+ ErrCount int
+ }{
+ {
+ Value: "default",
+ ErrCount: 1,
+ },
+ {
+ Value: "testing123%%",
+ ErrCount: 1,
+ },
+ {
+ Value: "TestingSG",
+ ErrCount: 1,
+ },
+ {
+ Value: randomString(256),
+ ErrCount: 1,
+ },
+ }
+
+ for _, tc := range cases {
+ _, errors := validateRedshiftSecurityGroupName(tc.Value, "aws_redshift_security_group_name")
+
+ if len(errors) != tc.ErrCount {
+ t.Fatalf("Expected the Redshift Security Group Name to trigger a validation error")
+ }
+ }
+}
+
+const testAccAWSRedshiftSecurityGroupConfig_ingressCidr = `
+provider "aws" {
+ region = "us-east-1"
+}
+
+resource "aws_redshift_security_group" "bar" {
+ name = "redshift-sg-terraform"
+ description = "this is a description"
+
+ ingress {
+ cidr = "10.0.0.1/24"
+ }
+}`
+
+const testAccAWSRedshiftSecurityGroupConfig_ingressSgId = `
+provider "aws" {
+ region = "us-east-1"
+}
+
+resource "aws_security_group" "redshift" {
+ name = "terraform_redshift_acceptance_test"
+ description = "Used in the redshift acceptance tests"
+
+ ingress {
+ protocol = "tcp"
+ from_port = 22
+ to_port = 22
+ cidr_blocks = ["10.0.0.0/8"]
+ }
+}
+
+resource "aws_redshift_security_group" "bar" {
+ name = "redshift-sg-terraform"
+ description = "this is a description"
+
+ ingress {
+ security_group_name = "${aws_security_group.redshift.name}"
+ security_group_owner_id = "${aws_security_group.redshift.owner_id}"
+ }
+}`
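
The acceptance test above checks attributes such as `ingress.2735652665.cidr`: the number in the key is the set hash computed by resourceAwsRedshiftSecurityGroupIngressHash from the element's fields. A small sketch of how such a key is formed — helper/hashcode.String is CRC32-based, but this stand-in is only illustrative and is not guaranteed to reproduce the exact value in the test:

```go
package main

import (
	"bytes"
	"fmt"
	"hash/crc32"
)

// ingressHash mimics the ingress set hash: the hashable fields are written
// into a buffer in a fixed order and hashed, and the result becomes part of
// the flattened state key.
func ingressHash(m map[string]interface{}) int {
	var buf bytes.Buffer
	for _, k := range []string{"cidr", "security_group_name", "security_group_owner_id"} {
		if v, ok := m[k]; ok {
			buf.WriteString(fmt.Sprintf("%s-", v.(string)))
		}
	}
	return int(crc32.ChecksumIEEE(buf.Bytes()))
}

func main() {
	h := ingressHash(map[string]interface{}{"cidr": "10.0.0.1/24"})
	fmt.Printf("state key: ingress.%d.cidr\n", h)
}
```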
From 249e7df76c0c996289d93ac54d5f7da2c8fdd18e Mon Sep 17 00:00:00 2001
From: stack72
Date: Wed, 11 Nov 2015 23:37:56 +0000
Subject: [PATCH 299/664] Adding the documentation for the Redshift security
groups
Creation of the schema, CRUD and acceptance tests for Redshift Parameter Group
---
builtin/providers/aws/provider.go | 1 +
.../resource_aws_redshift_parameter_group.go | 241 ++++++++++++++++++
...ource_aws_redshift_parameter_group_test.go | 207 +++++++++++++++
.../resource_aws_redshift_security_group.go | 22 +-
builtin/providers/aws/structure.go | 36 +++
builtin/providers/aws/structure_test.go | 61 ++++-
builtin/providers/aws/tagsRedshift.go | 27 ++
.../r/redshift_security_group.html.markdown | 50 ++++
website/source/layouts/aws.erb | 11 +
9 files changed, 634 insertions(+), 22 deletions(-)
create mode 100644 builtin/providers/aws/resource_aws_redshift_parameter_group.go
create mode 100644 builtin/providers/aws/resource_aws_redshift_parameter_group_test.go
create mode 100644 builtin/providers/aws/tagsRedshift.go
create mode 100644 website/source/docs/providers/aws/r/redshift_security_group.html.markdown
diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index 45912f6cb6..d4d44ee7cd 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -171,6 +171,7 @@ func Provider() terraform.ResourceProvider {
"aws_rds_cluster": resourceAwsRDSCluster(),
"aws_rds_cluster_instance": resourceAwsRDSClusterInstance(),
"aws_redshift_security_group": resourceAwsRedshiftSecurityGroup(),
+ "aws_redshift_parameter_group": resourceAwsRedshiftParameterGroup(),
"aws_route53_delegation_set": resourceAwsRoute53DelegationSet(),
"aws_route53_record": resourceAwsRoute53Record(),
"aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(),
diff --git a/builtin/providers/aws/resource_aws_redshift_parameter_group.go b/builtin/providers/aws/resource_aws_redshift_parameter_group.go
new file mode 100644
index 0000000000..4bebdd3141
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_redshift_parameter_group.go
@@ -0,0 +1,241 @@
+package aws
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/redshift"
+ "github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsRedshiftParameterGroup() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsRedshiftParameterGroupCreate,
+ Read: resourceAwsRedshiftParameterGroupRead,
+ Update: resourceAwsRedshiftParameterGroupUpdate,
+ Delete: resourceAwsRedshiftParameterGroupDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ ForceNew: true,
+ Required: true,
+ ValidateFunc: validateRedshiftParamGroupName,
+ },
+
+ "family": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "parameter": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ ForceNew: false,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "value": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ },
+ },
+ Set: resourceAwsRedshiftParameterHash,
+ },
+
+ "tags": tagsSchema(),
+ },
+ }
+}
+
+func resourceAwsRedshiftParameterGroupCreate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).redshiftconn
+
+ createOpts := redshift.CreateClusterParameterGroupInput{
+ ParameterGroupName: aws.String(d.Get("name").(string)),
+ ParameterGroupFamily: aws.String(d.Get("family").(string)),
+ Description: aws.String(d.Get("description").(string)),
+ Tags: tagsFromMapRedshift(d.Get("tags").(map[string]interface{})),
+ }
+
+ log.Printf("[DEBUG] Create Redshift Parameter Group: %#v", createOpts)
+ _, err := conn.CreateClusterParameterGroup(&createOpts)
+ if err != nil {
+ return fmt.Errorf("Error creating Redshift Parameter Group: %s", err)
+ }
+
+ d.SetId(*createOpts.ParameterGroupName)
+ log.Printf("[INFO] Redshift Parameter Group ID: %s", d.Id())
+
+ return resourceAwsRedshiftParameterGroupUpdate(d, meta)
+}
+
+func resourceAwsRedshiftParameterGroupRead(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).redshiftconn
+
+ describeOpts := redshift.DescribeClusterParameterGroupsInput{
+ ParameterGroupName: aws.String(d.Id()),
+ }
+
+ describeResp, err := conn.DescribeClusterParameterGroups(&describeOpts)
+ if err != nil {
+ return err
+ }
+
+ if len(describeResp.ParameterGroups) != 1 ||
+ *describeResp.ParameterGroups[0].ParameterGroupName != d.Id() {
+ return fmt.Errorf("Unable to find Parameter Group: %#v", describeResp.ParameterGroups)
+ }
+
+ d.Set("name", describeResp.ParameterGroups[0].ParameterGroupName)
+ d.Set("family", describeResp.ParameterGroups[0].ParameterGroupFamily)
+ d.Set("description", describeResp.ParameterGroups[0].Description)
+ d.Set("tags", tagsToMapRedshift(describeResp.ParameterGroups[0].Tags))
+
+ describeParametersOpts := redshift.DescribeClusterParametersInput{
+ ParameterGroupName: aws.String(d.Id()),
+ Source: aws.String("user"),
+ }
+
+ describeParametersResp, err := conn.DescribeClusterParameters(&describeParametersOpts)
+ if err != nil {
+ return err
+ }
+
+ d.Set("parameter", flattenRedshiftParameters(describeParametersResp.Parameters))
+ return nil
+}
+
+func resourceAwsRedshiftParameterGroupUpdate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).redshiftconn
+
+ d.Partial(true)
+
+ if d.HasChange("parameter") {
+ o, n := d.GetChange("parameter")
+ if o == nil {
+ o = new(schema.Set)
+ }
+ if n == nil {
+ n = new(schema.Set)
+ }
+
+ os := o.(*schema.Set)
+ ns := n.(*schema.Set)
+
+ // Expand the "parameter" set to aws-sdk-go compat []redshift.Parameter
+ parameters, err := expandRedshiftParameters(ns.Difference(os).List())
+ if err != nil {
+ return err
+ }
+
+ if len(parameters) > 0 {
+ modifyOpts := redshift.ModifyClusterParameterGroupInput{
+ ParameterGroupName: aws.String(d.Get("name").(string)),
+ Parameters: parameters,
+ }
+
+ log.Printf("[DEBUG] Modify Redshift Parameter Group: %s", modifyOpts)
+ _, err = conn.ModifyClusterParameterGroup(&modifyOpts)
+ if err != nil {
+ return fmt.Errorf("Error modifying Redshift Parameter Group: %s", err)
+ }
+ }
+ d.SetPartial("parameter")
+ }
+
+ d.Partial(false)
+ return resourceAwsRedshiftParameterGroupRead(d, meta)
+}
+
+func resourceAwsRedshiftParameterGroupDelete(d *schema.ResourceData, meta interface{}) error {
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"pending"},
+ Target: "destroyed",
+ Refresh: resourceAwsRedshiftParameterGroupDeleteRefreshFunc(d, meta),
+ Timeout: 3 * time.Minute,
+ MinTimeout: 1 * time.Second,
+ }
+ _, err := stateConf.WaitForState()
+ return err
+}
+
+func resourceAwsRedshiftParameterGroupDeleteRefreshFunc(
+ d *schema.ResourceData,
+ meta interface{}) resource.StateRefreshFunc {
+ conn := meta.(*AWSClient).redshiftconn
+
+ return func() (interface{}, string, error) {
+
+ deleteOpts := redshift.DeleteClusterParameterGroupInput{
+ ParameterGroupName: aws.String(d.Id()),
+ }
+
+ if _, err := conn.DeleteClusterParameterGroup(&deleteOpts); err != nil {
+ redshiftErr, ok := err.(awserr.Error)
+ if !ok {
+ return d, "error", err
+ }
+
+ if redshiftErr.Code() != "RedshiftParameterGroupNotFoundFault" {
+ return d, "error", err
+ }
+ }
+
+ return d, "destroyed", nil
+ }
+}
+
+func resourceAwsRedshiftParameterHash(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+ buf.WriteString(fmt.Sprintf("%s-", m["name"].(string)))
+ // Store the value as a lower case string, to match how we store them in flattenRedshiftParameters
+ buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["value"].(string))))
+
+ return hashcode.String(buf.String())
+}
+
+func validateRedshiftParamGroupName(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "only lowercase alphanumeric characters and hyphens allowed in %q", k))
+ }
+ if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "first character of %q must be a letter", k))
+ }
+ if regexp.MustCompile(`--`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot contain two consecutive hyphens", k))
+ }
+ if regexp.MustCompile(`-$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot end with a hyphen", k))
+ }
+ if len(value) > 255 {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be greater than 255 characters", k))
+ }
+ return
+}
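The Set function above is what produces the numeric keys such as `parameter.490804664.name` in the acceptance test that follows: each parameter block is reduced to its name plus lower-cased value and hashed into the bucket index used in the flatmap state. A minimal standalone sketch of that reduction, assuming only the `helper/hashcode` package from this repository (it mirrors resourceAwsRedshiftParameterHash rather than importing the provider):

```
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/hashicorp/terraform/helper/hashcode"
)

// parameterHash mirrors resourceAwsRedshiftParameterHash: name, then the
// lower-cased value, each followed by a dash, hashed into the set bucket.
func parameterHash(m map[string]interface{}) int {
	var buf bytes.Buffer
	buf.WriteString(fmt.Sprintf("%s-", m["name"].(string)))
	buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["value"].(string))))
	return hashcode.String(buf.String())
}

func main() {
	p := map[string]interface{}{"name": "require_ssl", "value": "true"}
	// The returned integer becomes part of the state keys, e.g.
	// parameter.<hash>.name and parameter.<hash>.value.
	fmt.Println(parameterHash(p))
}
```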
diff --git a/builtin/providers/aws/resource_aws_redshift_parameter_group_test.go b/builtin/providers/aws/resource_aws_redshift_parameter_group_test.go
new file mode 100644
index 0000000000..e5139d288b
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_redshift_parameter_group_test.go
@@ -0,0 +1,207 @@
+package aws
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/redshift"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSRedshiftParameterGroup_withParameters(t *testing.T) {
+ var v redshift.ClusterParameterGroup
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSRedshiftParameterGroupDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSRedshiftParameterGroupConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSRedshiftParameterGroupExists("aws_redshift_parameter_group.bar", &v),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_parameter_group.bar", "name", "parameter-group-test-terraform"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_parameter_group.bar", "family", "redshift-1.0"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_parameter_group.bar", "description", "Test parameter group for terraform"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_parameter_group.bar", "parameter.490804664.name", "require_ssl"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_parameter_group.bar", "parameter.490804664.value", "true"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_parameter_group.bar", "parameter.2036118857.name", "query_group"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_parameter_group.bar", "parameter.2036118857.value", "example"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_parameter_group.bar", "parameter.484080973.name", "enable_user_activity_logging"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_parameter_group.bar", "parameter.484080973.value", "true"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAWSRedshiftParameterGroup_withoutParameters(t *testing.T) {
+ var v redshift.ClusterParameterGroup
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSRedshiftParameterGroupDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSRedshiftParameterGroupOnlyConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSRedshiftParameterGroupExists("aws_redshift_parameter_group.bar", &v),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_parameter_group.bar", "name", "parameter-group-test-terraform"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_parameter_group.bar", "family", "redshift-1.0"),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_parameter_group.bar", "description", "Test parameter group for terraform"),
+ ),
+ },
+ },
+ })
+}
+
+func TestResourceAWSRedshiftParameterGroupName_validation(t *testing.T) {
+ cases := []struct {
+ Value string
+ ErrCount int
+ }{
+ {
+ Value: "tEsting123",
+ ErrCount: 1,
+ },
+ {
+ Value: "testing123!",
+ ErrCount: 1,
+ },
+ {
+ Value: "1testing123",
+ ErrCount: 1,
+ },
+ {
+ Value: "testing--123",
+ ErrCount: 1,
+ },
+ {
+ Value: "testing123-",
+ ErrCount: 1,
+ },
+ {
+ Value: randomString(256),
+ ErrCount: 1,
+ },
+ }
+
+ for _, tc := range cases {
+ _, errors := validateRedshiftParamGroupName(tc.Value, "aws_redshift_parameter_group_name")
+
+ if len(errors) != tc.ErrCount {
+ t.Fatalf("Expected the Redshift Parameter Group Name to trigger a validation error")
+ }
+ }
+}
+
+func testAccCheckAWSRedshiftParameterGroupDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).redshiftconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_redshift_parameter_group" {
+ continue
+ }
+
+ // Try to find the Group
+ resp, err := conn.DescribeClusterParameterGroups(
+ &redshift.DescribeClusterParameterGroupsInput{
+ ParameterGroupName: aws.String(rs.Primary.ID),
+ })
+
+ if err == nil {
+ if len(resp.ParameterGroups) != 0 &&
+ *resp.ParameterGroups[0].ParameterGroupName == rs.Primary.ID {
+ return fmt.Errorf("Redshift Parameter Group still exists")
+ }
+ }
+
+ // Verify the error
+ newerr, ok := err.(awserr.Error)
+ if !ok {
+ return err
+ }
+ if newerr.Code() != "InvalidRedshiftParameterGroup.NotFound" {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func testAccCheckAWSRedshiftParameterGroupExists(n string, v *redshift.ClusterParameterGroup) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No Redshift Parameter Group ID is set")
+ }
+
+ conn := testAccProvider.Meta().(*AWSClient).redshiftconn
+
+ opts := redshift.DescribeClusterParameterGroupsInput{
+ ParameterGroupName: aws.String(rs.Primary.ID),
+ }
+
+ resp, err := conn.DescribeClusterParameterGroups(&opts)
+
+ if err != nil {
+ return err
+ }
+
+ if len(resp.ParameterGroups) != 1 ||
+ *resp.ParameterGroups[0].ParameterGroupName != rs.Primary.ID {
+ return fmt.Errorf("Redshift Parameter Group not found")
+ }
+
+ *v = *resp.ParameterGroups[0]
+
+ return nil
+ }
+}
+
+const testAccAWSRedshiftParameterGroupOnlyConfig = `
+resource "aws_redshift_parameter_group" "bar" {
+ name = "parameter-group-test-terraform"
+ family = "redshift-1.0"
+ description = "Test parameter group for terraform"
+}`
+
+const testAccAWSRedshiftParameterGroupConfig = `
+resource "aws_redshift_parameter_group" "bar" {
+ name = "parameter-group-test-terraform"
+ family = "redshift-1.0"
+ description = "Test parameter group for terraform"
+ parameter {
+ name = "require_ssl"
+ value = "true"
+ }
+ parameter {
+ name = "query_group"
+ value = "example"
+ }
+ parameter {
+ name = "enable_user_activity_logging"
+ value = "true"
+ }
+}
+`
diff --git a/builtin/providers/aws/resource_aws_redshift_security_group.go b/builtin/providers/aws/resource_aws_redshift_security_group.go
index 9f2520d15f..0e09eb7c46 100644
--- a/builtin/providers/aws/resource_aws_redshift_security_group.go
+++ b/builtin/providers/aws/resource_aws_redshift_security_group.go
@@ -154,6 +154,7 @@ func resourceAwsRedshiftSecurityGroupRead(d *schema.ResourceData, meta interface
d.Set("ingress", rules)
d.Set("name", *sg.ClusterSecurityGroupName)
d.Set("description", *sg.Description)
+ d.Set("tags", tagsToMapRedshift(sg.Tags))
return nil
}
@@ -203,27 +204,6 @@ func resourceAwsRedshiftSecurityGroupRetrieve(d *schema.ResourceData, meta inter
return resp.ClusterSecurityGroups[0], nil
}
-func tagsFromMapRedshift(m map[string]interface{}) []*redshift.Tag {
- result := make([]*redshift.Tag, 0, len(m))
- for k, v := range m {
- result = append(result, &redshift.Tag{
- Key: aws.String(k),
- Value: aws.String(v.(string)),
- })
- }
-
- return result
-}
-
-func tagsToMapRedshift(ts []*redshift.Tag) map[string]string {
- result := make(map[string]string)
- for _, t := range ts {
- result[*t.Key] = *t.Value
- }
-
- return result
-}
-
func validateRedshiftSecurityGroupName(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if value == "default" {
diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go
index 748ecc88be..e82d3d961a 100644
--- a/builtin/providers/aws/structure.go
+++ b/builtin/providers/aws/structure.go
@@ -17,6 +17,7 @@ import (
elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/aws/aws-sdk-go/service/rds"
+ "github.com/aws/aws-sdk-go/service/redshift"
"github.com/aws/aws-sdk-go/service/route53"
"github.com/hashicorp/terraform/helper/schema"
)
@@ -233,6 +234,29 @@ func expandParameters(configured []interface{}) ([]*rds.Parameter, error) {
return parameters, nil
}
+func expandRedshiftParameters(configured []interface{}) ([]*redshift.Parameter, error) {
+ var parameters []*redshift.Parameter
+
+ // Loop over our configured parameters and create
+ // an array of aws-sdk-go compatible objects
+ for _, pRaw := range configured {
+ data := pRaw.(map[string]interface{})
+
+ if data["name"].(string) == "" {
+ continue
+ }
+
+ p := &redshift.Parameter{
+ ParameterName: aws.String(data["name"].(string)),
+ ParameterValue: aws.String(data["value"].(string)),
+ }
+
+ parameters = append(parameters, p)
+ }
+
+ return parameters, nil
+}
+
// Takes the result of flatmap.Expand for an array of parameters and
// returns Parameter API compatible objects
func expandElastiCacheParameters(configured []interface{}) ([]*elasticache.ParameterNameValue, error) {
@@ -413,6 +437,18 @@ func flattenParameters(list []*rds.Parameter) []map[string]interface{} {
return result
}
+// Flattens an array of Redshift Parameters into a []map[string]interface{}
+func flattenRedshiftParameters(list []*redshift.Parameter) []map[string]interface{} {
+ result := make([]map[string]interface{}, 0, len(list))
+ for _, i := range list {
+ result = append(result, map[string]interface{}{
+ "name": strings.ToLower(*i.ParameterName),
+ "value": strings.ToLower(*i.ParameterValue),
+ })
+ }
+ return result
+}
+
// Flattens an array of Parameters into a []map[string]interface{}
func flattenElastiCacheParameters(list []*elasticache.Parameter) []map[string]interface{} {
result := make([]map[string]interface{}, 0, len(list))
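For reference, a compact round trip of the two helpers added above. One detail worth noting: flattenRedshiftParameters lower-cases both the name and the value, so only values that are already lower case survive the round trip unchanged (this matches how the parameter set hash stores values). A minimal standalone sketch using the same aws-sdk-go types; not part of the patch:

```
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/redshift"
)

func main() {
	// Config-shaped input, as the "parameter" set hands it to the expander.
	cfg := []interface{}{
		map[string]interface{}{"name": "require_ssl", "value": "TRUE"},
	}

	// Expand: map -> *redshift.Parameter (mirrors expandRedshiftParameters);
	// entries with an empty name are skipped, as in the patch.
	var params []*redshift.Parameter
	for _, raw := range cfg {
		m := raw.(map[string]interface{})
		if m["name"].(string) == "" {
			continue
		}
		params = append(params, &redshift.Parameter{
			ParameterName:  aws.String(m["name"].(string)),
			ParameterValue: aws.String(m["value"].(string)),
		})
	}

	// Flatten: *redshift.Parameter -> lower-cased name/value pairs
	// (mirrors flattenRedshiftParameters), so "TRUE" comes back as "true".
	for _, p := range params {
		fmt.Println(strings.ToLower(*p.ParameterName), strings.ToLower(*p.ParameterValue))
	}
}
```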
diff --git a/builtin/providers/aws/structure_test.go b/builtin/providers/aws/structure_test.go
index 8e41b631f9..f74911b506 100644
--- a/builtin/providers/aws/structure_test.go
+++ b/builtin/providers/aws/structure_test.go
@@ -10,6 +10,7 @@ import (
"github.com/aws/aws-sdk-go/service/elasticache"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/aws/aws-sdk-go/service/rds"
+ "github.com/aws/aws-sdk-go/service/redshift"
"github.com/aws/aws-sdk-go/service/route53"
"github.com/hashicorp/terraform/flatmap"
"github.com/hashicorp/terraform/helper/schema"
@@ -426,7 +427,36 @@ func TestExpandParameters(t *testing.T) {
}
}
+func TestExpandRedshiftParameters(t *testing.T) {
+ expanded := []interface{}{
+ map[string]interface{}{
+ "name": "character_set_client",
+ "value": "utf8",
+ },
+ }
+ parameters, err := expandRedshiftParameters(expanded)
+ if err != nil {
+ t.Fatalf("bad: %#v", err)
+ }
+
+ expected := &redshift.Parameter{
+ ParameterName: aws.String("character_set_client"),
+ ParameterValue: aws.String("utf8"),
+ }
+
+ if !reflect.DeepEqual(parameters[0], expected) {
+ t.Fatalf(
+ "Got:\n\n%#v\n\nExpected:\n\n%#v\n",
+ parameters[0],
+ expected)
+ }
+}
+
 func TestExpandElasticacheParameters(t *testing.T) {
expanded := []interface{}{
map[string]interface{}{
"name": "activerehashing",
@@ -481,7 +511,36 @@ func TestFlattenParameters(t *testing.T) {
}
}
-func TestFlattenElasticacheParameters(t *testing.T) {
+func TestFlattenRedshiftParameters(t *testing.T) {
+ cases := []struct {
+ Input []*redshift.Parameter
+ Output []map[string]interface{}
+ }{
+ {
+ Input: []*redshift.Parameter{
+ &redshift.Parameter{
+ ParameterName: aws.String("character_set_client"),
+ ParameterValue: aws.String("utf8"),
+ },
+ },
+ Output: []map[string]interface{}{
+ map[string]interface{}{
+ "name": "character_set_client",
+ "value": "utf8",
+ },
+ },
+ },
+ }
+
+ for _, tc := range cases {
+ output := flattenRedshiftParameters(tc.Input)
+ if !reflect.DeepEqual(output, tc.Output) {
+ t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output)
+ }
+ }
+}
+
+func TestFlattenElasticacheParameters(t *testing.T) {
cases := []struct {
Input []*elasticache.Parameter
Output []map[string]interface{}
diff --git a/builtin/providers/aws/tagsRedshift.go b/builtin/providers/aws/tagsRedshift.go
new file mode 100644
index 0000000000..06d6fda232
--- /dev/null
+++ b/builtin/providers/aws/tagsRedshift.go
@@ -0,0 +1,27 @@
+package aws
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/redshift"
+)
+
+func tagsFromMapRedshift(m map[string]interface{}) []*redshift.Tag {
+ result := make([]*redshift.Tag, 0, len(m))
+ for k, v := range m {
+ result = append(result, &redshift.Tag{
+ Key: aws.String(k),
+ Value: aws.String(v.(string)),
+ })
+ }
+
+ return result
+}
+
+func tagsToMapRedshift(ts []*redshift.Tag) map[string]string {
+ result := make(map[string]string)
+ for _, t := range ts {
+ result[*t.Key] = *t.Value
+ }
+
+ return result
+}
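The two helpers moved into tagsRedshift.go are plain conversions between Terraform's tag map and the SDK's `[]*redshift.Tag`. A minimal standalone sketch of the round trip, using the same aws-sdk-go types (not part of the patch):

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/redshift"
)

func main() {
	// Config-side tags as Terraform hands them to the provider.
	in := map[string]interface{}{"Environment": "test"}

	// Map -> []*redshift.Tag, as tagsFromMapRedshift does.
	tags := make([]*redshift.Tag, 0, len(in))
	for k, v := range in {
		tags = append(tags, &redshift.Tag{
			Key:   aws.String(k),
			Value: aws.String(v.(string)),
		})
	}

	// []*redshift.Tag -> map[string]string, as tagsToMapRedshift does.
	out := make(map[string]string)
	for _, t := range tags {
		out[*t.Key] = *t.Value
	}

	fmt.Println(out) // map[Environment:test]
}
```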
diff --git a/website/source/docs/providers/aws/r/redshift_security_group.html.markdown b/website/source/docs/providers/aws/r/redshift_security_group.html.markdown
new file mode 100644
index 0000000000..ec8a67ae15
--- /dev/null
+++ b/website/source/docs/providers/aws/r/redshift_security_group.html.markdown
@@ -0,0 +1,50 @@
+---
+layout: "aws"
+page_title: "AWS: aws_redshift_security_group"
+sidebar_current: "docs-aws-resource-redshift-security-group"
+description: |-
+ Provides a Redshift security group resource.
+---
+
+# aws\_redshift\_security\_group
+
+Creates a new Amazon Redshift security group. You use security groups to control access to non-VPC clusters.
+
+## Example Usage
+
+```
+resource "aws_redshift_security_group" "default" {
+ name = "redshift_sg"
+ description = "Redshift Example security group"
+
+ ingress {
+ cidr = "10.0.0.0/24"
+ }
+
+ tags {
+ Environment = "test"
+ }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the Redshift security group.
+* `description` - (Required) The description of the Redshift security group.
+* `ingress` - (Optional) A list of ingress rules.
+
+Ingress blocks support the following:
+
+* `cidr` - The CIDR block to accept.
+* `security_group_name` - The name of the security group to authorize.
+* `security_group_owner_id` - The owner ID of the security group provided
+ by `security_group_name`.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The Redshift security group ID.
+
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb
index 5a12b432ee..499b2148db 100644
--- a/website/source/layouts/aws.erb
+++ b/website/source/layouts/aws.erb
@@ -433,6 +433,17 @@
+ >
+ Redshift Resources
+
+
+
>
Route53 Resources
From 48091e37c7ea7a73990ca0b1b885bb10045cfb80 Mon Sep 17 00:00:00 2001
From: stack72
Date: Thu, 12 Nov 2015 01:10:57 +0000
Subject: [PATCH 300/664] Adding the documentation for the Redshift Parameter
Groups
Changed the aws_redshift_security_group and aws_redshift_parameter_group
to remove the tags from the schema. Tags are a little bit more
complicated than originally thought - I will revisit this later
Then added the schema, CRUD functionality and basic acceptance tests for
aws_redshift_subnet_group
Adding an acceptance test for the Update of subnet_ids in AWS Redshift Subnet Group
---
builtin/providers/aws/provider.go | 1 +
.../resource_aws_redshift_parameter_group.go | 5 +-
.../resource_aws_redshift_security_group.go | 9 -
.../aws/resource_aws_redshift_subnet_group.go | 186 +++++++++++++++
...resource_aws_redshift_subnet_group_test.go | 220 ++++++++++++++++++
.../r/redshift_parameter_group.html.markdown | 57 +++++
website/source/layouts/aws.erb | 4 +
7 files changed, 469 insertions(+), 13 deletions(-)
create mode 100644 builtin/providers/aws/resource_aws_redshift_subnet_group.go
create mode 100644 builtin/providers/aws/resource_aws_redshift_subnet_group_test.go
create mode 100644 website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown
diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index d4d44ee7cd..df1b773786 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -172,6 +172,7 @@ func Provider() terraform.ResourceProvider {
"aws_rds_cluster_instance": resourceAwsRDSClusterInstance(),
"aws_redshift_security_group": resourceAwsRedshiftSecurityGroup(),
"aws_redshift_parameter_group": resourceAwsRedshiftParameterGroup(),
+ "aws_redshift_subnet_group": resourceAwsRedshiftSubnetGroup(),
"aws_route53_delegation_set": resourceAwsRoute53DelegationSet(),
"aws_route53_record": resourceAwsRoute53Record(),
"aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(),
diff --git a/builtin/providers/aws/resource_aws_redshift_parameter_group.go b/builtin/providers/aws/resource_aws_redshift_parameter_group.go
index 4bebdd3141..54336d4395 100644
--- a/builtin/providers/aws/resource_aws_redshift_parameter_group.go
+++ b/builtin/providers/aws/resource_aws_redshift_parameter_group.go
@@ -61,8 +61,6 @@ func resourceAwsRedshiftParameterGroup() *schema.Resource {
},
Set: resourceAwsRedshiftParameterHash,
},
-
- "tags": tagsSchema(),
},
}
}
@@ -74,7 +72,6 @@ func resourceAwsRedshiftParameterGroupCreate(d *schema.ResourceData, meta interf
ParameterGroupName: aws.String(d.Get("name").(string)),
ParameterGroupFamily: aws.String(d.Get("family").(string)),
Description: aws.String(d.Get("description").(string)),
- Tags: tagsFromMapRedshift(d.Get("tags").(map[string]interface{})),
}
log.Printf("[DEBUG] Create Redshift Parameter Group: %#v", createOpts)
@@ -103,13 +100,13 @@ func resourceAwsRedshiftParameterGroupRead(d *schema.ResourceData, meta interfac
if len(describeResp.ParameterGroups) != 1 ||
*describeResp.ParameterGroups[0].ParameterGroupName != d.Id() {
+ d.SetId("")
return fmt.Errorf("Unable to find Parameter Group: %#v", describeResp.ParameterGroups)
}
d.Set("name", describeResp.ParameterGroups[0].ParameterGroupName)
d.Set("family", describeResp.ParameterGroups[0].ParameterGroupFamily)
d.Set("description", describeResp.ParameterGroups[0].Description)
- d.Set("tags", tagsToMapRedshift(describeResp.ParameterGroups[0].Tags))
describeParametersOpts := redshift.DescribeClusterParametersInput{
ParameterGroupName: aws.String(d.Id()),
diff --git a/builtin/providers/aws/resource_aws_redshift_security_group.go b/builtin/providers/aws/resource_aws_redshift_security_group.go
index 0e09eb7c46..e21ccc5df1 100644
--- a/builtin/providers/aws/resource_aws_redshift_security_group.go
+++ b/builtin/providers/aws/resource_aws_redshift_security_group.go
@@ -62,12 +62,6 @@ func resourceAwsRedshiftSecurityGroup() *schema.Resource {
},
Set: resourceAwsRedshiftSecurityGroupIngressHash,
},
-
- "tags": &schema.Schema{
- Type: schema.TypeMap,
- Optional: true,
- ForceNew: true,
- },
},
}
}
@@ -80,11 +74,9 @@ func resourceAwsRedshiftSecurityGroupCreate(d *schema.ResourceData, meta interfa
name := d.Get("name").(string)
desc := d.Get("description").(string)
- tags := tagsFromMapRedshift(d.Get("tags").(map[string]interface{}))
sgInput := &redshift.CreateClusterSecurityGroupInput{
ClusterSecurityGroupName: aws.String(name),
Description: aws.String(desc),
- Tags: tags,
}
log.Printf("[DEBUG] Redshift security group create: name: %s, description: %s", name, desc)
_, err = conn.CreateClusterSecurityGroup(sgInput)
@@ -154,7 +146,6 @@ func resourceAwsRedshiftSecurityGroupRead(d *schema.ResourceData, meta interface
d.Set("ingress", rules)
d.Set("name", *sg.ClusterSecurityGroupName)
d.Set("description", *sg.Description)
- d.Set("tags", tagsToMapRedshift(sg.Tags))
return nil
}
diff --git a/builtin/providers/aws/resource_aws_redshift_subnet_group.go b/builtin/providers/aws/resource_aws_redshift_subnet_group.go
new file mode 100644
index 0000000000..878cd727ed
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_redshift_subnet_group.go
@@ -0,0 +1,186 @@
+package aws
+
+import (
+ "fmt"
+ "log"
+ "regexp"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/redshift"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsRedshiftSubnetGroup() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsRedshiftSubnetGroupCreate,
+ Read: resourceAwsRedshiftSubnetGroupRead,
+ Update: resourceAwsRedshiftSubnetGroupUpdate,
+ Delete: resourceAwsRedshiftSubnetGroupDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ ForceNew: true,
+ Required: true,
+ ValidateFunc: validateRedshiftSubnetGroupName,
+ },
+
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "subnet_ids": &schema.Schema{
+ Type: schema.TypeSet,
+ Required: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ },
+ },
+ }
+}
+
+func resourceAwsRedshiftSubnetGroupCreate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).redshiftconn
+
+ subnetIdsSet := d.Get("subnet_ids").(*schema.Set)
+ subnetIds := make([]*string, subnetIdsSet.Len())
+ for i, subnetId := range subnetIdsSet.List() {
+ subnetIds[i] = aws.String(subnetId.(string))
+ }
+
+ createOpts := redshift.CreateClusterSubnetGroupInput{
+ ClusterSubnetGroupName: aws.String(d.Get("name").(string)),
+ Description: aws.String(d.Get("description").(string)),
+ SubnetIds: subnetIds,
+ }
+
+ log.Printf("[DEBUG] Create Redshift Subnet Group: %#v", createOpts)
+ _, err := conn.CreateClusterSubnetGroup(&createOpts)
+ if err != nil {
+ return fmt.Errorf("Error creating Redshift Subnet Group: %s", err)
+ }
+
+ d.SetId(*createOpts.ClusterSubnetGroupName)
+ log.Printf("[INFO] Redshift Subnet Group ID: %s", d.Id())
+ return resourceAwsRedshiftSubnetGroupRead(d, meta)
+}
+
+func resourceAwsRedshiftSubnetGroupRead(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).redshiftconn
+
+ describeOpts := redshift.DescribeClusterSubnetGroupsInput{
+ ClusterSubnetGroupName: aws.String(d.Id()),
+ }
+
+ describeResp, err := conn.DescribeClusterSubnetGroups(&describeOpts)
+ if err != nil {
+ if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ClusterSubnetGroupNotFoundFault" {
+ log.Printf("[INFO] Redshift Subnet Group: %s was not found", d.Id())
+ d.SetId("")
+ return nil
+ }
+ return err
+ }
+
+ if len(describeResp.ClusterSubnetGroups) == 0 {
+ return fmt.Errorf("Unable to find Redshift Subnet Group: %#v", describeResp.ClusterSubnetGroups)
+ }
+
+ d.Set("name", d.Id())
+ d.Set("description", describeResp.ClusterSubnetGroups[0].Description)
+ d.Set("subnet_ids", subnetIdsToSlice(describeResp.ClusterSubnetGroups[0].Subnets))
+
+ return nil
+}
+
+func resourceAwsRedshiftSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).redshiftconn
+ if d.HasChange("subnet_ids") {
+ _, n := d.GetChange("subnet_ids")
+ if n == nil {
+ n = new(schema.Set)
+ }
+ ns := n.(*schema.Set)
+
+ var sIds []*string
+ for _, s := range ns.List() {
+ sIds = append(sIds, aws.String(s.(string)))
+ }
+
+ _, err := conn.ModifyClusterSubnetGroup(&redshift.ModifyClusterSubnetGroupInput{
+ ClusterSubnetGroupName: aws.String(d.Id()),
+ SubnetIds: sIds,
+ })
+
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func resourceAwsRedshiftSubnetGroupDelete(d *schema.ResourceData, meta interface{}) error {
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"pending"},
+ Target: "destroyed",
+ Refresh: resourceAwsRedshiftSubnetGroupDeleteRefreshFunc(d, meta),
+ Timeout: 3 * time.Minute,
+ MinTimeout: 1 * time.Second,
+ }
+ _, err := stateConf.WaitForState()
+ return err
+}
+
+func resourceAwsRedshiftSubnetGroupDeleteRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc {
+ conn := meta.(*AWSClient).redshiftconn
+
+ return func() (interface{}, string, error) {
+
+ deleteOpts := redshift.DeleteClusterSubnetGroupInput{
+ ClusterSubnetGroupName: aws.String(d.Id()),
+ }
+
+ if _, err := conn.DeleteClusterSubnetGroup(&deleteOpts); err != nil {
+ redshiftErr, ok := err.(awserr.Error)
+ if !ok {
+ return d, "error", err
+ }
+
+ if redshiftErr.Code() != "ClusterSubnetGroupNotFoundFault" {
+ return d, "error", err
+ }
+ }
+
+ return d, "destroyed", nil
+ }
+}
+
+func subnetIdsToSlice(subnetIds []*redshift.Subnet) []string {
+ subnetsSlice := make([]string, 0, len(subnetIds))
+ for _, s := range subnetIds {
+ subnetsSlice = append(subnetsSlice, *s.SubnetIdentifier)
+ }
+ return subnetsSlice
+}
+
+func validateRedshiftSubnetGroupName(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if !regexp.MustCompile(`^[0-9a-z-_]+$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "only lowercase alphanumeric characters, hyphens, underscores, and periods allowed in %q", k))
+ }
+ if len(value) > 255 {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be longer than 255 characters", k))
+ }
+ if regexp.MustCompile(`(?i)^default$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "%q is not allowed as %q", "Default", k))
+ }
+ return
+}
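A small standalone sketch of how validateRedshiftSubnetGroupName treats the values the acceptance test further down feeds it; it re-states the three checks rather than importing the provider package, so the names here are illustrative only:

```
package main

import (
	"fmt"
	"regexp"
)

// validate mirrors validateRedshiftSubnetGroupName: lowercase alphanumerics,
// hyphens, and underscores only; at most 255 characters; and the literal
// name "default" is reserved.
func validate(value string) []error {
	var errs []error
	if !regexp.MustCompile(`^[0-9a-z-_]+$`).MatchString(value) {
		errs = append(errs, fmt.Errorf("invalid characters in %q", value))
	}
	if len(value) > 255 {
		errs = append(errs, fmt.Errorf("%q is longer than 255 characters", value))
	}
	if regexp.MustCompile(`(?i)^default$`).MatchString(value) {
		errs = append(errs, fmt.Errorf("%q is reserved", value))
	}
	return errs
}

func main() {
	for _, v := range []string{"default", "testing123%%", "TestingSG", "tf-redshift-subnets"} {
		fmt.Printf("%-20s -> %d error(s)\n", v, len(validate(v)))
	}
}
```

The first three values each trip exactly one check, which is what the test's ErrCount of 1 asserts; the last passes cleanly.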
diff --git a/builtin/providers/aws/resource_aws_redshift_subnet_group_test.go b/builtin/providers/aws/resource_aws_redshift_subnet_group_test.go
new file mode 100644
index 0000000000..ba69c4f409
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_redshift_subnet_group_test.go
@@ -0,0 +1,220 @@
+package aws
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/redshift"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSRedshiftSubnetGroup_basic(t *testing.T) {
+ var v redshift.ClusterSubnetGroup
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckRedshiftSubnetGroupDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccRedshiftSubnetGroupConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckRedshiftSubnetGroupExists("aws_redshift_subnet_group.foo", &v),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_subnet_group.foo", "subnet_ids.#", "2"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAWSRedshiftSubnetGroup_updateSubnetIds(t *testing.T) {
+ var v redshift.ClusterSubnetGroup
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckRedshiftSubnetGroupDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccRedshiftSubnetGroupConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckRedshiftSubnetGroupExists("aws_redshift_subnet_group.foo", &v),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_subnet_group.foo", "subnet_ids.#", "2"),
+ ),
+ },
+
+ resource.TestStep{
+ Config: testAccRedshiftSubnetGroupConfig_updateSubnetIds,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckRedshiftSubnetGroupExists("aws_redshift_subnet_group.foo", &v),
+ resource.TestCheckResourceAttr(
+ "aws_redshift_subnet_group.foo", "subnet_ids.#", "3"),
+ ),
+ },
+ },
+ })
+}
+
+func TestResourceAWSRedshiftSubnetGroupName_validation(t *testing.T) {
+ cases := []struct {
+ Value string
+ ErrCount int
+ }{
+ {
+ Value: "default",
+ ErrCount: 1,
+ },
+ {
+ Value: "testing123%%",
+ ErrCount: 1,
+ },
+ {
+ Value: "TestingSG",
+ ErrCount: 1,
+ },
+ {
+ Value: randomString(256),
+ ErrCount: 1,
+ },
+ }
+
+ for _, tc := range cases {
+ _, errors := validateRedshiftSubnetGroupName(tc.Value, "aws_redshift_subnet_group_name")
+
+ if len(errors) != tc.ErrCount {
+ t.Fatalf("Expected the Redshift Subnet Group Name to trigger a validation error")
+ }
+ }
+}
+
+func testAccCheckRedshiftSubnetGroupDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).redshiftconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_redshift_subnet_group" {
+ continue
+ }
+
+ resp, err := conn.DescribeClusterSubnetGroups(
+ &redshift.DescribeClusterSubnetGroupsInput{
+ ClusterSubnetGroupName: aws.String(rs.Primary.ID)})
+ if err == nil {
+ if len(resp.ClusterSubnetGroups) > 0 {
+ return fmt.Errorf("still exist.")
+ }
+
+ return nil
+ }
+
+ redshiftErr, ok := err.(awserr.Error)
+ if !ok {
+ return err
+ }
+ if redshiftErr.Code() != "ClusterSubnetGroupNotFoundFault" {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func testAccCheckRedshiftSubnetGroupExists(n string, v *redshift.ClusterSubnetGroup) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No ID is set")
+ }
+
+ conn := testAccProvider.Meta().(*AWSClient).redshiftconn
+ resp, err := conn.DescribeClusterSubnetGroups(
+ &redshift.DescribeClusterSubnetGroupsInput{ClusterSubnetGroupName: aws.String(rs.Primary.ID)})
+ if err != nil {
+ return err
+ }
+ if len(resp.ClusterSubnetGroups) == 0 {
+ return fmt.Errorf("ClusterSubnetGroup not found")
+ }
+
+ *v = *resp.ClusterSubnetGroups[0]
+
+ return nil
+ }
+}
+
+const testAccRedshiftSubnetGroupConfig = `
+resource "aws_vpc" "foo" {
+ cidr_block = "10.1.0.0/16"
+}
+
+resource "aws_subnet" "foo" {
+ cidr_block = "10.1.1.0/24"
+ availability_zone = "us-west-2a"
+ vpc_id = "${aws_vpc.foo.id}"
+ tags {
+ Name = "tf-dbsubnet-test-1"
+ }
+}
+
+resource "aws_subnet" "bar" {
+ cidr_block = "10.1.2.0/24"
+ availability_zone = "us-west-2b"
+ vpc_id = "${aws_vpc.foo.id}"
+ tags {
+ Name = "tf-dbsubnet-test-2"
+ }
+}
+
+resource "aws_redshift_subnet_group" "foo" {
+ name = "foo"
+ description = "foo description"
+ subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
+}
+`
+
+const testAccRedshiftSubnetGroupConfig_updateSubnetIds = `
+resource "aws_vpc" "foo" {
+ cidr_block = "10.1.0.0/16"
+}
+
+resource "aws_subnet" "foo" {
+ cidr_block = "10.1.1.0/24"
+ availability_zone = "us-west-2a"
+ vpc_id = "${aws_vpc.foo.id}"
+ tags {
+ Name = "tf-dbsubnet-test-1"
+ }
+}
+
+resource "aws_subnet" "bar" {
+ cidr_block = "10.1.2.0/24"
+ availability_zone = "us-west-2b"
+ vpc_id = "${aws_vpc.foo.id}"
+ tags {
+ Name = "tf-dbsubnet-test-2"
+ }
+}
+
+resource "aws_subnet" "foobar" {
+ cidr_block = "10.1.3.0/24"
+ availability_zone = "us-west-2c"
+ vpc_id = "${aws_vpc.foo.id}"
+ tags {
+ Name = "tf-dbsubnet-test-3"
+ }
+}
+
+resource "aws_redshift_subnet_group" "foo" {
+ name = "foo"
+ description = "foo description"
+ subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}", "${aws_subnet.foobar.id}"]
+}
+`
diff --git a/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown b/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown
new file mode 100644
index 0000000000..0974ee6f46
--- /dev/null
+++ b/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown
@@ -0,0 +1,57 @@
+---
+layout: "aws"
+page_title: "AWS: aws_redshift_parameter_group"
+sidebar_current: "docs-aws-resource-redshift-parameter-group"
+---
+
+# aws\_redshift\_parameter\_group
+
+Provides a Redshift Cluster parameter group resource.
+
+## Example Usage
+
+```
+resource "aws_redshift_parameter_group" "bar" {
+ name = "parameter-group-test-terraform"
+ family = "redshift-1.0"
+ description = "Test parameter group for terraform"
+ parameter {
+ name = "require_ssl"
+ value = "true"
+ }
+ parameter {
+ name = "query_group"
+ value = "example"
+ }
+ parameter {
+ name = "enable_user_activity_logging"
+ value = "true"
+ }
+
+ tags {
+ Environment = "test"
+ }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the Redshift parameter group.
+* `family` - (Required) The family of the Redshift parameter group.
+* `description` - (Required) The description of the Redshift parameter group.
+* `parameter` - (Optional) A list of Redshift parameters to apply.
+
+Parameter blocks support the following:
+
+* `name` - (Required) The name of the Redshift parameter.
+* `value` - (Required) The value of the Redshift parameter.
+
+You can read more about the parameters that Redshift supports in the [documentation](http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html).
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The Redshift parameter group name.
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb
index 499b2148db..81d2d0f629 100644
--- a/website/source/layouts/aws.erb
+++ b/website/source/layouts/aws.erb
@@ -437,6 +437,10 @@
Redshift Resources
+ >
+ aws_redshift_parameter_group
+
+
>
aws_redshift_security_group
From bf03752552d9d8b7e2f4a665943f21402ec0a46b Mon Sep 17 00:00:00 2001
From: stack72
Date: Thu, 12 Nov 2015 09:45:22 +0000
Subject: [PATCH 301/664] Adding the documentation for the AWS Redshift Subnet
Group resource
also removed the notion of tags from the redshift security group and
parameter group documentation until that has been implemented
Redshift Cluster CRUD and acceptance tests
Removing the Acceptance test for the Cluster Updates. You cannot delete
a cluster immediately after performing an operation on it. We would need
to add a lot of retry logic to the system to get this test to work
Adding some schema validation for Redshift cluster
Adding the last of the pieces of a first draft of the Redshift work - this is the documentation
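The retry logic referred to above would most likely be another resource.StateChangeConf wait, matching the pattern this patch already uses for create and delete: block until the cluster leaves its transitional states before the next operation (such as a delete) is issued. A hypothetical sketch only; waitForClusterAvailable is not part of this patch, and it reuses resourceAwsRedshiftClusterStateRefreshFunc from resource_aws_redshift_cluster.go below:

```
package aws

import (
	"time"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

// waitForClusterAvailable blocks until the cluster reports "available",
// so that a follow-up operation is not rejected mid-modification.
func waitForClusterAvailable(d *schema.ResourceData, meta interface{}) error {
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"creating", "backing-up", "modifying", "rebooting", "resizing"},
		Target:     "available",
		Refresh:    resourceAwsRedshiftClusterStateRefreshFunc(d, meta),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
	}
	_, err := stateConf.WaitForState()
	return err
}
```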
---
builtin/providers/aws/provider.go | 1 +
.../aws/resource_aws_redshift_cluster.go | 574 ++++++++++++++++++
.../aws/resource_aws_redshift_cluster_test.go | 249 ++++++++
...ource_aws_redshift_parameter_group_test.go | 2 +-
...source_aws_redshift_security_group_test.go | 2 +-
...resource_aws_redshift_subnet_group_test.go | 2 +-
builtin/providers/aws/structure_test.go | 4 -
.../aws/r/redshift_cluster.html.markdown | 80 +++
.../r/redshift_parameter_group.html.markdown | 6 +-
.../r/redshift_security_group.html.markdown | 4 -
.../aws/r/redshift_subnet_group.html.markdown | 59 ++
website/source/layouts/aws.erb | 8 +
12 files changed, 975 insertions(+), 16 deletions(-)
create mode 100644 builtin/providers/aws/resource_aws_redshift_cluster.go
create mode 100644 builtin/providers/aws/resource_aws_redshift_cluster_test.go
create mode 100644 website/source/docs/providers/aws/r/redshift_cluster.html.markdown
create mode 100644 website/source/docs/providers/aws/r/redshift_subnet_group.html.markdown
diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index df1b773786..a5b5fde7e9 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -170,6 +170,7 @@ func Provider() terraform.ResourceProvider {
"aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(),
"aws_rds_cluster": resourceAwsRDSCluster(),
"aws_rds_cluster_instance": resourceAwsRDSClusterInstance(),
+ "aws_redshift_cluster": resourceAwsRedshiftCluster(),
"aws_redshift_security_group": resourceAwsRedshiftSecurityGroup(),
"aws_redshift_parameter_group": resourceAwsRedshiftParameterGroup(),
"aws_redshift_subnet_group": resourceAwsRedshiftSubnetGroup(),
diff --git a/builtin/providers/aws/resource_aws_redshift_cluster.go b/builtin/providers/aws/resource_aws_redshift_cluster.go
new file mode 100644
index 0000000000..b2e7d0cf0a
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_redshift_cluster.go
@@ -0,0 +1,574 @@
+package aws
+
+import (
+ "fmt"
+ "log"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/redshift"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsRedshiftCluster() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsRedshiftClusterCreate,
+ Read: resourceAwsRedshiftClusterRead,
+ Update: resourceAwsRedshiftClusterUpdate,
+ Delete: resourceAwsRedshiftClusterDelete,
+
+ Schema: map[string]*schema.Schema{
+ "database_name": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ValidateFunc: validateRedshiftClusterDbName,
+ },
+
+ "cluster_identifier": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ ValidateFunc: validateRedshiftClusterIdentifier,
+ },
+ "cluster_type": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "node_type": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "master_username": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ValidateFunc: validateRedshiftClusterMasterUsername,
+ },
+
+ "master_password": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "cluster_security_groups": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ Computed: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ },
+
+ "vpc_security_group_ids": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ Computed: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ },
+
+ "cluster_subnet_group_name": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Computed: true,
+ },
+
+ "availability_zone": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+
+ "preferred_maintenance_window": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ StateFunc: func(val interface{}) string {
+ if val == nil {
+ return ""
+ }
+ return strings.ToLower(val.(string))
+ },
+ },
+
+ "cluster_parameter_group_name": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+
+ "automated_snapshot_retention_period": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 1,
+ ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+ value := v.(int)
+ if value > 35 {
+ es = append(es, fmt.Errorf(
+ "backup retention period cannot be more than 35 days"))
+ }
+ return
+ },
+ },
+
+ "port": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 5439,
+ },
+
+ "cluster_version": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "1.0",
+ },
+
+ "allow_version_upgrade": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: true,
+ },
+
+ "number_of_nodes": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 1,
+ },
+
+ "publicly_accessible": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "encrypted": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Computed: true,
+ },
+
+ "elastic_ip": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+
+ "final_snapshot_identifier": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ValidateFunc: validateRedshiftClusterFinalSnapshotIdentifier,
+ },
+
+ "skip_final_snapshot": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: true,
+ },
+
+ "endpoint": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+
+ "cluster_public_key": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+
+ "cluster_revision_number": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).redshiftconn
+
+ log.Printf("[INFO] Building Redshift Cluster Options")
+ createOpts := &redshift.CreateClusterInput{
+ ClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)),
+ Port: aws.Int64(int64(d.Get("port").(int))),
+ MasterUserPassword: aws.String(d.Get("master_password").(string)),
+ MasterUsername: aws.String(d.Get("master_username").(string)),
+ ClusterType: aws.String(d.Get("cluster_type").(string)),
+ ClusterVersion: aws.String(d.Get("cluster_version").(string)),
+ NodeType: aws.String(d.Get("node_type").(string)),
+ DBName: aws.String(d.Get("database_name").(string)),
+ AllowVersionUpgrade: aws.Bool(d.Get("allow_version_upgrade").(bool)),
+ }
+ if d.Get("cluster_type") == "multi-node" {
+ createOpts.NumberOfNodes = aws.Int64(int64(d.Get("number_of_nodes").(int)))
+ }
+ if v := d.Get("cluster_security_groups").(*schema.Set); v.Len() > 0 {
+ createOpts.ClusterSecurityGroups = expandStringList(v.List())
+ }
+
+ if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 {
+ createOpts.VpcSecurityGroupIds = expandStringList(v.List())
+ }
+
+ if v, ok := d.GetOk("cluster_subnet_group_name"); ok {
+ createOpts.ClusterSubnetGroupName = aws.String(v.(string))
+ }
+
+ if v, ok := d.GetOk("availability_zone"); ok {
+ createOpts.AvailabilityZone = aws.String(v.(string))
+ }
+
+ if v, ok := d.GetOk("preferred_maintenance_window"); ok {
+ createOpts.PreferredMaintenanceWindow = aws.String(v.(string))
+ }
+
+ if v, ok := d.GetOk("cluster_parameter_group_name"); ok {
+ createOpts.ClusterParameterGroupName = aws.String(v.(string))
+ }
+
+ if v, ok := d.GetOk("automated_snapshot_retention_period"); ok {
+ createOpts.AutomatedSnapshotRetentionPeriod = aws.Int64(int64(v.(int)))
+ }
+
+ if v, ok := d.GetOk("publicly_accessible"); ok {
+ createOpts.PubliclyAccessible = aws.Bool(v.(bool))
+ }
+
+ if v, ok := d.GetOk("encrypted"); ok {
+ createOpts.Encrypted = aws.Bool(v.(bool))
+ }
+
+ if v, ok := d.GetOk("elastic_ip"); ok {
+ createOpts.ElasticIp = aws.String(v.(string))
+ }
+
+ log.Printf("[DEBUG] Redshift Cluster create options: %s", createOpts)
+ resp, err := conn.CreateCluster(createOpts)
+ if err != nil {
+ log.Printf("[ERROR] Error creating Redshift Cluster: %s", err)
+ return err
+ }
+
+ log.Printf("[DEBUG]: Cluster create response: %s", resp)
+ d.SetId(*resp.Cluster.ClusterIdentifier)
+
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"creating", "backing-up", "modifying"},
+ Target: "available",
+ Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d, meta),
+ Timeout: 5 * time.Minute,
+ MinTimeout: 3 * time.Second,
+ }
+
+ _, err = stateConf.WaitForState()
+ if err != nil {
+ return fmt.Errorf("[WARN] Error waiting for Redshift Cluster state to be \"available\": %s", err)
+ }
+
+ return resourceAwsRedshiftClusterRead(d, meta)
+}
+
+func resourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).redshiftconn
+
+ log.Printf("[INFO] Reading Redshift Cluster Information: %s", d.Id())
+ resp, err := conn.DescribeClusters(&redshift.DescribeClustersInput{
+ ClusterIdentifier: aws.String(d.Id()),
+ })
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ if "ClusterNotFound" == awsErr.Code() {
+ d.SetId("")
+ log.Printf("[DEBUG] Redshift Cluster (%s) not found", d.Id())
+ return nil
+ }
+ }
+ log.Printf("[DEBUG] Error describing Redshift Cluster (%s)", d.Id())
+ return err
+ }
+
+ var rsc *redshift.Cluster
+ for _, c := range resp.Clusters {
+ if *c.ClusterIdentifier == d.Id() {
+ rsc = c
+ }
+ }
+
+ if rsc == nil {
+ log.Printf("[WARN] Redshift Cluster (%s) not found", d.Id())
+ d.SetId("")
+ return nil
+ }
+
+ d.Set("database_name", rsc.DBName)
+ d.Set("cluster_subnet_group_name", rsc.ClusterSubnetGroupName)
+ d.Set("availability_zone", rsc.AvailabilityZone)
+ d.Set("encrypted", rsc.Encrypted)
+ d.Set("automated_snapshot_retention_period", rsc.AutomatedSnapshotRetentionPeriod)
+ d.Set("preferred_maintenance_window", rsc.PreferredMaintenanceWindow)
+ d.Set("endpoint", aws.String(fmt.Sprintf("%s:%d", *rsc.Endpoint.Address, *rsc.Endpoint.Port)))
+ d.Set("cluster_parameter_group_name", rsc.ClusterParameterGroups[0].ParameterGroupName)
+
+ var vpcg []string
+ for _, g := range rsc.VpcSecurityGroups {
+ vpcg = append(vpcg, *g.VpcSecurityGroupId)
+ }
+ if err := d.Set("vpc_security_group_ids", vpcg); err != nil {
+ return fmt.Errorf("[DEBUG] Error saving VPC Security Group IDs to state for Redshift Cluster (%s): %s", d.Id(), err)
+ }
+
+ var csg []string
+ for _, g := range rsc.ClusterSecurityGroups {
+ csg = append(csg, *g.ClusterSecurityGroupName)
+ }
+ if err := d.Set("cluster_security_groups", csg); err != nil {
+ return fmt.Errorf("[DEBUG] Error saving Cluster Security Group Names to state for Redshift Cluster (%s): %s", d.Id(), err)
+ }
+
+ d.Set("cluster_public_key", rsc.ClusterPublicKey)
+ d.Set("cluster_revision_number", rsc.ClusterRevisionNumber)
+
+ return nil
+}
+
+func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).redshiftconn
+
+ log.Printf("[INFO] Building Redshift Modify Cluster Options")
+ req := &redshift.ModifyClusterInput{
+ ClusterIdentifier: aws.String(d.Id()),
+ }
+
+ if d.HasChange("cluster_type") {
+ req.ClusterType = aws.String(d.Get("cluster_type").(string))
+ }
+
+ if d.HasChange("node_type") {
+ req.NodeType = aws.String(d.Get("node_type").(string))
+ }
+
+ if d.HasChange("number_of_nodes") {
+ log.Printf("[INFO] When changing the NumberOfNodes in a Redshift Cluster, NodeType is required")
+ req.NumberOfNodes = aws.Int64(int64(d.Get("number_of_nodes").(int)))
+ req.NodeType = aws.String(d.Get("node_type").(string))
+ }
+
+ if d.HasChange("cluster_security_groups") {
+ req.ClusterSecurityGroups = expandStringList(d.Get("cluster_security_groups").(*schema.Set).List())
+ }
+
+ if d.HasChange("vpc_security_group_ips") {
+ req.VpcSecurityGroupIds = expandStringList(d.Get("vpc_security_group_ips").(*schema.Set).List())
+ }
+
+ if d.HasChange("master_password") {
+ req.MasterUserPassword = aws.String(d.Get("master_password").(string))
+ }
+
+ if d.HasChange("cluster_parameter_group_name") {
+ req.ClusterParameterGroupName = aws.String(d.Get("cluster_parameter_group_name").(string))
+ }
+
+ if d.HasChange("automated_snapshot_retention_period") {
+ req.AutomatedSnapshotRetentionPeriod = aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int)))
+ }
+
+ if d.HasChange("preferred_maintenance_window") {
+ req.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string))
+ }
+
+ if d.HasChange("cluster_version") {
+ req.ClusterVersion = aws.String(d.Get("cluster_version").(string))
+ }
+
+ if d.HasChange("allow_version_upgrade") {
+ req.AllowVersionUpgrade = aws.Bool(d.Get("allow_version_upgrade").(bool))
+ }
+
+ log.Printf("[INFO] Modifying Redshift Cluster: %s", d.Id())
+ log.Printf("[DEBUG] Redshift Cluster Modify options: %s", req)
+ _, err := conn.ModifyCluster(req)
+ if err != nil {
+ return fmt.Errorf("[WARN] Error modifying Redshift Cluster (%s): %s", d.Id(), err)
+ }
+
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"creating", "deleting", "rebooting", "resizing", "renaming"},
+ Target: "available",
+ Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d, meta),
+ Timeout: 10 * time.Minute,
+ MinTimeout: 5 * time.Second,
+ }
+
+ // Wait, catching any errors
+ _, err = stateConf.WaitForState()
+ if err != nil {
+ return fmt.Errorf("[WARN] Error Modifying Redshift Cluster (%s): %s", d.Id(), err)
+ }
+
+ return resourceAwsRedshiftClusterRead(d, meta)
+}
+
+func resourceAwsRedshiftClusterDelete(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).redshiftconn
+ log.Printf("[DEBUG] Destroying Redshift Cluster (%s)", d.Id())
+
+ deleteOpts := redshift.DeleteClusterInput{
+ ClusterIdentifier: aws.String(d.Id()),
+ }
+
+ skipFinalSnapshot := d.Get("skip_final_snapshot").(bool)
+ deleteOpts.SkipFinalClusterSnapshot = aws.Bool(skipFinalSnapshot)
+
+ if !skipFinalSnapshot {
+ if name, present := d.GetOk("final_snapshot_identifier"); present {
+ deleteOpts.FinalClusterSnapshotIdentifier = aws.String(name.(string))
+ } else {
+ return fmt.Errorf("Redshift Cluster Instance FinalSnapshotIdentifier is required when a final snapshot is required")
+ }
+ }
+
+ log.Printf("[DEBUG] Redshift Cluster delete options: %s", deleteOpts)
+ _, err := conn.DeleteCluster(&deleteOpts)
+ if err != nil {
+ return fmt.Errorf("[ERROR] Error deleting Redshift Cluster (%s): %s", d.Id(), err)
+ }
+
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"available", "creating", "deleting", "rebooting", "resizing", "renaming"},
+ Target: "destroyed",
+ Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d, meta),
+ Timeout: 40 * time.Minute,
+ MinTimeout: 5 * time.Second,
+ }
+
+ // Wait, catching any errors
+ _, err = stateConf.WaitForState()
+ if err != nil {
+ return fmt.Errorf("[ERROR] Error deleting Redshift Cluster (%s): %s", d.Id(), err)
+ }
+
+ log.Printf("[INFO] Redshift Cluster %s successfully deleted", d.Id())
+
+ return nil
+}
+
+func resourceAwsRedshiftClusterStateRefreshFunc(d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ conn := meta.(*AWSClient).redshiftconn
+
+ log.Printf("[INFO] Reading Redshift Cluster Information: %s", d.Id())
+ resp, err := conn.DescribeClusters(&redshift.DescribeClustersInput{
+ ClusterIdentifier: aws.String(d.Id()),
+ })
+
+ if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ if "ClusterNotFound" == awsErr.Code() {
+ return 42, "destroyed", nil
+ }
+ }
+ log.Printf("[WARN] Error on retrieving Redshift Cluster (%s) when waiting: %s", d.Id(), err)
+ return nil, "", err
+ }
+
+ var rsc *redshift.Cluster
+
+ for _, c := range resp.Clusters {
+ if *c.ClusterIdentifier == d.Id() {
+ rsc = c
+ }
+ }
+
+ if rsc == nil {
+ return 42, "destroyed", nil
+ }
+
+ if rsc.ClusterStatus != nil {
+ log.Printf("[DEBUG] Redshift Cluster status (%s): %s", d.Id(), *rsc.ClusterStatus)
+ }
+
+ return rsc, *rsc.ClusterStatus, nil
+ }
+}
+
+func validateRedshiftClusterIdentifier(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "only lowercase alphanumeric characters and hyphens allowed in %q", k))
+ }
+ if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "first character of %q must be a letter", k))
+ }
+ if regexp.MustCompile(`--`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot contain two consecutive hyphens", k))
+ }
+ if regexp.MustCompile(`-$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot end with a hyphen", k))
+ }
+ return
+}
+
+func validateRedshiftClusterDbName(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if !regexp.MustCompile(`^[a-z]+$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+			"only lowercase letters allowed in %q", k))
+ }
+ if len(value) > 64 {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be longer than 64 characters: %q", k, value))
+ }
+ if value == "" {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be an empty string", k))
+ }
+
+ return
+}
+
+func validateRedshiftClusterFinalSnapshotIdentifier(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "only alphanumeric characters and hyphens allowed in %q", k))
+ }
+ if regexp.MustCompile(`--`).MatchString(value) {
+ errors = append(errors, fmt.Errorf("%q cannot contain two consecutive hyphens", k))
+ }
+ if regexp.MustCompile(`-$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf("%q cannot end in a hyphen", k))
+ }
+ if len(value) > 255 {
+ errors = append(errors, fmt.Errorf("%q cannot be more than 255 characters", k))
+ }
+ return
+}
+
+func validateRedshiftClusterMasterUsername(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if !regexp.MustCompile(`^[A-Za-z0-9]+$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+			"only alphanumeric characters allowed in %q", k))
+ }
+ if !regexp.MustCompile(`^[A-Za-z]`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "first character of %q must be a letter", k))
+ }
+ if len(value) > 128 {
+ errors = append(errors, fmt.Errorf("%q cannot be more than 128 characters", k))
+ }
+ return
+}
diff --git a/builtin/providers/aws/resource_aws_redshift_cluster_test.go b/builtin/providers/aws/resource_aws_redshift_cluster_test.go
new file mode 100644
index 0000000000..241311db6b
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_redshift_cluster_test.go
@@ -0,0 +1,249 @@
+package aws
+
+import (
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/redshift"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSRedshiftCluster_basic(t *testing.T) {
+ var v redshift.Cluster
+
+ ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
+ config := fmt.Sprintf(testAccAWSRedshiftClusterConfig_basic, ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSRedshiftClusterDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckAWSRedshiftClusterDestroy(s *terraform.State) error {
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_redshift_cluster" {
+ continue
+ }
+
+ // Try to find the Group
+ conn := testAccProvider.Meta().(*AWSClient).redshiftconn
+ var err error
+ resp, err := conn.DescribeClusters(
+ &redshift.DescribeClustersInput{
+ ClusterIdentifier: aws.String(rs.Primary.ID),
+ })
+
+ if err == nil {
+ if len(resp.Clusters) != 0 &&
+ *resp.Clusters[0].ClusterIdentifier == rs.Primary.ID {
+ return fmt.Errorf("Redshift Cluster %s still exists", rs.Primary.ID)
+ }
+ }
+
+ // Return nil if the cluster is already destroyed
+ if awsErr, ok := err.(awserr.Error); ok {
+ if awsErr.Code() == "ClusterNotFound" {
+ return nil
+ }
+ }
+
+ return err
+ }
+
+ return nil
+}
+
+func testAccCheckAWSRedshiftClusterExists(n string, v *redshift.Cluster) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No Redshift Cluster Instance ID is set")
+ }
+
+ conn := testAccProvider.Meta().(*AWSClient).redshiftconn
+ resp, err := conn.DescribeClusters(&redshift.DescribeClustersInput{
+ ClusterIdentifier: aws.String(rs.Primary.ID),
+ })
+
+ if err != nil {
+ return err
+ }
+
+ for _, c := range resp.Clusters {
+ if *c.ClusterIdentifier == rs.Primary.ID {
+ *v = *c
+ return nil
+ }
+ }
+
+ return fmt.Errorf("Redshift Cluster (%s) not found", rs.Primary.ID)
+ }
+}
+
+func TestResourceAWSRedshiftClusterIdentifierValidation(t *testing.T) {
+ cases := []struct {
+ Value string
+ ErrCount int
+ }{
+ {
+ Value: "tEsting",
+ ErrCount: 1,
+ },
+ {
+ Value: "1testing",
+ ErrCount: 1,
+ },
+ {
+ Value: "testing--123",
+ ErrCount: 1,
+ },
+ {
+ Value: "testing!",
+ ErrCount: 1,
+ },
+ {
+ Value: "testing-",
+ ErrCount: 1,
+ },
+ }
+
+ for _, tc := range cases {
+ _, errors := validateRedshiftClusterIdentifier(tc.Value, "aws_redshift_cluster_identifier")
+
+ if len(errors) != tc.ErrCount {
+ t.Fatalf("Expected the Redshift Cluster cluster_identifier to trigger a validation error")
+ }
+ }
+}
+
+func TestResourceAWSRedshiftClusterDbNameValidation(t *testing.T) {
+ cases := []struct {
+ Value string
+ ErrCount int
+ }{
+ {
+ Value: "tEsting",
+ ErrCount: 1,
+ },
+ {
+ Value: "testing1",
+ ErrCount: 1,
+ },
+ {
+ Value: "testing-",
+ ErrCount: 1,
+ },
+ {
+ Value: "",
+ ErrCount: 2,
+ },
+ {
+ Value: randomString(65),
+ ErrCount: 1,
+ },
+ }
+
+ for _, tc := range cases {
+ _, errors := validateRedshiftClusterDbName(tc.Value, "aws_redshift_cluster_database_name")
+
+ if len(errors) != tc.ErrCount {
+ t.Fatalf("Expected the Redshift Cluster database_name to trigger a validation error")
+ }
+ }
+}
+
+func TestResourceAWSRedshiftClusterFinalSnapshotIdentifierValidation(t *testing.T) {
+ cases := []struct {
+ Value string
+ ErrCount int
+ }{
+ {
+ Value: "testing--123",
+ ErrCount: 1,
+ },
+ {
+ Value: "testing-",
+ ErrCount: 1,
+ },
+ {
+ Value: "Testingq123!",
+ ErrCount: 1,
+ },
+ {
+ Value: randomString(256),
+ ErrCount: 1,
+ },
+ }
+
+ for _, tc := range cases {
+ _, errors := validateRedshiftClusterFinalSnapshotIdentifier(tc.Value, "aws_redshift_cluster_final_snapshot_identifier")
+
+ if len(errors) != tc.ErrCount {
+ t.Fatalf("Expected the Redshift Cluster final_snapshot_identifier to trigger a validation error")
+ }
+ }
+}
+
+func TestResourceAWSRedshiftClusterMasterUsernameValidation(t *testing.T) {
+ cases := []struct {
+ Value string
+ ErrCount int
+ }{
+ {
+ Value: "1Testing",
+ ErrCount: 1,
+ },
+ {
+ Value: "Testing!!",
+ ErrCount: 1,
+ },
+ {
+ Value: randomString(129),
+ ErrCount: 1,
+ },
+ }
+
+ for _, tc := range cases {
+ _, errors := validateRedshiftClusterMasterUsername(tc.Value, "aws_redshift_cluster_master_username")
+
+ if len(errors) != tc.ErrCount {
+ t.Fatalf("Expected the Redshift Cluster master_username to trigger a validation error")
+ }
+ }
+}
+
+var testAccAWSRedshiftClusterConfig_basic = `
+provider "aws" {
+ region = "us-west-2"
+}
+
+resource "aws_redshift_cluster" "default" {
+ cluster_identifier = "tf-redshift-cluster-%d"
+ availability_zone = "us-west-2a"
+ database_name = "mydb"
+ master_username = "foo"
+ master_password = "Mustbe8characters"
+ node_type = "dc1.large"
+ cluster_type = "single-node"
+ automated_snapshot_retention_period = 7
+ allow_version_upgrade = false
+}`
diff --git a/builtin/providers/aws/resource_aws_redshift_parameter_group_test.go b/builtin/providers/aws/resource_aws_redshift_parameter_group_test.go
index e5139d288b..969cf97915 100644
--- a/builtin/providers/aws/resource_aws_redshift_parameter_group_test.go
+++ b/builtin/providers/aws/resource_aws_redshift_parameter_group_test.go
@@ -71,7 +71,7 @@ func TestAccAWSRedshiftParameterGroup_withoutParameters(t *testing.T) {
})
}
-func TestResourceAWSRedshiftParameterGroupName_validation(t *testing.T) {
+func TestResourceAWSRedshiftParameterGroupNameValidation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
diff --git a/builtin/providers/aws/resource_aws_redshift_security_group_test.go b/builtin/providers/aws/resource_aws_redshift_security_group_test.go
index 8b61379751..f663f33327 100644
--- a/builtin/providers/aws/resource_aws_redshift_security_group_test.go
+++ b/builtin/providers/aws/resource_aws_redshift_security_group_test.go
@@ -131,7 +131,7 @@ func testAccCheckAWSRedshiftSecurityGroupDestroy(s *terraform.State) error {
return nil
}
-func TestResourceAWSRedshiftSecurityGroupName_validation(t *testing.T) {
+func TestResourceAWSRedshiftSecurityGroupNameValidation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
diff --git a/builtin/providers/aws/resource_aws_redshift_subnet_group_test.go b/builtin/providers/aws/resource_aws_redshift_subnet_group_test.go
index ba69c4f409..296ee569af 100644
--- a/builtin/providers/aws/resource_aws_redshift_subnet_group_test.go
+++ b/builtin/providers/aws/resource_aws_redshift_subnet_group_test.go
@@ -60,7 +60,7 @@ func TestAccAWSRedshiftSubnetGroup_updateSubnetIds(t *testing.T) {
})
}
-func TestResourceAWSRedshiftSubnetGroupName_validation(t *testing.T) {
+func TestResourceAWSRedshiftSubnetGroupNameValidation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
diff --git a/builtin/providers/aws/structure_test.go b/builtin/providers/aws/structure_test.go
index f74911b506..998a25747c 100644
--- a/builtin/providers/aws/structure_test.go
+++ b/builtin/providers/aws/structure_test.go
@@ -427,9 +427,6 @@ func TestExpandParameters(t *testing.T) {
}
}
-<<<<<<< HEAD
-func TestExpandElasticacheParameters(t *testing.T) {
-=======
func TestexpandRedshiftParameters(t *testing.T) {
expanded := []interface{}{
map[string]interface{}{
@@ -456,7 +453,6 @@ func TestexpandRedshiftParameters(t *testing.T) {
}
func TestexpandElasticacheParameters(t *testing.T) {
->>>>>>> Creation of the schema, CRUD and acceptance tests for Redshift Parameter Group
expanded := []interface{}{
map[string]interface{}{
"name": "activerehashing",
diff --git a/website/source/docs/providers/aws/r/redshift_cluster.html.markdown b/website/source/docs/providers/aws/r/redshift_cluster.html.markdown
new file mode 100644
index 0000000000..ec6db4bb3e
--- /dev/null
+++ b/website/source/docs/providers/aws/r/redshift_cluster.html.markdown
@@ -0,0 +1,80 @@
+---
+layout: "aws"
+page_title: "AWS: aws_redshift_cluster"
+sidebar_current: "docs-aws-resource-redshift-cluster"
+---
+
+# aws\_redshift\_cluster
+
+Provides a Redshift Cluster Resource.
+
+## Example Usage
+
+resource "aws_redshift_cluster" "default" {
+ cluster_identifier = "tf-redshift-cluster"
+ database_name = "mydb"
+ master_username = "foo"
+ master_password = "Mustbe8characters"
+ node_type = "dc1.large"
+ cluster_type = "single-node"
+}
+
+## Argument Reference
+
+For more detailed documentation about each argument, refer to
+the [AWS official documentation](http://docs.aws.amazon.com/cli/latest/reference/redshift/index.html#cli-aws-redshift).
+
+The following arguments are supported:
+
+* `cluster_identifier` - (Required) The Cluster Identifier. Must be a lower case
+string.
+* `database_name` - (Optional) The name of the first database to be created when the cluster is created.
+ If you do not provide a name, Amazon Redshift will create a default database called `dev`.
+* `cluster_type` - (Required) The type of the cluster. Valid values are `multi-node` and `single-node`
+* `node_type` - (Required) The node type to be provisioned for the cluster.
+* `master_password` - (Required) Password for the master DB user. Note that this may
+ show up in logs, and it will be stored in the state file
+* `master_username` - (Required) Username for the master DB user
+* `cluster_security_groups` - (Optional) A list of security groups to be associated with this cluster.
+* `vpc_security_group_ids` - (Optional) A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster.
+* `cluster_subnet_group_name` - (Optional) The name of a cluster subnet group to be associated with this cluster. If this parameter is not provided, the resulting cluster will be deployed outside a virtual private cloud (VPC).
+* `availability_zone` - (Optional) The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the cluster. For example, if you have several EC2 instances running in a specific Availability Zone, then you might want the cluster to be provisioned in the same zone in order to decrease network latency.
+* `preferred_maintenance_window` - (Optional) The weekly time range (in UTC) during which automated cluster maintenance can occur.
+ Format: ddd:hh24:mi-ddd:hh24:mi
+* `cluster_parameter_group_name` - (Optional) The name of the parameter group to be associated with this cluster.
+* `automated_snapshot_retention_period` - (Optional) The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with create-cluster-snapshot.
+* `port` - (Optional) The port number on which the cluster accepts incoming connections.
+ The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections. Default port is 5439.
+* `cluster_version` - (Optional) The version of the Amazon Redshift engine software that you want to deploy on the cluster.
+ The version selected runs on all the nodes in the cluster.
+* `allow_version_upgrade` - (Optional) If true, major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster. Default is true.
+* `number_of_nodes` - (Optional) The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. Default is 1.
+* `publicly_accessible` - (Optional) If true, the cluster can be accessed from a public network.
+* `encrypted` - (Optional) If true, the data in the cluster is encrypted at rest.
+* `elastic_ip` - (Optional) The Elastic IP (EIP) address for the cluster.
+* `skip_final_snapshot` - (Optional) Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true, a final cluster snapshot is not created. If false, a final cluster snapshot is created before the cluster is deleted. Default is false.
+* `final_snapshot_identifier` - (Optional) The identifier of the final snapshot that is to be created immediately before deleting the cluster. If this parameter is provided, `skip_final_snapshot` must be false.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The Redshift Cluster ID.
+* `cluster_identifier` - The Cluster Identifier
+* `cluster_type` - The cluster type
+* `node_type` - The type of nodes in the cluster
+* `database_name` - The name of the default database in the Cluster
+* `availability_zone` - The availability zone of the Cluster
+* `automated_snapshot_retention_period` - The backup retention period
+* `preferred_maintenance_window` - The backup window
+* `endpoint` - The connection endpoint
+* `encrypted` - Whether the data in the cluster is encrypted
+* `cluster_security_groups` - The security groups associated with the cluster
+* `vpc_security_group_ids` - The VPC security group Ids associated with the cluster
+* `port` - The Port the cluster responds on
+* `cluster_version` - The version of Redshift engine software
+* `cluster_parameter_group_name` - The name of the parameter group to be associated with this cluster
+* `cluster_subnet_group_name` - The name of a cluster subnet group to be associated with this cluster
+* `cluster_public_key` - The public key for the cluster
+* `cluster_revision_number` - The specific revision number of the database in the cluster
+
diff --git a/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown b/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown
index 0974ee6f46..d7a869520d 100644
--- a/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown
+++ b/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown
@@ -6,7 +6,7 @@ sidebar_current: "docs-aws-resource-redshift-parameter-group"
# aws\_redshift\_parameter\_group
-Provides an Redshift Cluster parameter group resource.
+Provides a Redshift Cluster parameter group resource.
## Example Usage
@@ -27,10 +27,6 @@ resource "aws_redshift_parameter_group" "bar" {
name = "enable_user_activity_logging"
value = "true"
}
-
- tags {
- Environment = "test"
- }
}
```
diff --git a/website/source/docs/providers/aws/r/redshift_security_group.html.markdown b/website/source/docs/providers/aws/r/redshift_security_group.html.markdown
index ec8a67ae15..ebdcc92c7e 100644
--- a/website/source/docs/providers/aws/r/redshift_security_group.html.markdown
+++ b/website/source/docs/providers/aws/r/redshift_security_group.html.markdown
@@ -20,10 +20,6 @@ resource "aws_redshift_security_group" "default" {
ingress {
cidr = "10.0.0.0/24"
}
-
- tags {
- Environment = "test"
- }
}
```
diff --git a/website/source/docs/providers/aws/r/redshift_subnet_group.html.markdown b/website/source/docs/providers/aws/r/redshift_subnet_group.html.markdown
new file mode 100644
index 0000000000..6354c32baa
--- /dev/null
+++ b/website/source/docs/providers/aws/r/redshift_subnet_group.html.markdown
@@ -0,0 +1,59 @@
+---
+layout: "aws"
+page_title: "AWS: aws_redshift_subnet_group"
+sidebar_current: "docs-aws-resource-redshift-subnet-group"
+description: |-
+ Provides a Redshift Subnet Group resource.
+---
+
+# aws\_redshift\_subnet\_group
+
+Creates a new Amazon Redshift subnet group. You must provide a list of one or more subnets in your existing Amazon Virtual Private Cloud (Amazon VPC) when creating an Amazon Redshift subnet group.
+
+## Example Usage
+
+```
+resource "aws_vpc" "foo" {
+ cidr_block = "10.1.0.0/16"
+}
+
+resource "aws_subnet" "foo" {
+ cidr_block = "10.1.1.0/24"
+ availability_zone = "us-west-2a"
+ vpc_id = "${aws_vpc.foo.id}"
+ tags {
+ Name = "tf-dbsubnet-test-1"
+ }
+}
+
+resource "aws_subnet" "bar" {
+ cidr_block = "10.1.2.0/24"
+ availability_zone = "us-west-2b"
+ vpc_id = "${aws_vpc.foo.id}"
+ tags {
+ Name = "tf-dbsubnet-test-2"
+ }
+}
+
+resource "aws_redshift_subnet_group" "foo" {
+ name = "foo"
+ description = "foo description"
+ subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the Redshift Subnet group.
+* `description` - (Required) The description of the Redshift Subnet group.
+* `subnet_ids` - (Optional) An array of VPC subnet IDs.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The Redshift Subnet group ID.
+
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb
index 81d2d0f629..e44be78378 100644
--- a/website/source/layouts/aws.erb
+++ b/website/source/layouts/aws.erb
@@ -437,6 +437,10 @@
Redshift Resources
+                <li<%= sidebar_current("docs-aws-resource-redshift-subnet-group") %>>
+                    <a href="/docs/providers/aws/r/redshift_subnet_group.html">aws_redshift_subnet_group</a>
+                </li>
+
From 5fbec544d7fb34afeb8b74f670301dc8cbba1a90 Mon Sep 17 00:00:00 2001
From: stack72
Date: Mon, 21 Dec 2015 09:54:24 +0000
Subject: [PATCH 302/664] Fixing yet more gofmt errors with imports
---
builtin/providers/docker/resource_docker_container.go | 3 ++-
builtin/providers/postgresql/config.go | 1 +
builtin/providers/vcd/resource_vcd_dnat.go | 1 +
builtin/providers/vcd/resource_vcd_firewall_rules.go | 5 +++--
builtin/providers/vcd/resource_vcd_network.go | 3 ++-
builtin/providers/vcd/resource_vcd_snat.go | 1 +
builtin/providers/vcd/resource_vcd_vapp.go | 3 ++-
builtin/providers/vcd/structure.go | 5 +++--
8 files changed, 15 insertions(+), 7 deletions(-)
diff --git a/builtin/providers/docker/resource_docker_container.go b/builtin/providers/docker/resource_docker_container.go
index 242462e1a7..323850499a 100644
--- a/builtin/providers/docker/resource_docker_container.go
+++ b/builtin/providers/docker/resource_docker_container.go
@@ -4,9 +4,10 @@ import (
"bytes"
"fmt"
+ "regexp"
+
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
- "regexp"
)
func resourceDockerContainer() *schema.Resource {
diff --git a/builtin/providers/postgresql/config.go b/builtin/providers/postgresql/config.go
index 454c016141..8bf7b2daa5 100644
--- a/builtin/providers/postgresql/config.go
+++ b/builtin/providers/postgresql/config.go
@@ -3,6 +3,7 @@ package postgresql
import (
"database/sql"
"fmt"
+
_ "github.com/lib/pq" //PostgreSQL db
)
diff --git a/builtin/providers/vcd/resource_vcd_dnat.go b/builtin/providers/vcd/resource_vcd_dnat.go
index 5c2e8006c1..b764e13ba7 100644
--- a/builtin/providers/vcd/resource_vcd_dnat.go
+++ b/builtin/providers/vcd/resource_vcd_dnat.go
@@ -2,6 +2,7 @@ package vcd
import (
"fmt"
+
"github.com/hashicorp/terraform/helper/schema"
)
diff --git a/builtin/providers/vcd/resource_vcd_firewall_rules.go b/builtin/providers/vcd/resource_vcd_firewall_rules.go
index 913bff8be0..325af24cd3 100644
--- a/builtin/providers/vcd/resource_vcd_firewall_rules.go
+++ b/builtin/providers/vcd/resource_vcd_firewall_rules.go
@@ -2,10 +2,11 @@ package vcd
import (
"fmt"
- "github.com/hashicorp/terraform/helper/schema"
- types "github.com/hmrc/vmware-govcd/types/v56"
"log"
"strings"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ types "github.com/hmrc/vmware-govcd/types/v56"
)
func resourceVcdFirewallRules() *schema.Resource {
diff --git a/builtin/providers/vcd/resource_vcd_network.go b/builtin/providers/vcd/resource_vcd_network.go
index 531afd878d..389f37b6a0 100644
--- a/builtin/providers/vcd/resource_vcd_network.go
+++ b/builtin/providers/vcd/resource_vcd_network.go
@@ -5,10 +5,11 @@ import (
"bytes"
"fmt"
+ "strings"
+
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
types "github.com/hmrc/vmware-govcd/types/v56"
- "strings"
)
func resourceVcdNetwork() *schema.Resource {
diff --git a/builtin/providers/vcd/resource_vcd_snat.go b/builtin/providers/vcd/resource_vcd_snat.go
index c2ae891210..4ad018c863 100644
--- a/builtin/providers/vcd/resource_vcd_snat.go
+++ b/builtin/providers/vcd/resource_vcd_snat.go
@@ -2,6 +2,7 @@ package vcd
import (
"fmt"
+
"github.com/hashicorp/terraform/helper/schema"
)
diff --git a/builtin/providers/vcd/resource_vcd_vapp.go b/builtin/providers/vcd/resource_vcd_vapp.go
index 50fc93563f..8c98ecf21e 100644
--- a/builtin/providers/vcd/resource_vcd_vapp.go
+++ b/builtin/providers/vcd/resource_vcd_vapp.go
@@ -2,9 +2,10 @@ package vcd
import (
"fmt"
+ "log"
+
"github.com/hashicorp/terraform/helper/schema"
types "github.com/hmrc/vmware-govcd/types/v56"
- "log"
)
func resourceVcdVApp() *schema.Resource {
diff --git a/builtin/providers/vcd/structure.go b/builtin/providers/vcd/structure.go
index d4ac65eaee..6a15f0c65b 100644
--- a/builtin/providers/vcd/structure.go
+++ b/builtin/providers/vcd/structure.go
@@ -2,11 +2,12 @@ package vcd
import (
"fmt"
+ "strconv"
+ "time"
+
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
types "github.com/hmrc/vmware-govcd/types/v56"
- "strconv"
- "time"
)
func expandIPRange(configured []interface{}) types.IPRanges {
From c94b7c4584a1f2e290f4e7f26b69821bd9b5a159 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Mon, 21 Dec 2015 10:00:34 -0600
Subject: [PATCH 303/664] provider/aws: Update docs with t1 instance for
accounts with EC2 Classic
---
.../source/docs/configuration/override.html.md | 2 +-
.../source/docs/configuration/resources.html.md | 4 ++--
website/source/docs/modules/usage.html.markdown | 4 ++--
.../docs/providers/aws/r/instance.html.markdown | 12 ++++++++----
.../aws/r/launch_configuration.html.markdown | 12 ++++++------
website/source/index.html.erb | 4 ++--
.../source/intro/getting-started/build.html.md | 16 ++++++++--------
.../source/intro/getting-started/change.html.md | 8 ++++----
8 files changed, 33 insertions(+), 29 deletions(-)
diff --git a/website/source/docs/configuration/override.html.md b/website/source/docs/configuration/override.html.md
index 1f841af08a..b3dbb1dbc5 100644
--- a/website/source/docs/configuration/override.html.md
+++ b/website/source/docs/configuration/override.html.md
@@ -37,7 +37,7 @@ If you have a Terraform configuration `example.tf` with the contents:
```
resource "aws_instance" "web" {
- ami = "ami-d05e75b8"
+ ami = "ami-408c7f28"
}
```
diff --git a/website/source/docs/configuration/resources.html.md b/website/source/docs/configuration/resources.html.md
index 3bf2031ac2..11fb9a9c5f 100644
--- a/website/source/docs/configuration/resources.html.md
+++ b/website/source/docs/configuration/resources.html.md
@@ -25,8 +25,8 @@ A resource configuration looks like the following:
```
resource "aws_instance" "web" {
- ami = "ami-d05e75b8"
- instance_type = "t2.micro"
+ ami = "ami-408c7f28"
+ instance_type = "t1.micro"
}
```
diff --git a/website/source/docs/modules/usage.html.markdown b/website/source/docs/modules/usage.html.markdown
index 65ce75cf33..31921623a9 100644
--- a/website/source/docs/modules/usage.html.markdown
+++ b/website/source/docs/modules/usage.html.markdown
@@ -87,8 +87,8 @@ For example:
```
resource "aws_instance" "client" {
- ami = "ami-d05e75b8"
- instance_type = "t2.micro"
+ ami = "ami-408c7f28"
+ instance_type = "t1.micro"
availability_zone = "${module.consul.server_availability_zone}"
}
```
diff --git a/website/source/docs/providers/aws/r/instance.html.markdown b/website/source/docs/providers/aws/r/instance.html.markdown
index 79e4d22070..5e35161e3f 100644
--- a/website/source/docs/providers/aws/r/instance.html.markdown
+++ b/website/source/docs/providers/aws/r/instance.html.markdown
@@ -14,11 +14,15 @@ and deleted. Instances also support [provisioning](/docs/provisioners/index.html
## Example Usage
```
-# Create a new instance of the `ami-d05e75b8` (Ubuntu 14.04) on an
-# t2.micro node with an AWS Tag naming it "HelloWorld"
+# Create a new instance of the `ami-408c7f28` (Ubuntu 14.04) on a
+# t1.micro node with an AWS Tag naming it "HelloWorld"
+provider "aws" {
+ region = "us-east-1"
+}
+
resource "aws_instance" "web" {
- ami = "ami-d05e75b8"
- instance_type = "t2.micro"
+ ami = "ami-408c7f28"
+ instance_type = "t1.micro"
tags {
Name = "HelloWorld"
}
diff --git a/website/source/docs/providers/aws/r/launch_configuration.html.markdown b/website/source/docs/providers/aws/r/launch_configuration.html.markdown
index 4f820b7f67..dd7dd84fcb 100644
--- a/website/source/docs/providers/aws/r/launch_configuration.html.markdown
+++ b/website/source/docs/providers/aws/r/launch_configuration.html.markdown
@@ -15,8 +15,8 @@ Provides a resource to create a new launch configuration, used for autoscaling g
```
resource "aws_launch_configuration" "as_conf" {
name = "web_config"
- ami = "ami-d05e75b8"
- instance_type = "t2.micro"
+ ami = "ami-408c7f28"
+ instance_type = "t1.micro"
}
```
@@ -33,8 +33,8 @@ with `name_prefix`. Example:
```
resource "aws_launch_configuration" "as_conf" {
name_prefix = "terraform-lc-example-"
- ami = "ami-d05e75b8"
- instance_type = "t2.micro"
+ ami = "ami-408c7f28"
+ instance_type = "t1.micro"
lifecycle {
create_before_destroy = true
@@ -66,8 +66,8 @@ for more information or how to launch [Spot Instances][3] with Terraform.
```
resource "aws_launch_configuration" "as_conf" {
- ami = "ami-d05e75b8"
- instance_type = "t2.micro"
+ ami = "ami-408c7f28"
+ instance_type = "t1.micro"
spot_price = "0.001"
lifecycle {
create_before_destroy = true
diff --git a/website/source/index.html.erb b/website/source/index.html.erb
index 52c8b747bf..c2bf8467bc 100644
--- a/website/source/index.html.erb
+++ b/website/source/index.html.erb
@@ -196,8 +196,8 @@
resource "aws_instance" "app" {
count = 5
- ami = "ami-d05e75b8"
- instance_type = "t2.micro"
+ ami = "ami-408c7f28"
+ instance_type = "t1.micro"
}
diff --git a/website/source/intro/getting-started/build.html.md b/website/source/intro/getting-started/build.html.md
index 633db888b4..aa3c7c506d 100644
--- a/website/source/intro/getting-started/build.html.md
+++ b/website/source/intro/getting-started/build.html.md
@@ -59,8 +59,8 @@ provider "aws" {
}
resource "aws_instance" "example" {
- ami = "ami-d05e75b8"
- instance_type = "t2.micro"
+ ami = "ami-408c7f28"
+ instance_type = "t1.micro"
}
```
@@ -111,9 +111,9 @@ $ terraform plan
...
+ aws_instance.example
- ami: "" => "ami-d05e75b8"
+ ami: "" => "ami-408c7f28"
availability_zone: "" => ""
- instance_type: "" => "t2.micro"
+ instance_type: "" => "t1.micro"
key_name: "" => ""
private_dns: "" => ""
private_ip: "" => ""
@@ -148,8 +148,8 @@ since Terraform waits for the EC2 instance to become available.
```
$ terraform apply
aws_instance.example: Creating...
- ami: "" => "ami-d05e75b8"
- instance_type: "" => "t2.micro"
+ ami: "" => "ami-408c7f28"
+ instance_type: "" => "t1.micro"
Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
@@ -172,9 +172,9 @@ You can inspect the state using `terraform show`:
$ terraform show
aws_instance.example:
id = i-e60900cd
- ami = ami-d05e75b8
+ ami = ami-408c7f28
availability_zone = us-east-1c
- instance_type = t2.micro
+ instance_type = t1.micro
key_name =
private_dns = domU-12-31-39-12-38-AB.compute-1.internal
private_ip = 10.200.59.89
diff --git a/website/source/intro/getting-started/change.html.md b/website/source/intro/getting-started/change.html.md
index 60d14fd4b6..3856e5ad93 100644
--- a/website/source/intro/getting-started/change.html.md
+++ b/website/source/intro/getting-started/change.html.md
@@ -28,8 +28,8 @@ resource in your configuration and change it to the following:
```
resource "aws_instance" "example" {
- ami = "ami-8eb061e6"
- instance_type = "t2.micro"
+ ami = "ami-b8b061d0"
+ instance_type = "t1.micro"
}
```
@@ -47,7 +47,7 @@ $ terraform plan
...
-/+ aws_instance.example
- ami: "ami-d05e75b8" => "ami-8eb061e6" (forces new resource)
+ ami: "ami-408c7f28" => "ami-b8b061d0" (forces new resource)
availability_zone: "us-east-1c" => ""
key_name: "" => ""
private_dns: "domU-12-31-39-12-38-AB.compute-1.internal" => ""
@@ -79,7 +79,7 @@ the change.
$ terraform apply
aws_instance.example: Destroying...
aws_instance.example: Modifying...
- ami: "ami-d05e75b8" => "ami-8eb061e6"
+ ami: "ami-408c7f28" => "ami-b8b061d0"
Apply complete! Resources: 0 added, 1 changed, 1 destroyed.
From a926fa6fdd633fc43127bd18719099e92b1f4377 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Mon, 5 Oct 2015 19:08:33 -0400
Subject: [PATCH 304/664] Adds template_cloudinit_config resource to template
This adds a new resource to template to generate multipart cloudinit
configurations to be used with other providers/resources.
The resource has the ability to gzip and base64 encode the parts.
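For orientation, a self-contained sketch of the multipart, gzip, and base64
pipeline this resource performs. It uses only the Go standard library and is
an illustration, not the provider implementation itself:
```go
package main

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
	"mime/multipart"
	"net/textproto"
)

func main() {
	var body bytes.Buffer
	gz := gzip.NewWriter(&body)

	// Write two cloud-init parts into a single multipart/mixed document.
	mw := multipart.NewWriter(gz)
	for _, script := range []string{"#!/bin/bash\necho one", "#!/bin/bash\necho two"} {
		h := textproto.MIMEHeader{}
		h.Set("Content-Type", "text/x-shellscript")
		pw, err := mw.CreatePart(h)
		if err != nil {
			panic(err)
		}
		pw.Write([]byte(script))
	}
	mw.Close()
	gz.Close()

	// Base64 encode so the result can be handed around as plain-text user data.
	fmt.Println(base64.StdEncoding.EncodeToString(body.Bytes()))
}
```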
---
builtin/providers/template/provider.go | 3 +-
builtin/providers/template/resource.go | 24 +-
.../template/resource_cloudinit_config.go | 223 ++++++++++++++++++
.../resource_cloudinit_config_test.go | 92 ++++++++
.../template/r/cloudinit_config.html.markdown | 68 ++++++
5 files changed, 397 insertions(+), 13 deletions(-)
create mode 100644 builtin/providers/template/resource_cloudinit_config.go
create mode 100644 builtin/providers/template/resource_cloudinit_config_test.go
create mode 100644 website/source/docs/providers/template/r/cloudinit_config.html.markdown
diff --git a/builtin/providers/template/provider.go b/builtin/providers/template/provider.go
index 7513341bc1..1ebf3ae22a 100644
--- a/builtin/providers/template/provider.go
+++ b/builtin/providers/template/provider.go
@@ -8,7 +8,8 @@ import (
func Provider() terraform.ResourceProvider {
return &schema.Provider{
ResourcesMap: map[string]*schema.Resource{
- "template_file": resource(),
+ "template_file": resourceFile(),
+ "template_cloudinit_config": resourceCloudinitConfig(),
},
}
}
diff --git a/builtin/providers/template/resource.go b/builtin/providers/template/resource.go
index 8022c064be..fd0808828c 100644
--- a/builtin/providers/template/resource.go
+++ b/builtin/providers/template/resource.go
@@ -15,12 +15,12 @@ import (
"github.com/hashicorp/terraform/helper/schema"
)
-func resource() *schema.Resource {
+func resourceFile() *schema.Resource {
return &schema.Resource{
- Create: Create,
- Delete: Delete,
- Exists: Exists,
- Read: Read,
+ Create: resourceFileCreate,
+ Delete: resourceFileDelete,
+ Exists: resourceFileExists,
+ Read: resourceFileRead,
Schema: map[string]*schema.Schema{
"template": &schema.Schema{
@@ -69,8 +69,8 @@ func resource() *schema.Resource {
}
}
-func Create(d *schema.ResourceData, meta interface{}) error {
- rendered, err := render(d)
+func resourceFileCreate(d *schema.ResourceData, meta interface{}) error {
+ rendered, err := renderFile(d)
if err != nil {
return err
}
@@ -79,13 +79,13 @@ func Create(d *schema.ResourceData, meta interface{}) error {
return nil
}
-func Delete(d *schema.ResourceData, meta interface{}) error {
+func resourceFileDelete(d *schema.ResourceData, meta interface{}) error {
d.SetId("")
return nil
}
-func Exists(d *schema.ResourceData, meta interface{}) (bool, error) {
- rendered, err := render(d)
+func resourceFileExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ rendered, err := renderFile(d)
if err != nil {
if _, ok := err.(templateRenderError); ok {
log.Printf("[DEBUG] Got error while rendering in Exists: %s", err)
@@ -98,7 +98,7 @@ func Exists(d *schema.ResourceData, meta interface{}) (bool, error) {
return hash(rendered) == d.Id(), nil
}
-func Read(d *schema.ResourceData, meta interface{}) error {
+func resourceFileRead(d *schema.ResourceData, meta interface{}) error {
// Logic is handled in Exists, which only returns true if the rendered
// contents haven't changed. That means if we get here there's nothing to
// do.
@@ -107,7 +107,7 @@ func Read(d *schema.ResourceData, meta interface{}) error {
type templateRenderError error
-func render(d *schema.ResourceData) (string, error) {
+func renderFile(d *schema.ResourceData) (string, error) {
template := d.Get("template").(string)
filename := d.Get("filename").(string)
vars := d.Get("vars").(map[string]interface{})
diff --git a/builtin/providers/template/resource_cloudinit_config.go b/builtin/providers/template/resource_cloudinit_config.go
new file mode 100644
index 0000000000..88af9bef22
--- /dev/null
+++ b/builtin/providers/template/resource_cloudinit_config.go
@@ -0,0 +1,223 @@
+package template
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "mime/multipart"
+ "net/textproto"
+ "strconv"
+
+ "github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceCloudinitConfig() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceCloudinitConfigCreate,
+ Delete: resourceCloudinitConfigDelete,
+ Exists: resourceCloudinitConfigExists,
+ Read: resourceCloudinitConfigRead,
+
+ Schema: map[string]*schema.Schema{
+ "part": &schema.Schema{
+ Type: schema.TypeList,
+ Required: true,
+ ForceNew: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "content_type": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+
+ if _, supported := supportedContentTypes[value]; !supported {
+ errors = append(errors, fmt.Errorf("Part has an unsupported content type: %s", v))
+ }
+
+ return
+ },
+ },
+ "content": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "filename": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "merge_type": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ },
+ },
+ },
+ "gzip": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: true,
+ ForceNew: true,
+ },
+ "base64_encode": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: true,
+ ForceNew: true,
+ },
+ "rendered": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "rendered cloudinit configuration",
+ },
+ },
+ }
+}
+
+func resourceCloudinitConfigCreate(d *schema.ResourceData, meta interface{}) error {
+ rendered, err := renderCloudinitConfig(d)
+ if err != nil {
+ return err
+ }
+
+ d.Set("rendered", rendered)
+ d.SetId(strconv.Itoa(hashcode.String(rendered)))
+ return nil
+}
+
+func resourceCloudinitConfigDelete(d *schema.ResourceData, meta interface{}) error {
+ d.SetId("")
+ return nil
+}
+
+func resourceCloudinitConfigExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ rendered, err := renderCloudinitConfig(d)
+ if err != nil {
+ return false, err
+ }
+
+ return strconv.Itoa(hashcode.String(rendered)) == d.Id(), nil
+}
+
+func resourceCloudinitConfigRead(d *schema.ResourceData, meta interface{}) error {
+ return nil
+}
+
+func renderCloudinitConfig(d *schema.ResourceData) (string, error) {
+ gzipOutput := d.Get("gzip").(bool)
+ base64Output := d.Get("base64_encode").(bool)
+
+ partsValue, hasParts := d.GetOk("part")
+ if !hasParts {
+ return "", fmt.Errorf("No parts found in the cloudinit resource declaration")
+ }
+
+ cloudInitParts := make(cloudInitParts, len(partsValue.([]interface{})))
+ for i, v := range partsValue.([]interface{}) {
+ p := v.(map[string]interface{})
+
+ part := cloudInitPart{}
+ if p, ok := p["content_type"]; ok {
+ part.ContentType = p.(string)
+ }
+ if p, ok := p["content"]; ok {
+ part.Content = p.(string)
+ }
+ if p, ok := p["merge_type"]; ok {
+ part.MergeType = p.(string)
+ }
+ if p, ok := p["filename"]; ok {
+ part.Filename = p.(string)
+ }
+ cloudInitParts[i] = part
+ }
+
+ var buffer bytes.Buffer
+
+ var err error
+ if gzipOutput {
+ gzipWriter := gzip.NewWriter(&buffer)
+ err = renderPartsToWriter(cloudInitParts, gzipWriter)
+ gzipWriter.Close()
+ } else {
+ err = renderPartsToWriter(cloudInitParts, &buffer)
+ }
+ if err != nil {
+ return "", err
+ }
+
+ output := ""
+ if base64Output {
+ output = base64.StdEncoding.EncodeToString(buffer.Bytes())
+ } else {
+ output = buffer.String()
+ }
+
+ return output, nil
+}
+
+func renderPartsToWriter(parts cloudInitParts, writer io.Writer) error {
+ mimeWriter := multipart.NewWriter(writer)
+ defer mimeWriter.Close()
+
+	// we need to set the boundary explicitly, otherwise the boundary is random
+ // and this causes terraform to complain about the resource being different
+ if err := mimeWriter.SetBoundary("MIMEBOUNDRY"); err != nil {
+ return err
+ }
+
+ writer.Write([]byte(fmt.Sprintf("Content-Type: multipart/mixed; boundary=\"%s\"\n", mimeWriter.Boundary())))
+
+ for _, part := range parts {
+ header := textproto.MIMEHeader{}
+ if part.ContentType == "" {
+ header.Set("Content-Type", "text/plain")
+ } else {
+ header.Set("Content-Type", part.ContentType)
+ }
+
+ if part.Filename != "" {
+ header.Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, part.Filename))
+ }
+
+ if part.MergeType != "" {
+ header.Set("X-Merge-Type", part.MergeType)
+ }
+
+ partWriter, err := mimeWriter.CreatePart(header)
+ if err != nil {
+ return err
+ }
+
+ _, err = partWriter.Write([]byte(part.Content))
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type cloudInitPart struct {
+ ContentType string
+ MergeType string
+ Filename string
+ Content string
+}
+
+type cloudInitParts []cloudInitPart
+
+// Support content types as specified by http://cloudinit.readthedocs.org/en/latest/topics/format.html
+var supportedContentTypes = map[string]bool{
+ "text/x-include-once-url": true,
+ "text/x-include-url": true,
+ "text/cloud-config-archive": true,
+ "text/upstart-job": true,
+ "text/cloud-config": true,
+ "text/part-handler": true,
+ "text/x-shellscript": true,
+ "text/cloud-boothook": true,
+}
diff --git a/builtin/providers/template/resource_cloudinit_config_test.go b/builtin/providers/template/resource_cloudinit_config_test.go
new file mode 100644
index 0000000000..83d0a61528
--- /dev/null
+++ b/builtin/providers/template/resource_cloudinit_config_test.go
@@ -0,0 +1,92 @@
+package template
+
+import (
+ "testing"
+
+ r "github.com/hashicorp/terraform/helper/resource"
+ // "github.com/hashicorp/terraform/terraform"
+)
+
+// var testProviders = map[string]terraform.ResourceProvider{
+// "template": Provider(),
+// }
+
+func TestRender(t *testing.T) {
+ testCases := []struct {
+ ResourceBlock string
+ Expected string
+ }{
+ {
+ `resource "template_cloudinit_config" "foo" {
+ gzip = false
+ base64_encode = false
+
+ part {
+ content_type = "text/x-shellscript"
+ content = "baz"
+ }
+ }`,
+ "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDRY\"\n--MIMEBOUNDRY\r\nContent-Type: text/x-shellscript\r\n\r\nbaz\r\n--MIMEBOUNDRY--\r\n",
+ },
+ {
+ `resource "template_cloudinit_config" "foo" {
+ gzip = false
+ base64_encode = false
+
+ part {
+ content_type = "text/x-shellscript"
+ content = "baz"
+ filename = "foobar.sh"
+ }
+ }`,
+ "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDRY\"\n--MIMEBOUNDRY\r\nContent-Type: text/x-shellscript\r\nContent-Disposition: attachment; filename=\"foobar.sh\"\r\n\r\nbaz\r\n--MIMEBOUNDRY--\r\n",
+ },
+ {
+ `resource "template_cloudinit_config" "foo" {
+ gzip = false
+ base64_encode = false
+
+ part {
+ content_type = "text/x-shellscript"
+ content = "baz"
+ }
+ part {
+ content_type = "text/x-shellscript"
+ content = "ffbaz"
+ }
+ }`,
+ "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDRY\"\n--MIMEBOUNDRY\r\nContent-Type: text/x-shellscript\r\n\r\nbaz\r\n--MIMEBOUNDRY\r\nContent-Type: text/x-shellscript\r\n\r\nffbaz\r\n--MIMEBOUNDRY--\r\n",
+ },
+ {
+ `resource "template_cloudinit_config" "foo" {
+ gzip = true
+ base64_encode = false
+
+ part {
+ content_type = "text/x-shellscript"
+ content = "baz"
+ filename = "ah"
+ }
+ part {
+ content_type = "text/x-shellscript"
+ content = "ffbaz"
+ }
+ }`,
+ "\x1f\x8b\b\x00\x00\tn\x88\x00\xff\x94\x8d\xbd\n\xc2@\x10\x84\xfb\x83{\x87\xe3\xfa%}B\x1a\x8d\x85E\x14D\v\xcbM\xb2!\v\xf7Gn\x03\x89O\xaf\x9d\x8a\x95\xe5\f3߷\x8fA(\b\\\xb7D\xa5\xf1\x8b\x13N8K\xe1y\xa5\xa12]\\\u0080\xf3V\xdb\xf6\xd8\x1ev\xe7۩\xb9ܭ\x02\xf8\x88Z}C\x84V)V\xc8\x139\x97\xfb\x99\x93\xbc\x17\r\xe7\x143\v\xc7P\x1a\x14\xc1~\xf2\xaf\xbe2#;\n詶8Y\xad\xb4\xea\xf0\xa1\xff\xf7h5\x8e\xbfO\x00\xad\x9e\x01\x00\x00\xff\xff\xecM\xd3\x1e\xe9\x00\x00\x00",
+ },
+ }
+
+ for _, tt := range testCases {
+ r.Test(t, r.TestCase{
+ Providers: testProviders,
+ Steps: []r.TestStep{
+ r.TestStep{
+ Config: tt.ResourceBlock,
+ Check: r.ComposeTestCheckFunc(
+ r.TestCheckResourceAttr("template_cloudinit_config.foo", "rendered", tt.Expected),
+ ),
+ },
+ },
+ })
+ }
+}
diff --git a/website/source/docs/providers/template/r/cloudinit_config.html.markdown b/website/source/docs/providers/template/r/cloudinit_config.html.markdown
new file mode 100644
index 0000000000..84416e2d5a
--- /dev/null
+++ b/website/source/docs/providers/template/r/cloudinit_config.html.markdown
@@ -0,0 +1,68 @@
+---
+layout: "Template"
+page_title: "Template: cloudinit_multipart"
+sidebar_current: "docs-template-resource-cloudinit-config"
+description: |-
+ Renders a cloud-init config.
+---
+
+# template\_cloudinit\_config
+
+Renders a template from a file.
+
+## Example Usage
+
+```
+resource "template_file" "script" {
+ template = "${file("${path.module}/init.tpl")}"
+
+ vars {
+ consul_address = "${aws_instance.consul.private_ip}"
+ }
+}
+
+resource "template_cloudinit_config" "config" {
+ # Setup hello world script to be called by the cloud-config
+ part {
+ filename = "init.cfg"
+ content_type = "text/part-handler"
+ content = "${template_file.script.rendered}"
+ }
+
+ # Setup cloud-config yaml
+ part {
+ content_type = "text/cloud-config"
+ content = "${file(\"config.yaml\")"
+ }
+}
+
+
+
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `gzip` - (Optional) Specify whether or not to gzip the rendered output.
+
+* `base64_encode` - (Optional) Base64 encoding of the rendered output.
+
+* `part` - (Required) One may specify this many times, this creates a fragment of the rendered cloud-init config.
+
+The `part` block supports:
+
+* `filename` - (Optional) Filename to save part as.
+
+* `content_type` - (Optional) Content type to send file as.
+
+* `content` - (Required) Body for the part.
+
+* `merge_type` - (Optional) Gives the ability to merge multiple blocks of cloud-config together.
+
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `rendered` - The final rendered template.
From abffa67a49ac76699718219ccd40c483e62261ef Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Fri, 27 Nov 2015 18:08:17 +0000
Subject: [PATCH 305/664] Rename resource.go -> resource_template_file.go
This is necessitated by new resources being added to the template
provider.
---
.../providers/template/{resource.go => resource_template_file.go} | 0
.../template/{resource_test.go => resource_template_file_test.go} | 0
2 files changed, 0 insertions(+), 0 deletions(-)
rename builtin/providers/template/{resource.go => resource_template_file.go} (100%)
rename builtin/providers/template/{resource_test.go => resource_template_file_test.go} (100%)
diff --git a/builtin/providers/template/resource.go b/builtin/providers/template/resource_template_file.go
similarity index 100%
rename from builtin/providers/template/resource.go
rename to builtin/providers/template/resource_template_file.go
diff --git a/builtin/providers/template/resource_test.go b/builtin/providers/template/resource_template_file_test.go
similarity index 100%
rename from builtin/providers/template/resource_test.go
rename to builtin/providers/template/resource_template_file_test.go
From ba9c7323a9d19bda4bc5540c3f7009bf0acb14d3 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Fri, 27 Nov 2015 18:10:24 +0000
Subject: [PATCH 306/664] Remove unnecessary commented imports
---
builtin/providers/template/resource_cloudinit_config_test.go | 5 -----
1 file changed, 5 deletions(-)
diff --git a/builtin/providers/template/resource_cloudinit_config_test.go b/builtin/providers/template/resource_cloudinit_config_test.go
index 83d0a61528..b0cd48fd35 100644
--- a/builtin/providers/template/resource_cloudinit_config_test.go
+++ b/builtin/providers/template/resource_cloudinit_config_test.go
@@ -4,13 +4,8 @@ import (
"testing"
r "github.com/hashicorp/terraform/helper/resource"
- // "github.com/hashicorp/terraform/terraform"
)
-// var testProviders = map[string]terraform.ResourceProvider{
-// "template": Provider(),
-// }
-
func TestRender(t *testing.T) {
testCases := []struct {
ResourceBlock string
From 16c8750bab0bff98ab076750cc6d3c1fcb6b7aad Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Sat, 28 Nov 2015 10:07:06 +0000
Subject: [PATCH 307/664] Make minor alterations to the cloudinit docs
---
.../template/r/cloudinit_config.html.markdown | 28 +++++++++++--------
1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/website/source/docs/providers/template/r/cloudinit_config.html.markdown b/website/source/docs/providers/template/r/cloudinit_config.html.markdown
index 84416e2d5a..b82a8be2b7 100644
--- a/website/source/docs/providers/template/r/cloudinit_config.html.markdown
+++ b/website/source/docs/providers/template/r/cloudinit_config.html.markdown
@@ -3,16 +3,17 @@ layout: "Template"
page_title: "Template: cloudinit_multipart"
sidebar_current: "docs-template-resource-cloudinit-config"
description: |-
- Renders a cloud-init config.
+ Renders a multi-part cloud-init config from source files.
---
# template\_cloudinit\_config
-Renders a template from a file.
+Renders a multi-part cloud-init config from source files.
## Example Usage
```
+# Render a part using a `template_file`
resource "template_file" "script" {
template = "${file("${path.module}/init.tpl")}"
@@ -21,7 +22,12 @@ resource "template_file" "script" {
}
}
+# Render a multi-part cloudinit config making use of the part
+# above, and other source files
resource "template_cloudinit_config" "config" {
+ gzip = true
+ base64_encode = true
+
# Setup hello world script to be called by the cloud-config
part {
filename = "init.cfg"
@@ -29,15 +35,16 @@ resource "template_cloudinit_config" "config" {
content = "${template_file.script.rendered}"
}
- # Setup cloud-config yaml
part {
- content_type = "text/cloud-config"
- content = "${file(\"config.yaml\")"
+ content_type = "text/x-shellscript"
+ content = "baz"
+ }
+
+ part {
+ content_type = "text/x-shellscript"
+ content = "ffbaz"
}
}
-
-
-
```
## Argument Reference
@@ -48,7 +55,7 @@ The following arguments are supported:
* `base64_encode` - (Optional) Base64 encoding of the rendered output.
-* `part` - (Required) One may specify this many times, this creates a fragment of the rendered cloud-init config.
+* `part` - (Required) One may specify this many times; each block creates a fragment of the rendered cloud-init config file. The order of the parts in the configuration is maintained in the rendered template.
The `part` block supports:
@@ -60,9 +67,8 @@ The `part` block supports:
* `merge_type` - (Optional) Gives the ability to merge multiple blocks of cloud-config together.
-
## Attributes Reference
The following attributes are exported:
-* `rendered` - The final rendered template.
+* `rendered` - The final rendered multi-part cloudinit config.
From 33d2afc26da82e815489c573a520ba54303f644e Mon Sep 17 00:00:00 2001
From: Simon Thulbourn
Date: Mon, 7 Dec 2015 13:41:52 +0000
Subject: [PATCH 308/664] Add headers to output
The original implementation was missing headers to denote MIME version and
content transfer encoding, which caused issues with EC2.
Signed-off-by: Simon Thulbourn
---
builtin/providers/template/resource_cloudinit_config.go | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/builtin/providers/template/resource_cloudinit_config.go b/builtin/providers/template/resource_cloudinit_config.go
index 88af9bef22..86798b8c78 100644
--- a/builtin/providers/template/resource_cloudinit_config.go
+++ b/builtin/providers/template/resource_cloudinit_config.go
@@ -170,6 +170,7 @@ func renderPartsToWriter(parts cloudInitParts, writer io.Writer) error {
}
writer.Write([]byte(fmt.Sprintf("Content-Type: multipart/mixed; boundary=\"%s\"\n", mimeWriter.Boundary())))
+ writer.Write([]byte("MIME-Version: 1.0\r\n"))
for _, part := range parts {
header := textproto.MIMEHeader{}
@@ -179,6 +180,9 @@ func renderPartsToWriter(parts cloudInitParts, writer io.Writer) error {
header.Set("Content-Type", part.ContentType)
}
+ header.Set("MIME-Version", "1.0")
+ header.Set("Content-Transfer-Encoding", "7bit")
+
if part.Filename != "" {
header.Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, part.Filename))
}
From 3701342716ce28958239288699b4675f757b8da6 Mon Sep 17 00:00:00 2001
From: Simon Thulbourn
Date: Mon, 7 Dec 2015 18:58:45 +0000
Subject: [PATCH 309/664] Alters template provider to use a fork of multipart.
Signed-off-by: Simon Thulbourn
---
builtin/providers/template/resource_cloudinit_config.go | 3 ++-
.../providers/template/resource_cloudinit_config_test.go | 8 ++++----
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/builtin/providers/template/resource_cloudinit_config.go b/builtin/providers/template/resource_cloudinit_config.go
index 86798b8c78..0082dde623 100644
--- a/builtin/providers/template/resource_cloudinit_config.go
+++ b/builtin/providers/template/resource_cloudinit_config.go
@@ -6,12 +6,13 @@ import (
"encoding/base64"
"fmt"
"io"
- "mime/multipart"
"net/textproto"
"strconv"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
+
+ "github.com/sthulb/mime/multipart"
)
func resourceCloudinitConfig() *schema.Resource {
diff --git a/builtin/providers/template/resource_cloudinit_config_test.go b/builtin/providers/template/resource_cloudinit_config_test.go
index b0cd48fd35..afaac19713 100644
--- a/builtin/providers/template/resource_cloudinit_config_test.go
+++ b/builtin/providers/template/resource_cloudinit_config_test.go
@@ -21,7 +21,7 @@ func TestRender(t *testing.T) {
content = "baz"
}
}`,
- "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDRY\"\n--MIMEBOUNDRY\r\nContent-Type: text/x-shellscript\r\n\r\nbaz\r\n--MIMEBOUNDRY--\r\n",
+ "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDRY\"\nMIME-Version: 1.0\r\n--MIMEBOUNDRY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDRY--\r\n",
},
{
`resource "template_cloudinit_config" "foo" {
@@ -34,7 +34,7 @@ func TestRender(t *testing.T) {
filename = "foobar.sh"
}
}`,
- "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDRY\"\n--MIMEBOUNDRY\r\nContent-Type: text/x-shellscript\r\nContent-Disposition: attachment; filename=\"foobar.sh\"\r\n\r\nbaz\r\n--MIMEBOUNDRY--\r\n",
+ "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDRY\"\nMIME-Version: 1.0\r\n--MIMEBOUNDRY\r\nContent-Disposition: attachment; filename=\"foobar.sh\"\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDRY--\r\n",
},
{
`resource "template_cloudinit_config" "foo" {
@@ -50,7 +50,7 @@ func TestRender(t *testing.T) {
content = "ffbaz"
}
}`,
- "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDRY\"\n--MIMEBOUNDRY\r\nContent-Type: text/x-shellscript\r\n\r\nbaz\r\n--MIMEBOUNDRY\r\nContent-Type: text/x-shellscript\r\n\r\nffbaz\r\n--MIMEBOUNDRY--\r\n",
+ "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDRY\"\nMIME-Version: 1.0\r\n--MIMEBOUNDRY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDRY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nffbaz\r\n--MIMEBOUNDRY--\r\n",
},
{
`resource "template_cloudinit_config" "foo" {
@@ -67,7 +67,7 @@ func TestRender(t *testing.T) {
content = "ffbaz"
}
}`,
- "\x1f\x8b\b\x00\x00\tn\x88\x00\xff\x94\x8d\xbd\n\xc2@\x10\x84\xfb\x83{\x87\xe3\xfa%}B\x1a\x8d\x85E\x14D\v\xcbM\xb2!\v\xf7Gn\x03\x89O\xaf\x9d\x8a\x95\xe5\f3߷\x8fA(\b\\\xb7D\xa5\xf1\x8b\x13N8K\xe1y\xa5\xa12]\\\u0080\xf3V\xdb\xf6\xd8\x1ev\xe7۩\xb9ܭ\x02\xf8\x88Z}C\x84V)V\xc8\x139\x97\xfb\x99\x93\xbc\x17\r\xe7\x143\v\xc7P\x1a\x14\xc1~\xf2\xaf\xbe2#;\n詶8Y\xad\xb4\xea\xf0\xa1\xff\xf7h5\x8e\xbfO\x00\xad\x9e\x01\x00\x00\xff\xff\xecM\xd3\x1e\xe9\x00\x00\x00",
+ "\x1f\x8b\b\x00\x00\tn\x88\x00\xff\xac\xce\xc1J\x031\x10\xc6\xf1{`\xdf!\xe4>VO\u0096^\xb4=xX\x05\xa9\x82\xc7\xd9݉;\x90LB2\x85\xadOo-\x88\x8b\xe2\xadDŽ\x1f\xf3\xfd\xef\x93(\x89\xc2\xfe\x98\xa9\xb5\xf1\x10\x943\x16]E\x9ei\\\xdb>\x1dd\xc4rܸ\xee\xa1\xdb\xdd=\xbd\x03\x00\x00\xff\xffmB\x8c\xeed\x01\x00\x00",
},
}
From a4568c596e56009d422590f1fd07bd6ddcb158de Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Mon, 21 Dec 2015 13:31:31 -0500
Subject: [PATCH 310/664] Fix CloudInit doc format and give better example
---
.../template/r/cloudinit_config.html.markdown | 47 +++++++++++--------
1 file changed, 27 insertions(+), 20 deletions(-)
diff --git a/website/source/docs/providers/template/r/cloudinit_config.html.markdown b/website/source/docs/providers/template/r/cloudinit_config.html.markdown
index b82a8be2b7..32f3fe07f4 100644
--- a/website/source/docs/providers/template/r/cloudinit_config.html.markdown
+++ b/website/source/docs/providers/template/r/cloudinit_config.html.markdown
@@ -15,35 +15,42 @@ Renders a multi-part cloud-init config from source files.
```
# Render a part using a `template_file`
resource "template_file" "script" {
- template = "${file("${path.module}/init.tpl")}"
+ template = "${file("${path.module}/init.tpl")}"
- vars {
- consul_address = "${aws_instance.consul.private_ip}"
- }
+ vars {
+ consul_address = "${aws_instance.consul.private_ip}"
+ }
}
# Render a multi-part cloudinit config making use of the part
# above, and other source files
resource "template_cloudinit_config" "config" {
- gzip = true
- base64_encode = true
+ gzip = true
+ base64_encode = true
- # Setup hello world script to be called by the cloud-config
- part {
- filename = "init.cfg"
- content_type = "text/part-handler"
- content = "${template_file.script.rendered}"
- }
+ # Setup hello world script to be called by the cloud-config
+ part {
+ filename = "init.cfg"
+ content_type = "text/part-handler"
+ content = "${template_file.script.rendered}"
+ }
- part {
- content_type = "text/x-shellscript"
- content = "baz"
- }
+ part {
+ content_type = "text/x-shellscript"
+ content = "baz"
+ }
- part {
- content_type = "text/x-shellscript"
- content = "ffbaz"
- }
+ part {
+ content_type = "text/x-shellscript"
+ content = "ffbaz"
+ }
+}
+
+# Start an AWS instance with the cloudinit config as user data
+resource "aws_instance" "web" {
+ ami = "ami-d05e75b8"
+ instance_type = "t2.micro"
+ user_data = "${template_cloudinit_config.config.rendered}"
}
```
From 757a42704e0fd550ecf2d5e84d076517bf85e339 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Mon, 21 Dec 2015 13:52:58 -0500
Subject: [PATCH 311/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ff49d28de0..24fc0edf20 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,7 @@ FEATURES:
* **New resource: `aws_network_acl_rule`** [GH-4286]
* **New resource: `google_pubsub_topic`** [GH-3671]
* **New resource: `google_pubsub_subscription`** [GH-3671]
+ * **New resource: `template_cloudinit_config`** [GH-4095]
* **New resource: `tls_locally_signed_cert`** [GH-3930]
IMPROVEMENTS:
From 092c268681826352390b8abb3420db2142c2b179 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 21 Dec 2015 14:46:25 -0600
Subject: [PATCH 312/664] provider/aws: kinesis firehose stream: retry through
IAM propagation
As I was fixing up the AccTests to not depend on a single existing IAM
role (which this commit does), I noticed that without some sleeping
the kinesis_firehose_delivery_stream would often come back with:
```
msg: Firehose is unable to assume role {{arn}}. Please check the role provided.
code: InvalidArgumentException
```
Similar to the strategy taken in aws_instance with IAM Instance Profile errors,
I dropped in a simple retry loop which seemed to take care of the issue. It seems
that the same permission propagation delays apply here too.
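For reference, the retry-on-propagation pattern described here reduces to a small
sketch in plain Go; the function names, attempt count, and sleep interval below are
illustrative and not the exact provider code. Keeping a single err variable across
attempts means the result of the final attempt is what ultimately gets reported.
```
package main

import (
	"errors"
	"fmt"
	"time"
)

// createWithIAMRetry retries a create call while the error looks like an IAM
// propagation delay. It returns the error from the last attempt, so callers
// can still surface a real failure once the retries are exhausted.
func createWithIAMRetry(create func() error, isPropagationErr func(error) bool) error {
	var err error
	for i := 0; i < 5; i++ {
		err = create()
		if err == nil || !isPropagationErr(err) {
			break
		}
		// IAM roles can take several seconds to propagate; back off and retry.
		time.Sleep(2 * time.Second)
	}
	return err
}

func main() {
	attempts := 0
	err := createWithIAMRetry(
		func() error {
			attempts++
			if attempts < 3 {
				return errors.New("Firehose is unable to assume role")
			}
			return nil
		},
		func(err error) bool { return err != nil },
	)
	fmt.Printf("attempts=%d err=%v\n", attempts, err) // attempts=3 err=<nil>
}
```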
---
...ce_aws_kinesis_firehose_delivery_stream.go | 22 +-
...s_kinesis_firehose_delivery_stream_test.go | 192 +++++++++++++++++-
2 files changed, 200 insertions(+), 14 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream.go b/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream.go
index c39467ee4f..e7b952a337 100644
--- a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream.go
+++ b/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream.go
@@ -2,6 +2,7 @@ package aws
import (
"fmt"
+ "log"
"strings"
"time"
@@ -102,7 +103,7 @@ func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta
DeliveryStreamName: aws.String(sn),
}
- s3_config := &firehose.S3DestinationConfiguration{
+ s3Config := &firehose.S3DestinationConfiguration{
BucketARN: aws.String(d.Get("s3_bucket_arn").(string)),
RoleARN: aws.String(d.Get("role_arn").(string)),
BufferingHints: &firehose.BufferingHints{
@@ -112,12 +113,25 @@ func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta
CompressionFormat: aws.String(d.Get("s3_data_compression").(string)),
}
if v, ok := d.GetOk("s3_prefix"); ok {
- s3_config.Prefix = aws.String(v.(string))
+ s3Config.Prefix = aws.String(v.(string))
}
- input.S3DestinationConfiguration = s3_config
+ input.S3DestinationConfiguration = s3Config
- _, err := conn.CreateDeliveryStream(input)
+ var err error
+ for i := 0; i < 5; i++ {
+ _, err := conn.CreateDeliveryStream(input)
+ if awsErr, ok := err.(awserr.Error); ok {
+ // IAM roles can take ~10 seconds to propagate in AWS:
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
+ if awsErr.Code() == "InvalidArgumentException" && strings.Contains(awsErr.Message(), "Firehose is unable to assume role") {
+ log.Printf("[DEBUG] Firehose could not assume role referenced, retrying...")
+ time.Sleep(2 * time.Second)
+ continue
+ }
+ }
+ break
+ }
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
return fmt.Errorf("[WARN] Error creating Kinesis Firehose Delivery Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code())
diff --git a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_test.go b/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_test.go
index 611e196ce5..5130b32017 100644
--- a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_test.go
+++ b/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"log"
"math/rand"
+ "os"
"strings"
"testing"
"time"
@@ -16,12 +17,17 @@ import (
func TestAccAWSKinesisFirehoseDeliveryStream_basic(t *testing.T) {
var stream firehose.DeliveryStreamDescription
-
ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
- config := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_basic, ri, ri)
+ config := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_basic,
+ os.Getenv("AWS_ACCOUNT_ID"), ri, ri)
resource.Test(t, resource.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
+ PreCheck: func() {
+ testAccPreCheck(t)
+ if os.Getenv("AWS_ACCOUNT_ID") == "" {
+ t.Fatal("AWS_ACCOUNT_ID must be set")
+ }
+ },
Providers: testAccProviders,
CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy,
Steps: []resource.TestStep{
@@ -40,11 +46,18 @@ func TestAccAWSKinesisFirehoseDeliveryStream_s3ConfigUpdates(t *testing.T) {
var stream firehose.DeliveryStreamDescription
ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
- preconfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3, ri, ri)
- postConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3Updates, ri, ri)
+ preconfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3,
+ os.Getenv("AWS_ACCOUNT_ID"), ri, ri)
+ postConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3Updates,
+ os.Getenv("AWS_ACCOUNT_ID"), ri, ri)
resource.Test(t, resource.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
+ PreCheck: func() {
+ testAccPreCheck(t)
+ if os.Getenv("AWS_ACCOUNT_ID") == "" {
+ t.Fatal("AWS_ACCOUNT_ID must be set")
+ }
+ },
Providers: testAccProviders,
CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy,
Steps: []resource.TestStep{
@@ -147,41 +160,200 @@ func testAccCheckKinesisFirehoseDeliveryStreamDestroy(s *terraform.State) error
}
var testAccKinesisFirehoseDeliveryStreamConfig_basic = `
+resource "aws_iam_role" "firehose" {
+ name = "terraform_acctest_firehose_delivery_role"
+ assume_role_policy = <
Date: Mon, 21 Dec 2015 15:09:12 -0600
Subject: [PATCH 313/664] provider/aws: route table test; use standard account
id env var
also moves the env var check inside the PreCheck, which makes it a
little simpler
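As a side note, the PreCheck approach this change adopts can be captured in a small
helper; requireEnv below is an illustrative sketch, not part of the provider, and
simply fails the test early when a required variable is missing.
```
package aws

import (
	"os"
	"testing"
)

// requireEnv fails the acceptance test early when a required environment
// variable is missing, so the guard lives inside PreCheck rather than in
// the body of every test function. Illustrative helper only.
func requireEnv(t *testing.T, name string) string {
	v := os.Getenv(name)
	if v == "" {
		t.Fatalf("%s must be set for acceptance tests", name)
	}
	return v
}
```
A PreCheck then reads `PreCheck: func() { testAccPreCheck(t); requireEnv(t, "AWS_ACCOUNT_ID") }`,
keeping the account ID guard next to the rest of the precondition checks.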
---
.../aws/resource_aws_route_table_test.go | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_route_table_test.go b/builtin/providers/aws/resource_aws_route_table_test.go
index 17fd4087ec..5c74a57ddb 100644
--- a/builtin/providers/aws/resource_aws_route_table_test.go
+++ b/builtin/providers/aws/resource_aws_route_table_test.go
@@ -218,11 +218,6 @@ func testAccCheckRouteTableExists(n string, v *ec2.RouteTable) resource.TestChec
func TestAccAWSRouteTable_vpcPeering(t *testing.T) {
var v ec2.RouteTable
- acctId := os.Getenv("TF_ACC_ID")
- if acctId == "" && os.Getenv(resource.TestEnvVar) != "" {
- t.Fatal("Error: Test TestAccAWSRouteTable_vpcPeering requires an Account ID in TF_ACC_ID ")
- }
-
testCheck := func(*terraform.State) error {
if len(v.Routes) != 2 {
return fmt.Errorf("bad routes: %#v", v.Routes)
@@ -243,12 +238,17 @@ func TestAccAWSRouteTable_vpcPeering(t *testing.T) {
return nil
}
resource.Test(t, resource.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
+ PreCheck: func() {
+ testAccPreCheck(t)
+ if os.Getenv("AWS_ACCOUNT_ID") == "" {
+ t.Fatal("Error: Test TestAccAWSRouteTable_vpcPeering requires an Account ID in AWS_ACCOUNT_ID ")
+ }
+ },
Providers: testAccProviders,
CheckDestroy: testAccCheckRouteTableDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccRouteTableVpcPeeringConfig(acctId),
+ Config: testAccRouteTableVpcPeeringConfig(os.Getenv("AWS_ACCOUNT_ID")),
Check: resource.ComposeTestCheckFunc(
testAccCheckRouteTableExists(
"aws_route_table.foo", &v),
@@ -401,7 +401,7 @@ resource "aws_route_table" "foo" {
`
// VPC Peering connections are prefixed with pcx
-// This test requires an ENV var, TF_ACC_ID, with a valid AWS Account ID
+// This test requires an ENV var, AWS_ACCOUNT_ID, with a valid AWS Account ID
func testAccRouteTableVpcPeeringConfig(acc string) string {
cfg := `resource "aws_vpc" "foo" {
cidr_block = "10.1.0.0/16"
From 562b7dfab783bfa8e030e0a4d49ff6f32229a842 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Mon, 21 Dec 2015 17:46:55 -0500
Subject: [PATCH 314/664] Add documentation for Artifactory remote state
---
website/source/docs/commands/remote-config.html.markdown | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/website/source/docs/commands/remote-config.html.markdown b/website/source/docs/commands/remote-config.html.markdown
index 818642929b..e7025d9d56 100644
--- a/website/source/docs/commands/remote-config.html.markdown
+++ b/website/source/docs/commands/remote-config.html.markdown
@@ -82,6 +82,13 @@ The following backends are supported:
* `acl` - [Canned ACL](http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl)
to be applied to the state file.
+* Artifactory - Stores the state as an artifact in a given repository in
+ Artifactory. Requires the `url`, `username`, `password`, `repo` and `subpath`
+ variables. Generic HTTP repositories are supported, and state from different
+ configurations may be kept at different subpaths within the repository. The URL
+ must include the path to the Artifactory installation - it will likely end in
+ `/artifactory`.
+
* HTTP - Stores the state using a simple REST client. State will be fetched
via GET, updated via POST, and purged with DELETE. Requires the `address` variable.
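To make the HTTP backend contract above concrete, the following is a minimal Go
sketch of a client that follows it; the function names and error handling are
illustrative, not the backend implementation itself, and the address would come
from the `address` variable passed to remote config.
```
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// State is fetched with GET, written with POST, and purged with DELETE,
// all against the single configured address.
func fetchState(addr string) ([]byte, error) {
	resp, err := http.Get(addr)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

func pushState(addr string, state []byte) error {
	resp, err := http.Post(addr, "application/json", bytes.NewReader(state))
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func purgeState(addr string) error {
	req, err := http.NewRequest("DELETE", addr, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func main() {
	// Placeholder only; a real backend address is supplied via remote config.
	fmt.Println("state client sketch; see fetchState, pushState, purgeState")
}
```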
From eb13baf96f7fcc9e2d4b0069f34b6872be341240 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Mon, 21 Dec 2015 17:50:25 -0500
Subject: [PATCH 315/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 24fc0edf20..cfeec8fb9f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,7 @@ FEATURES:
* **New resource: `google_pubsub_subscription`** [GH-3671]
* **New resource: `template_cloudinit_config`** [GH-4095]
* **New resource: `tls_locally_signed_cert`** [GH-3930]
+ * **New remote state backend: `artifactory`** [GH-3684]
IMPROVEMENTS:
From f473c2a6d42a12de5bf41ca1f99b3668b8a84a54 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 21 Dec 2015 17:41:55 -0600
Subject: [PATCH 316/664] provider/aws: don't set static ELB names in tests
Prevents ELB name collisions during test runs
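A minimal sketch of the naming approach this change moves toward: drop fixed names
where the provider can generate one, and where a name is still required, append a
random suffix so concurrent test runs cannot collide. The helper name below is
illustrative; classic ELB names are limited to 32 characters, so the suffix is
kept short.
```
package aws

import (
	"fmt"
	"math/rand"
	"time"
)

// randomTestName returns prefix plus a short random suffix. Keeping the
// suffix to at most six digits stays well inside the 32-character ELB
// name limit for short prefixes.
func randomTestName(prefix string) string {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return fmt.Sprintf("%s-%06d", prefix, r.Intn(1000000))
}
```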
---
.../resource_aws_autoscaling_group_test.go | 7 +--
.../aws/resource_aws_ecs_service_test.go | 1 -
.../providers/aws/resource_aws_elb_test.go | 56 +++----------------
3 files changed, 12 insertions(+), 52 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_autoscaling_group_test.go b/builtin/providers/aws/resource_aws_autoscaling_group_test.go
index d9268f6925..8c2b934f31 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_group_test.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_group_test.go
@@ -161,7 +161,7 @@ func TestAccAWSAutoScalingGroup_WithLoadBalancer(t *testing.T) {
CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccAWSAutoScalingGroupConfigWithLoadBalancer,
+ Config: fmt.Sprintf(testAccAWSAutoScalingGroupConfigWithLoadBalancer),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
testAccCheckAWSAutoScalingGroupAttributesLoadBalancer(&group),
@@ -280,8 +280,8 @@ func testAccCheckAWSAutoScalingGroupAttributes(group *autoscaling.Group) resourc
func testAccCheckAWSAutoScalingGroupAttributesLoadBalancer(group *autoscaling.Group) resource.TestCheckFunc {
return func(s *terraform.State) error {
- if *group.LoadBalancerNames[0] != "foobar-terraform-test" {
- return fmt.Errorf("Bad load_balancers: %#v", group.LoadBalancerNames[0])
+ if len(group.LoadBalancerNames) != 1 {
+ return fmt.Errorf("Bad load_balancers: %v", group.LoadBalancerNames)
}
return nil
@@ -513,7 +513,6 @@ resource "aws_security_group" "foo" {
}
resource "aws_elb" "bar" {
- name = "foobar-terraform-test"
subnets = ["${aws_subnet.foo.id}"]
security_groups = ["${aws_security_group.foo.id}"]
diff --git a/builtin/providers/aws/resource_aws_ecs_service_test.go b/builtin/providers/aws/resource_aws_ecs_service_test.go
index a2f71ad2f8..c77f874355 100644
--- a/builtin/providers/aws/resource_aws_ecs_service_test.go
+++ b/builtin/providers/aws/resource_aws_ecs_service_test.go
@@ -356,7 +356,6 @@ EOF
}
resource "aws_elb" "main" {
- name = "foobar-terraform-test"
availability_zones = ["us-west-2a"]
listener {
diff --git a/builtin/providers/aws/resource_aws_elb_test.go b/builtin/providers/aws/resource_aws_elb_test.go
index 6ccc5cd66f..15f40b6d97 100644
--- a/builtin/providers/aws/resource_aws_elb_test.go
+++ b/builtin/providers/aws/resource_aws_elb_test.go
@@ -2,11 +2,12 @@ package aws
import (
"fmt"
- "os"
+ "math/rand"
"reflect"
"regexp"
"sort"
"testing"
+ "time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
@@ -17,7 +18,6 @@ import (
func TestAccAWSELB_basic(t *testing.T) {
var conf elb.LoadBalancerDescription
- ssl_certificate_id := os.Getenv("AWS_SSL_CERTIFICATE_ID")
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -29,8 +29,6 @@ func TestAccAWSELB_basic(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.bar", &conf),
testAccCheckAWSELBAttributes(&conf),
- resource.TestCheckResourceAttr(
- "aws_elb.bar", "name", "foobar-terraform-test"),
resource.TestCheckResourceAttr(
"aws_elb.bar", "availability_zones.2487133097", "us-west-2a"),
resource.TestCheckResourceAttr(
@@ -41,8 +39,6 @@ func TestAccAWSELB_basic(t *testing.T) {
"aws_elb.bar", "listener.206423021.instance_port", "8000"),
resource.TestCheckResourceAttr(
"aws_elb.bar", "listener.206423021.instance_protocol", "http"),
- resource.TestCheckResourceAttr(
- "aws_elb.bar", "listener.206423021.ssl_certificate_id", ssl_certificate_id),
resource.TestCheckResourceAttr(
"aws_elb.bar", "listener.206423021.lb_port", "80"),
resource.TestCheckResourceAttr(
@@ -58,17 +54,20 @@ func TestAccAWSELB_basic(t *testing.T) {
func TestAccAWSELB_fullCharacterRange(t *testing.T) {
var conf elb.LoadBalancerDescription
+ lbName := fmt.Sprintf("FoobarTerraform-test123-%d",
+ rand.New(rand.NewSource(time.Now().UnixNano())).Int())
+
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSELBDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccAWSELBFullRangeOfCharacters,
+ Config: fmt.Sprintf(testAccAWSELBFullRangeOfCharacters, lbName),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.foo", &conf),
resource.TestCheckResourceAttr(
- "aws_elb.foo", "name", "FoobarTerraform-test123"),
+ "aws_elb.foo", "name", lbName),
),
},
},
@@ -87,8 +86,6 @@ func TestAccAWSELB_AccessLogs(t *testing.T) {
Config: testAccAWSELBAccessLogs,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.foo", &conf),
- resource.TestCheckResourceAttr(
- "aws_elb.foo", "name", "FoobarTerraform-test123"),
),
},
@@ -96,8 +93,6 @@ func TestAccAWSELB_AccessLogs(t *testing.T) {
Config: testAccAWSELBAccessLogsOn,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.foo", &conf),
- resource.TestCheckResourceAttr(
- "aws_elb.foo", "name", "FoobarTerraform-test123"),
resource.TestCheckResourceAttr(
"aws_elb.foo", "access_logs.#", "1"),
resource.TestCheckResourceAttr(
@@ -111,8 +106,6 @@ func TestAccAWSELB_AccessLogs(t *testing.T) {
Config: testAccAWSELBAccessLogs,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.foo", &conf),
- resource.TestCheckResourceAttr(
- "aws_elb.foo", "name", "FoobarTerraform-test123"),
resource.TestCheckResourceAttr(
"aws_elb.foo", "access_logs.#", "0"),
),
@@ -156,8 +149,6 @@ func TestAccAWSELB_tags(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.bar", &conf),
testAccCheckAWSELBAttributes(&conf),
- resource.TestCheckResourceAttr(
- "aws_elb.bar", "name", "foobar-terraform-test"),
testAccLoadTags(&conf, &td),
testAccCheckELBTags(&td.Tags, "bar", "baz"),
),
@@ -168,8 +159,6 @@ func TestAccAWSELB_tags(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.bar", &conf),
testAccCheckAWSELBAttributes(&conf),
- resource.TestCheckResourceAttr(
- "aws_elb.bar", "name", "foobar-terraform-test"),
testAccLoadTags(&conf, &td),
testAccCheckELBTags(&td.Tags, "foo", "bar"),
testAccCheckELBTags(&td.Tags, "new", "type"),
@@ -591,10 +580,6 @@ func testAccCheckAWSELBAttributes(conf *elb.LoadBalancerDescription) resource.Te
return fmt.Errorf("bad availability_zones")
}
- if *conf.LoadBalancerName != "foobar-terraform-test" {
- return fmt.Errorf("bad name")
- }
-
l := elb.Listener{
InstancePort: aws.Int64(int64(8000)),
InstanceProtocol: aws.String("HTTP"),
@@ -629,10 +614,6 @@ func testAccCheckAWSELBAttributesHealthCheck(conf *elb.LoadBalancerDescription)
return fmt.Errorf("bad availability_zones")
}
- if *conf.LoadBalancerName != "foobar-terraform-test" {
- return fmt.Errorf("bad name")
- }
-
check := &elb.HealthCheck{
Timeout: aws.Int64(int64(30)),
UnhealthyThreshold: aws.Int64(int64(5)),
@@ -699,7 +680,6 @@ func testAccCheckAWSELBExists(n string, res *elb.LoadBalancerDescription) resour
const testAccAWSELBConfig = `
resource "aws_elb" "bar" {
- name = "foobar-terraform-test"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener {
@@ -720,7 +700,7 @@ resource "aws_elb" "bar" {
const testAccAWSELBFullRangeOfCharacters = `
resource "aws_elb" "foo" {
- name = "FoobarTerraform-test123"
+ name = "%s"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener {
@@ -734,7 +714,6 @@ resource "aws_elb" "foo" {
const testAccAWSELBAccessLogs = `
resource "aws_elb" "foo" {
- name = "FoobarTerraform-test123"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener {
@@ -773,7 +752,6 @@ EOF
}
resource "aws_elb" "foo" {
- name = "FoobarTerraform-test123"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener {
@@ -805,7 +783,6 @@ resource "aws_elb" "foo" {
const testAccAWSELBConfig_TagUpdate = `
resource "aws_elb" "bar" {
- name = "foobar-terraform-test"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener {
@@ -826,7 +803,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigNewInstance = `
resource "aws_elb" "bar" {
- name = "foobar-terraform-test"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener {
@@ -848,7 +824,6 @@ resource "aws_instance" "foo" {
const testAccAWSELBConfigListenerSSLCertificateId = `
resource "aws_elb" "bar" {
- name = "foobar-terraform-test"
availability_zones = ["us-west-2a"]
listener {
@@ -863,7 +838,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigHealthCheck = `
resource "aws_elb" "bar" {
- name = "foobar-terraform-test"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener {
@@ -885,7 +859,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigHealthCheck_update = `
resource "aws_elb" "bar" {
- name = "foobar-terraform-test"
availability_zones = ["us-west-2a"]
listener {
@@ -907,7 +880,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigListener_update = `
resource "aws_elb" "bar" {
- name = "foobar-terraform-test"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener {
@@ -921,7 +893,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigIdleTimeout = `
resource "aws_elb" "bar" {
- name = "foobar-terraform-test"
availability_zones = ["us-west-2a"]
listener {
@@ -937,7 +908,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigIdleTimeout_update = `
resource "aws_elb" "bar" {
- name = "foobar-terraform-test"
availability_zones = ["us-west-2a"]
listener {
@@ -953,7 +923,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigConnectionDraining = `
resource "aws_elb" "bar" {
- name = "foobar-terraform-test"
availability_zones = ["us-west-2a"]
listener {
@@ -970,7 +939,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigConnectionDraining_update_timeout = `
resource "aws_elb" "bar" {
- name = "foobar-terraform-test"
availability_zones = ["us-west-2a"]
listener {
@@ -987,7 +955,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigConnectionDraining_update_disable = `
resource "aws_elb" "bar" {
- name = "foobar-terraform-test"
availability_zones = ["us-west-2a"]
listener {
@@ -1003,7 +970,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigSecurityGroups = `
resource "aws_elb" "bar" {
- name = "foobar-terraform-test"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener {
@@ -1017,9 +983,6 @@ resource "aws_elb" "bar" {
}
resource "aws_security_group" "bar" {
- name = "terraform-elb-acceptance-test"
- description = "Used in the terraform acceptance tests for the elb resource"
-
ingress {
protocol = "tcp"
from_port = 80
@@ -1033,7 +996,7 @@ resource "aws_security_group" "bar" {
// builtin/providers/aws/resource_aws_iam_server_certificate_test.go
var testAccELBIAMServerCertConfig = `
resource "aws_iam_server_certificate" "test_cert" {
- name = "terraform-test-cert"
+ name = "terraform-test-cert-elb"
certificate_body = <
Date: Mon, 21 Dec 2015 21:10:13 -0600
Subject: [PATCH 317/664] provider/aws: fix ECS service CheckDestroy in tests
---
builtin/providers/aws/resource_aws_ecs_service_test.go | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_ecs_service_test.go b/builtin/providers/aws/resource_aws_ecs_service_test.go
index c77f874355..9eb9bce186 100644
--- a/builtin/providers/aws/resource_aws_ecs_service_test.go
+++ b/builtin/providers/aws/resource_aws_ecs_service_test.go
@@ -6,6 +6,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -210,6 +211,10 @@ func testAccCheckAWSEcsServiceDestroy(s *terraform.State) error {
Services: []*string{aws.String(rs.Primary.ID)},
})
+ if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ClusterNotFoundException" {
+ continue
+ }
+
if err == nil {
if len(out.Services) > 0 {
return fmt.Errorf("ECS service still exists:\n%#v", out.Services)
From 67832f6bd06cdf5eb64f5948db684e2892a85fc6 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 21 Dec 2015 21:12:47 -0600
Subject: [PATCH 318/664] provider/aws: fix cache SG tests
* CheckDestroy should handle not found error (see the sketch after this list)
* Pin provider in config to region most likely to have EC2 Classic
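The first point condenses to a pattern repeated across the CheckDestroy fixes in
this series. The sketch below is illustrative only (the helper name and error-code
parameter are not real provider code): a matching not-found error means the
resource was destroyed, any other error is surfaced, and a successful describe is
treated as the resource still existing (the real checks also inspect the response
body before failing).
```
package aws

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// checkDestroyed condenses the CheckDestroy pattern used in these tests:
// a "not found" error from the describe call means the resource is gone,
// any other error is returned to the caller, and a nil error means the
// describe succeeded and the resource may still exist.
func checkDestroyed(describeErr error, notFoundCode, id string) error {
	if ae, ok := describeErr.(awserr.Error); ok && ae.Code() == notFoundCode {
		return nil // destroyed, as expected
	}
	if describeErr != nil {
		return describeErr // unexpected API error
	}
	return fmt.Errorf("resource still exists: %s", id)
}
```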
---
.../resource_aws_elasticache_security_group_test.go | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_elasticache_security_group_test.go b/builtin/providers/aws/resource_aws_elasticache_security_group_test.go
index 87644242fb..452e7b896e 100644
--- a/builtin/providers/aws/resource_aws_elasticache_security_group_test.go
+++ b/builtin/providers/aws/resource_aws_elasticache_security_group_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elasticache"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -36,12 +37,14 @@ func testAccCheckAWSElasticacheSecurityGroupDestroy(s *terraform.State) error {
res, err := conn.DescribeCacheSecurityGroups(&elasticache.DescribeCacheSecurityGroupsInput{
CacheSecurityGroupName: aws.String(rs.Primary.ID),
})
- if err != nil {
- return err
+ if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "CacheSecurityGroupNotFound" {
+ continue
}
+
if len(res.CacheSecurityGroups) > 0 {
- return fmt.Errorf("still exist.")
+ return fmt.Errorf("cache security group still exists")
}
+ return err
}
return nil
}
@@ -69,6 +72,9 @@ func testAccCheckAWSElasticacheSecurityGroupExists(n string) resource.TestCheckF
}
var testAccAWSElasticacheSecurityGroupConfig = fmt.Sprintf(`
+provider "aws" {
+ region = "us-east-1"
+}
resource "aws_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
From 1d5c65fa867fbe36ec0711f1d9095accbcee59a1 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 21 Dec 2015 21:23:57 -0600
Subject: [PATCH 319/664] provider/aws: fix cookie stickiness policy test
destroys
---
...e_aws_app_cookie_stickiness_policy_test.go | 28 +++++++++++++++++--
...ce_aws_lb_cookie_stickiness_policy_test.go | 26 +++++++++++++++--
2 files changed, 49 insertions(+), 5 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_app_cookie_stickiness_policy_test.go b/builtin/providers/aws/resource_aws_app_cookie_stickiness_policy_test.go
index ff13da2856..d1fd59f690 100644
--- a/builtin/providers/aws/resource_aws_app_cookie_stickiness_policy_test.go
+++ b/builtin/providers/aws/resource_aws_app_cookie_stickiness_policy_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/hashicorp/terraform/helper/resource"
@@ -40,10 +41,31 @@ func TestAccAWSAppCookieStickinessPolicy_basic(t *testing.T) {
}
func testAccCheckAppCookieStickinessPolicyDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
- }
+ conn := testAccProvider.Meta().(*AWSClient).elbconn
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_app_cookie_stickiness_policy" {
+ continue
+ }
+
+ lbName, _, policyName := resourceAwsAppCookieStickinessPolicyParseId(
+ rs.Primary.ID)
+ out, err := conn.DescribeLoadBalancerPolicies(
+ &elb.DescribeLoadBalancerPoliciesInput{
+ LoadBalancerName: aws.String(lbName),
+ PolicyNames: []*string{aws.String(policyName)},
+ })
+ if err != nil {
+ if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "PolicyNotFound" || ec2err.Code() == "LoadBalancerNotFound") {
+ continue
+ }
+ return err
+ }
+
+ if len(out.PolicyDescriptions) > 0 {
+ return fmt.Errorf("Policy still exists")
+ }
+ }
return nil
}
diff --git a/builtin/providers/aws/resource_aws_lb_cookie_stickiness_policy_test.go b/builtin/providers/aws/resource_aws_lb_cookie_stickiness_policy_test.go
index 7417f01217..7337432e44 100644
--- a/builtin/providers/aws/resource_aws_lb_cookie_stickiness_policy_test.go
+++ b/builtin/providers/aws/resource_aws_lb_cookie_stickiness_policy_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/hashicorp/terraform/helper/resource"
@@ -40,8 +41,29 @@ func TestAccAWSLBCookieStickinessPolicy_basic(t *testing.T) {
}
func testAccCheckLBCookieStickinessPolicyDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
+ conn := testAccProvider.Meta().(*AWSClient).elbconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_lb_cookie_stickiness_policy" {
+ continue
+ }
+
+ lbName, _, policyName := resourceAwsLBCookieStickinessPolicyParseId(rs.Primary.ID)
+ out, err := conn.DescribeLoadBalancerPolicies(
+ &elb.DescribeLoadBalancerPoliciesInput{
+ LoadBalancerName: aws.String(lbName),
+ PolicyNames: []*string{aws.String(policyName)},
+ })
+ if err != nil {
+ if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "PolicyNotFound" || ec2err.Code() == "LoadBalancerNotFound") {
+ continue
+ }
+ return err
+ }
+
+ if len(out.PolicyDescriptions) > 0 {
+ return fmt.Errorf("Policy still exists")
+ }
}
return nil
From 7d6b98060a886fae65e4a01ef9585477e08cab18 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 21 Dec 2015 21:43:42 -0600
Subject: [PATCH 320/664] provider/aws: fix CheckDestroy on a bunch of
resources
---
...resource_aws_codecommit_repository_test.go | 22 ++++++++++++--
.../aws/resource_aws_codedeploy_app_test.go | 2 +-
...ce_aws_codedeploy_deployment_group_test.go | 5 ++++
.../aws/resource_aws_customer_gateway_test.go | 30 +++++++++++++++++--
.../aws/resource_aws_db_instance_test.go | 4 +++
.../aws/resource_aws_vpc_dhcp_options_test.go | 5 +++-
6 files changed, 61 insertions(+), 7 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_codecommit_repository_test.go b/builtin/providers/aws/resource_aws_codecommit_repository_test.go
index 332e2b04a6..0f7c8da534 100644
--- a/builtin/providers/aws/resource_aws_codecommit_repository_test.go
+++ b/builtin/providers/aws/resource_aws_codecommit_repository_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/codecommit"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -86,9 +87,24 @@ func testAccCheckCodeCommitRepositoryExists(name string) resource.TestCheckFunc
}
func testAccCheckCodeCommitRepositoryDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v",
- s.RootModule().Resources)
+ conn := testAccProvider.Meta().(*AWSClient).codecommitconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_codecommit_repository" {
+ continue
+ }
+
+ _, err := conn.GetRepository(&codecommit.GetRepositoryInput{
+ RepositoryName: aws.String(rs.Primary.ID),
+ })
+
+ if ae, ok := err.(awserr.Error); ok && ae.Code() == "RepositoryDoesNotExistException" {
+ continue
+ }
+ if err == nil {
+ return fmt.Errorf("Repository still exists: %s", rs.Primary.ID)
+ }
+ return err
}
return nil
diff --git a/builtin/providers/aws/resource_aws_codedeploy_app_test.go b/builtin/providers/aws/resource_aws_codedeploy_app_test.go
index 9610a01a74..6bfa141aea 100644
--- a/builtin/providers/aws/resource_aws_codedeploy_app_test.go
+++ b/builtin/providers/aws/resource_aws_codedeploy_app_test.go
@@ -41,7 +41,7 @@ func testAccCheckAWSCodeDeployAppDestroy(s *terraform.State) error {
}
resp, err := conn.GetApplication(&codedeploy.GetApplicationInput{
- ApplicationName: aws.String(rs.Primary.ID),
+ ApplicationName: aws.String(rs.Primary.Attributes["name"]),
})
if err == nil {
diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go
index 3b873fe3ba..fa97ca4cc6 100644
--- a/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go
+++ b/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/codedeploy"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -45,6 +46,10 @@ func testAccCheckAWSCodeDeployDeploymentGroupDestroy(s *terraform.State) error {
DeploymentGroupName: aws.String(rs.Primary.Attributes["deployment_group_name"]),
})
+ if ae, ok := err.(awserr.Error); ok && ae.Code() == "ApplicationDoesNotExistException" {
+ continue
+ }
+
if err == nil {
if resp.DeploymentGroupInfo.DeploymentGroupName != nil {
return fmt.Errorf("CodeDeploy deployment group still exists:\n%#v", *resp.DeploymentGroupInfo.DeploymentGroupName)
diff --git a/builtin/providers/aws/resource_aws_customer_gateway_test.go b/builtin/providers/aws/resource_aws_customer_gateway_test.go
index 9e3daec6d0..055e9054c1 100644
--- a/builtin/providers/aws/resource_aws_customer_gateway_test.go
+++ b/builtin/providers/aws/resource_aws_customer_gateway_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/resource"
@@ -46,8 +47,33 @@ func TestAccAWSCustomerGateway_basic(t *testing.T) {
}
func testAccCheckCustomerGatewayDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
+ conn := testAccProvider.Meta().(*AWSClient).ec2conn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_customer_gateway" {
+ continue
+ }
+
+ gatewayFilter := &ec2.Filter{
+ Name: aws.String("customer-gateway-id"),
+ Values: []*string{aws.String(rs.Primary.ID)},
+ }
+
+ resp, err := conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{
+ Filters: []*ec2.Filter{gatewayFilter},
+ })
+
+ if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidCustomerGatewayID.NotFound" {
+ continue
+ }
+
+ if err == nil {
+ if len(resp.CustomerGateways) > 0 {
+ return fmt.Errorf("Customer gateway still exists: %v", resp.CustomerGateways)
+ }
+ }
+
+ return err
}
return nil
diff --git a/builtin/providers/aws/resource_aws_db_instance_test.go b/builtin/providers/aws/resource_aws_db_instance_test.go
index 17e1922718..6142281d00 100644
--- a/builtin/providers/aws/resource_aws_db_instance_test.go
+++ b/builtin/providers/aws/resource_aws_db_instance_test.go
@@ -120,6 +120,10 @@ func testAccCheckAWSDBInstanceDestroy(s *terraform.State) error {
DBInstanceIdentifier: aws.String(rs.Primary.ID),
})
+ if ae, ok := err.(awserr.Error); ok && ae.Code() == "DBInstanceNotFound" {
+ continue
+ }
+
if err == nil {
if len(resp.DBInstances) != 0 &&
*resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID {
diff --git a/builtin/providers/aws/resource_aws_vpc_dhcp_options_test.go b/builtin/providers/aws/resource_aws_vpc_dhcp_options_test.go
index 7ff15a5fa9..baa86f7d7d 100644
--- a/builtin/providers/aws/resource_aws_vpc_dhcp_options_test.go
+++ b/builtin/providers/aws/resource_aws_vpc_dhcp_options_test.go
@@ -50,9 +50,12 @@ func testAccCheckDHCPOptionsDestroy(s *terraform.State) error {
aws.String(rs.Primary.ID),
},
})
+ if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidDhcpOptionID.NotFound" {
+ continue
+ }
if err == nil {
if len(resp.DhcpOptions) > 0 {
- return fmt.Errorf("still exist.")
+ return fmt.Errorf("still exists")
}
return nil
From 02f14ae34aabd6cf25b521c7993602e21d9923d1 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 22 Dec 2015 07:17:35 -0600
Subject: [PATCH 321/664] provider/aws: fix CheckDestroy on glacier vault
---
.../aws/resource_aws_glacier_vault_test.go | 23 +++++++++++++++----
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_glacier_vault_test.go b/builtin/providers/aws/resource_aws_glacier_vault_test.go
index 4f5c26bf28..009cfb03d2 100644
--- a/builtin/providers/aws/resource_aws_glacier_vault_test.go
+++ b/builtin/providers/aws/resource_aws_glacier_vault_test.go
@@ -182,11 +182,26 @@ func testAccCheckVaultNotificationsMissing(name string) resource.TestCheckFunc {
}
func testAccCheckGlacierVaultDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v",
- s.RootModule().Resources)
- }
+ conn := testAccProvider.Meta().(*AWSClient).glacierconn
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_glacier_vault" {
+ continue
+ }
+
+ input := &glacier.DescribeVaultInput{
+ VaultName: aws.String(rs.Primary.ID),
+ }
+ if _, err := conn.DescribeVault(input); err != nil {
+ // Verify the error is what we want
+ if ae, ok := err.(awserr.Error); ok && ae.Code() == "ResourceNotFoundException" {
+ continue
+ }
+
+ return err
+ }
+ return fmt.Errorf("still exists")
+ }
return nil
}
From 51732ac9eb2ddcc29772180686b259e506f454fd Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 22 Dec 2015 07:22:06 -0600
Subject: [PATCH 322/664] provider/aws: fix CheckDestroy for codedeploy_app
tests
---
.../aws/resource_aws_codedeploy_app_test.go | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_codedeploy_app_test.go b/builtin/providers/aws/resource_aws_codedeploy_app_test.go
index 6bfa141aea..dd3a4ce7a9 100644
--- a/builtin/providers/aws/resource_aws_codedeploy_app_test.go
+++ b/builtin/providers/aws/resource_aws_codedeploy_app_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/codedeploy"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -40,17 +41,19 @@ func testAccCheckAWSCodeDeployAppDestroy(s *terraform.State) error {
continue
}
- resp, err := conn.GetApplication(&codedeploy.GetApplicationInput{
+ _, err := conn.GetApplication(&codedeploy.GetApplicationInput{
ApplicationName: aws.String(rs.Primary.Attributes["name"]),
})
- if err == nil {
- if resp.Application != nil {
- return fmt.Errorf("CodeDeploy app still exists:\n%#v", *resp.Application.ApplicationId)
+ if err != nil {
+ // Verify the error is what we want
+ if ae, ok := err.(awserr.Error); ok && ae.Code() == "ApplicationDoesNotExistException" {
+ continue
}
+ return err
}
- return err
+ return fmt.Errorf("still exists")
}
return nil
From 10cc513ae03975d1c0010e83f6423a3c7aaa087b Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 22 Dec 2015 07:34:06 -0600
Subject: [PATCH 323/664] provider/aws: fix CheckDestroy for aws_eip tests
---
.../providers/aws/resource_aws_eip_test.go | 46 ++++++++++++-------
1 file changed, 29 insertions(+), 17 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_eip_test.go b/builtin/providers/aws/resource_aws_eip_test.go
index e93e002f7e..56955a8c11 100644
--- a/builtin/providers/aws/resource_aws_eip_test.go
+++ b/builtin/providers/aws/resource_aws_eip_test.go
@@ -86,26 +86,38 @@ func testAccCheckAWSEIPDestroy(s *terraform.State) error {
continue
}
- req := &ec2.DescribeAddressesInput{
- PublicIps: []*string{aws.String(rs.Primary.ID)},
- }
- describe, err := conn.DescribeAddresses(req)
-
- if err == nil {
- if len(describe.Addresses) != 0 &&
- *describe.Addresses[0].PublicIp == rs.Primary.ID {
- return fmt.Errorf("EIP still exists")
+ if strings.Contains(rs.Primary.ID, "eipalloc") {
+ req := &ec2.DescribeAddressesInput{
+ AllocationIds: []*string{aws.String(rs.Primary.ID)},
+ }
+ describe, err := conn.DescribeAddresses(req)
+ if err != nil {
+ // Verify the error is what we want
+ if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidAllocationID.NotFound" {
+ continue
+ }
+ return err
}
- }
- // Verify the error
- providerErr, ok := err.(awserr.Error)
- if !ok {
- return err
- }
+ if len(describe.Addresses) > 0 {
+ return fmt.Errorf("still exists")
+ }
+ } else {
+ req := &ec2.DescribeAddressesInput{
+ PublicIps: []*string{aws.String(rs.Primary.ID)},
+ }
+ describe, err := conn.DescribeAddresses(req)
+ if err != nil {
+ // Verify the error is what we want
+ if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidAllocationID.NotFound" {
+ continue
+ }
+ return err
+ }
- if providerErr.Code() != "InvalidAllocationID.NotFound" {
- return fmt.Errorf("Unexpected error: %s", err)
+ if len(describe.Addresses) > 0 {
+ return fmt.Errorf("still exists")
+ }
}
}
From bf40c48075768b2c1cac9ea2186cd9254c5df3c4 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 22 Dec 2015 07:41:24 -0600
Subject: [PATCH 324/664] provider/aws: fix ELB acc tests
* LB Name test could end up too long and fail
* CheckDestroy was looking for the wrong error code
---
builtin/providers/aws/resource_aws_elb_test.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_elb_test.go b/builtin/providers/aws/resource_aws_elb_test.go
index 15f40b6d97..83e947f934 100644
--- a/builtin/providers/aws/resource_aws_elb_test.go
+++ b/builtin/providers/aws/resource_aws_elb_test.go
@@ -54,7 +54,7 @@ func TestAccAWSELB_basic(t *testing.T) {
func TestAccAWSELB_fullCharacterRange(t *testing.T) {
var conf elb.LoadBalancerDescription
- lbName := fmt.Sprintf("FoobarTerraform-test123-%d",
+ lbName := fmt.Sprintf("Tf-%d",
rand.New(rand.NewSource(time.Now().UnixNano())).Int())
resource.Test(t, resource.TestCase{
@@ -560,7 +560,7 @@ func testAccCheckAWSELBDestroy(s *terraform.State) error {
return err
}
- if providerErr.Code() != "InvalidLoadBalancerName.NotFound" {
+ if providerErr.Code() != "LoadBalancerNotFound" {
return fmt.Errorf("Unexpected error: %s", err)
}
}
From 843a09f664af18117448cb3990c72fc4f75441c4 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 22 Dec 2015 07:56:56 -0600
Subject: [PATCH 325/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cfeec8fb9f..e79c9d95bf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -68,6 +68,7 @@ BUG FIXES:
* provider/aws: Fix issue with ElasticSearch Domain `access_policies` always appear changed [GH-4245]
* provider/aws: Fix issue with nil parameter group value causing panic in `aws_db_parameter_group` [GH-4318]
* provider/aws: Fix issue with Elastic IPs not recognizing when they have been unassigned manually [GH-4387]
+ * provider/aws: Use body or URL for all CloudFormation stack updates [GH-4370]
* provider/azure: Update for [breaking change to upstream client library](https://github.com/Azure/azure-sdk-for-go/commit/68d50cb53a73edfeb7f17f5e86cdc8eb359a9528). [GH-4300]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
From c8e88ed1b4b7af6a13c3379db29684acac17fd27 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 22 Dec 2015 08:00:31 -0600
Subject: [PATCH 326/664] provider/aws: fix CheckDestroy for group_membership
test
---
.../resource_aws_iam_group_membership_test.go | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_iam_group_membership_test.go b/builtin/providers/aws/resource_aws_iam_group_membership_test.go
index 26076dd9b7..63bef4dac7 100644
--- a/builtin/providers/aws/resource_aws_iam_group_membership_test.go
+++ b/builtin/providers/aws/resource_aws_iam_group_membership_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -55,23 +56,18 @@ func testAccCheckAWSGroupMembershipDestroy(s *terraform.State) error {
group := rs.Primary.Attributes["group"]
- resp, err := conn.GetGroup(&iam.GetGroupInput{
+ _, err := conn.GetGroup(&iam.GetGroupInput{
GroupName: aws.String(group),
})
if err != nil {
- // might error here
+ // Verify the error is what we want
+ if ae, ok := err.(awserr.Error); ok && ae.Code() == "NoSuchEntity" {
+ continue
+ }
return err
}
- users := []string{"test-user", "test-user-two", "test-user-three"}
- for _, u := range resp.Users {
- for _, i := range users {
- if i == *u.UserName {
- return fmt.Errorf("Error: User (%s) still a member of Group (%s)", i, *resp.Group.GroupName)
- }
- }
- }
-
+ return fmt.Errorf("still exists")
}
return nil
From fd528df00284efd103f5763c60b1a0613f367519 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 22 Dec 2015 08:05:37 -0600
Subject: [PATCH 327/664] provider/aws: fix CheckDestroy for iam_group_policy
tests
---
.../aws/resource_aws_iam_group_policy_test.go | 27 +++++++++++++++++--
1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_iam_group_policy_test.go b/builtin/providers/aws/resource_aws_iam_group_policy_test.go
index ac7a3baaa0..ccf35310be 100644
--- a/builtin/providers/aws/resource_aws_iam_group_policy_test.go
+++ b/builtin/providers/aws/resource_aws_iam_group_policy_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -39,8 +40,30 @@ func TestAccAWSIAMGroupPolicy_basic(t *testing.T) {
}
func testAccCheckIAMGroupPolicyDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
+ conn := testAccProvider.Meta().(*AWSClient).iamconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_iam_group_policy" {
+ continue
+ }
+
+ group, name := resourceAwsIamGroupPolicyParseId(rs.Primary.ID)
+
+ request := &iam.GetGroupPolicyInput{
+ PolicyName: aws.String(name),
+ GroupName: aws.String(group),
+ }
+
+ _, err := conn.GetGroupPolicy(request)
+ if err != nil {
+ // Verify the error is what we want
+ if ae, ok := err.(awserr.Error); ok && ae.Code() == "NoSuchEntity" {
+ continue
+ }
+ return err
+ }
+
+ return fmt.Errorf("still exists")
}
return nil
From d9b83ada393d0548f34277b55e93b21ee8b8123f Mon Sep 17 00:00:00 2001
From: gorazio
Date: Tue, 22 Dec 2015 17:27:44 +0300
Subject: [PATCH 328/664] Update ebs_volume.html.md
add default value of volume type
---
website/source/docs/providers/aws/r/ebs_volume.html.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/docs/providers/aws/r/ebs_volume.html.md b/website/source/docs/providers/aws/r/ebs_volume.html.md
index 8a41ea26b5..78d902b3e0 100644
--- a/website/source/docs/providers/aws/r/ebs_volume.html.md
+++ b/website/source/docs/providers/aws/r/ebs_volume.html.md
@@ -31,7 +31,7 @@ The following arguments are supported:
* `iops` - (Optional) The amount of IOPS to provision for the disk.
* `size` - (Optional) The size of the drive in GB.
* `snapshot_id` (Optional) A snapshot to base the EBS volume off of.
-* `type` - (Optional) The type of EBS volume.
+* `type` - (Optional) The type of EBS volume. Can be "standard", "gp2", or "io1". (Default: "standard").
* `kms_key_id` - (Optional) The KMS key ID for the volume.
* `tags` - (Optional) A mapping of tags to assign to the resource.
From c8319d3b7248ea50336e5352e1dfe3a43c5fe8a6 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 22 Dec 2015 08:49:35 -0600
Subject: [PATCH 329/664] provider/aws: fix CheckDestroy on aws_instance tests
---
.../aws/resource_aws_instance_test.go | 21 +++++++++----------
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_instance_test.go b/builtin/providers/aws/resource_aws_instance_test.go
index 23c1e7b380..9b1e3b8e74 100644
--- a/builtin/providers/aws/resource_aws_instance_test.go
+++ b/builtin/providers/aws/resource_aws_instance_test.go
@@ -540,26 +540,25 @@ func testAccCheckInstanceDestroyWithProvider(s *terraform.State, provider *schem
}
// Try to find the resource
- var err error
resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String(rs.Primary.ID)},
})
if err == nil {
- if len(resp.Reservations) > 0 {
- return fmt.Errorf("still exist.")
+ for _, r := range resp.Reservations {
+ for _, i := range r.Instances {
+ if i.State != nil && *i.State.Name != "terminated" {
+ return fmt.Errorf("Found unterminated instance: %s", i)
+ }
+ }
}
-
- return nil
}
// Verify the error is what we want
- ec2err, ok := err.(awserr.Error)
- if !ok {
- return err
- }
- if ec2err.Code() != "InvalidInstanceID.NotFound" {
- return err
+ if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidInstanceID.NotFound" {
+ continue
}
+
+ return err
}
return nil
From b7e87bbf167f78082f26ab125e32d86bbe890fac Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 22 Dec 2015 09:02:01 -0600
Subject: [PATCH 330/664] provider/aws: fix CheckDestroy for
main_route_table_association tests
---
...e_aws_main_route_table_association_test.go | 25 +++++++++++++++++--
1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_main_route_table_association_test.go b/builtin/providers/aws/resource_aws_main_route_table_association_test.go
index 49f2815d9d..191696ef2d 100644
--- a/builtin/providers/aws/resource_aws_main_route_table_association_test.go
+++ b/builtin/providers/aws/resource_aws_main_route_table_association_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -39,8 +40,28 @@ func TestAccAWSMainRouteTableAssociation_basic(t *testing.T) {
}
func testAccCheckMainRouteTableAssociationDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
+ conn := testAccProvider.Meta().(*AWSClient).ec2conn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_main_route_table_association" {
+ continue
+ }
+
+ mainAssociation, err := findMainRouteTableAssociation(
+ conn,
+ rs.Primary.Attributes["vpc_id"],
+ )
+ if err != nil {
+ // Verify the error is what we want
+ if ae, ok := err.(awserr.Error); ok && ae.Code() == "ApplicationDoesNotExistException" {
+ continue
+ }
+ return err
+ }
+
+ if mainAssociation != nil {
+ return fmt.Errorf("still exists")
+ }
}
return nil
From 6a3ed429ade7bd55894b2074738fbb3345f55923 Mon Sep 17 00:00:00 2001
From: nextrevision
Date: Tue, 22 Dec 2015 10:31:30 -0500
Subject: [PATCH 331/664] Adding AWS ECR provider resources
Adds ECR aws_ecr_repository and aws_ecr_repository_policy resources to
the AWS provider.
---
builtin/providers/aws/config.go | 5 +
builtin/providers/aws/provider.go | 2 +
.../aws/resource_aws_ecr_repository.go | 106 ++++++++++++++
.../aws/resource_aws_ecr_repository_policy.go | 133 ++++++++++++++++++
...resource_aws_ecr_repository_policy_test.go | 87 ++++++++++++
.../aws/resource_aws_ecr_repository_test.go | 77 ++++++++++
.../aws/r/ecr_repository.html.markdown | 33 +++++
.../aws/r/ecr_repository_policy.html.markdown | 67 +++++++++
8 files changed, 510 insertions(+)
create mode 100644 builtin/providers/aws/resource_aws_ecr_repository.go
create mode 100644 builtin/providers/aws/resource_aws_ecr_repository_policy.go
create mode 100644 builtin/providers/aws/resource_aws_ecr_repository_policy_test.go
create mode 100644 builtin/providers/aws/resource_aws_ecr_repository_test.go
create mode 100644 website/source/docs/providers/aws/r/ecr_repository.html.markdown
create mode 100644 website/source/docs/providers/aws/r/ecr_repository_policy.html.markdown
diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go
index e3e2243f1c..a55e182c90 100644
--- a/builtin/providers/aws/config.go
+++ b/builtin/providers/aws/config.go
@@ -27,6 +27,7 @@ import (
"github.com/aws/aws-sdk-go/service/directoryservice"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/aws/aws-sdk-go/service/ecr"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/aws/aws-sdk-go/service/efs"
"github.com/aws/aws-sdk-go/service/elasticache"
@@ -67,6 +68,7 @@ type AWSClient struct {
dsconn *directoryservice.DirectoryService
dynamodbconn *dynamodb.DynamoDB
ec2conn *ec2.EC2
+ ecrconn *ecr.ECR
ecsconn *ecs.ECS
efsconn *efs.EFS
elbconn *elb.ELB
@@ -189,6 +191,9 @@ func (c *Config) Client() (interface{}, error) {
log.Println("[INFO] Initializing EC2 Connection")
client.ec2conn = ec2.New(sess)
+ log.Println("[INFO] Initializing ECR Connection")
+ client.ecrconn = ecr.New(sess)
+
log.Println("[INFO] Initializing ECS Connection")
client.ecsconn = ecs.New(sess)
diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index 6b0c8db2e7..adcab7e95e 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -114,6 +114,8 @@ func Provider() terraform.ResourceProvider {
"aws_directory_service_directory": resourceAwsDirectoryServiceDirectory(),
"aws_dynamodb_table": resourceAwsDynamoDbTable(),
"aws_ebs_volume": resourceAwsEbsVolume(),
+ "aws_ecr_repository": resourceAwsEcrRepository(),
+ "aws_ecr_repository_policy": resourceAwsEcrRepositoryPolicy(),
"aws_ecs_cluster": resourceAwsEcsCluster(),
"aws_ecs_service": resourceAwsEcsService(),
"aws_ecs_task_definition": resourceAwsEcsTaskDefinition(),
diff --git a/builtin/providers/aws/resource_aws_ecr_repository.go b/builtin/providers/aws/resource_aws_ecr_repository.go
new file mode 100644
index 0000000000..ca94bcdb3b
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_ecr_repository.go
@@ -0,0 +1,106 @@
+package aws
+
+import (
+ "log"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/ecr"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsEcrRepository() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsEcrRepositoryCreate,
+ Read: resourceAwsEcrRepositoryRead,
+ Delete: resourceAwsEcrRepositoryDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "arn": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "registry_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func resourceAwsEcrRepositoryCreate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).ecrconn
+
+ input := ecr.CreateRepositoryInput{
+ RepositoryName: aws.String(d.Get("name").(string)),
+ }
+
+ log.Printf("[DEBUG] Creating ECR resository: %s", input)
+ out, err := conn.CreateRepository(&input)
+ if err != nil {
+ return err
+ }
+
+ repository := *out.Repository
+
+ log.Printf("[DEBUG] ECR repository created: %q", *repository.RepositoryArn)
+
+ d.SetId(*repository.RepositoryName)
+ d.Set("arn", *repository.RepositoryArn)
+ d.Set("registry_id", *repository.RegistryId)
+
+ return resourceAwsEcrRepositoryRead(d, meta)
+}
+
+func resourceAwsEcrRepositoryRead(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).ecrconn
+
+ log.Printf("[DEBUG] Reading repository %s", d.Id())
+ out, err := conn.DescribeRepositories(&ecr.DescribeRepositoriesInput{
+ RegistryId: aws.String(d.Get("registry_id").(string)),
+ RepositoryNames: []*string{aws.String(d.Id())},
+ })
+ if err != nil {
+ if ecrerr, ok := err.(awserr.Error); ok && ecrerr.Code() == "RepositoryNotFoundException" {
+ d.SetId("")
+ return nil
+ }
+ return err
+ }
+
+ repository := out.Repositories[0]
+
+ log.Printf("[DEBUG] Received repository %s", out)
+
+ d.SetId(*repository.RepositoryName)
+ d.Set("arn", *repository.RepositoryArn)
+ d.Set("registry_id", *repository.RegistryId)
+
+ return nil
+}
+
+func resourceAwsEcrRepositoryDelete(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).ecrconn
+
+ _, err := conn.DeleteRepository(&ecr.DeleteRepositoryInput{
+ RepositoryName: aws.String(d.Id()),
+ RegistryId: aws.String(d.Get("registry_id").(string)),
+ Force: aws.Bool(true),
+ })
+ if err != nil {
+ if ecrerr, ok := err.(awserr.Error); ok && ecrerr.Code() == "RepositoryNotFoundException" {
+ d.SetId("")
+ return nil
+ }
+ return err
+ }
+
+ log.Printf("[DEBUG] repository %q deleted.", d.Get("arn").(string))
+
+ return nil
+}
diff --git a/builtin/providers/aws/resource_aws_ecr_repository_policy.go b/builtin/providers/aws/resource_aws_ecr_repository_policy.go
new file mode 100644
index 0000000000..d2d7d8b345
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_ecr_repository_policy.go
@@ -0,0 +1,133 @@
+package aws
+
+import (
+ "log"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/ecr"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsEcrRepositoryPolicy() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsEcrRepositoryPolicyCreate,
+ Read: resourceAwsEcrRepositoryPolicyRead,
+ Update: resourceAwsEcrRepositoryPolicyUpdate,
+ Delete: resourceAwsEcrRepositoryPolicyDelete,
+
+ Schema: map[string]*schema.Schema{
+ "repository": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "policy": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "registry_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func resourceAwsEcrRepositoryPolicyCreate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).ecrconn
+
+ input := ecr.SetRepositoryPolicyInput{
+ RepositoryName: aws.String(d.Get("repository").(string)),
+ PolicyText: aws.String(d.Get("policy").(string)),
+ }
+
+ log.Printf("[DEBUG] Creating ECR resository policy: %s", input)
+ out, err := conn.SetRepositoryPolicy(&input)
+ if err != nil {
+ return err
+ }
+
+ repositoryPolicy := *out
+
+ log.Printf("[DEBUG] ECR repository policy created: %s", *repositoryPolicy.RepositoryName)
+
+ d.SetId(*repositoryPolicy.RepositoryName)
+ d.Set("registry_id", *repositoryPolicy.RegistryId)
+
+ return resourceAwsEcrRepositoryPolicyRead(d, meta)
+}
+
+func resourceAwsEcrRepositoryPolicyRead(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).ecrconn
+
+ log.Printf("[DEBUG] Reading repository policy %s", d.Id())
+ out, err := conn.GetRepositoryPolicy(&ecr.GetRepositoryPolicyInput{
+ RegistryId: aws.String(d.Get("registry_id").(string)),
+ RepositoryName: aws.String(d.Id()),
+ })
+ if err != nil {
+ if ecrerr, ok := err.(awserr.Error); ok && ecrerr.Code() == "RepositoryPolicyNotFoundException" {
+ d.SetId("")
+ return nil
+ }
+ return err
+ }
+
+ log.Printf("[DEBUG] Received repository policy %s", out)
+
+ repositoryPolicy := out
+
+ d.SetId(*repositoryPolicy.RepositoryName)
+ d.Set("registry_id", *repositoryPolicy.RegistryId)
+
+ return nil
+}
+
+func resourceAwsEcrRepositoryPolicyUpdate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).ecrconn
+
+ if !d.HasChange("policy") {
+ return nil
+ }
+
+ input := ecr.SetRepositoryPolicyInput{
+ RepositoryName: aws.String(d.Get("repository").(string)),
+ RegistryId: aws.String(d.Get("registry_id").(string)),
+ PolicyText: aws.String(d.Get("policy").(string)),
+ }
+
+ out, err := conn.SetRepositoryPolicy(&input)
+ if err != nil {
+ return err
+ }
+
+ repositoryPolicy := *out
+
+ d.SetId(*repositoryPolicy.RepositoryName)
+ d.Set("registry_id", *repositoryPolicy.RegistryId)
+
+ return nil
+}
+
+func resourceAwsEcrRepositoryPolicyDelete(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).ecrconn
+
+ _, err := conn.DeleteRepositoryPolicy(&ecr.DeleteRepositoryPolicyInput{
+ RepositoryName: aws.String(d.Id()),
+ RegistryId: aws.String(d.Get("registry_id").(string)),
+ })
+ if err != nil {
+ if ecrerr, ok := err.(awserr.Error); ok {
+ if ecrerr.Code() == "RepositoryPolicyNotFoundException" || ecrerr.Code() == "RepositoryNotFoundException" {
+ d.SetId("")
+ return nil
+ }
+ }
+ return err
+ }
+
+ log.Printf("[DEBUG] repository policy %s deleted.", d.Id())
+
+ return nil
+}
diff --git a/builtin/providers/aws/resource_aws_ecr_repository_policy_test.go b/builtin/providers/aws/resource_aws_ecr_repository_policy_test.go
new file mode 100644
index 0000000000..5955160ee8
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_ecr_repository_policy_test.go
@@ -0,0 +1,87 @@
+package aws
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/ecr"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSEcrRepositoryPolicy_basic(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSEcrRepositoryPolicyDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSEcrRepositoryPolicy,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSEcrRepositoryPolicyExists("aws_ecr_repository_policy.default"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckAWSEcrRepositoryPolicyDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).ecrconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_ecr_repository_policy" {
+ continue
+ }
+
+ _, err := conn.GetRepositoryPolicy(&ecr.GetRepositoryPolicyInput{
+ RegistryId: aws.String(rs.Primary.Attributes["registry_id"]),
+ RepositoryName: aws.String(rs.Primary.Attributes["repository"]),
+ })
+ if err != nil {
+ if ecrerr, ok := err.(awserr.Error); ok && ecrerr.Code() == "RepositoryNotFoundException" {
+ return nil
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func testAccCheckAWSEcrRepositoryPolicyExists(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ _, ok := s.RootModule().Resources[name]
+ if !ok {
+ return fmt.Errorf("Not found: %s", name)
+ }
+
+ return nil
+ }
+}
+
+var testAccAWSEcrRepositoryPolicy = `
+resource "aws_ecr_repository" "foo" {
+ name = "bar"
+}
+
+resource "aws_ecr_repository_policy" "default" {
+ repository = "${aws_ecr_repository.foo.name}"
+ policy = <
Date: Tue, 22 Dec 2015 09:58:22 -0600
Subject: [PATCH 332/664] provider/aws: fixes for Network ACL Rules
* Fixup Exists and CheckDestroy assertions
* Make ingress/egress computed on network_acl, otherwise you could
never use network_acl_rule with a managed network_acl without a
perpetual diff.
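As a hedged sketch (not the real network_acl schema), the Optional+Computed combination behaves like this: when the block is omitted from configuration, whatever the API reports, including rules created by a separate aws_network_acl_rule resource, is accepted into state instead of showing up as drift on every plan.

```go
package aws

import "github.com/hashicorp/terraform/helper/schema"

// exampleACLSchema illustrates Optional+Computed on a set attribute: with no
// value in configuration, Terraform keeps whatever the provider writes during
// Read, so externally managed rules do not produce a perpetual diff.
func exampleACLSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"ingress": &schema.Schema{
			Type:     schema.TypeSet,
			Optional: true,
			Computed: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"rule_no": &schema.Schema{Type: schema.TypeInt, Required: true},
				},
			},
		},
	}
}
```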
---
.../providers/aws/resource_aws_network_acl.go | 4 +-
.../aws/resource_aws_network_acl_rule_test.go | 42 +++++++++----------
2 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_network_acl.go b/builtin/providers/aws/resource_aws_network_acl.go
index 97916f9f09..ede84ed958 100644
--- a/builtin/providers/aws/resource_aws_network_acl.go
+++ b/builtin/providers/aws/resource_aws_network_acl.go
@@ -50,7 +50,7 @@ func resourceAwsNetworkAcl() *schema.Resource {
Type: schema.TypeSet,
Required: false,
Optional: true,
- Computed: false,
+ Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"from_port": &schema.Schema{
@@ -93,7 +93,7 @@ func resourceAwsNetworkAcl() *schema.Resource {
Type: schema.TypeSet,
Required: false,
Optional: true,
- Computed: false,
+ Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"from_port": &schema.Schema{
diff --git a/builtin/providers/aws/resource_aws_network_acl_rule_test.go b/builtin/providers/aws/resource_aws_network_acl_rule_test.go
index 98767cb573..56973b1d47 100644
--- a/builtin/providers/aws/resource_aws_network_acl_rule_test.go
+++ b/builtin/providers/aws/resource_aws_network_acl_rule_test.go
@@ -2,6 +2,7 @@ package aws
import (
"fmt"
+ "strconv"
"testing"
"github.com/aws/aws-sdk-go/aws"
@@ -54,7 +55,7 @@ func testAccCheckAWSNetworkAclRuleDestroy(s *terraform.State) error {
if !ok {
return err
}
- if ec2err.Code() != "InvalidNetworkAclEntry.NotFound" {
+ if ec2err.Code() != "InvalidNetworkAclID.NotFound" {
return err
}
}
@@ -63,7 +64,6 @@ func testAccCheckAWSNetworkAclRuleDestroy(s *terraform.State) error {
}
func testAccCheckAWSNetworkAclRuleExists(n string, networkAcl *ec2.NetworkAcl) resource.TestCheckFunc {
-
return func(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).ec2conn
rs, ok := s.RootModule().Resources[n]
@@ -76,30 +76,30 @@ func testAccCheckAWSNetworkAclRuleExists(n string, networkAcl *ec2.NetworkAcl) r
}
req := &ec2.DescribeNetworkAclsInput{
- NetworkAclIds: []*string{aws.String(rs.Primary.ID)},
+ NetworkAclIds: []*string{aws.String(rs.Primary.Attributes["network_acl_id"])},
}
resp, err := conn.DescribeNetworkAcls(req)
- if err == nil {
- if len(resp.NetworkAcls) > 0 && *resp.NetworkAcls[0].NetworkAclId == rs.Primary.ID {
- networkAcl := resp.NetworkAcls[0]
- if networkAcl.Entries == nil {
- return fmt.Errorf("No Network ACL Entries exist")
- }
+ if err != nil {
+ return err
+ }
+ if len(resp.NetworkAcls) != 1 {
+ return fmt.Errorf("Network ACL not found")
+ }
+ egress, err := strconv.ParseBool(rs.Primary.Attributes["egress"])
+ if err != nil {
+ return err
+ }
+ ruleNo, err := strconv.ParseInt(rs.Primary.Attributes["rule_number"], 10, 64)
+ if err != nil {
+ return err
+ }
+ for _, e := range resp.NetworkAcls[0].Entries {
+ if *e.RuleNumber == ruleNo && *e.Egress == egress {
+ return nil
}
}
-
- ec2err, ok := err.(awserr.Error)
- if !ok {
- return err
- }
- if ec2err.Code() != "InvalidNetworkAclEntry.NotFound" {
- return err
- }
-
- return nil
+ return fmt.Errorf("Entry not found: %s", resp.NetworkAcls[0])
}
-
- return nil
}
const testAccAWSNetworkAclRuleBasicConfig = `
From 27b8dd08e79c37cf0fb5c0604efad3796d46d8d2 Mon Sep 17 00:00:00 2001
From: nextrevision
Date: Tue, 22 Dec 2015 11:04:07 -0500
Subject: [PATCH 333/664] Check for ecr repository and policy removal
---
.../aws/resource_aws_ecr_repository_policy.go | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_ecr_repository_policy.go b/builtin/providers/aws/resource_aws_ecr_repository_policy.go
index d2d7d8b345..8932ea557b 100644
--- a/builtin/providers/aws/resource_aws_ecr_repository_policy.go
+++ b/builtin/providers/aws/resource_aws_ecr_repository_policy.go
@@ -67,9 +67,14 @@ func resourceAwsEcrRepositoryPolicyRead(d *schema.ResourceData, meta interface{}
RepositoryName: aws.String(d.Id()),
})
if err != nil {
- if ecrerr, ok := err.(awserr.Error); ok && ecrerr.Code() == "RepositoryPolicyNotFoundException" {
- d.SetId("")
- return nil
+ if ecrerr, ok := err.(awserr.Error); ok {
+ switch ecrerr.Code() {
+ case "RepositoryNotFoundException", "RepositoryPolicyNotFoundException":
+ d.SetId("")
+ return nil
+ default:
+ return err
+ }
}
return err
}
@@ -119,9 +124,12 @@ func resourceAwsEcrRepositoryPolicyDelete(d *schema.ResourceData, meta interface
})
if err != nil {
if ecrerr, ok := err.(awserr.Error); ok {
- if ecrerr.Code() == "RepositoryPolicyNotFoundException" || ecrerr.Code() == "RepositoryNotFoundException" {
+ switch ecrerr.Code() {
+ case "RepositoryNotFoundException", "RepositoryPolicyNotFoundException":
d.SetId("")
return nil
+ default:
+ return err
}
}
return err
From df0f65502fd1967e01caee916a0588a286dde1a8 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 22 Dec 2015 10:20:28 -0600
Subject: [PATCH 334/664] provider/aws: fix CheckDestroy on placement_group
tests
---
.../aws/resource_aws_placement_group_test.go | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_placement_group_test.go b/builtin/providers/aws/resource_aws_placement_group_test.go
index a68e43e92f..8743975c24 100644
--- a/builtin/providers/aws/resource_aws_placement_group_test.go
+++ b/builtin/providers/aws/resource_aws_placement_group_test.go
@@ -8,6 +8,7 @@ import (
"github.com/hashicorp/terraform/terraform"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
)
@@ -34,12 +35,19 @@ func testAccCheckAWSPlacementGroupDestroy(s *terraform.State) error {
if rs.Type != "aws_placement_group" {
continue
}
- _, err := conn.DeletePlacementGroup(&ec2.DeletePlacementGroupInput{
- GroupName: aws.String(rs.Primary.ID),
+
+ _, err := conn.DescribePlacementGroups(&ec2.DescribePlacementGroupsInput{
+ GroupNames: []*string{aws.String(rs.Primary.Attributes["name"])},
})
if err != nil {
+ // Verify the error is what we want
+ if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidPlacementGroup.Unknown" {
+ continue
+ }
return err
}
+
+ return fmt.Errorf("still exists")
}
return nil
}
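The same CheckDestroy shape recurs across the test fixes in this series: describe the resource, treat the service's "not found" error code as success, bubble up any other error, and fail if the resource is still returned. A generic, hedged outline of that pattern (the helper and its parameters are illustrative, not part of these patches):

```go
package aws

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/hashicorp/terraform/terraform"
)

// checkGoneSketch walks the tracked resources of the given type, calls a
// describe function for each, and treats the supplied "not found" error code
// as proof of destruction. Any other error is surfaced, and a successful
// describe means the resource leaked.
func checkGoneSketch(s *terraform.State, resourceType, notFoundCode string,
	describe func(id string) error) error {
	for _, rs := range s.RootModule().Resources {
		if rs.Type != resourceType {
			continue
		}
		err := describe(rs.Primary.ID)
		if err != nil {
			if ae, ok := err.(awserr.Error); ok && ae.Code() == notFoundCode {
				continue // gone, as the test expects
			}
			return err
		}
		return fmt.Errorf("%s %s still exists", resourceType, rs.Primary.ID)
	}
	return nil
}
```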
From 84fe0b15fd41b17a590aa625e7e184952b622a15 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 22 Dec 2015 09:02:19 -0600
Subject: [PATCH 335/664] provider/aws: Update ElastiCache tests to verify
delete
provider/aws: update Elasticsearch domain to only save access policies if not empty
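The access-policies change follows a general guard: only write an API-returned string into state when it is actually present, so an empty response cannot clobber a configured value and trigger a perpetual diff. A hedged sketch of that guard as a helper (the name is illustrative, not part of this patch), mirroring the access_policies check in the diff below:

```go
package aws

import "github.com/hashicorp/terraform/helper/schema"

// setIfPresent writes v into state under key only when the API returned a
// non-empty value; otherwise the previously stored value is left untouched.
func setIfPresent(d *schema.ResourceData, key string, v *string) {
	if v != nil && *v != "" {
		d.Set(key, *v)
	}
}
```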
---
.../providers/aws/resource_aws_elasticache_cluster_test.go | 5 +++++
.../aws/resource_aws_elasticache_parameter_group_test.go | 2 +-
.../aws/resource_aws_elasticache_subnet_group_test.go | 5 +++++
builtin/providers/aws/resource_aws_elasticsearch_domain.go | 4 +++-
.../aws/resource_aws_elasticsearch_domain_test.go | 7 ++++++-
5 files changed, 20 insertions(+), 3 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go
index 88504e0f21..f8372d2b9f 100644
--- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go
+++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go
@@ -8,6 +8,7 @@ import (
"time"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elasticache"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -152,6 +153,10 @@ func testAccCheckAWSElasticacheClusterDestroy(s *terraform.State) error {
CacheClusterId: aws.String(rs.Primary.ID),
})
if err != nil {
+ // Verify the error is what we want
+ if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "CacheClusterNotFound" {
+ continue
+ }
return err
}
if len(res.CacheClusters) > 0 {
diff --git a/builtin/providers/aws/resource_aws_elasticache_parameter_group_test.go b/builtin/providers/aws/resource_aws_elasticache_parameter_group_test.go
index e61e64b3c7..d1df02c7f2 100644
--- a/builtin/providers/aws/resource_aws_elasticache_parameter_group_test.go
+++ b/builtin/providers/aws/resource_aws_elasticache_parameter_group_test.go
@@ -112,7 +112,7 @@ func testAccCheckAWSElasticacheParameterGroupDestroy(s *terraform.State) error {
if !ok {
return err
}
- if newerr.Code() != "InvalidCacheParameterGroup.NotFound" {
+ if newerr.Code() != "CacheParameterGroupNotFound" {
return err
}
}
diff --git a/builtin/providers/aws/resource_aws_elasticache_subnet_group_test.go b/builtin/providers/aws/resource_aws_elasticache_subnet_group_test.go
index b3035c767c..55fe25cbca 100644
--- a/builtin/providers/aws/resource_aws_elasticache_subnet_group_test.go
+++ b/builtin/providers/aws/resource_aws_elasticache_subnet_group_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elasticache"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -71,6 +72,10 @@ func testAccCheckAWSElasticacheSubnetGroupDestroy(s *terraform.State) error {
CacheSubnetGroupName: aws.String(rs.Primary.ID),
})
if err != nil {
+ // Verify the error is what we want
+ if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "CacheSubnetGroupNotFoundFault" {
+ continue
+ }
return err
}
if len(res.CacheSubnetGroups) > 0 {
diff --git a/builtin/providers/aws/resource_aws_elasticsearch_domain.go b/builtin/providers/aws/resource_aws_elasticsearch_domain.go
index 5ccbacc282..c5666424b4 100644
--- a/builtin/providers/aws/resource_aws_elasticsearch_domain.go
+++ b/builtin/providers/aws/resource_aws_elasticsearch_domain.go
@@ -247,7 +247,9 @@ func resourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface{}
ds := out.DomainStatus
- d.Set("access_policies", normalizeJson(*ds.AccessPolicies))
+ if ds.AccessPolicies != nil && *ds.AccessPolicies != "" {
+ d.Set("access_policies", normalizeJson(*ds.AccessPolicies))
+ }
err = d.Set("advanced_options", pointersMapToStringList(ds.AdvancedOptions))
if err != nil {
return err
diff --git a/builtin/providers/aws/resource_aws_elasticsearch_domain_test.go b/builtin/providers/aws/resource_aws_elasticsearch_domain_test.go
index dee675d0d0..e17c0c0e89 100644
--- a/builtin/providers/aws/resource_aws_elasticsearch_domain_test.go
+++ b/builtin/providers/aws/resource_aws_elasticsearch_domain_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -85,8 +86,12 @@ func testAccCheckESDomainDestroy(s *terraform.State) error {
}
_, err := conn.DescribeElasticsearchDomain(opts)
+ // Verify the error is what we want
if err != nil {
- return fmt.Errorf("Error describing ES domains: %q", err.Error())
+ if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" {
+ continue
+ }
+ return err
}
}
return nil
From 45a3b5c5425723878a83282246742229feb0d5ff Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 22 Dec 2015 11:24:46 -0500
Subject: [PATCH 336/664] Add script for running tests in Travis
---
scripts/travis.sh | 14 ++++++++++++++
1 file changed, 14 insertions(+)
create mode 100755 scripts/travis.sh
diff --git a/scripts/travis.sh b/scripts/travis.sh
new file mode 100755
index 0000000000..90e5cb9fff
--- /dev/null
+++ b/scripts/travis.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+
+# Consistent output so travis does not think we're dead during long running
+# tests.
+export PING_SLEEP=30
+bash -c "while true; do echo \$(date) - building ...; sleep $PING_SLEEP; done" &
+PING_LOOP_PID=$!
+
+make testacc
+TEST_OUTPUT=$?
+
+kill $PING_LOOP_PID
+exit $TEST_OUTPUT
From 1c2c1a5edb4701cfe909bf5af018ae215dbe9fea Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 22 Dec 2015 11:08:09 -0600
Subject: [PATCH 337/664] provider/aws: Update tests destroy checks
---
.../resource_aws_spot_instance_request_test.go | 18 ++++++++++++++++--
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_spot_instance_request_test.go b/builtin/providers/aws/resource_aws_spot_instance_request_test.go
index 2fe9860a6c..37bd93507f 100644
--- a/builtin/providers/aws/resource_aws_spot_instance_request_test.go
+++ b/builtin/providers/aws/resource_aws_spot_instance_request_test.go
@@ -135,12 +135,26 @@ func testAccCheckAWSSpotInstanceRequestDestroy(s *terraform.State) error {
}
resp, err := conn.DescribeSpotInstanceRequests(req)
+ var s *ec2.SpotInstanceRequest
if err == nil {
- if len(resp.SpotInstanceRequests) > 0 {
- return fmt.Errorf("Spot instance request is still here.")
+ for _, sir := range resp.SpotInstanceRequests {
+ if sir.SpotInstanceRequestId != nil && *sir.SpotInstanceRequestId == rs.Primary.ID {
+ s = sir
+ }
+ continue
}
}
+ if s == nil {
+ // not found
+ return nil
+ }
+
+ if *s.State == "canceled" {
+ // Requests stick around for a while, so we make sure it's cancelled
+ return nil
+ }
+
// Verify the error is what we expect
ec2err, ok := err.(awserr.Error)
if !ok {
From b9ea8d869839b41d5cb6d8ac16c328b2e353af5c Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 22 Dec 2015 11:16:22 -0600
Subject: [PATCH 338/664] provider/aws: fix CheckDestroy for ProtocolPolicy
tests
Can only assert that the load balancer is gone, since the test suite
deletes everything, and the load balancer is the way you get to the
proxy protocol policy.
---
...resource_aws_proxy_protocol_policy_test.go | 26 ++++++++++++++++---
1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_proxy_protocol_policy_test.go b/builtin/providers/aws/resource_aws_proxy_protocol_policy_test.go
index 945a62d48b..ff1cc7a7e7 100644
--- a/builtin/providers/aws/resource_aws_proxy_protocol_policy_test.go
+++ b/builtin/providers/aws/resource_aws_proxy_protocol_policy_test.go
@@ -4,6 +4,8 @@ import (
"fmt"
"testing"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/elb"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -43,10 +45,28 @@ func TestAccAWSProxyProtocolPolicy_basic(t *testing.T) {
}
func testAccCheckProxyProtocolPolicyDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
- }
+ conn := testAccProvider.Meta().(*AWSClient).elbconn
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_placement_group" {
+ continue
+ }
+
+ req := &elb.DescribeLoadBalancersInput{
+ LoadBalancerNames: []*string{
+ aws.String(rs.Primary.Attributes["load_balancer"])},
+ }
+ _, err := conn.DescribeLoadBalancers(req)
+ if err != nil {
+ // Verify the error is what we want
+ if isLoadBalancerNotFound(err) {
+ continue
+ }
+ return err
+ }
+
+ return fmt.Errorf("still exists")
+ }
return nil
}
From 05983fde217f5cdd768772ff151e7295bf772d57 Mon Sep 17 00:00:00 2001
From: Clint
Date: Tue, 22 Dec 2015 11:45:17 -0600
Subject: [PATCH 339/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e79c9d95bf..06c2d8bf03 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -65,6 +65,7 @@ BUG FIXES:
* provider/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region [GH-4236]
* provider/aws: Fix missing AMI issue with Launch Configurations [GH-4242]
* provider/aws: Opsworks stack SSH key is write-only [GH-4241]
+ * provider/aws: Update VPC Endpoint to correctly set route table ids [GH-4392]
* provider/aws: Fix issue with ElasticSearch Domain `access_policies` always appear changed [GH-4245]
* provider/aws: Fix issue with nil parameter group value causing panic in `aws_db_parameter_group` [GH-4318]
* provider/aws: Fix issue with Elastic IPs not recognizing when they have been unassigned manually [GH-4387]
From c3d987ab18fd794c37d7feeb71405e7f09a6c189 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 22 Dec 2015 12:05:01 -0600
Subject: [PATCH 340/664] VPC Endpoint test updates
---
.../aws/resource_aws_vpc_endpoint_test.go | 24 ++++++++-----------
...esource_aws_vpc_peering_connection_test.go | 3 +++
2 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_vpc_endpoint_test.go b/builtin/providers/aws/resource_aws_vpc_endpoint_test.go
index 7973cf8f00..4a081b69c0 100644
--- a/builtin/providers/aws/resource_aws_vpc_endpoint_test.go
+++ b/builtin/providers/aws/resource_aws_vpc_endpoint_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/resource"
@@ -20,9 +21,9 @@ func TestAccAWSVpcEndpoint_basic(t *testing.T) {
CheckDestroy: testAccCheckVpcEndpointDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccVpcEndpointConfig,
+ Config: testAccVpcEndpointWithRouteTableAndPolicyConfig,
Check: resource.ComposeTestCheckFunc(
- testAccCheckVpcEndpointExists("aws_vpc_endpoint.private-s3", &endpoint),
+ testAccCheckVpcEndpointExists("aws_vpc_endpoint.second-private-s3", &endpoint),
),
},
},
@@ -69,7 +70,13 @@ func testAccCheckVpcEndpointDestroy(s *terraform.State) error {
VpcEndpointIds: []*string{aws.String(rs.Primary.ID)},
}
resp, err := conn.DescribeVpcEndpoints(input)
-
+ if err != nil {
+ // Verify the error is what we want
+ if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidVpcEndpointId.NotFound" {
+ continue
+ }
+ return err
+ }
if len(resp.VpcEndpoints) > 0 {
return fmt.Errorf("VPC Endpoints still exist.")
}
@@ -109,17 +116,6 @@ func testAccCheckVpcEndpointExists(n string, endpoint *ec2.VpcEndpoint) resource
}
}
-const testAccVpcEndpointConfig = `
-resource "aws_vpc" "foo" {
- cidr_block = "10.1.0.0/16"
-}
-
-resource "aws_vpc_endpoint" "private-s3" {
- vpc_id = "${aws_vpc.foo.id}"
- service_name = "com.amazonaws.us-west-2.s3"
-}
-`
-
const testAccVpcEndpointWithRouteTableAndPolicyConfig = `
resource "aws_vpc" "foo" {
cidr_block = "10.0.0.0/16"
diff --git a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go b/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go
index ca92ce66a6..48f8bee850 100644
--- a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go
+++ b/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go
@@ -37,6 +37,9 @@ func TestAccAWSVPCPeeringConnection_basic(t *testing.T) {
func TestAccAWSVPCPeeringConnection_tags(t *testing.T) {
var connection ec2.VpcPeeringConnection
peerId := os.Getenv("TF_PEER_ID")
+ if peerId == "" {
+ t.Fatalf("Error: TestAccAWSVPCPeeringConnection_tags requires a peer id to be set")
+ }
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
From 70bb536be10952452aa3ac54d8881a74c579621b Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 22 Dec 2015 12:18:22 -0600
Subject: [PATCH 341/664] skip TestAccAWSVPCPeeringConnection_tags for now
---
.../providers/aws/resource_aws_vpc_peering_connection_test.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go b/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go
index 48f8bee850..7e85659f2e 100644
--- a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go
+++ b/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go
@@ -38,7 +38,7 @@ func TestAccAWSVPCPeeringConnection_tags(t *testing.T) {
var connection ec2.VpcPeeringConnection
peerId := os.Getenv("TF_PEER_ID")
if peerId == "" {
- t.Fatalf("Error: TestAccAWSVPCPeeringConnection_tags requires a peer id to be set")
+ t.Skip("Error: TestAccAWSVPCPeeringConnection_tags requires a peer id to be set")
}
resource.Test(t, resource.TestCase{
From 558e839bf28c67d81f511ad42f73d02177426c40 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 22 Dec 2015 12:53:42 -0600
Subject: [PATCH 342/664] vpc vpn connection test fixes
---
.../aws/resource_aws_vpn_connection_test.go | 37 ++++++++++++++++++-
1 file changed, 35 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_vpn_connection_test.go b/builtin/providers/aws/resource_aws_vpn_connection_test.go
index 123cb07e6b..cf151fc854 100644
--- a/builtin/providers/aws/resource_aws_vpn_connection_test.go
+++ b/builtin/providers/aws/resource_aws_vpn_connection_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/resource"
@@ -44,8 +45,40 @@ func TestAccAWSVpnConnection_basic(t *testing.T) {
}
func testAccAwsVpnConnectionDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
+ conn := testAccProvider.Meta().(*AWSClient).ec2conn
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_vpn_connection" {
+ continue
+ }
+
+ resp, err := conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{
+ VpnConnectionIds: []*string{aws.String(rs.Primary.ID)},
+ })
+
+ if err != nil {
+ if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" {
+ // not found
+ return nil
+ }
+ return err
+ }
+
+ var vpn *ec2.VpnConnection
+ for _, v := range resp.VpnConnections {
+ if v.VpnConnectionId != nil && *v.VpnConnectionId == rs.Primary.ID {
+ vpn = v
+ }
+ }
+
+ if vpn == nil {
+ // vpn connection not found
+ return nil
+ }
+
+ if vpn.State != nil && *vpn.State == "deleted" {
+ return nil
+ }
+
}
return nil
From 583c0a54c43ee29d7489786b71aadf88bab2a9e9 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Tue, 22 Dec 2015 21:10:23 +0100
Subject: [PATCH 343/664] aws: Fix bug w/ changing ECS service LB association
- fixes #3444
- fixes #4227
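The fix marks iam_role and every nested load_balancer field as ForceNew: assuming the ECS API of the time could not change a service's load balancer association in place, the only safe way to apply such a change is to replace the service. A hedged sketch of what ForceNew means on an attribute:

```go
package aws

import "github.com/hashicorp/terraform/helper/schema"

// forceNewSketch shows the effect of ForceNew: when the attribute's value
// changes in configuration, Terraform plans a destroy-and-recreate of the
// whole resource instead of attempting an in-place update.
func forceNewSketch() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeString,
		Optional: true,
		ForceNew: true,
	}
}
```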
---
builtin/providers/aws/resource_aws_ecs_service.go | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_ecs_service.go b/builtin/providers/aws/resource_aws_ecs_service.go
index 805d968407..3af04dbda7 100644
--- a/builtin/providers/aws/resource_aws_ecs_service.go
+++ b/builtin/providers/aws/resource_aws_ecs_service.go
@@ -51,27 +51,32 @@ func resourceAwsEcsService() *schema.Resource {
"iam_role": &schema.Schema{
Type: schema.TypeString,
+ ForceNew: true,
Optional: true,
},
"load_balancer": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
+ ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"elb_name": &schema.Schema{
Type: schema.TypeString,
Required: true,
+ ForceNew: true,
},
"container_name": &schema.Schema{
Type: schema.TypeString,
Required: true,
+ ForceNew: true,
},
"container_port": &schema.Schema{
Type: schema.TypeInt,
Required: true,
+ ForceNew: true,
},
},
},
From f8bb48b28721a451d896d578f6db4a795be7f669 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Thu, 17 Dec 2015 15:21:05 +0100
Subject: [PATCH 344/664] aws: Wait for ECS service to be drained before
deletion
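The deletion path now wraps DeleteService in resource.Retry: returning a plain error from the retry function asks for another attempt, while wrapping it in resource.RetryError aborts immediately. A hedged outline with the actual API call abstracted away (deleteFn stands in for conn.DeleteService and is not part of the patch):

```go
package aws

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/hashicorp/terraform/helper/resource"
)

// deleteWithRetrySketch retries a delete operation for up to five minutes.
// Returning the bare error signals "try again"; wrapping it in RetryError
// stops the loop and surfaces the failure.
func deleteWithRetrySketch(deleteFn func() error) error {
	return resource.Retry(5*time.Minute, func() error {
		err := deleteFn()
		if err == nil {
			return nil
		}
		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "InvalidParameterException" {
			// Active deployments are still draining; retry.
			return err
		}
		return &resource.RetryError{Err: err}
	})
}
```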
---
.../providers/aws/resource_aws_ecs_service.go | 35 +++++++++++++++----
1 file changed, 28 insertions(+), 7 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_ecs_service.go b/builtin/providers/aws/resource_aws_ecs_service.go
index 3af04dbda7..19225361c2 100644
--- a/builtin/providers/aws/resource_aws_ecs_service.go
+++ b/builtin/providers/aws/resource_aws_ecs_service.go
@@ -279,13 +279,33 @@ func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error
}
}
- input := ecs.DeleteServiceInput{
- Service: aws.String(d.Id()),
- Cluster: aws.String(d.Get("cluster").(string)),
- }
+ // Wait until the ECS service is drained
+ err = resource.Retry(5*time.Minute, func() error {
+ input := ecs.DeleteServiceInput{
+ Service: aws.String(d.Id()),
+ Cluster: aws.String(d.Get("cluster").(string)),
+ }
- log.Printf("[DEBUG] Deleting ECS service %s", input)
- out, err := conn.DeleteService(&input)
+ log.Printf("[DEBUG] Trying to delete ECS service %s", input)
+ _, err := conn.DeleteService(&input)
+ if err == nil {
+ return nil
+ }
+
+ ec2err, ok := err.(awserr.Error)
+ if !ok {
+ return &resource.RetryError{Err: err}
+ }
+ if ec2err.Code() == "InvalidParameterException" {
+ // Prevent "The service cannot be stopped while deployments are active."
+ log.Printf("[DEBUG] Trying to delete ECS service again: %q",
+ ec2err.Message())
+ return err
+ }
+
+ return &resource.RetryError{Err: err}
+
+ })
if err != nil {
return err
}
@@ -306,6 +326,7 @@ func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error
return resp, "FAILED", err
}
+ log.Printf("[DEBUG] ECS service %s is currently %q", *resp.Services[0].Status)
return resp, *resp.Services[0].Status, nil
},
}
@@ -315,7 +336,7 @@ func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error
return err
}
- log.Printf("[DEBUG] ECS service %s deleted.", *out.Service.ServiceArn)
+ log.Printf("[DEBUG] ECS service %s deleted.", d.Id())
return nil
}
From 47ea43394f859bba3b72831ab91735691886e5bf Mon Sep 17 00:00:00 2001
From: Seth Bergman
Date: Tue, 22 Dec 2015 14:18:29 -0600
Subject: [PATCH 345/664] Update install.html.markdown
Added example commands for setting environment paths in Linux/Unix and Windows.
---
website/source/intro/getting-started/install.html.markdown | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/website/source/intro/getting-started/install.html.markdown b/website/source/intro/getting-started/install.html.markdown
index 2392e35491..3a171352a9 100644
--- a/website/source/intro/getting-started/install.html.markdown
+++ b/website/source/intro/getting-started/install.html.markdown
@@ -28,6 +28,13 @@ for instructions on setting the PATH on Linux and Mac.
[This page](http://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows)
contains instructions for setting the PATH on Windows.
+Example for Linux/Mac - Type the following into your terminal:
+>`PATH=/usr/local/terraform/bin:/home/your-user-name/terraform:$PATH`
+
+Example for Windows - Type the following into the command prompt:
+>`set PATH=%PATH%;C:\terraform`
+
+
## Verifying the Installation
After installing Terraform, verify the installation worked by opening a new
From 363defb548cf090ec4fe536f77b58b9f9a091362 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 22 Dec 2015 14:58:43 -0600
Subject: [PATCH 346/664] provider/aws: Update Route53 Zone tests
---
builtin/providers/aws/resource_aws_route53_record_test.go | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_route53_record_test.go b/builtin/providers/aws/resource_aws_route53_record_test.go
index 94dfe8e4b4..f07215df51 100644
--- a/builtin/providers/aws/resource_aws_route53_record_test.go
+++ b/builtin/providers/aws/resource_aws_route53_record_test.go
@@ -9,6 +9,7 @@ import (
"github.com/hashicorp/terraform/terraform"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/route53"
)
@@ -277,6 +278,12 @@ func testAccCheckRoute53RecordDestroy(s *terraform.State) error {
resp, err := conn.ListResourceRecordSets(lopts)
if err != nil {
+ if awsErr, ok := err.(awserr.Error); ok {
+ // if NoSuchHostedZone, then all the things are destroyed
+ if awsErr.Code() == "NoSuchHostedZone" {
+ return nil
+ }
+ }
return err
}
if len(resp.ResourceRecordSets) == 0 {
From 8aa991cb5d0a0e40d518f8e1935d5581895108f9 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 22 Dec 2015 15:46:46 -0500
Subject: [PATCH 347/664] provider/azure: Remove obsolete tests
---
builtin/providers/azure/provider_test.go | 46 +++++-------------------
1 file changed, 8 insertions(+), 38 deletions(-)
diff --git a/builtin/providers/azure/provider_test.go b/builtin/providers/azure/provider_test.go
index d06cf896d2..7422b0eb0f 100644
--- a/builtin/providers/azure/provider_test.go
+++ b/builtin/providers/azure/provider_test.go
@@ -127,50 +127,20 @@ func TestAzure_validateSettingsFile(t *testing.T) {
}
func TestAzure_providerConfigure(t *testing.T) {
- home, err := homedir.Dir()
- if err != nil {
- t.Fatalf("Error fetching homedir: %s", err)
+ rp := Provider()
+ raw := map[string]interface{}{
+ "publish_settings": testAzurePublishSettingsStr,
}
- fh, err := ioutil.TempFile(home, "tf-test-home")
- if err != nil {
- t.Fatalf("Error creating homedir-based temporary file: %s", err)
- }
- defer os.Remove(fh.Name())
- _, err = io.WriteString(fh, testAzurePublishSettingsStr)
+ rawConfig, err := config.NewRawConfig(raw)
if err != nil {
t.Fatalf("err: %s", err)
}
- fh.Close()
- r := strings.NewReplacer(home, "~")
- homePath := r.Replace(fh.Name())
-
- cases := []struct {
- SettingsFile string // String of XML or a path to an XML file
- NilMeta bool // whether meta is expected to be nil
- }{
- {testAzurePublishSettingsStr, false},
- {homePath, false},
- }
-
- for _, tc := range cases {
- rp := Provider()
- raw := map[string]interface{}{
- "settings_file": tc.SettingsFile,
- }
-
- rawConfig, err := config.NewRawConfig(raw)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- err = rp.Configure(terraform.NewResourceConfig(rawConfig))
- meta := rp.(*schema.Provider).Meta()
- if (meta == nil) != tc.NilMeta {
- t.Fatalf("expected NilMeta: %t, got meta: %#v, settings_file: %q",
- tc.NilMeta, meta, tc.SettingsFile)
- }
+ err = rp.Configure(terraform.NewResourceConfig(rawConfig))
+ meta := rp.(*schema.Provider).Meta()
+ if meta == nil {
+ t.Fatal("Expected metadata, got nil: err: %s", err)
}
}
From 66f1555727ec4479401bf7a3393e50f826192b0b Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 22 Dec 2015 15:35:12 -0600
Subject: [PATCH 348/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 06c2d8bf03..e84873d702 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,7 @@ FEATURES:
* **New resource: `aws_autoscaling_schedule`** [GH-4256]
* **New resource: `aws_nat_gateway`** [GH-4381]
* **New resource: `aws_network_acl_rule`** [GH-4286]
+ * **New resources: `aws_ecr_repository` and `aws_ecr_repository_policy`** [GH-4415]
* **New resource: `google_pubsub_topic`** [GH-3671]
* **New resource: `google_pubsub_subscription`** [GH-3671]
* **New resource: `template_cloudinit_config`** [GH-4095]
From 4d74bc789e06b7e6f0a6a567b1ee42d6773e6418 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 22 Dec 2015 16:55:01 -0500
Subject: [PATCH 349/664] provider/azure: Fix vetting error
---
builtin/providers/azure/provider_test.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/builtin/providers/azure/provider_test.go b/builtin/providers/azure/provider_test.go
index 7422b0eb0f..3eb836c44d 100644
--- a/builtin/providers/azure/provider_test.go
+++ b/builtin/providers/azure/provider_test.go
@@ -140,7 +140,7 @@ func TestAzure_providerConfigure(t *testing.T) {
err = rp.Configure(terraform.NewResourceConfig(rawConfig))
meta := rp.(*schema.Provider).Meta()
if meta == nil {
- t.Fatal("Expected metadata, got nil: err: %s", err)
+ t.Fatalf("Expected metadata, got nil: err: %s", err)
}
}
From 0d895ffa96792c8658e4a63aa330bffbe12bf4e3 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 22 Dec 2015 15:59:17 -0600
Subject: [PATCH 350/664] provider/aws: mention us-east-1 in ECR docs and tests
ECR is only available in us-east-1 for now.
Also added missing sidebar links for ECR resources.
/cc @nextrevision
---
.../aws/resource_aws_ecr_repository_policy_test.go | 5 +++++
.../providers/aws/resource_aws_ecr_repository_test.go | 5 +++++
.../docs/providers/aws/r/ecr_repository.html.markdown | 10 ++++++++--
.../aws/r/ecr_repository_policy.html.markdown | 6 ++++++
website/source/layouts/aws.erb | 8 ++++++++
5 files changed, 32 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_ecr_repository_policy_test.go b/builtin/providers/aws/resource_aws_ecr_repository_policy_test.go
index 5955160ee8..9ff1bffd5f 100644
--- a/builtin/providers/aws/resource_aws_ecr_repository_policy_test.go
+++ b/builtin/providers/aws/resource_aws_ecr_repository_policy_test.go
@@ -62,6 +62,11 @@ func testAccCheckAWSEcrRepositoryPolicyExists(name string) resource.TestCheckFun
}
var testAccAWSEcrRepositoryPolicy = `
+# ECR initially only available in us-east-1
+# https://aws.amazon.com/blogs/aws/ec2-container-registry-now-generally-available/
+provider "aws" {
+ region = "us-east-1"
+}
resource "aws_ecr_repository" "foo" {
name = "bar"
}
diff --git a/builtin/providers/aws/resource_aws_ecr_repository_test.go b/builtin/providers/aws/resource_aws_ecr_repository_test.go
index 57de3c0e38..79b8b3bfa5 100644
--- a/builtin/providers/aws/resource_aws_ecr_repository_test.go
+++ b/builtin/providers/aws/resource_aws_ecr_repository_test.go
@@ -71,6 +71,11 @@ func testAccCheckAWSEcrRepositoryExists(name string) resource.TestCheckFunc {
}
var testAccAWSEcrRepository = `
+# ECR initially only available in us-east-1
+# https://aws.amazon.com/blogs/aws/ec2-container-registry-now-generally-available/
+provider "aws" {
+ region = "us-east-1"
+}
resource "aws_ecr_repository" "default" {
name = "foo-repository-terraform"
}
diff --git a/website/source/docs/providers/aws/r/ecr_repository.html.markdown b/website/source/docs/providers/aws/r/ecr_repository.html.markdown
index ad49932bab..e90b796da5 100644
--- a/website/source/docs/providers/aws/r/ecr_repository.html.markdown
+++ b/website/source/docs/providers/aws/r/ecr_repository.html.markdown
@@ -3,12 +3,18 @@ layout: "aws"
page_title: "AWS: aws_ecr_repository"
sidebar_current: "docs-aws-resource-ecr-repository"
description: |-
- Provides an ECR Repository.
+ Provides an EC2 Container Registry Repository.
---
# aws\_ecr\_repository
-Provides an ECR repository.
+Provides an EC2 Container Registry Repository.
+
+~> **NOTE on ECR Availability**: The EC2 Container Registry has an [initial
+launch region of
+`us-east-1`](https://aws.amazon.com/blogs/aws/ec2-container-registry-now-generally-available/).
+As more regions become available, they will be listed [in the AWS
+Docs](https://docs.aws.amazon.com/general/latest/gr/rande.html#ecr_region).
## Example Usage
diff --git a/website/source/docs/providers/aws/r/ecr_repository_policy.html.markdown b/website/source/docs/providers/aws/r/ecr_repository_policy.html.markdown
index 5df7d67272..cb1b954b4f 100644
--- a/website/source/docs/providers/aws/r/ecr_repository_policy.html.markdown
+++ b/website/source/docs/providers/aws/r/ecr_repository_policy.html.markdown
@@ -12,6 +12,12 @@ Provides an ECR repository policy.
Note that currently only one policy may be applied to a repository.
+~> **NOTE on ECR Availability**: The EC2 Container Registry has an [initial
+launch region of
+`us-east-1`](https://aws.amazon.com/blogs/aws/ec2-container-registry-now-generally-available/).
+As more regions become available, they will be listed [in the AWS
+Docs](https://docs.aws.amazon.com/general/latest/gr/rande.html#ecr_region).
+
## Example Usage
```
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb
index 5a12b432ee..53b7d37c9f 100644
--- a/website/source/layouts/aws.erb
+++ b/website/source/layouts/aws.erb
@@ -193,6 +193,14 @@
aws_ecs_task_definition
+ >
+ aws_ecr_repository
+
+
+ >
+ aws_ecr_repository_policy
+
+
From 2a7576068faf2c5342a8be5b1ac01b399b3f6ee6 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 22 Dec 2015 17:16:18 -0500
Subject: [PATCH 351/664] provider/azure: Allow AZURE_SETTINGS_FILE for tests
---
builtin/providers/azure/provider_test.go | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/azure/provider_test.go b/builtin/providers/azure/provider_test.go
index 3eb836c44d..6206291764 100644
--- a/builtin/providers/azure/provider_test.go
+++ b/builtin/providers/azure/provider_test.go
@@ -51,12 +51,15 @@ func TestProvider_impl(t *testing.T) {
}
func testAccPreCheck(t *testing.T) {
- if v := os.Getenv("AZURE_PUBLISH_SETTINGS"); v == "" {
+ v1 := os.Getenv("AZURE_PUBLISH_SETTINGS")
+ v2 := os.Getenv("AZURE_SETTINGS_FILE")
+
+ if v1 == "" && v2 == "" {
subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
certificate := os.Getenv("AZURE_CERTIFICATE")
if subscriptionID == "" || certificate == "" {
- t.Fatal("either AZURE_PUBLISH_SETTINGS, or AZURE_SUBSCRIPTION_ID " +
+ t.Fatal("either AZURE_PUBLISH_SETTINGS, AZURE_SETTINGS_FILE, or AZURE_SUBSCRIPTION_ID " +
"and AZURE_CERTIFICATE must be set for acceptance tests")
}
}
From f68a351fba706dfb6b543c40d64f127e301b8b36 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 22 Dec 2015 16:23:08 -0600
Subject: [PATCH 352/664] provider/aws: Fix issue with Route53 and zero
weighted records
---
.../aws/resource_aws_route53_record.go | 20 ++++++++++++++-----
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_route53_record.go b/builtin/providers/aws/resource_aws_route53_record.go
index a6c88ade40..2d024fe5e2 100644
--- a/builtin/providers/aws/resource_aws_route53_record.go
+++ b/builtin/providers/aws/resource_aws_route53_record.go
@@ -64,9 +64,13 @@ func resourceAwsRoute53Record() *schema.Resource {
ConflictsWith: []string{"alias"},
},
+			// Weight uses a special sentinel value to indicate its presence.
+ // Because 0 is a valid value for Weight, we default to -1 so that any
+ // inclusion of a weight (zero or not) will be a usable value
"weight": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
+ Default: -1,
},
"set_identifier": &schema.Schema{
@@ -171,8 +175,8 @@ func resourceAwsRoute53RecordCreate(d *schema.ResourceData, meta interface{}) er
ChangeBatch: changeBatch,
}
- log.Printf("[DEBUG] Creating resource records for zone: %s, name: %s",
- zone, *rec.Name)
+ log.Printf("[DEBUG] Creating resource records for zone: %s, name: %s\n\n%s",
+ zone, *rec.Name, req)
wait := resource.StateChangeConf{
Pending: []string{"rejected"},
@@ -292,7 +296,12 @@ func resourceAwsRoute53RecordRead(d *schema.ResourceData, meta interface{}) erro
}
d.Set("ttl", record.TTL)
- d.Set("weight", record.Weight)
+ // Only set the weight if it's non-nil, otherwise we end up with a 0 weight
+ // which has actual contextual meaning with Route 53 records
+ // See http://docs.aws.amazon.com/fr_fr/Route53/latest/APIReference/API_ChangeResourceRecordSets_Examples.html
+ if record.Weight != nil {
+ d.Set("weight", record.Weight)
+ }
d.Set("set_identifier", record.SetIdentifier)
d.Set("failover", record.Failover)
d.Set("health_check_id", record.HealthCheckId)
@@ -439,8 +448,9 @@ func resourceAwsRoute53RecordBuildSet(d *schema.ResourceData, zoneName string) (
rec.SetIdentifier = aws.String(v.(string))
}
- if v, ok := d.GetOk("weight"); ok {
- rec.Weight = aws.Int64(int64(v.(int)))
+ w := d.Get("weight").(int)
+ if w > -1 {
+ rec.Weight = aws.Int64(int64(w))
}
return rec, nil
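Pulled out of the diff above, the sentinel approach reduces to one small decision: the schema defaults weight to -1, and the request builder only sends a Weight when the user set one, zero included. A hedged sketch (the function name is illustrative):

```go
package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/hashicorp/terraform/helper/schema"
)

// weightFromConfig returns a Route 53 weight only when the user configured
// one. Because the schema default is -1, a configured value of 0 remains
// distinguishable from "weight omitted".
func weightFromConfig(d *schema.ResourceData) *int64 {
	if w := d.Get("weight").(int); w > -1 {
		return aws.Int64(int64(w))
	}
	return nil
}
```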
From 6b5c1d50e700feec75cbb6d85981edb8c46e34f2 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 22 Dec 2015 17:31:06 -0500
Subject: [PATCH 353/664] provider/azure: Copy settings file into variable
---
builtin/providers/azure/provider_test.go | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/builtin/providers/azure/provider_test.go b/builtin/providers/azure/provider_test.go
index 6206291764..57e852ce0f 100644
--- a/builtin/providers/azure/provider_test.go
+++ b/builtin/providers/azure/provider_test.go
@@ -51,15 +51,22 @@ func TestProvider_impl(t *testing.T) {
}
func testAccPreCheck(t *testing.T) {
- v1 := os.Getenv("AZURE_PUBLISH_SETTINGS")
- v2 := os.Getenv("AZURE_SETTINGS_FILE")
+ sf := os.Getenv("PUBLISH_SETTINGS_FILE")
+ if sf != "" {
+ publishSettings, err := ioutil.ReadFile(sf)
+ if err != nil {
+ t.Fatalf("Error reading AZURE_SETTINGS_FILE path: %s", err)
+ }
- if v1 == "" && v2 == "" {
+ os.Setenv("AZURE_PUBLISH_SETTINGS", string(publishSettings))
+ }
+
+ if v := os.Getenv("AZURE_PUBLISH_SETTINGS"); v == "" {
subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
certificate := os.Getenv("AZURE_CERTIFICATE")
if subscriptionID == "" || certificate == "" {
- t.Fatal("either AZURE_PUBLISH_SETTINGS, AZURE_SETTINGS_FILE, or AZURE_SUBSCRIPTION_ID " +
+ t.Fatal("either AZURE_PUBLISH_SETTINGS, PUBLISH_SETTINGS_FILE, or AZURE_SUBSCRIPTION_ID " +
"and AZURE_CERTIFICATE must be set for acceptance tests")
}
}
From 787340f801e9b2bb7c26072cd5eae71f39102338 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 22 Dec 2015 16:47:57 -0600
Subject: [PATCH 354/664] make note of -1 value for r53 record
---
.../docs/providers/aws/r/route53_record.html.markdown | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/website/source/docs/providers/aws/r/route53_record.html.markdown b/website/source/docs/providers/aws/r/route53_record.html.markdown
index e7455ba13f..b1c6bf7550 100644
--- a/website/source/docs/providers/aws/r/route53_record.html.markdown
+++ b/website/source/docs/providers/aws/r/route53_record.html.markdown
@@ -99,6 +99,12 @@ record from one another. Required for each weighted record.
* `alias` - (Optional) An alias block. Conflicts with `ttl` & `records`.
Alias record documented below.
+~> **Note:** The `weight` attribute uses a special sentinel value of `-1` for a
+default in Terraform. This allows Terraform to distinguish between a `0` value
+and an empty value in the configuration (none specified). As a result, a
+`weight` of `-1` will be present in the statefile if `weight` is omitted in the
+configuration.
+
Exactly one of `records` or `alias` must be specified: this determines whether it's an alias record.
Alias records support the following:
From 95367bc0fceb0a54abccdbf85ebea40ea7d562c6 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Wed, 23 Dec 2015 10:17:52 +0100
Subject: [PATCH 355/664] aws: Fix CheckDestroy for ecs service
---
.../providers/aws/resource_aws_ecs_service_test.go | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/aws/resource_aws_ecs_service_test.go b/builtin/providers/aws/resource_aws_ecs_service_test.go
index 9eb9bce186..738741287b 100644
--- a/builtin/providers/aws/resource_aws_ecs_service_test.go
+++ b/builtin/providers/aws/resource_aws_ecs_service_test.go
@@ -209,6 +209,7 @@ func testAccCheckAWSEcsServiceDestroy(s *terraform.State) error {
out, err := conn.DescribeServices(&ecs.DescribeServicesInput{
Services: []*string{aws.String(rs.Primary.ID)},
+ Cluster: aws.String(rs.Primary.Attributes["cluster"]),
})
if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ClusterNotFoundException" {
@@ -217,8 +218,19 @@ func testAccCheckAWSEcsServiceDestroy(s *terraform.State) error {
if err == nil {
if len(out.Services) > 0 {
- return fmt.Errorf("ECS service still exists:\n%#v", out.Services)
+ var activeServices []*ecs.Service
+ for _, svc := range out.Services {
+ if *svc.Status != "INACTIVE" {
+ activeServices = append(activeServices, svc)
+ }
+ }
+ if len(activeServices) == 0 {
+ return nil
+ }
+
+ return fmt.Errorf("ECS service still exists:\n%#v", activeServices)
}
+ return nil
}
return err
From 9a625427ca31b4057c9c939dfd3bd3f0f77dc3fe Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Wed, 23 Dec 2015 10:43:37 +0100
Subject: [PATCH 356/664] aws: Add regression acc test for ecs svc lb changes
---
.../aws/resource_aws_ecs_service_test.go | 124 ++++++++++++++++++
1 file changed, 124 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_ecs_service_test.go b/builtin/providers/aws/resource_aws_ecs_service_test.go
index 738741287b..40cb7975ee 100644
--- a/builtin/providers/aws/resource_aws_ecs_service_test.go
+++ b/builtin/providers/aws/resource_aws_ecs_service_test.go
@@ -179,6 +179,29 @@ func TestAccAWSEcsService_withIamRole(t *testing.T) {
})
}
+// Regression for https://github.com/hashicorp/terraform/issues/3444
+func TestAccAWSEcsService_withLbChanges(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSEcsServiceDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSEcsService_withLbChanges,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSEcsServiceExists("aws_ecs_service.with_lb_changes"),
+ ),
+ },
+ resource.TestStep{
+ Config: testAccAWSEcsService_withLbChanges_modified,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSEcsServiceExists("aws_ecs_service.with_lb_changes"),
+ ),
+ },
+ },
+ })
+}
+
// Regression for https://github.com/hashicorp/terraform/issues/3361
func TestAccAWSEcsService_withEcsClusterName(t *testing.T) {
clusterName := regexp.MustCompile("^terraformecstestcluster$")
@@ -400,6 +423,107 @@ resource "aws_ecs_service" "ghost" {
}
`
+var tpl_testAccAWSEcsService_withLbChanges = `
+resource "aws_ecs_cluster" "main" {
+ name = "terraformecstest12"
+}
+
+resource "aws_ecs_task_definition" "with_lb_changes" {
+ family = "ghost_lbd"
+ container_definitions = <
Date: Wed, 23 Dec 2015 11:35:30 +0100
Subject: [PATCH 357/664] Revert "provider/aws: fix ECS service CheckDestroy in
tests"
This reverts commit 47f8b0cd79b511805fa0cdbcf86c0ac2ae2eed1a.
cc @phinze
---
builtin/providers/aws/resource_aws_ecs_service_test.go | 5 -----
1 file changed, 5 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_ecs_service_test.go b/builtin/providers/aws/resource_aws_ecs_service_test.go
index 40cb7975ee..fcac09ba51 100644
--- a/builtin/providers/aws/resource_aws_ecs_service_test.go
+++ b/builtin/providers/aws/resource_aws_ecs_service_test.go
@@ -6,7 +6,6 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -235,10 +234,6 @@ func testAccCheckAWSEcsServiceDestroy(s *terraform.State) error {
Cluster: aws.String(rs.Primary.Attributes["cluster"]),
})
- if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ClusterNotFoundException" {
- continue
- }
-
if err == nil {
if len(out.Services) > 0 {
var activeServices []*ecs.Service
From a367886eaf7007d596f52f640f63b26f9a5111fd Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Wed, 23 Dec 2015 15:11:45 +0100
Subject: [PATCH 358/664] provider/aws: Separate existing validate functions
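All of the relocated functions share the schema ValidateFunc signature, `func(interface{}, string) ([]string, []error)`, so consolidating them in validators.go changes nothing about how they are wired into resources. A hedged sketch of a schema attribute consuming one of them:

```go
package aws

import "github.com/hashicorp/terraform/helper/schema"

// exampleValidatedName attaches one of the consolidated validators to an
// attribute; any function matching the ValidateFunc signature can be used.
func exampleValidatedName() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeString,
		Required:     true,
		ForceNew:     true,
		ValidateFunc: validateElbName,
	}
}
```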
---
.../aws/resource_aws_autoscaling_schedule.go | 11 --
...esource_aws_codedeploy_deployment_group.go | 11 --
.../aws/resource_aws_db_parameter_group.go | 27 -----
.../aws/resource_aws_dynamodb_table.go | 15 ---
builtin/providers/aws/resource_aws_elb.go | 24 ----
builtin/providers/aws/structure.go | 22 ----
builtin/providers/aws/validators.go | 114 ++++++++++++++++++
7 files changed, 114 insertions(+), 110 deletions(-)
create mode 100644 builtin/providers/aws/validators.go
diff --git a/builtin/providers/aws/resource_aws_autoscaling_schedule.go b/builtin/providers/aws/resource_aws_autoscaling_schedule.go
index b8a1135dee..f6e3745d17 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_schedule.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_schedule.go
@@ -176,14 +176,3 @@ func resourceAwsASGScheduledActionRetrieve(d *schema.ResourceData, meta interfac
return actions.ScheduledUpdateGroupActions[0], nil
}
-
-func validateASGScheduleTimestamp(v interface{}, k string) (ws []string, errors []error) {
- value := v.(string)
- _, err := time.Parse(awsAutoscalingScheduleTimeLayout, value)
- if err != nil {
- errors = append(errors, fmt.Errorf(
- "%q cannot be parsed as iso8601 Timestamp Format", value))
- }
-
- return
-}
diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go
index ee81f1cf3c..457368aed8 100644
--- a/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go
+++ b/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go
@@ -344,17 +344,6 @@ func onPremisesTagFiltersToMap(list []*codedeploy.TagFilter) []map[string]string
return result
}
-// validateTagFilters confirms the "value" component of a tag filter is one of
-// AWS's three allowed types.
-func validateTagFilters(v interface{}, k string) (ws []string, errors []error) {
- value := v.(string)
- if value != "KEY_ONLY" && value != "VALUE_ONLY" && value != "KEY_AND_VALUE" {
- errors = append(errors, fmt.Errorf(
- "%q must be one of \"KEY_ONLY\", \"VALUE_ONLY\", or \"KEY_AND_VALUE\"", k))
- }
- return
-}
-
func resourceAwsCodeDeployTagFilterHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
diff --git a/builtin/providers/aws/resource_aws_db_parameter_group.go b/builtin/providers/aws/resource_aws_db_parameter_group.go
index 0513be0b54..7204c82826 100644
--- a/builtin/providers/aws/resource_aws_db_parameter_group.go
+++ b/builtin/providers/aws/resource_aws_db_parameter_group.go
@@ -4,7 +4,6 @@ import (
"bytes"
"fmt"
"log"
- "regexp"
"strings"
"time"
@@ -285,29 +284,3 @@ func buildRDSPGARN(d *schema.ResourceData, meta interface{}) (string, error) {
arn := fmt.Sprintf("arn:aws:rds:%s:%s:pg:%s", region, accountID, d.Id())
return arn, nil
}
-
-func validateDbParamGroupName(v interface{}, k string) (ws []string, errors []error) {
- value := v.(string)
- if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
- errors = append(errors, fmt.Errorf(
- "only lowercase alphanumeric characters and hyphens allowed in %q", k))
- }
- if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
- errors = append(errors, fmt.Errorf(
- "first character of %q must be a letter", k))
- }
- if regexp.MustCompile(`--`).MatchString(value) {
- errors = append(errors, fmt.Errorf(
- "%q cannot contain two consecutive hyphens", k))
- }
- if regexp.MustCompile(`-$`).MatchString(value) {
- errors = append(errors, fmt.Errorf(
- "%q cannot end with a hyphen", k))
- }
- if len(value) > 255 {
- errors = append(errors, fmt.Errorf(
- "%q cannot be greater than 255 characters", k))
- }
- return
-
-}
diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go
index 0606cde2e8..8f1ac41c5a 100644
--- a/builtin/providers/aws/resource_aws_dynamodb_table.go
+++ b/builtin/providers/aws/resource_aws_dynamodb_table.go
@@ -801,18 +801,3 @@ func waitForTableToBeActive(tableName string, meta interface{}) error {
return nil
}
-
-func validateStreamViewType(v interface{}, k string) (ws []string, errors []error) {
- value := v.(string)
- viewTypes := map[string]bool{
- "KEYS_ONLY": true,
- "NEW_IMAGE": true,
- "OLD_IMAGE": true,
- "NEW_AND_OLD_IMAGES": true,
- }
-
- if !viewTypes[value] {
- errors = append(errors, fmt.Errorf("%q be a valid DynamoDB StreamViewType", k))
- }
- return
-}
diff --git a/builtin/providers/aws/resource_aws_elb.go b/builtin/providers/aws/resource_aws_elb.go
index 1bc6ddc283..23042aa5bd 100644
--- a/builtin/providers/aws/resource_aws_elb.go
+++ b/builtin/providers/aws/resource_aws_elb.go
@@ -4,7 +4,6 @@ import (
"bytes"
"fmt"
"log"
- "regexp"
"strings"
"time"
@@ -673,29 +672,6 @@ func isLoadBalancerNotFound(err error) bool {
return ok && elberr.Code() == "LoadBalancerNotFound"
}
-func validateElbName(v interface{}, k string) (ws []string, errors []error) {
- value := v.(string)
- if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) {
- errors = append(errors, fmt.Errorf(
- "only alphanumeric characters and hyphens allowed in %q: %q",
- k, value))
- }
- if len(value) > 32 {
- errors = append(errors, fmt.Errorf(
- "%q cannot be longer than 32 characters: %q", k, value))
- }
- if regexp.MustCompile(`^-`).MatchString(value) {
- errors = append(errors, fmt.Errorf(
- "%q cannot begin with a hyphen: %q", k, value))
- }
- if regexp.MustCompile(`-$`).MatchString(value) {
- errors = append(errors, fmt.Errorf(
- "%q cannot end with a hyphen: %q", k, value))
- }
- return
-
-}
-
func sourceSGIdByName(meta interface{}, sg, vpcId string) (string, error) {
conn := meta.(*AWSClient).ec2conn
var filters []*ec2.Filter
diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go
index 748ecc88be..35b7ba4e11 100644
--- a/builtin/providers/aws/structure.go
+++ b/builtin/providers/aws/structure.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"fmt"
- "regexp"
"sort"
"strings"
@@ -516,27 +515,6 @@ func expandResourceRecords(recs []interface{}, typeStr string) []*route53.Resour
return records
}
-func validateRdsId(v interface{}, k string) (ws []string, errors []error) {
- value := v.(string)
- if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
- errors = append(errors, fmt.Errorf(
- "only lowercase alphanumeric characters and hyphens allowed in %q", k))
- }
- if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
- errors = append(errors, fmt.Errorf(
- "first character of %q must be a letter", k))
- }
- if regexp.MustCompile(`--`).MatchString(value) {
- errors = append(errors, fmt.Errorf(
- "%q cannot contain two consecutive hyphens", k))
- }
- if regexp.MustCompile(`-$`).MatchString(value) {
- errors = append(errors, fmt.Errorf(
- "%q cannot end with a hyphen", k))
- }
- return
-}
-
func expandESClusterConfig(m map[string]interface{}) *elasticsearch.ElasticsearchClusterConfig {
config := elasticsearch.ElasticsearchClusterConfig{}
diff --git a/builtin/providers/aws/validators.go b/builtin/providers/aws/validators.go
new file mode 100644
index 0000000000..1e34451910
--- /dev/null
+++ b/builtin/providers/aws/validators.go
@@ -0,0 +1,114 @@
+package aws
+
+import (
+ "fmt"
+ "regexp"
+ "time"
+)
+
+func validateRdsId(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "only lowercase alphanumeric characters and hyphens allowed in %q", k))
+ }
+ if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "first character of %q must be a letter", k))
+ }
+ if regexp.MustCompile(`--`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot contain two consecutive hyphens", k))
+ }
+ if regexp.MustCompile(`-$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot end with a hyphen", k))
+ }
+ return
+}
+
+func validateASGScheduleTimestamp(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ _, err := time.Parse(awsAutoscalingScheduleTimeLayout, value)
+ if err != nil {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be parsed as iso8601 Timestamp Format", value))
+ }
+
+ return
+}
+
+// validateTagFilters confirms the "value" component of a tag filter is one of
+// AWS's three allowed types.
+func validateTagFilters(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if value != "KEY_ONLY" && value != "VALUE_ONLY" && value != "KEY_AND_VALUE" {
+ errors = append(errors, fmt.Errorf(
+ "%q must be one of \"KEY_ONLY\", \"VALUE_ONLY\", or \"KEY_AND_VALUE\"", k))
+ }
+ return
+}
+
+func validateDbParamGroupName(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "only lowercase alphanumeric characters and hyphens allowed in %q", k))
+ }
+ if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "first character of %q must be a letter", k))
+ }
+ if regexp.MustCompile(`--`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot contain two consecutive hyphens", k))
+ }
+ if regexp.MustCompile(`-$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot end with a hyphen", k))
+ }
+ if len(value) > 255 {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be greater than 255 characters", k))
+ }
+ return
+
+}
+
+func validateStreamViewType(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ viewTypes := map[string]bool{
+ "KEYS_ONLY": true,
+ "NEW_IMAGE": true,
+ "OLD_IMAGE": true,
+ "NEW_AND_OLD_IMAGES": true,
+ }
+
+ if !viewTypes[value] {
+ errors = append(errors, fmt.Errorf("%q must be a valid DynamoDB StreamViewType", k))
+ }
+ return
+}
+
+func validateElbName(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "only alphanumeric characters and hyphens allowed in %q: %q",
+ k, value))
+ }
+ if len(value) > 32 {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be longer than 32 characters: %q", k, value))
+ }
+ if regexp.MustCompile(`^-`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot begin with a hyphen: %q", k, value))
+ }
+ if regexp.MustCompile(`-$`).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot end with a hyphen: %q", k, value))
+ }
+ return
+
+}
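All of the validators collected into the new validators.go share the `func(v interface{}, k string) ([]string, []error)` shape used by helper/schema validation hooks, so they can be exercised in isolation exactly as the provider's unit tests do. A minimal sketch, assuming it lives alongside validators.go in package aws:

```go
package aws

import "fmt"

// Illustrative only: call a validator directly with a candidate value and the
// attribute name; a non-empty errors slice means the value would be rejected.
func exampleValidateElbName() {
	for _, name := range []string{"web-elb", "-bad-name-", "this-name-is-far-too-long-for-an-elb-resource"} {
		ws, errs := validateElbName(name, "name")
		fmt.Printf("%q -> %d warnings, %d errors\n", name, len(ws), len(errs))
	}
}
```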
From d853d2cc93ef3a884cc6f442875fd55e4d1be392 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Wed, 23 Dec 2015 16:07:39 +0100
Subject: [PATCH 359/664] aws: Add validation for ECR repository name
---
builtin/providers/aws/validators.go | 22 ++++++++++++
builtin/providers/aws/validators_test.go | 45 ++++++++++++++++++++++++
2 files changed, 67 insertions(+)
create mode 100644 builtin/providers/aws/validators_test.go
diff --git a/builtin/providers/aws/validators.go b/builtin/providers/aws/validators.go
index 1e34451910..ede6b36dd7 100644
--- a/builtin/providers/aws/validators.go
+++ b/builtin/providers/aws/validators.go
@@ -112,3 +112,25 @@ func validateElbName(v interface{}, k string) (ws []string, errors []error) {
return
}
+
+func validateEcrRepositoryName(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if len(value) < 2 {
+ errors = append(errors, fmt.Errorf(
+ "%q must be at least 2 characters long: %q", k, value))
+ }
+ if len(value) > 256 {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be longer than 256 characters: %q", k, value))
+ }
+
+ // http://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_CreateRepository.html
+ pattern := `^(?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*$`
+ if !regexp.MustCompile(pattern).MatchString(value) {
+ errors = append(errors, fmt.Errorf(
+ "%q doesn't comply with restrictions (%q): %q",
+ k, pattern, value))
+ }
+
+ return
+}
diff --git a/builtin/providers/aws/validators_test.go b/builtin/providers/aws/validators_test.go
new file mode 100644
index 0000000000..0b2ee011ea
--- /dev/null
+++ b/builtin/providers/aws/validators_test.go
@@ -0,0 +1,45 @@
+package aws
+
+import (
+ "testing"
+)
+
+func TestValidateEcrRepositoryName(t *testing.T) {
+ validNames := []string{
+ "nginx-web-app",
+ "project-a/nginx-web-app",
+ "domain.ltd/nginx-web-app",
+ "3chosome-thing.com/01different-pattern",
+ "0123456789/999999999",
+ "double/forward/slash",
+ "000000000000000",
+ }
+ for _, v := range validNames {
+ _, errors := validateEcrRepositoryName(v, "name")
+ if len(errors) != 0 {
+ t.Fatalf("%q should be a valid ECR repository name: %q", v, errors)
+ }
+ }
+
+ invalidNames := []string{
+ // length > 256
+ "3cho_some-thing.com/01different.-_pattern01different.-_pattern01diff" +
+ "erent.-_pattern01different.-_pattern01different.-_pattern01different" +
+ ".-_pattern01different.-_pattern01different.-_pattern01different.-_pa" +
+ "ttern01different.-_pattern01different.-_pattern234567",
+ // length < 2
+ "i",
+ "special@character",
+ "different+special=character",
+ "double//slash",
+ "double..dot",
+ "/slash-at-the-beginning",
+ "slash-at-the-end/",
+ }
+ for _, v := range invalidNames {
+ _, errors := validateEcrRepositoryName(v, "name")
+ if len(errors) == 0 {
+ t.Fatalf("%q should be an invalid ECR repository name", v)
+ }
+ }
+}
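For context, a validator like this is normally attached to a schema attribute through helper/schema's `ValidateFunc` hook, whose signature these functions match. The ECR resource's schema is not part of this patch, so the wiring below is a hedged sketch rather than the actual resource definition:

```go
package aws

import "github.com/hashicorp/terraform/helper/schema"

// Illustrative wiring only; the attribute layout is an assumption, not copied
// from resource_aws_ecr_repository.go.
func ecrRepositoryNameSchema() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeString,
		Required:     true,
		ForceNew:     true,
		ValidateFunc: validateEcrRepositoryName,
	}
}
```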
From 00da7173026ebb073fc477d9ce42323e4cfd0a43 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Wed, 23 Dec 2015 16:19:28 +0100
Subject: [PATCH 360/664] docs/aws: Whitespaces removed
---
website/source/layouts/aws.erb | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb
index 53b7d37c9f..d949d6bc16 100644
--- a/website/source/layouts/aws.erb
+++ b/website/source/layouts/aws.erb
@@ -39,7 +39,7 @@
>
aws_cloudwatch_metric_alarm
-
+
@@ -465,7 +465,7 @@
>
aws_route53_zone_association
-
+
From ae7dcfcf1ba80d8ea2abaa3161b47fc21c8fa91f Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Wed, 23 Dec 2015 16:21:22 +0100
Subject: [PATCH 361/664] docs/aws: Fix highlighting of ECR in sidebar
---
website/source/layouts/aws.erb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb
index d949d6bc16..e187361937 100644
--- a/website/source/layouts/aws.erb
+++ b/website/source/layouts/aws.erb
@@ -177,7 +177,7 @@
- >
+ >
ECS Resources
From 1eb129a99b894a9ccdc8736fa4436cdfe191db95 Mon Sep 17 00:00:00 2001
From: Nashwan Azhari
Date: Wed, 16 Dec 2015 17:37:50 +0200
Subject: [PATCH 362/664] provider/azure: added local network gateway resource
---
builtin/providers/azurerm/provider.go | 5 +-
.../resource_arm_local_network_gateway.go | 158 ++++++++++++++++++
...resource_arm_local_network_gateway_test.go | 106 ++++++++++++
.../r/local_network_gateway.html.markdown | 48 ++++++
website/source/layouts/azurerm.erb | 9 +-
5 files changed, 322 insertions(+), 4 deletions(-)
create mode 100644 builtin/providers/azurerm/resource_arm_local_network_gateway.go
create mode 100644 builtin/providers/azurerm/resource_arm_local_network_gateway_test.go
create mode 100644 website/source/docs/providers/azurerm/r/local_network_gateway.html.markdown
diff --git a/builtin/providers/azurerm/provider.go b/builtin/providers/azurerm/provider.go
index 53c5c97a68..9f6476c53a 100644
--- a/builtin/providers/azurerm/provider.go
+++ b/builtin/providers/azurerm/provider.go
@@ -37,8 +37,9 @@ func Provider() terraform.ResourceProvider {
},
ResourcesMap: map[string]*schema.Resource{
- "azurerm_resource_group": resourceArmResourceGroup(),
- "azurerm_virtual_network": resourceArmVirtualNetwork(),
+ "azurerm_resource_group": resourceArmResourceGroup(),
+ "azurerm_virtual_network": resourceArmVirtualNetwork(),
+ "azurerm_local_network_gateway": resourceArmLocalNetworkGateway(),
},
ConfigureFunc: providerConfigure,
diff --git a/builtin/providers/azurerm/resource_arm_local_network_gateway.go b/builtin/providers/azurerm/resource_arm_local_network_gateway.go
new file mode 100644
index 0000000000..a99a35d0d3
--- /dev/null
+++ b/builtin/providers/azurerm/resource_arm_local_network_gateway.go
@@ -0,0 +1,158 @@
+package azurerm
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/Azure/azure-sdk-for-go/arm/network"
+ "github.com/Azure/azure-sdk-for-go/core/http"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+// resourceArmLocalNetworkGateway returns the schema.Resource
+// associated to an Azure local network gateway.
+func resourceArmLocalNetworkGateway() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceArmLocalNetworkGatewayCreate,
+ Read: resourceArmLocalNetworkGatewayRead,
+ Update: resourceArmLocalNetworkGatewayUpdate,
+ Delete: resourceArmLocalNetworkGatewayDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "location": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ StateFunc: azureRMNormalizeLocation,
+ },
+
+ "resource_group_name": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "resource_guid": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+
+ "gateway_address": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "address_space": &schema.Schema{
+ Type: schema.TypeList,
+ Required: true,
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ },
+ }
+}
+
+// resourceArmLocalNetworkGatewayCreate goes ahead and creates the specified ARM local network gateway.
+func resourceArmLocalNetworkGatewayCreate(d *schema.ResourceData, meta interface{}) error {
+ lnetClient := meta.(*ArmClient).localNetConnClient
+
+ name := d.Get("name").(string)
+ location := d.Get("location").(string)
+ resGroup := d.Get("resource_group_name").(string)
+ ipAddress := d.Get("gateway_address").(string)
+
+ // NOTE: due to the including-but-different relationship between the ASM
+ // and ARM APIs, one may set the following local network gateway type to
+ // "Classic" and basically get an old ASM local network connection through
+ // the ARM API. This functionality is redundant with respect to the old
+ // ASM-based implementation which we already have, so we just use the
+ // new Resource Manager APIs here:
+ typ := "Resource Manager"
+
+ // fetch the 'address_space_prefix'es:
+ prefixes := []string{}
+ for _, pref := range d.Get("addres_space").([]interface{}) {
+ prefixes = append(prefixes, pref.(string))
+ }
+
+ // NOTE: result ignored here; review below...
+ resp, err := lnetClient.CreateOrUpdate(resGroup, name, network.LocalNetworkGateway{
+ Name: &name,
+ Location: &location,
+ Type: &typ,
+ Properties: &network.LocalNetworkGatewayPropertiesFormat{
+ LocalNetworkAddressSpace: &network.AddressSpace{
+ AddressPrefixes: &prefixes,
+ },
+ GatewayIPAddress: &ipAddress,
+ },
+ })
+ if err != nil {
+ return fmt.Errorf("Error reading the state of Azure ARM Local Network Gateway '%s': %s", name, err)
+ }
+
+ // NOTE: we either call read here or basically repeat the reading process
+ // with the ignored network.LocalNetworkGateway result of the above:
+ d.SetId(*resp.ID)
+ return resourceArmLocalNetworkGatewayRead(d, meta)
+}
+
+// resourceArmLocalNetworkGatewayRead goes ahead and reads the state of the corresponding ARM local network gateway.
+func resourceArmLocalNetworkGatewayRead(d *schema.ResourceData, meta interface{}) error {
+ lnetClient := meta.(*ArmClient).localNetConnClient
+
+ name := d.Get("name").(string)
+ resGroup := d.Get("resource_group_name").(string)
+
+ log.Printf("[INFO] Sending GET request to Azure ARM for local network gateway '%s'.", name)
+ lnet, err := lnetClient.Get(resGroup, name)
+ if lnet.StatusCode == http.StatusNotFound {
+ // it means that the resource has been deleted in the meantime...
+ d.SetId("")
+ return nil
+ }
+ if err != nil {
+ return fmt.Errorf("Error reading the state of Azure ARM local network gateway '%s': %s", name, err)
+ }
+
+ d.Set("resource_guid", *lnet.Properties.ResourceGUID)
+ d.Set("gateway_address", *lnet.Properties.GatewayIPAddress)
+
+ prefs := []string{}
+ if ps := *lnet.Properties.LocalNetworkAddressSpace.AddressPrefixes; ps != nil {
+ prefs = ps
+ }
+ d.Set("address_space", prefs)
+
+ return nil
+}
+
+// resourceArmLocalNetworkGatewayUpdate goes ahead and updates the corresponding ARM local network gateway.
+func resourceArmLocalNetworkGatewayUpdate(d *schema.ResourceData, meta interface{}) error {
+ // NOTE: considering the idempotency, we can safely call create again on
+ // update. This has been written out in order to ensure clarity,
+ return resourceArmLocalNetworkGatewayCreate(d, meta)
+}
+
+// resourceArmLocalNetworkGatewayDelete deletes the specified ARM local network gateway.
+func resourceArmLocalNetworkGatewayDelete(d *schema.ResourceData, meta interface{}) error {
+ lnetClient := meta.(*ArmClient).localNetConnClient
+
+ name := d.Get("name").(string)
+ resGroup := d.Get("resource_group_name").(string)
+
+ log.Printf("[INFO] Sending Azure ARM delete request for local network gateway '%s'.", name)
+ _, err := lnetClient.Delete(resGroup, name)
+ if err != nil {
+ return fmt.Errorf("Error issuing Azure ARM delete request of local network gateway '%s': %s", name, err)
+ }
+
+ return nil
+}
diff --git a/builtin/providers/azurerm/resource_arm_local_network_gateway_test.go b/builtin/providers/azurerm/resource_arm_local_network_gateway_test.go
new file mode 100644
index 0000000000..5a27240b01
--- /dev/null
+++ b/builtin/providers/azurerm/resource_arm_local_network_gateway_test.go
@@ -0,0 +1,106 @@
+package azurerm
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/Azure/azure-sdk-for-go/core/http"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAzureRMLocalNetworkGateway_basic(t *testing.T) {
+ name := "azurerm_local_network_gateway.test"
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMLocalNetworkGatewayDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureRMLocalNetworkGatewayConfig_basic,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMLocalNetworkGatewayExists(name),
+ resource.TestCheckResourceAttr(name, "gateway_address", "127.0.0.1"),
+ resource.TestCheckResourceAttr(name, "address_space.0", "127.0.0.0/8"),
+ ),
+ },
+ },
+ })
+}
+
+// testCheckAzureRMLocalNetworkGatewayExists returns the resource.TestCheckFunc
+// which checks whether or not the expected local network gateway exists both
+// in the schema, and on Azure.
+func testCheckAzureRMLocalNetworkGatewayExists(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ // first check within the schema for the local network gateway:
+ res, ok := s.RootModule().Resources[name]
+ if !ok {
+ return fmt.Errorf("Local network gateway '%s' not found.", name)
+ }
+
+ // then, extranct the name and the resource group:
+ localNetName := res.Primary.Attributes["name"]
+ resGrp, hasResGrp := res.Primary.Attributes["resource_group_name"]
+ if !hasResGrp {
+ return fmt.Errorf("Local network gateway '%s' has no resource group set.", name)
+ }
+
+ // and finally, check that it exists on Azure:
+ lnetClient := testAccProvider.Meta().(*ArmClient).localNetConnClient
+
+ resp, err := lnetClient.Get(resGrp, name)
+ if resp.StatusCode == http.StatusNotFound {
+ return fmt.Errorf("Local network gateway '%s' (resource group '%s') does not exist on Azure.", localNetName, resGrp)
+ }
+
+ if err != nil {
+ return fmt.Errorf("Error reading the state of local network gateway '%s'.", localNetName)
+ }
+
+ return nil
+ }
+}
+
+// testCheckAzureRMLocalNetworkGatewayDestroy is the resurce.TestCheckFunc
+// which checks whether or not the expected local network gateway still
+// exists on Azure.
+func testCheckAzureRMLocalNetworkGatewayDestroy(s *terraform.State) error {
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "azurerm_local_network_gateway" {
+ continue
+ }
+
+ name := rs.Primary.Attributes["name"]
+ resourceGroup := rs.Primary.Attributes["resource_group_name"]
+
+ lnetClient := testAccProvider.Meta().(*ArmClient).localNetConnClient
+ resp, err := lnetClient.Get(resourceGroup, name)
+
+ if err != nil {
+ return nil
+ }
+
+ if resp.StatusCode != http.StatusNotFound {
+ return fmt.Errorf("Local network gateway still exists:\n%#v", resp.Properties)
+ }
+ }
+
+ return nil
+}
+
+var testAccAzureRMLocalNetworkGatewayConfig_basic = `
+resource "azurerm_resource_group" "test" {
+ name = "tftestingResourceGroup"
+ location = "West US"
+}
+
+resource "azurerm_local_network_gateway" "test" {
+ name = "tftestingLocalNetworkGateway"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ gateway_address = "127.0.0.1"
+ address_space = ["127.0.0.0/8"]
+}
+`
diff --git a/website/source/docs/providers/azurerm/r/local_network_gateway.html.markdown b/website/source/docs/providers/azurerm/r/local_network_gateway.html.markdown
new file mode 100644
index 0000000000..1f2bbe0f98
--- /dev/null
+++ b/website/source/docs/providers/azurerm/r/local_network_gateway.html.markdown
@@ -0,0 +1,48 @@
+---
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_local_network_gateway"
+sidebar_current: "docs-azurerm-resource-local-network-gateway"
+description: |-
+ Creates a new local network gateway connection over which specific connections can be configured.
+---
+
+# azurerm\_local\_network\_gateway
+
+Creates a new local network gateway connection over which specific connections can be configured.
+
+## Example Usage
+
+```
+resource "azurerm_local_network_gateway" "home" {
+ name = "backHome"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ location = "${azurerm_resource_group.test.location}"
+ gateway_address = "12.13.14.15"
+ address_space = ["10.0.0.0/16"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the local network gateway. Changing this
+ forces a new resource to be created.
+
+* `resource_group_name` - (Required) The name of the resource group in which to
+ create the local network gateway.
+
+* `location` - (Required) The location/region where the local network gateway is
+ created. Changing this forces a new resource to be created.
+
+* `gateway_address` - (Required) The IP address of the gateway to which to
+ connect.
+
+* `address_space` - (Required) The list of string CIDRs representing the
+ address spaces the gateway exposes.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The local network gateway unique ID within Azure.
diff --git a/website/source/layouts/azurerm.erb b/website/source/layouts/azurerm.erb
index f52a1bce23..fe00f7939d 100644
--- a/website/source/layouts/azurerm.erb
+++ b/website/source/layouts/azurerm.erb
@@ -13,13 +13,18 @@
>
Resources
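One detail worth calling out in the create function above: `address_space` is a schema.TypeList of strings, and such lists come back from ResourceData as `[]interface{}`, so each element has to be asserted to `string` before it can be handed to the Azure SDK. A distilled sketch of that conversion (the helper name is illustrative, not part of the patch):

```go
package azurerm

// expandAddressSpace converts the raw []interface{} a TypeList yields into
// the []string the SDK expects -- the same loop the create function uses.
func expandAddressSpace(raw []interface{}) []string {
	prefixes := make([]string, 0, len(raw))
	for _, v := range raw {
		prefixes = append(prefixes, v.(string))
	}
	return prefixes
}
```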
From 008dc970b63e5a5d274f5efb44a1a014db11868f Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Thu, 17 Dec 2015 11:22:17 -0500
Subject: [PATCH 363/664] provider/azurerm: Fix up network gateway tests
This diff represents the changes necessary to make local network
gateway tests pass:
- Parse the resource ID instead of relying on attributes (see the sketch after this list)
- Remove unnecessary logging (which is handled via the autorest wrapper)
- Resource GUID is removed - if this is actually required for anything
 we may need to find a way to suppress it during apply, as we get
spurious diffs in plans otherwise.
- Various typos fixed.
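A minimal sketch of that ID-parsing approach, following the calls visible in the diff below (`parseAzureResourceID`, `id.Path`, `id.ResourceGroup`); the wrapper function itself is illustrative only:

```go
package azurerm

import "github.com/hashicorp/terraform/helper/schema"

// gatewayFromState recovers the gateway name and resource group from the
// resource ID stored in state instead of trusting separate attributes.
func gatewayFromState(d *schema.ResourceData) (name, resGroup string, err error) {
	id, err := parseAzureResourceID(d.Id())
	if err != nil {
		return "", "", err
	}
	return id.Path["localNetworkGateways"], id.ResourceGroup, nil
}
```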
---
.../resource_arm_local_network_gateway.go | 76 +++++++------------
...resource_arm_local_network_gateway_test.go | 38 +++++-----
2 files changed, 47 insertions(+), 67 deletions(-)
diff --git a/builtin/providers/azurerm/resource_arm_local_network_gateway.go b/builtin/providers/azurerm/resource_arm_local_network_gateway.go
index a99a35d0d3..ae91d665fc 100644
--- a/builtin/providers/azurerm/resource_arm_local_network_gateway.go
+++ b/builtin/providers/azurerm/resource_arm_local_network_gateway.go
@@ -2,20 +2,17 @@ package azurerm
import (
"fmt"
- "log"
"github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/Azure/azure-sdk-for-go/core/http"
"github.com/hashicorp/terraform/helper/schema"
)
-// resourceArmLocalNetworkGateway returns the schema.Resource
-// associated to an Azure local network gateway.
func resourceArmLocalNetworkGateway() *schema.Resource {
return &schema.Resource{
Create: resourceArmLocalNetworkGatewayCreate,
Read: resourceArmLocalNetworkGatewayRead,
- Update: resourceArmLocalNetworkGatewayUpdate,
+ Update: resourceArmLocalNetworkGatewayCreate,
Delete: resourceArmLocalNetworkGatewayDelete,
Schema: map[string]*schema.Schema{
@@ -38,11 +35,6 @@ func resourceArmLocalNetworkGateway() *schema.Resource {
ForceNew: true,
},
- "resource_guid": &schema.Schema{
- Type: schema.TypeString,
- Optional: true,
- },
-
"gateway_address": &schema.Schema{
Type: schema.TypeString,
Required: true,
@@ -59,7 +51,6 @@ func resourceArmLocalNetworkGateway() *schema.Resource {
}
}
-// resourceArmLocalNetworkGatewayCreate goes ahead and creates the specified ARM local network gateway.
func resourceArmLocalNetworkGatewayCreate(d *schema.ResourceData, meta interface{}) error {
lnetClient := meta.(*ArmClient).localNetConnClient
@@ -68,25 +59,15 @@ func resourceArmLocalNetworkGatewayCreate(d *schema.ResourceData, meta interface
resGroup := d.Get("resource_group_name").(string)
ipAddress := d.Get("gateway_address").(string)
- // NOTE: due to the including-but-different relationship between the ASM
- // and ARM APIs, one may set the following local network gateway type to
- // "Classic" and basically get an old ASM local network connection through
- // the ARM API. This functionality is redundant with respect to the old
- // ASM-based implementation which we already have, so we just use the
- // new Resource Manager APIs here:
- typ := "Resource Manager"
-
- // fetch the 'address_space_prefix'es:
+ // fetch the 'address_space' prefixes:
prefixes := []string{}
- for _, pref := range d.Get("addres_space").([]interface{}) {
+ for _, pref := range d.Get("address_space").([]interface{}) {
prefixes = append(prefixes, pref.(string))
}
- // NOTE: result ignored here; review below...
resp, err := lnetClient.CreateOrUpdate(resGroup, name, network.LocalNetworkGateway{
Name: &name,
Location: &location,
- Type: &typ,
Properties: &network.LocalNetworkGatewayPropertiesFormat{
LocalNetworkAddressSpace: &network.AddressSpace{
AddressPrefixes: &prefixes,
@@ -95,12 +76,11 @@ func resourceArmLocalNetworkGatewayCreate(d *schema.ResourceData, meta interface
},
})
if err != nil {
- return fmt.Errorf("Error reading the state of Azure ARM Local Network Gateway '%s': %s", name, err)
+ return fmt.Errorf("Error creating Azure ARM Local Network Gateway '%s': %s", name, err)
}
- // NOTE: we either call read here or basically repeat the reading process
- // with the ignored network.LocalNetworkGateway result of the above:
d.SetId(*resp.ID)
+
return resourceArmLocalNetworkGatewayRead(d, meta)
}
@@ -108,25 +88,27 @@ func resourceArmLocalNetworkGatewayCreate(d *schema.ResourceData, meta interface
func resourceArmLocalNetworkGatewayRead(d *schema.ResourceData, meta interface{}) error {
lnetClient := meta.(*ArmClient).localNetConnClient
- name := d.Get("name").(string)
- resGroup := d.Get("resource_group_name").(string)
-
- log.Printf("[INFO] Sending GET request to Azure ARM for local network gateway '%s'.", name)
- lnet, err := lnetClient.Get(resGroup, name)
- if lnet.StatusCode == http.StatusNotFound {
- // it means that the resource has been deleted in the meantime...
- d.SetId("")
- return nil
- }
+ id, err := parseAzureResourceID(d.Id())
if err != nil {
+ return err
+ }
+ name := id.Path["localNetworkGateways"]
+ resGroup := id.ResourceGroup
+
+ resp, err := lnetClient.Get(resGroup, name)
+ if err != nil {
+ if resp.StatusCode == http.StatusNotFound {
+ d.SetId("")
+ return nil
+ }
+
return fmt.Errorf("Error reading the state of Azure ARM local network gateway '%s': %s", name, err)
}
- d.Set("resource_guid", *lnet.Properties.ResourceGUID)
- d.Set("gateway_address", *lnet.Properties.GatewayIPAddress)
+ d.Set("gateway_address", resp.Properties.GatewayIPAddress)
prefs := []string{}
- if ps := *lnet.Properties.LocalNetworkAddressSpace.AddressPrefixes; ps != nil {
+ if ps := *resp.Properties.LocalNetworkAddressSpace.AddressPrefixes; ps != nil {
prefs = ps
}
d.Set("address_space", prefs)
@@ -134,22 +116,18 @@ func resourceArmLocalNetworkGatewayRead(d *schema.ResourceData, meta interface{}
return nil
}
-// resourceArmLocalNetworkGatewayUpdate goes ahead and updates the corresponding ARM local network gateway.
-func resourceArmLocalNetworkGatewayUpdate(d *schema.ResourceData, meta interface{}) error {
- // NOTE: considering the idempotency, we can safely call create again on
- // update. This has been written out in order to ensure clarity,
- return resourceArmLocalNetworkGatewayCreate(d, meta)
-}
-
// resourceArmLocalNetworkGatewayDelete deletes the specified ARM local network gateway.
func resourceArmLocalNetworkGatewayDelete(d *schema.ResourceData, meta interface{}) error {
lnetClient := meta.(*ArmClient).localNetConnClient
- name := d.Get("name").(string)
- resGroup := d.Get("resource_group_name").(string)
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+ name := id.Path["localNetworkGateways"]
+ resGroup := id.ResourceGroup
- log.Printf("[INFO] Sending Azure ARM delete request for local network gateway '%s'.", name)
- _, err := lnetClient.Delete(resGroup, name)
+ _, err = lnetClient.Delete(resGroup, name)
if err != nil {
return fmt.Errorf("Error issuing Azure ARM delete request of local network gateway '%s': %s", name, err)
}
diff --git a/builtin/providers/azurerm/resource_arm_local_network_gateway_test.go b/builtin/providers/azurerm/resource_arm_local_network_gateway_test.go
index 5a27240b01..889a57e6eb 100644
--- a/builtin/providers/azurerm/resource_arm_local_network_gateway_test.go
+++ b/builtin/providers/azurerm/resource_arm_local_network_gateway_test.go
@@ -40,22 +40,23 @@ func testCheckAzureRMLocalNetworkGatewayExists(name string) resource.TestCheckFu
return fmt.Errorf("Local network gateway '%s' not found.", name)
}
- // then, extranct the name and the resource group:
- localNetName := res.Primary.Attributes["name"]
- resGrp, hasResGrp := res.Primary.Attributes["resource_group_name"]
- if !hasResGrp {
- return fmt.Errorf("Local network gateway '%s' has no resource group set.", name)
+ // then, extract the name and the resource group:
+ id, err := parseAzureResourceID(res.Primary.ID)
+ if err != nil {
+ return err
}
+ localNetName := id.Path["localNetworkGateways"]
+ resGrp := id.ResourceGroup
// and finally, check that it exists on Azure:
lnetClient := testAccProvider.Meta().(*ArmClient).localNetConnClient
- resp, err := lnetClient.Get(resGrp, name)
- if resp.StatusCode == http.StatusNotFound {
- return fmt.Errorf("Local network gateway '%s' (resource group '%s') does not exist on Azure.", localNetName, resGrp)
- }
-
+ resp, err := lnetClient.Get(resGrp, localNetName)
if err != nil {
+ if resp.StatusCode == http.StatusNotFound {
+ return fmt.Errorf("Local network gateway '%s' (resource group '%s') does not exist on Azure.", localNetName, resGrp)
+ }
+
return fmt.Errorf("Error reading the state of local network gateway '%s'.", localNetName)
}
@@ -63,20 +64,21 @@ func testCheckAzureRMLocalNetworkGatewayExists(name string) resource.TestCheckFu
}
}
-// testCheckAzureRMLocalNetworkGatewayDestroy is the resurce.TestCheckFunc
-// which checks whether or not the expected local network gateway still
-// exists on Azure.
func testCheckAzureRMLocalNetworkGatewayDestroy(s *terraform.State) error {
- for _, rs := range s.RootModule().Resources {
- if rs.Type != "azurerm_local_network_gateway" {
+ for _, res := range s.RootModule().Resources {
+ if res.Type != "azurerm_local_network_gateway" {
continue
}
- name := rs.Primary.Attributes["name"]
- resourceGroup := rs.Primary.Attributes["resource_group_name"]
+ id, err := parseAzureResourceID(res.Primary.ID)
+ if err != nil {
+ return err
+ }
+ localNetName := id.Path["localNetworkGateways"]
+ resGrp := id.ResourceGroup
lnetClient := testAccProvider.Meta().(*ArmClient).localNetConnClient
- resp, err := lnetClient.Get(resourceGroup, name)
+ resp, err := lnetClient.Get(resGrp, localNetName)
if err != nil {
return nil
From b6788479dec40a7c6dafde8932878c4d1bcb9f8a Mon Sep 17 00:00:00 2001
From: John Engelman
Date: Tue, 26 May 2015 08:35:19 -0500
Subject: [PATCH 364/664] Add Terraform/Remote State documentation to
provider/resource section.
Issue #2074
---
website/source/assets/stylesheets/_docs.scss | 1 +
.../providers/terraform/index.html.markdown | 38 +++++++++++++++++
.../terraform/r/remote_state.html.md | 42 +++++++++++++++++++
website/source/docs/state/remote.html.md | 20 +--------
website/source/layouts/docs.erb | 4 ++
website/source/layouts/terraform.erb | 26 ++++++++++++
6 files changed, 112 insertions(+), 19 deletions(-)
create mode 100644 website/source/docs/providers/terraform/index.html.markdown
create mode 100644 website/source/docs/providers/terraform/r/remote_state.html.md
create mode 100644 website/source/layouts/terraform.erb
diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss
index 0143966b4f..9a2a5052e6 100755
--- a/website/source/assets/stylesheets/_docs.scss
+++ b/website/source/assets/stylesheets/_docs.scss
@@ -35,6 +35,7 @@ body.layout-vsphere,
body.layout-docs,
body.layout-downloads,
body.layout-inner,
+body.layout-terraform,
body.layout-intro{
background: $light-black image-url('sidebar-wire.png') left 62px no-repeat;
diff --git a/website/source/docs/providers/terraform/index.html.markdown b/website/source/docs/providers/terraform/index.html.markdown
new file mode 100644
index 0000000000..e5ccbff59e
--- /dev/null
+++ b/website/source/docs/providers/terraform/index.html.markdown
@@ -0,0 +1,38 @@
+---
+layout: "terraform"
+page_title: "Provider: Terraform"
+sidebar_current: "docs-terraform-index"
+description: |-
+ The Terraform provider is used to access meta data from shared infrastructure.
+---
+
+# Terraform Provider
+
+The terraform provider exposes resources to access state meta data
+for Terraform outputs from shared infrastructure.
+
+The terraform provider is what we call a _logical provider_. This has no
+impact on how it behaves, but conceptually it is important to understand.
+The terraform provider doesn't manage any _physical_ resources; it isn't
+creating servers, writing files, etc. It is used to access the outputs
+of other Terraform states to be used as inputs for resources.
+Examples will explain this best.
+
+Use the navigation to the left to read about the available resources.
+
+## Example Usage
+
+```
+# Shared infrastructure state stored in Atlas
+resource "terraform_remote_state" "vpc" {
+ backend = "atlas"
+ config {
+ path = "hashicorp/vpc-prod"
+ }
+}
+
+resource "aws_instance" "foo" {
+ # ...
+ subnet_id = "${terraform_remote_state.vpc.output.subnet_id}"
+}
+```
diff --git a/website/source/docs/providers/terraform/r/remote_state.html.md b/website/source/docs/providers/terraform/r/remote_state.html.md
new file mode 100644
index 0000000000..b02ddfee99
--- /dev/null
+++ b/website/source/docs/providers/terraform/r/remote_state.html.md
@@ -0,0 +1,42 @@
+---
+layout: "terraform"
+page_title: "Terraform: terraform_remote_state"
+sidebar_current: "docs-terraform-resource-remote-state"
+description: |-
+ Accesses state meta data from a remote backend.
+---
+
+# remote\_state
+
+Retrieves state meta data from a remote backend
+
+## Example Usage
+
+```
+resource "terraform_remote_state" "vpc" {
+ backend = "atlas"
+ config {
+ path = "hashicorp/vpc-prod"
+ }
+}
+
+resource "aws_instance" "foo" {
+ # ...
+ subnet_id = "${terraform_remote_state.vpc.output.subnet_id}"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `backend` - (Required) The remote backend to use.
+* `config` - (Optional) The configuration of the remote backend.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `backend` - See Argument Reference above.
+* `config` - See Argument Reference above.
+* `output` - The values of the configured `outputs` for the root module referenced by the remote state.
diff --git a/website/source/docs/state/remote.html.md b/website/source/docs/state/remote.html.md
index 3ab01fa79b..40f4e32be4 100644
--- a/website/source/docs/state/remote.html.md
+++ b/website/source/docs/state/remote.html.md
@@ -41,24 +41,7 @@ teams to run their own infrastructure. As a more specific example with AWS:
you can expose things such as VPC IDs, subnets, NAT instance IDs, etc. through
remote state and have other Terraform states consume that.
-An example is shown below:
-
-```
-resource "terraform_remote_state" "vpc" {
- backend = "atlas"
- config {
- name = "hashicorp/vpc-prod"
- }
-}
-
-resource "aws_instance" "foo" {
- # ...
- subnet_id = "${terraform_remote_state.vpc.output.subnet_id}"
-}
-```
-
-This makes teamwork and componentization of infrastructure frictionless
-within your infrastructure.
+For example usage see the [terraform_remote_state](/docs/providers/terraform/r/remote_state.html) resource.
## Locking and Teamwork
@@ -73,4 +56,3 @@ locking for you.
In the future, we'd like to extend the remote state system to allow some
minimal locking functionality, but it is a difficult problem without a
central system that we currently aren't focused on solving.
-
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb
index 73c8aad088..5e7ec4c3a0 100644
--- a/website/source/layouts/docs.erb
+++ b/website/source/layouts/docs.erb
@@ -213,6 +213,10 @@
Template
+ >
+ Terraform
+
+
>
TLS
diff --git a/website/source/layouts/terraform.erb b/website/source/layouts/terraform.erb
new file mode 100644
index 0000000000..c3ff37f8ec
--- /dev/null
+++ b/website/source/layouts/terraform.erb
@@ -0,0 +1,26 @@
+<% wrap_layout :inner do %>
+ <% content_for :sidebar do %>
+
+ <% end %>
+
+ <%= yield %>
+<% end %>
From 59bfa636c07a38492e223e0669310178d93fe6e9 Mon Sep 17 00:00:00 2001
From: stack72
Date: Sun, 27 Dec 2015 21:38:45 +0000
Subject: [PATCH 365/664] Adding a link to the runtime options for the AWS
Lambda functions
---
.../source/docs/providers/aws/r/lambda_function.html.markdown | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/website/source/docs/providers/aws/r/lambda_function.html.markdown b/website/source/docs/providers/aws/r/lambda_function.html.markdown
index f9c1ea4a3f..c4e86a0a47 100644
--- a/website/source/docs/providers/aws/r/lambda_function.html.markdown
+++ b/website/source/docs/providers/aws/r/lambda_function.html.markdown
@@ -53,7 +53,7 @@ resource "aws_lambda_function" "test_lambda" {
* `role` - (Required) IAM role attached to the Lambda Function. This governs both who / what can invoke your Lambda Function, as well as what resources our Lambda Function has access to. See [Lambda Permission Model][4] for more details.
* `description` - (Optional) Description of what your Lambda Function does.
* `memory_size` - (Optional) Amount of memory in MB your Lambda Function can use at runtime. Defaults to `128`. See [Limits][5]
-* `runtime` - (Optional) Defaults to `nodejs`.
+* `runtime` - (Optional) Defaults to `nodejs`. See [Runtimes][6] for valid values.
* `timeout` - (Optional) The amount of time your Lambda Function has to run in seconds. Defaults to `3`. See [Limits][5]
## Attributes Reference
@@ -67,3 +67,4 @@ resource "aws_lambda_function" "test_lambda" {
[3]: http://docs.aws.amazon.com/lambda/latest/dg/walkthrough-custom-events-create-test-function.html
[4]: http://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html
[5]: http://docs.aws.amazon.com/lambda/latest/dg/limits.html
+[6]: https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html#API_CreateFunction_RequestBody
From b923e879bd66b88b05e4a17e028a12c65b495155 Mon Sep 17 00:00:00 2001
From: Daniel Bryant
Date: Mon, 28 Dec 2015 12:52:52 +0000
Subject: [PATCH 366/664] Correct README.md and add missing variable. Correct
README.md execution instructions var name and use 'key_name' var as intended
in 'aws_key_pair' resource
---
examples/aws-two-tier/README.md | 8 ++++++--
examples/aws-two-tier/main.tf | 2 +-
examples/aws-two-tier/variables.tf | 6 +++++-
3 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/examples/aws-two-tier/README.md b/examples/aws-two-tier/README.md
index dcdf7d1c43..3c0e54bc0b 100644
--- a/examples/aws-two-tier/README.md
+++ b/examples/aws-two-tier/README.md
@@ -10,6 +10,10 @@ getting your application onto the servers. However, you could do so either via
management tool, or by pre-baking configured AMIs with
[Packer](http://www.packer.io).
+This example will also create a new EC2 Key Pair in the specified AWS Region.
+The key name and path to the public key must be specified via the
+terraform command vars.
+
After you run `terraform apply` on this configuration, it will
automatically output the DNS address of the ELB. After your instance
registers, this should respond with the default nginx web page.
@@ -22,11 +26,11 @@ Run with a command like this:
```
terraform apply -var 'key_name={your_aws_key_name}' \
- -var 'key_path={location_of_your_key_in_your_local_machine}'`
+ -var 'public_key_path={location_of_your_key_in_your_local_machine}'
```
For example:
```
-terraform apply -var 'key_name=terraform' -var 'key_path=/Users/jsmith/.ssh/terraform.pem'
+terraform apply -var 'key_name=terraform' -var 'public_key_path=/Users/jsmith/.ssh/terraform.pub'
```
diff --git a/examples/aws-two-tier/main.tf b/examples/aws-two-tier/main.tf
index 8b98d979c7..ab61071492 100644
--- a/examples/aws-two-tier/main.tf
+++ b/examples/aws-two-tier/main.tf
@@ -100,7 +100,7 @@ resource "aws_elb" "web" {
}
resource "aws_key_pair" "auth" {
- key_name = "tf-aws-two-tier-example"
+ key_name = "${var.key_name}"
public_key = "${file(var.public_key_path)}"
}
diff --git a/examples/aws-two-tier/variables.tf b/examples/aws-two-tier/variables.tf
index 1321fcf1b4..a5d6adc64d 100644
--- a/examples/aws-two-tier/variables.tf
+++ b/examples/aws-two-tier/variables.tf
@@ -4,10 +4,14 @@ Path to the SSH public key to be used for authentication.
Ensure this keypair is added to your local SSH agent so provisioners can
connect.
-Example: ~/.ssh/id_rsa.pub
+Example: ~/.ssh/terraform.pub
DESCRIPTION
}
+variable "key_name" {
+ description = "Desired name of AWS key pair"
+}
+
variable "aws_region" {
description = "AWS region to launch servers."
default = "us-west-2"
From 52aea649e4e29e04f8fa2d9d4dc251f23f14d969 Mon Sep 17 00:00:00 2001
From: Justin Clayton
Date: Mon, 28 Dec 2015 11:25:46 -0800
Subject: [PATCH 367/664] Update lb_pool_v1.html.markdown
updating docs to match behavior of the code for admin_state_up
---
.../source/docs/providers/openstack/r/lb_pool_v1.html.markdown | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown
index 5ddbdf1af8..95a797ede3 100644
--- a/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown
+++ b/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown
@@ -68,7 +68,7 @@ new member.
* `port` - (Required) An integer representing the port on which the member is
hosted. Changing this creates a new member.
-* `admin_state_up` - (Optional) The administrative state of the member.
+* `admin_state_up` - (Required) The administrative state of the member.
Acceptable values are 'true' and 'false'. Changing this value updates the
state of the existing member.
From 9fb631a076b62f6ba57b7230c3c82d93dbd3a960 Mon Sep 17 00:00:00 2001
From: Kevin London
Date: Tue, 29 Dec 2015 15:23:38 -0800
Subject: [PATCH 368/664] Update Build documentation to use t2.micro
This just helps it stay consistent across the doc.
---
website/source/intro/getting-started/build.html.md | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/website/source/intro/getting-started/build.html.md b/website/source/intro/getting-started/build.html.md
index aa3c7c506d..f2c6c19ba8 100644
--- a/website/source/intro/getting-started/build.html.md
+++ b/website/source/intro/getting-started/build.html.md
@@ -60,7 +60,7 @@ provider "aws" {
resource "aws_instance" "example" {
ami = "ami-408c7f28"
- instance_type = "t1.micro"
+ instance_type = "t2.micro"
}
```
@@ -113,7 +113,7 @@ $ terraform plan
+ aws_instance.example
ami: "" => "ami-408c7f28"
availability_zone: "" => ""
- instance_type: "" => "t1.micro"
+ instance_type: "" => "t2.micro"
key_name: "" => ""
private_dns: "" => ""
private_ip: "" => ""
@@ -149,7 +149,7 @@ since Terraform waits for the EC2 instance to become available.
$ terraform apply
aws_instance.example: Creating...
ami: "" => "ami-408c7f28"
- instance_type: "" => "t1.micro"
+ instance_type: "" => "t2.micro"
Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
@@ -174,7 +174,7 @@ aws_instance.example:
id = i-e60900cd
ami = ami-408c7f28
availability_zone = us-east-1c
- instance_type = t1.micro
+ instance_type = t2.micro
key_name =
private_dns = domU-12-31-39-12-38-AB.compute-1.internal
private_ip = 10.200.59.89
From bf7220aa53a4837273d663343d7c454dff94153a Mon Sep 17 00:00:00 2001
From: Jordan
Date: Tue, 29 Dec 2015 17:21:58 -0700
Subject: [PATCH 369/664] Update outputs.html.md
Fixes small syntax error
---
website/source/intro/getting-started/outputs.html.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/intro/getting-started/outputs.html.md b/website/source/intro/getting-started/outputs.html.md
index 2d537097e1..d5a6ca1e72 100644
--- a/website/source/intro/getting-started/outputs.html.md
+++ b/website/source/intro/getting-started/outputs.html.md
@@ -35,7 +35,7 @@ output "ip" {
}
```
-This defines an output variables named "ip". The `value` field
+This defines an output variable named "ip". The `value` field
specifies what the value will be, and almost always contains
one or more interpolations, since the output data is typically
dynamic. In this case, we're outputting the
From 2a3c80461f825c383b2b7a642897ca9f4a9bf3e7 Mon Sep 17 00:00:00 2001
From: kozo yamagata
Date: Wed, 30 Dec 2015 16:58:23 +0900
Subject: [PATCH 370/664] Fix typo 'auto' => 'allow'
---
website/source/docs/providers/aws/r/db_instance.html.markdown | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/docs/providers/aws/r/db_instance.html.markdown b/website/source/docs/providers/aws/r/db_instance.html.markdown
index 4efed4ac6c..2c045773cc 100644
--- a/website/source/docs/providers/aws/r/db_instance.html.markdown
+++ b/website/source/docs/providers/aws/r/db_instance.html.markdown
@@ -98,7 +98,7 @@ database, and to use this value as the source database. This correlates to the
* `snapshot_identifier` - (Optional) Specifies whether or not to create this database from a snapshot. This correlates to the snapshot ID you'd find in the RDS console, e.g: rds:production-2015-06-26-06-05.
* `license_model` - (Optional, but required for some DB engines, i.e. Oracle SE1) License model information for this DB instance.
* `auto_minor_version_upgrade` - (Optional) Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window. Defaults to true.
-* `auto_major_version_upgrade` - (Optional) Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.
+* `allow_major_version_upgrade` - (Optional) Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.
~> **NOTE:** Removing the `replicate_source_db` attribute from an existing RDS
Replicate database managed by Terraform will promote the database to a fully
From a018195645f30d581f4ccffc4365f4f3a2ac0794 Mon Sep 17 00:00:00 2001
From: Jens Bissinger
Date: Wed, 30 Dec 2015 10:46:32 +0100
Subject: [PATCH 371/664] Remove recommendation to use
create_before_destroy-hook in autoscaling group
Only use the create_before_destroy-hook in launch configurations. The autoscaling group must not use the create_before_destroy-hook, because it can be updated (and not destroyed + re-created). Using the create_before_destroy-hook in autoscaling group also leads to unwanted cyclic dependencies.
---
.../docs/providers/aws/r/launch_configuration.html.markdown | 4 ----
1 file changed, 4 deletions(-)
diff --git a/website/source/docs/providers/aws/r/launch_configuration.html.markdown b/website/source/docs/providers/aws/r/launch_configuration.html.markdown
index dd7dd84fcb..3713923e8b 100644
--- a/website/source/docs/providers/aws/r/launch_configuration.html.markdown
+++ b/website/source/docs/providers/aws/r/launch_configuration.html.markdown
@@ -77,10 +77,6 @@ resource "aws_launch_configuration" "as_conf" {
resource "aws_autoscaling_group" "bar" {
name = "terraform-asg-example"
launch_configuration = "${aws_launch_configuration.as_conf.name}"
-
- lifecycle {
- create_before_destroy = true
- }
}
```
From fc39334b52ec1b3163eafbf85f3f6e861ed4606c Mon Sep 17 00:00:00 2001
From: Craig Marsden
Date: Wed, 30 Dec 2015 17:50:22 +0000
Subject: [PATCH 372/664] update docs to reflect that the allocation ID is
exported as the attribute 'id'
---
website/source/docs/providers/aws/r/eip.html.markdown | 1 +
1 file changed, 1 insertion(+)
diff --git a/website/source/docs/providers/aws/r/eip.html.markdown b/website/source/docs/providers/aws/r/eip.html.markdown
index dbc5410e2e..3447228ed8 100644
--- a/website/source/docs/providers/aws/r/eip.html.markdown
+++ b/website/source/docs/providers/aws/r/eip.html.markdown
@@ -36,6 +36,7 @@ more information.
The following attributes are exported:
+* `id` - Contains the EIP allocation ID.
* `private_ip` - Contains the private IP address (if in VPC).
* `public_ip` - Contains the public IP address.
* `instance` - Contains the ID of the attached instance.
From 2503f0b01d0531f5811d412d7cfa51d2409b153c Mon Sep 17 00:00:00 2001
From: Joe Topjian
Date: Wed, 30 Dec 2015 18:22:24 +0000
Subject: [PATCH 373/664] provider/openstack: Ensure valid Security Group Rule
attribute combination
This commit ensures that a valid combination of security group rule attributes
is set before creating the security group.
---
.../resource_openstack_compute_secgroup_v2.go | 43 +++++++++++++++++++
.../r/compute_secgroup_v2.html.markdown | 17 ++++----
2 files changed, 52 insertions(+), 8 deletions(-)
diff --git a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go
index e3d281b2e1..556208e92a 100644
--- a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go
+++ b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go
@@ -93,6 +93,12 @@ func resourceComputeSecGroupV2Create(d *schema.ResourceData, meta interface{}) e
return fmt.Errorf("Error creating OpenStack compute client: %s", err)
}
+ // Before creating the security group, make sure all rules are valid.
+ if err := checkSecGroupV2RulesForErrors(d); err != nil {
+ return err
+ }
+
+ // If all rules are valid, proceed with creating the security group.
createOpts := secgroups.CreateOpts{
Name: d.Get("name").(string),
Description: d.Get("description").(string),
@@ -106,6 +112,7 @@ func resourceComputeSecGroupV2Create(d *schema.ResourceData, meta interface{}) e
d.SetId(sg.ID)
+ // Now that the security group has been created, iterate through each rule and create it
createRuleOptsList := resourceSecGroupRulesV2(d)
for _, createRuleOpts := range createRuleOptsList {
_, err := secgroups.CreateRule(computeClient, createRuleOpts).Extract()
@@ -251,6 +258,42 @@ func resourceSecGroupRuleCreateOptsV2(d *schema.ResourceData, rawRule interface{
}
}
+func checkSecGroupV2RulesForErrors(d *schema.ResourceData) error {
+ rawRules := d.Get("rule").(*schema.Set).List()
+ for _, rawRule := range rawRules {
+ rawRuleMap := rawRule.(map[string]interface{})
+
+ // only one of cidr, from_group_id, or self can be set
+ cidr := rawRuleMap["cidr"].(string)
+ groupId := rawRuleMap["from_group_id"].(string)
+ self := rawRuleMap["self"].(bool)
+ errorMessage := fmt.Errorf("Only one of cidr, from_group_id, or self can be set.")
+
+ // if cidr is set, from_group_id and self cannot be set
+ if cidr != "" {
+ if groupId != "" || self {
+ return errorMessage
+ }
+ }
+
+ // if from_group_id is set, cidr and self cannot be set
+ if groupId != "" {
+ if cidr != "" || self {
+ return errorMessage
+ }
+ }
+
+ // if self is set, cidr and from_group_id cannot be set
+ if self {
+ if cidr != "" || groupId != "" {
+ return errorMessage
+ }
+ }
+ }
+
+ return nil
+}
+
func resourceSecGroupRuleV2(d *schema.ResourceData, rawRule interface{}) secgroups.Rule {
rawRuleMap := rawRule.(map[string]interface{})
return secgroups.Rule{
diff --git a/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown
index e7d88ead76..2005c9aea0 100644
--- a/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown
+++ b/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown
@@ -62,17 +62,18 @@ range to open. Changing this creates a new security group rule.
* `ip_protocol` - (Required) The protocol type that will be allowed. Changing
this creates a new security group rule.
-* `cidr` - (Optional) Required if `from_group_id` is empty. The IP range that
-will be the source of network traffic to the security group. Use 0.0.0.0./0
-to allow all IP addresses. Changing this creates a new security group rule.
+* `cidr` - (Optional) Required if `from_group_id` or `self` is empty. The IP range
+that will be the source of network traffic to the security group. Use 0.0.0.0/0
+to allow all IP addresses. Changing this creates a new security group rule. Cannot
+be combined with `from_group_id` or `self`.
-* `from_group_id` - (Optional) Required if `cidr` is empty. The ID of a group
-from which to forward traffic to the parent group. Changing
-this creates a new security group rule.
+* `from_group_id` - (Optional) Required if `cidr` or `self` is empty. The ID of a
+group from which to forward traffic to the parent group. Changing this creates a
+new security group rule. Cannot be combined with `cidr` or `self`.
* `self` - (Optional) Required if `cidr` and `from_group_id` is empty. If true,
-the security group itself will be added as a source to this ingress rule. `cidr`
-and `from_group_id` will be ignored if either are set while `self` is true.
+the security group itself will be added as a source to this ingress rule. Cannot
+be combined with `cidr` or `from_group_id`.
## Attributes Reference
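The rule the new check enforces can be stated compactly: `cidr`, `from_group_id` and `self` are mutually exclusive sources for a rule, so at most one of them may be set. A distilled restatement of that logic (not the provider's code verbatim):

```go
package openstack

import "fmt"

// onlyOneSourceSet mirrors the mutual-exclusion rule applied by
// checkSecGroupV2RulesForErrors for a single rule.
func onlyOneSourceSet(cidr, groupID string, self bool) error {
	set := 0
	if cidr != "" {
		set++
	}
	if groupID != "" {
		set++
	}
	if self {
		set++
	}
	if set > 1 {
		return fmt.Errorf("Only one of cidr, from_group_id, or self can be set.")
	}
	return nil
}
```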
From f5f49be019308bb296d9218faa12e024ef55b2c2 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Wed, 30 Dec 2015 14:10:16 -0500
Subject: [PATCH 374/664] provider/azure: Track upstream library changes
vmutils.ConfigureDeploymentFromVMImage has been changed to
vmutils.ConfigureDeploymentFromPublishedVMImage in the upstream library
- this allows us to build.
---
builtin/providers/azure/resource_azure_instance.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/builtin/providers/azure/resource_azure_instance.go b/builtin/providers/azure/resource_azure_instance.go
index c30b07ea41..097b210f54 100644
--- a/builtin/providers/azure/resource_azure_instance.go
+++ b/builtin/providers/azure/resource_azure_instance.go
@@ -695,7 +695,7 @@ func retrieveVMImageDetails(
}
configureForImage := func(role *virtualmachine.Role) error {
- return vmutils.ConfigureDeploymentFromVMImage(
+ return vmutils.ConfigureDeploymentFromPublishedVMImage(
role,
img.Name,
"",
From 1e1d78329e612ad368377b6c6a6a4079d99556af Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Wed, 30 Dec 2015 17:37:24 -0500
Subject: [PATCH 375/664] core: use !windows instead of a list of unixes
This allows building on a wider variety of unix-a-likes without needing
to list them all explicitly - Windows is the special case here!
---
config_unix.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/config_unix.go b/config_unix.go
index 69d76278af..4694d5114d 100644
--- a/config_unix.go
+++ b/config_unix.go
@@ -1,4 +1,4 @@
-// +build darwin freebsd linux netbsd openbsd
+// +build !windows
package main
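For readers unfamiliar with Go build constraints: the `!windows` tag means this file is compiled on every platform except Windows, and a sibling file guarded by the inverse tag supplies the Windows behaviour. A hypothetical sketch of such a counterpart (config_windows.go is not shown in this patch, and the body below is illustrative, not Terraform's actual implementation):

```go
// +build windows

package main

import (
	"os"
	"path/filepath"
)

// configDir is a hypothetical Windows-only default; exactly one of the two
// constrained files is compiled on any given platform.
func configDir() (string, error) {
	return filepath.Join(os.Getenv("APPDATA"), "terraform.d"), nil
}
```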
From 32ce8fbcb403745679804f4980632946b3542239 Mon Sep 17 00:00:00 2001
From: Colin Hebert
Date: Fri, 1 Jan 2016 09:57:21 +0100
Subject: [PATCH 376/664] Add network_mode support to docker
---
builtin/providers/docker/resource_docker_container.go | 6 ++++++
builtin/providers/docker/resource_docker_container_funcs.go | 4 ++++
builtin/providers/docker/resource_docker_container_test.go | 1 +
.../source/docs/providers/docker/r/container.html.markdown | 1 +
4 files changed, 12 insertions(+)
diff --git a/builtin/providers/docker/resource_docker_container.go b/builtin/providers/docker/resource_docker_container.go
index 323850499a..ea73ca4f57 100644
--- a/builtin/providers/docker/resource_docker_container.go
+++ b/builtin/providers/docker/resource_docker_container.go
@@ -238,6 +238,12 @@ func resourceDockerContainer() *schema.Resource {
Optional: true,
ForceNew: true,
},
+
+ "network_mode": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
},
}
}
diff --git a/builtin/providers/docker/resource_docker_container_funcs.go b/builtin/providers/docker/resource_docker_container_funcs.go
index 814941bba3..110d5cc850 100644
--- a/builtin/providers/docker/resource_docker_container_funcs.go
+++ b/builtin/providers/docker/resource_docker_container_funcs.go
@@ -136,6 +136,10 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
hostConfig.LogConfig.Config = mapTypeMapValsToString(v.(map[string]interface{}))
}
+ if v, ok := d.GetOk("network_mode"); ok {
+ hostConfig.NetworkMode = v
+ }
+
createOpts.HostConfig = hostConfig
var retContainer *dc.Container
diff --git a/builtin/providers/docker/resource_docker_container_test.go b/builtin/providers/docker/resource_docker_container_test.go
index df8ba0cb8a..a5c36a5c2b 100644
--- a/builtin/providers/docker/resource_docker_container_test.go
+++ b/builtin/providers/docker/resource_docker_container_test.go
@@ -155,5 +155,6 @@ resource "docker_container" "foo" {
max-size = "10m"
max-file = 20
}
+ network_mode = "bridge"
}
`
diff --git a/website/source/docs/providers/docker/r/container.html.markdown b/website/source/docs/providers/docker/r/container.html.markdown
index 920288eb25..8ea7968135 100644
--- a/website/source/docs/providers/docker/r/container.html.markdown
+++ b/website/source/docs/providers/docker/r/container.html.markdown
@@ -68,6 +68,7 @@ The following arguments are supported:
Defaults to "json-file".
* `log_opts` - (Optional) Key/value pairs to use as options for the logging
driver.
+* `network_mode` - (Optional) Network mode of the container.
## Ports
From ad0a76366101d79ba19f8e944120c91342d8ba5c Mon Sep 17 00:00:00 2001
From: Colin Hebert
Date: Fri, 1 Jan 2016 10:12:43 +0100
Subject: [PATCH 377/664] Convert v to string
---
builtin/providers/docker/resource_docker_container_funcs.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/builtin/providers/docker/resource_docker_container_funcs.go b/builtin/providers/docker/resource_docker_container_funcs.go
index 110d5cc850..5be5091e46 100644
--- a/builtin/providers/docker/resource_docker_container_funcs.go
+++ b/builtin/providers/docker/resource_docker_container_funcs.go
@@ -137,7 +137,7 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
}
if v, ok := d.GetOk("network_mode"); ok {
- hostConfig.NetworkMode = v
+ hostConfig.NetworkMode = v.(string)
}
createOpts.HostConfig = hostConfig
From 2112f763ee86b424e2a2067a1d0e4ba164284fe8 Mon Sep 17 00:00:00 2001
From: Elliot Graebert
Date: Fri, 1 Jan 2016 15:47:36 -0800
Subject: [PATCH 378/664] Added support for the encryption flag on
ebs_block_devices in launch configurations
---
.../aws/resource_aws_launch_configuration.go | 11 +++++++++++
.../aws/r/launch_configuration.html.markdown | 1 +
2 files changed, 12 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_launch_configuration.go b/builtin/providers/aws/resource_aws_launch_configuration.go
index a257a10b44..f115169f00 100644
--- a/builtin/providers/aws/resource_aws_launch_configuration.go
+++ b/builtin/providers/aws/resource_aws_launch_configuration.go
@@ -185,6 +185,13 @@ func resourceAwsLaunchConfiguration() *schema.Resource {
Computed: true,
ForceNew: true,
},
+
+ "encrypted": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Computed: true,
+ ForceNew: true,
+ },
},
},
Set: func(v interface{}) int {
@@ -326,6 +333,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
bd := v.(map[string]interface{})
ebs := &autoscaling.Ebs{
DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)),
+ Encrypted: aws.Bool(bd["encrypted"].(bool)),
}
if v, ok := bd["snapshot_id"].(string); ok && v != "" {
@@ -570,6 +578,9 @@ func readBlockDevicesFromLaunchConfiguration(d *schema.ResourceData, lc *autosca
if bdm.Ebs != nil && bdm.Ebs.Iops != nil {
bd["iops"] = *bdm.Ebs.Iops
}
+ if bdm.Ebs != nil && bdm.Ebs.Encrypted != nil {
+ bd["encrypted"] = *bdm.Ebs.Encrypted
+ }
if bdm.DeviceName != nil && *bdm.DeviceName == *rootDeviceName {
blockDevices["root"] = bd
} else {
diff --git a/website/source/docs/providers/aws/r/launch_configuration.html.markdown b/website/source/docs/providers/aws/r/launch_configuration.html.markdown
index dd7dd84fcb..9bb5501f4a 100644
--- a/website/source/docs/providers/aws/r/launch_configuration.html.markdown
+++ b/website/source/docs/providers/aws/r/launch_configuration.html.markdown
@@ -144,6 +144,7 @@ Each `ebs_block_device` supports the following:
This must be set with a `volume_type` of `"io1"`.
* `delete_on_termination` - (Optional) Whether the volume should be destroyed
on instance termination (Default: `true`).
+* `encrypted` - (Optional) Whether the volume should be encrypted or not. Do not use this option if you are using `snapshot_id`, as the encryption flag will be determined by the snapshot. (Default: `false`).
Modifying any `ebs_block_device` currently requires resource replacement.
From f09280891ccd7e5c8ce31b4475ce10622b28824f Mon Sep 17 00:00:00 2001
From: Colin Hebert
Date: Sat, 2 Jan 2016 12:20:55 +0100
Subject: [PATCH 379/664] Add support for custom networks in docker
---
builtin/providers/docker/provider.go | 1 +
.../docker/resource_docker_network.go | 135 ++++++++++++++++++
.../docker/resource_docker_network_funcs.go | 115 +++++++++++++++
.../docker/resource_docker_network_test.go | 65 +++++++++
.../providers/docker/r/network.html.markdown | 49 +++++++
website/source/layouts/docker.erb | 4 +
6 files changed, 369 insertions(+)
create mode 100644 builtin/providers/docker/resource_docker_network.go
create mode 100644 builtin/providers/docker/resource_docker_network_funcs.go
create mode 100644 builtin/providers/docker/resource_docker_network_test.go
create mode 100644 website/source/docs/providers/docker/r/network.html.markdown
diff --git a/builtin/providers/docker/provider.go b/builtin/providers/docker/provider.go
index fdc8b77194..799fd9bdb0 100644
--- a/builtin/providers/docker/provider.go
+++ b/builtin/providers/docker/provider.go
@@ -28,6 +28,7 @@ func Provider() terraform.ResourceProvider {
ResourcesMap: map[string]*schema.Resource{
"docker_container": resourceDockerContainer(),
"docker_image": resourceDockerImage(),
+ "docker_network": resourceDockerNetwork(),
},
ConfigureFunc: providerConfigure,
diff --git a/builtin/providers/docker/resource_docker_network.go b/builtin/providers/docker/resource_docker_network.go
new file mode 100644
index 0000000000..4c14b2dea0
--- /dev/null
+++ b/builtin/providers/docker/resource_docker_network.go
@@ -0,0 +1,135 @@
+package docker
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+
+ "github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceDockerNetwork() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceDockerNetworkCreate,
+ Read: resourceDockerNetworkRead,
+ Delete: resourceDockerNetworkDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "check_duplicate": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "driver": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Computed: true,
+ },
+
+ "options": &schema.Schema{
+ Type: schema.TypeMap,
+ Optional: true,
+ ForceNew: true,
+ Computed: true,
+ },
+
+ "ipam_driver": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "ipam_config": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ ForceNew: true,
+ Elem: getIpamConfigElem(),
+ Set: resourceDockerIpamConfigHash,
+ },
+
+ "id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "scope": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func getIpamConfigElem() *schema.Resource {
+ return &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "subnet": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "ip_range": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "gateway": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "aux_address": &schema.Schema{
+ Type: schema.TypeMap,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceDockerIpamConfigHash(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+
+ if v, ok := m["subnet"]; ok {
+ buf.WriteString(fmt.Sprintf("%v-", v.(string)))
+ }
+
+ if v, ok := m["ip_range"]; ok {
+ buf.WriteString(fmt.Sprintf("%v-", v.(string)))
+ }
+
+ if v, ok := m["gateway"]; ok {
+ buf.WriteString(fmt.Sprintf("%v-", v.(string)))
+ }
+
+ if v, ok := m["aux_address"]; ok {
+ auxAddress := v.(map[string]interface{})
+
+ keys := make([]string, len(auxAddress))
+ i := 0
+		for k := range auxAddress {
+ keys[i] = k
+ i++
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ buf.WriteString(fmt.Sprintf("%v-%v-", k, auxAddress[k].(string)))
+ }
+ }
+
+ return hashcode.String(buf.String())
+}
diff --git a/builtin/providers/docker/resource_docker_network_funcs.go b/builtin/providers/docker/resource_docker_network_funcs.go
new file mode 100644
index 0000000000..61954f4aff
--- /dev/null
+++ b/builtin/providers/docker/resource_docker_network_funcs.go
@@ -0,0 +1,115 @@
+package docker
+
+import (
+ "fmt"
+
+ dc "github.com/fsouza/go-dockerclient"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceDockerNetworkCreate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*dc.Client)
+
+ createOpts := dc.CreateNetworkOptions{
+ Name: d.Get("name").(string),
+ }
+ if v, ok := d.GetOk("check_duplicate"); ok {
+ createOpts.CheckDuplicate = v.(bool)
+ }
+ if v, ok := d.GetOk("driver"); ok {
+ createOpts.Driver = v.(string)
+ }
+ if v, ok := d.GetOk("options"); ok {
+ createOpts.Options = v.(map[string]interface{})
+ }
+
+ ipamOpts := dc.IPAMOptions{}
+ ipamOptsSet := false
+ if v, ok := d.GetOk("ipam_driver"); ok {
+ ipamOpts.Driver = v.(string)
+ ipamOptsSet = true
+ }
+ if v, ok := d.GetOk("ipam_config"); ok {
+ ipamOpts.Config = ipamConfigSetToIpamConfigs(v.(*schema.Set))
+ ipamOptsSet = true
+ }
+
+ if ipamOptsSet {
+ createOpts.IPAM = ipamOpts
+ }
+
+ var err error
+ var retNetwork *dc.Network
+ if retNetwork, err = client.CreateNetwork(createOpts); err != nil {
+ return fmt.Errorf("Unable to create network: %s", err)
+ }
+ if retNetwork == nil {
+ return fmt.Errorf("Returned network is nil")
+ }
+
+ d.SetId(retNetwork.ID)
+ d.Set("name", retNetwork.Name)
+ d.Set("scope", retNetwork.Scope)
+ d.Set("driver", retNetwork.Driver)
+ d.Set("options", retNetwork.Options)
+
+ return nil
+}
+
+func resourceDockerNetworkRead(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*dc.Client)
+
+ var err error
+ var retNetwork *dc.Network
+ if retNetwork, err = client.NetworkInfo(d.Id()); err != nil {
+ if _, ok := err.(*dc.NoSuchNetwork); !ok {
+ return fmt.Errorf("Unable to inspect network: %s", err)
+ }
+ }
+ if retNetwork == nil {
+ d.SetId("")
+ return nil
+ }
+
+ d.Set("scope", retNetwork.Scope)
+ d.Set("driver", retNetwork.Driver)
+ d.Set("options", retNetwork.Options)
+
+ return nil
+}
+
+func resourceDockerNetworkDelete(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*dc.Client)
+
+ if err := client.RemoveNetwork(d.Id()); err != nil {
+ if _, ok := err.(*dc.NoSuchNetwork); !ok {
+ return fmt.Errorf("Error deleting network %s: %s", d.Id(), err)
+ }
+ }
+
+ d.SetId("")
+ return nil
+}
+
+func ipamConfigSetToIpamConfigs(ipamConfigSet *schema.Set) []dc.IPAMConfig {
+ ipamConfigs := make([]dc.IPAMConfig, ipamConfigSet.Len())
+
+ for i, ipamConfigInt := range ipamConfigSet.List() {
+ ipamConfigRaw := ipamConfigInt.(map[string]interface{})
+
+ ipamConfig := dc.IPAMConfig{}
+ ipamConfig.Subnet = ipamConfigRaw["subnet"].(string)
+ ipamConfig.IPRange = ipamConfigRaw["ip_range"].(string)
+ ipamConfig.Gateway = ipamConfigRaw["gateway"].(string)
+
+ auxAddressRaw := ipamConfigRaw["aux_address"].(map[string]interface{})
+ ipamConfig.AuxAddress = make(map[string]string, len(auxAddressRaw))
+ for k, v := range auxAddressRaw {
+ ipamConfig.AuxAddress[k] = v.(string)
+ }
+
+ ipamConfigs[i] = ipamConfig
+ }
+
+ return ipamConfigs
+}
diff --git a/builtin/providers/docker/resource_docker_network_test.go b/builtin/providers/docker/resource_docker_network_test.go
new file mode 100644
index 0000000000..6e3bb4e380
--- /dev/null
+++ b/builtin/providers/docker/resource_docker_network_test.go
@@ -0,0 +1,65 @@
+package docker
+
+import (
+ "fmt"
+ "testing"
+
+ dc "github.com/fsouza/go-dockerclient"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccDockerNetwork_basic(t *testing.T) {
+ var n dc.Network
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccDockerNetworkConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccNetwork("docker_network.foo", &n),
+ ),
+ },
+ },
+ })
+}
+
+func testAccNetwork(n string, network *dc.Network) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No ID is set")
+ }
+
+ client := testAccProvider.Meta().(*dc.Client)
+ networks, err := client.ListNetworks()
+ if err != nil {
+ return err
+ }
+
+ for _, n := range networks {
+ if n.ID == rs.Primary.ID {
+ inspected, err := client.NetworkInfo(n.ID)
+ if err != nil {
+ return fmt.Errorf("Network could not be obtained: %s", err)
+ }
+ *network = *inspected
+ return nil
+ }
+ }
+
+ return fmt.Errorf("Network not found: %s", rs.Primary.ID)
+ }
+}
+
+const testAccDockerNetworkConfig = `
+resource "docker_network" "foo" {
+ name = "bar"
+}
+`
diff --git a/website/source/docs/providers/docker/r/network.html.markdown b/website/source/docs/providers/docker/r/network.html.markdown
new file mode 100644
index 0000000000..77d4d02f17
--- /dev/null
+++ b/website/source/docs/providers/docker/r/network.html.markdown
@@ -0,0 +1,49 @@
+---
+layout: "docker"
+page_title: "Docker: docker_network"
+sidebar_current: "docs-docker-resource-network"
+description: |-
+ Manages a Docker Network.
+---
+
+# docker\_network
+
+Manages a Docker Network. This can be used alongside
+[docker\_container](/docs/providers/docker/r/container.html)
+to create virtual networks within the docker environment.
+
+## Example Usage
+
+```
+# Create a user-defined network.
+resource "docker_network" "private_network" {
+ name = "my_network"
+}
+
+# Access it somewhere else with ${docker_network.private_network.name}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required, string) The name of the Docker network.
+* `check_duplicate` - (Optional, boolean) Requests the daemon to check for networks with the same name.
+* `driver` - (Optional, string) Name of the network driver to use. Defaults to the `bridge` driver.
+* `options` - (Optional, map of strings) Network specific options to be used by the drivers.
+* `ipam_driver` - (Optional, string) Driver used by the custom IP scheme of the network.
+* `ipam_config` - (Optional, block) Configuration of the custom IP scheme of the network.
+
+The `ipam_config` block supports:
+
+* `subnet` - (Optional, string)
+* `ip_range` - (Optional, string)
+* `gateway` - (Optional, string)
+* `aux_address` - (Optional, map of string)
+
+## Attributes Reference
+
+The following attributes are exported in addition to the above configuration:
+
+* `id` (string)
+* `scope` (string)
diff --git a/website/source/layouts/docker.erb b/website/source/layouts/docker.erb
index 5bb5a1514d..d5ae7e2ca4 100644
--- a/website/source/layouts/docker.erb
+++ b/website/source/layouts/docker.erb
@@ -20,6 +20,10 @@
>
docker_image
+
+ >
+ docker_network
+
From acf643b96fda9c170c2ef85aba559f7eda19f31c Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Mon, 4 Jan 2016 10:55:20 -0500
Subject: [PATCH 380/664] provider/digitalocean: Document defaults
---
website/source/docs/providers/do/r/droplet.html.markdown | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/website/source/docs/providers/do/r/droplet.html.markdown b/website/source/docs/providers/do/r/droplet.html.markdown
index bfb8ed5509..0a3352211f 100644
--- a/website/source/docs/providers/do/r/droplet.html.markdown
+++ b/website/source/docs/providers/do/r/droplet.html.markdown
@@ -32,9 +32,11 @@ The following arguments are supported:
* `name` - (Required) The droplet name
* `region` - (Required) The region to start in
* `size` - (Required) The instance size to start
-* `backups` - (Optional) Boolean controlling if backups are made.
-* `ipv6` - (Optional) Boolean controlling if IPv6 is enabled.
-* `private_networking` - (Optional) Boolean controlling if private networks are enabled.
+* `backups` - (Optional) Boolean controlling if backups are made. Defaults to
+ false.
+* `ipv6` - (Optional) Boolean controlling if IPv6 is enabled. Defaults to false.
+* `private_networking` - (Optional) Boolean controlling if private networks are
+ enabled. Defaults to false.
* `ssh_keys` - (Optional) A list of SSH IDs or fingerprints to enable in
the format `[12345, 123456]`. To retrieve this info, use a tool such
as `curl` with the [DigitalOcean API](https://developers.digitalocean.com/#keys),
From 4fc31abc6fac163de4cf3e0ee7e1341020d7acfa Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Mon, 4 Jan 2016 09:59:21 -0600
Subject: [PATCH 381/664] fix typo
---
.../source/docs/providers/aws/r/route53_record.html.markdown | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/docs/providers/aws/r/route53_record.html.markdown b/website/source/docs/providers/aws/r/route53_record.html.markdown
index b1c6bf7550..11b4b5c592 100644
--- a/website/source/docs/providers/aws/r/route53_record.html.markdown
+++ b/website/source/docs/providers/aws/r/route53_record.html.markdown
@@ -103,7 +103,7 @@ record from one another. Required for each weighted record.
default in Terraform. This allows Terraform to distinquish between a `0` value
and an empty value in the configuration (none specified). As a result, a
`weight` of `-1` will be present in the statefile if `weight` is omitted in the
-configuraiton.
+configuration.
Exactly one of `records` or `alias` must be specified: this determines whether it's an alias record.
From e22376f6a005b1002c17a7000baa3ccd5e65ba46 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Mon, 4 Jan 2016 11:03:51 -0500
Subject: [PATCH 382/664] provider/aws: Document `display_name` on SNS Topic
Fixes #3799
---
website/source/docs/providers/aws/r/sns_topic.html.markdown | 1 +
1 file changed, 1 insertion(+)
diff --git a/website/source/docs/providers/aws/r/sns_topic.html.markdown b/website/source/docs/providers/aws/r/sns_topic.html.markdown
index 62a3c23f74..b17d5536fd 100644
--- a/website/source/docs/providers/aws/r/sns_topic.html.markdown
+++ b/website/source/docs/providers/aws/r/sns_topic.html.markdown
@@ -23,6 +23,7 @@ resource "aws_sns_topic" "user_updates" {
The following arguments are supported:
* `name` - (Required) The friendly name for the SNS topic
+* `display_name` - (Optional) The display name for the SNS topic
* `policy` - (Optional) The fully-formed AWS policy as JSON
* `delivery_policy` - (Optional) The SNS delivery policy
From 9096d4360df9ea3aa821c11ad7dbf927f59dfca6 Mon Sep 17 00:00:00 2001
From: Clint
Date: Mon, 4 Jan 2016 10:05:41 -0600
Subject: [PATCH 383/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e84873d702..bd13a1274f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -61,6 +61,7 @@ BUG FIXES:
* provider/aws: Fix issue force destroying a versioned S3 bucket [GH-4168]
* provider/aws: Update DB Replica to honor storage type [GH-4155]
* provider/aws: Fix issue creating AWS RDS replicas across regions [GH-4215]
+ * provider/aws: Fix issue with Route53 and zero weighted records [GH-4427]
* provider/aws: Fix issue with iam_profile in aws_instance when a path is specified [GH-3663]
* provider/aws: Refactor AWS Authentication chain to fix issue with authentication and IAM [GH-4254]
* provider/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region [GH-4236]
From 74152fb6db4f5bf8ca732476026d7325071b7e1d Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Mon, 4 Jan 2016 11:44:22 -0500
Subject: [PATCH 384/664] Revert "Update Build documentation to use t2.micro"
---
website/source/intro/getting-started/build.html.md | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/website/source/intro/getting-started/build.html.md b/website/source/intro/getting-started/build.html.md
index f2c6c19ba8..aa3c7c506d 100644
--- a/website/source/intro/getting-started/build.html.md
+++ b/website/source/intro/getting-started/build.html.md
@@ -60,7 +60,7 @@ provider "aws" {
resource "aws_instance" "example" {
ami = "ami-408c7f28"
- instance_type = "t2.micro"
+ instance_type = "t1.micro"
}
```
@@ -113,7 +113,7 @@ $ terraform plan
+ aws_instance.example
ami: "" => "ami-408c7f28"
availability_zone: "" => ""
- instance_type: "" => "t2.micro"
+ instance_type: "" => "t1.micro"
key_name: "" => ""
private_dns: "" => ""
private_ip: "" => ""
@@ -149,7 +149,7 @@ since Terraform waits for the EC2 instance to become available.
$ terraform apply
aws_instance.example: Creating...
ami: "" => "ami-408c7f28"
- instance_type: "" => "t2.micro"
+ instance_type: "" => "t1.micro"
Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
@@ -174,7 +174,7 @@ aws_instance.example:
id = i-e60900cd
ami = ami-408c7f28
availability_zone = us-east-1c
- instance_type = t2.micro
+ instance_type = t1.micro
key_name =
private_dns = domU-12-31-39-12-38-AB.compute-1.internal
private_ip = 10.200.59.89
From 3c330f6e19a266a471123e90cef884ef2bf8949f Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Mon, 4 Jan 2016 10:44:53 -0500
Subject: [PATCH 385/664] provider/aws: Fix RDS unexpected state config
This commit adds the various states (taken from the RDS documentation
here: http://amzn.to/1OHqi6g) to the list of allowable pending states
when creating an RDS instance.
In particular, `resetting-master-credentials` is returned when creating
an `aws_db_instance` from a snapshot. Fixes #4477.
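
As a rough sketch of the resulting wait loop (the `WaitForState` call sits
just past the hunk below and is assumed here; imports come from the
surrounding resource file):

    stateConf := &resource.StateChangeConf{
        Pending: []string{"creating", "backing-up", "modifying", "resetting-master-credentials",
            "maintenance", "renaming", "rebooting", "upgrading"},
        Target:  "available",
        Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta),
        Timeout: 40 * time.Minute,
    }
    // Block until the instance leaves every in-flight state or the timeout hits.
    if _, err := stateConf.WaitForState(); err != nil {
        return err
    }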
---
builtin/providers/aws/resource_aws_db_instance.go | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go
index a034b7953a..00de73fc7a 100644
--- a/builtin/providers/aws/resource_aws_db_instance.go
+++ b/builtin/providers/aws/resource_aws_db_instance.go
@@ -383,7 +383,8 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
"[INFO] Waiting for DB Instance to be available")
stateConf := &resource.StateChangeConf{
- Pending: []string{"creating", "backing-up", "modifying"},
+ Pending: []string{"creating", "backing-up", "modifying", "resetting-master-credentials",
+ "maintenance", "renaming", "rebooting", "upgrading"},
Target: "available",
Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta),
Timeout: 40 * time.Minute,
@@ -494,7 +495,8 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
"[INFO] Waiting for DB Instance to be available")
stateConf := &resource.StateChangeConf{
- Pending: []string{"creating", "backing-up", "modifying"},
+ Pending: []string{"creating", "backing-up", "modifying", "resetting-master-credentials",
+ "maintenance", "renaming", "rebooting", "upgrading"},
Target: "available",
Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta),
Timeout: 40 * time.Minute,
From 5e9e22d4fd8c11ad3560ab75cdbf093fd1e31add Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 4 Jan 2016 13:19:46 -0600
Subject: [PATCH 386/664] provider/google: Allow acctests to set credentials
via file
Makes things easier on Travis.
---
builtin/providers/google/provider_test.go | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/builtin/providers/google/provider_test.go b/builtin/providers/google/provider_test.go
index 827a7f5753..51654a6688 100644
--- a/builtin/providers/google/provider_test.go
+++ b/builtin/providers/google/provider_test.go
@@ -1,6 +1,7 @@
package google
import (
+ "io/ioutil"
"os"
"testing"
@@ -29,6 +30,14 @@ func TestProvider_impl(t *testing.T) {
}
func testAccPreCheck(t *testing.T) {
+ if v := os.Getenv("GOOGLE_CREDENTIALS_FILE"); v != "" {
+ creds, err := ioutil.ReadFile(v)
+ if err != nil {
+ t.Fatalf("Error reading GOOGLE_CREDENTIALS_FILE path: %s", err)
+ }
+ os.Setenv("GOOGLE_CREDENTIALS", string(creds))
+ }
+
if v := os.Getenv("GOOGLE_CREDENTIALS"); v == "" {
t.Fatal("GOOGLE_CREDENTIALS must be set for acceptance tests")
}
From 6e3609564462321132aee7047abc6d3f0adf1f30 Mon Sep 17 00:00:00 2001
From: Colin Hebert
Date: Mon, 4 Jan 2016 20:58:54 +0100
Subject: [PATCH 387/664] Add the networks entry
---
builtin/providers/docker/resource_docker_container.go | 6 ++++++
.../providers/docker/resource_docker_container_funcs.go | 8 ++++++++
.../docs/providers/docker/r/container.html.markdown | 1 +
3 files changed, 15 insertions(+)
diff --git a/builtin/providers/docker/resource_docker_container.go b/builtin/providers/docker/resource_docker_container.go
index 323850499a..f20ff43f08 100644
--- a/builtin/providers/docker/resource_docker_container.go
+++ b/builtin/providers/docker/resource_docker_container.go
@@ -238,6 +238,12 @@ func resourceDockerContainer() *schema.Resource {
Optional: true,
ForceNew: true,
},
+
+ "networks": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ ForceNew: true,
+ },
},
}
}
diff --git a/builtin/providers/docker/resource_docker_container_funcs.go b/builtin/providers/docker/resource_docker_container_funcs.go
index 814941bba3..605db710ce 100644
--- a/builtin/providers/docker/resource_docker_container_funcs.go
+++ b/builtin/providers/docker/resource_docker_container_funcs.go
@@ -148,6 +148,14 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
d.SetId(retContainer.ID)
+ if v, ok := d.GetOk("networks"); ok {
+ connectionOpts := &dc.NetworkConnectionOptions{Container: retContainer.ID}
+
+ for _, network := range v.(*schema.Set).List() {
+ client.ConnectNetwork(network.(string), connectionOpts)
+ }
+ }
+
creationTime = time.Now()
if err := client.StartContainer(retContainer.ID, hostConfig); err != nil {
return fmt.Errorf("Unable to start container: %s", err)
diff --git a/website/source/docs/providers/docker/r/container.html.markdown b/website/source/docs/providers/docker/r/container.html.markdown
index 920288eb25..e8ae91c153 100644
--- a/website/source/docs/providers/docker/r/container.html.markdown
+++ b/website/source/docs/providers/docker/r/container.html.markdown
@@ -68,6 +68,7 @@ The following arguments are supported:
Defaults to "json-file".
* `log_opts` - (Optional) Key/value pairs to use as options for the logging
driver.
+* `networks` - (Optional, set of strings) IDs of the networks the container is attached to.
## Ports
From 35188f3694e1f658db51d225621fe15bea074666 Mon Sep 17 00:00:00 2001
From: Colin Hebert
Date: Mon, 4 Jan 2016 21:03:53 +0100
Subject: [PATCH 388/664] Fix typo
---
builtin/providers/docker/resource_docker_container_funcs.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/builtin/providers/docker/resource_docker_container_funcs.go b/builtin/providers/docker/resource_docker_container_funcs.go
index 605db710ce..08cfe190c4 100644
--- a/builtin/providers/docker/resource_docker_container_funcs.go
+++ b/builtin/providers/docker/resource_docker_container_funcs.go
@@ -149,7 +149,7 @@ func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) err
d.SetId(retContainer.ID)
if v, ok := d.GetOk("networks"); ok {
- connectionOpts := &dc.NetworkConnectionOptions{Container: retContainer.ID}
+ connectionOpts := dc.NetworkConnectionOptions{Container: retContainer.ID}
for _, network := range v.(*schema.Set).List() {
client.ConnectNetwork(network.(string), connectionOpts)
From c94815d56d93fc22a363109437223551334bb7f3 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Mon, 4 Jan 2016 14:09:16 -0600
Subject: [PATCH 389/664] provider/aws: Update some IAM tests
---
.../aws/resource_aws_iam_role_policy_test.go | 30 +++++++++++++++++--
.../resource_aws_iam_saml_provider_test.go | 25 ++++++++++++++--
.../aws/resource_aws_iam_user_policy_test.go | 30 +++++++++++++++++--
3 files changed, 79 insertions(+), 6 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_iam_role_policy_test.go b/builtin/providers/aws/resource_aws_iam_role_policy_test.go
index 219c676ebc..3f3256435f 100644
--- a/builtin/providers/aws/resource_aws_iam_role_policy_test.go
+++ b/builtin/providers/aws/resource_aws_iam_role_policy_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -39,8 +40,33 @@ func TestAccAWSIAMRolePolicy_basic(t *testing.T) {
}
func testAccCheckIAMRolePolicyDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
+ iamconn := testAccProvider.Meta().(*AWSClient).iamconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_iam_role_policy" {
+ continue
+ }
+
+ role, name := resourceAwsIamRolePolicyParseId(rs.Primary.ID)
+
+ request := &iam.GetRolePolicyInput{
+ PolicyName: aws.String(name),
+ RoleName: aws.String(role),
+ }
+
+ var err error
+ getResp, err := iamconn.GetRolePolicy(request)
+ if err != nil {
+ if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
+ // none found, that's good
+ return nil
+ }
+ return fmt.Errorf("Error reading IAM policy %s from role %s: %s", name, role, err)
+ }
+
+ if getResp != nil {
+ return fmt.Errorf("Found IAM Role, expected none: %s", getResp)
+ }
}
return nil
diff --git a/builtin/providers/aws/resource_aws_iam_saml_provider_test.go b/builtin/providers/aws/resource_aws_iam_saml_provider_test.go
index 63ed395883..4118a062ae 100644
--- a/builtin/providers/aws/resource_aws_iam_saml_provider_test.go
+++ b/builtin/providers/aws/resource_aws_iam_saml_provider_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -33,8 +34,28 @@ func TestAccAWSIAMSamlProvider_basic(t *testing.T) {
}
func testAccCheckIAMSamlProviderDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
+ iamconn := testAccProvider.Meta().(*AWSClient).iamconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_iam_saml_provider" {
+ continue
+ }
+
+ input := &iam.GetSAMLProviderInput{
+ SAMLProviderArn: aws.String(rs.Primary.ID),
+ }
+ out, err := iamconn.GetSAMLProvider(input)
+ if err != nil {
+ if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
+ // none found, that's good
+ return nil
+ }
+ return fmt.Errorf("Error reading IAM SAML Provider, out: %s, err: %s", out, err)
+ }
+
+ if out != nil {
+ return fmt.Errorf("Found IAM SAML Provider, expected none: %s", out)
+ }
}
return nil
diff --git a/builtin/providers/aws/resource_aws_iam_user_policy_test.go b/builtin/providers/aws/resource_aws_iam_user_policy_test.go
index f5c5201808..019d82506a 100644
--- a/builtin/providers/aws/resource_aws_iam_user_policy_test.go
+++ b/builtin/providers/aws/resource_aws_iam_user_policy_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -39,8 +40,33 @@ func TestAccAWSIAMUserPolicy_basic(t *testing.T) {
}
func testAccCheckIAMUserPolicyDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
+ iamconn := testAccProvider.Meta().(*AWSClient).iamconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_iam_user_policy" {
+ continue
+ }
+
+ role, name := resourceAwsIamRolePolicyParseId(rs.Primary.ID)
+
+ request := &iam.GetRolePolicyInput{
+ PolicyName: aws.String(name),
+ RoleName: aws.String(role),
+ }
+
+ var err error
+ getResp, err := iamconn.GetRolePolicy(request)
+ if err != nil {
+ if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
+ // none found, that's good
+ return nil
+ }
+ return fmt.Errorf("Error reading IAM policy %s from role %s: %s", name, role, err)
+ }
+
+ if getResp != nil {
+ return fmt.Errorf("Found IAM Role, expected none: %s", getResp)
+ }
}
return nil
From c519ea74c523f3375e785590d543e4bdcfa86467 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Mon, 4 Jan 2016 16:14:30 -0500
Subject: [PATCH 390/664] provider/aws: Don't set NatGatewayId with no value
This fixes creating aws_route_table resources in regions which do not
support the NAT Gateway yet (e.g. eu-central) - unless a value is
explicitly set, in which case the API call will fail until such time as
the NAT Gateway is supported.
Fixes #4499.
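
Illustrative sketch of the pattern (assuming `opts` is the SDK CreateRouteInput
value built in the hunk below): optional pointer fields stay nil unless the
config actually carries a value, so the parameter is omitted from the request.

    // Only send NatGatewayId when one was configured.
    if v := m["nat_gateway_id"].(string); v != "" {
        opts.NatGatewayId = aws.String(v)
    }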
---
builtin/providers/aws/resource_aws_route_table.go | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/aws/resource_aws_route_table.go b/builtin/providers/aws/resource_aws_route_table.go
index 752b771fef..6ba2153e6b 100644
--- a/builtin/providers/aws/resource_aws_route_table.go
+++ b/builtin/providers/aws/resource_aws_route_table.go
@@ -290,12 +290,15 @@ func resourceAwsRouteTableUpdate(d *schema.ResourceData, meta interface{}) error
RouteTableId: aws.String(d.Id()),
DestinationCidrBlock: aws.String(m["cidr_block"].(string)),
GatewayId: aws.String(m["gateway_id"].(string)),
- NatGatewayId: aws.String(m["nat_gateway_id"].(string)),
InstanceId: aws.String(m["instance_id"].(string)),
VpcPeeringConnectionId: aws.String(m["vpc_peering_connection_id"].(string)),
NetworkInterfaceId: aws.String(m["network_interface_id"].(string)),
}
+ if m["nat_gateway_id"].(string) != "" {
+ opts.NatGatewayId = aws.String(m["nat_gateway_id"].(string))
+ }
+
log.Printf("[INFO] Creating route for %s: %#v", d.Id(), opts)
if _, err := conn.CreateRoute(&opts); err != nil {
return err
From 028664a0156f4f846f10ac292d23d86acbe8ce01 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 4 Jan 2016 15:21:38 -0600
Subject: [PATCH 391/664] provider/digitalocean: acctest improvements
* Add SSH Keys to all droplets in tests; this prevents acctests from
spamming the account owner's email with root password details
* Add a new helper/acctest package to be a home for random string / int
implementations used in tests (a short usage sketch follows this list)
* Insert some random details into record tests to prevent collisions
* Normalize config style in tests to hclfmt conventions
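
A minimal usage sketch (config and resource names are just the ones used in
the DigitalOcean record tests below) of feeding a random suffix into a test
config:

    // Build a unique domain per test run so concurrent or retried runs don't collide.
    domain := fmt.Sprintf("foobar-test-terraform-%s.com", acctest.RandString(10))
    config := fmt.Sprintf(testAccCheckDigitalOceanRecordConfig_basic, domain)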
---
.../resource_digitalocean_droplet_test.go | 86 ++++++++++-------
.../resource_digitalocean_floating_ip_test.go | 27 +++---
.../resource_digitalocean_record_test.go | 92 +++++++++----------
helper/acctest/acctest.go | 2 +
helper/acctest/random.go | 35 +++++++
5 files changed, 151 insertions(+), 91 deletions(-)
create mode 100644 helper/acctest/acctest.go
create mode 100644 helper/acctest/random.go
diff --git a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go
index d3a37a82ca..3a72e3c5dc 100644
--- a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go
+++ b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go
@@ -293,43 +293,67 @@ func testAccCheckDigitalOceanDropletRecreated(t *testing.T,
//
//}
-const testAccCheckDigitalOceanDropletConfig_basic = `
-resource "digitalocean_droplet" "foobar" {
- name = "foo"
- size = "512mb"
- image = "centos-5-8-x32"
- region = "nyc3"
- user_data = "foobar"
+var testAccCheckDigitalOceanDropletConfig_basic = fmt.Sprintf(`
+resource "digitalocean_ssh_key" "foobar" {
+ name = "foobar"
+ public_key = "%s"
}
-`
-const testAccCheckDigitalOceanDropletConfig_userdata_update = `
resource "digitalocean_droplet" "foobar" {
- name = "foo"
- size = "512mb"
- image = "centos-5-8-x32"
- region = "nyc3"
- user_data = "foobar foobar"
+ name = "foo"
+ size = "512mb"
+ image = "centos-5-8-x32"
+ region = "nyc3"
+ user_data = "foobar"
+ ssh_keys = ["${digitalocean_ssh_key.foobar.id}"]
}
-`
+`, testAccValidPublicKey)
-const testAccCheckDigitalOceanDropletConfig_RenameAndResize = `
-resource "digitalocean_droplet" "foobar" {
- name = "baz"
- size = "1gb"
- image = "centos-5-8-x32"
- region = "nyc3"
+var testAccCheckDigitalOceanDropletConfig_userdata_update = fmt.Sprintf(`
+resource "digitalocean_ssh_key" "foobar" {
+ name = "foobar"
+ public_key = "%s"
}
-`
+
+resource "digitalocean_droplet" "foobar" {
+ name = "foo"
+ size = "512mb"
+ image = "centos-5-8-x32"
+ region = "nyc3"
+ user_data = "foobar foobar"
+ ssh_keys = ["${digitalocean_ssh_key.foobar.id}"]
+}
+`, testAccValidPublicKey)
+
+var testAccCheckDigitalOceanDropletConfig_RenameAndResize = fmt.Sprintf(`
+resource "digitalocean_ssh_key" "foobar" {
+ name = "foobar"
+ public_key = "%s"
+}
+
+resource "digitalocean_droplet" "foobar" {
+ name = "baz"
+ size = "1gb"
+ image = "centos-5-8-x32"
+ region = "nyc3"
+ ssh_keys = ["${digitalocean_ssh_key.foobar.id}"]
+}
+`, testAccValidPublicKey)
// IPV6 only in singapore
-const testAccCheckDigitalOceanDropletConfig_PrivateNetworkingIpv6 = `
-resource "digitalocean_droplet" "foobar" {
- name = "baz"
- size = "1gb"
- image = "centos-5-8-x32"
- region = "sgp1"
- ipv6 = true
- private_networking = true
+var testAccCheckDigitalOceanDropletConfig_PrivateNetworkingIpv6 = fmt.Sprintf(`
+resource "digitalocean_ssh_key" "foobar" {
+ name = "foobar"
+ public_key = "%s"
}
-`
+
+resource "digitalocean_droplet" "foobar" {
+ name = "baz"
+ size = "1gb"
+ image = "centos-5-8-x32"
+ region = "sgp1"
+ ipv6 = true
+ private_networking = true
+ ssh_keys = ["${digitalocean_ssh_key.foobar.id}"]
+}
+`, testAccValidPublicKey)
diff --git a/builtin/providers/digitalocean/resource_digitalocean_floating_ip_test.go b/builtin/providers/digitalocean/resource_digitalocean_floating_ip_test.go
index 8ae003a1d4..d1a7882fcc 100644
--- a/builtin/providers/digitalocean/resource_digitalocean_floating_ip_test.go
+++ b/builtin/providers/digitalocean/resource_digitalocean_floating_ip_test.go
@@ -101,21 +101,26 @@ func testAccCheckDigitalOceanFloatingIPExists(n string, floatingIP *godo.Floatin
var testAccCheckDigitalOceanFloatingIPConfig_region = `
resource "digitalocean_floating_ip" "foobar" {
- region = "nyc3"
+ region = "nyc3"
}`
-var testAccCheckDigitalOceanFloatingIPConfig_droplet = `
+var testAccCheckDigitalOceanFloatingIPConfig_droplet = fmt.Sprintf(`
+resource "digitalocean_ssh_key" "foobar" {
+ name = "foobar"
+ public_key = "%s"
+}
resource "digitalocean_droplet" "foobar" {
- name = "baz"
- size = "1gb"
- image = "centos-5-8-x32"
- region = "sgp1"
- ipv6 = true
- private_networking = true
+ name = "baz"
+ size = "1gb"
+ image = "centos-5-8-x32"
+ region = "sgp1"
+ ipv6 = true
+ private_networking = true
+ ssh_keys = ["${digitalocean_ssh_key.foobar.id}"]
}
resource "digitalocean_floating_ip" "foobar" {
- droplet_id = "${digitalocean_droplet.foobar.id}"
- region = "${digitalocean_droplet.foobar.region}"
-}`
+ droplet_id = "${digitalocean_droplet.foobar.id}"
+ region = "${digitalocean_droplet.foobar.region}"
+}`, testAccValidPublicKey)
diff --git a/builtin/providers/digitalocean/resource_digitalocean_record_test.go b/builtin/providers/digitalocean/resource_digitalocean_record_test.go
index 7a4123bd60..9552e031e6 100644
--- a/builtin/providers/digitalocean/resource_digitalocean_record_test.go
+++ b/builtin/providers/digitalocean/resource_digitalocean_record_test.go
@@ -6,12 +6,14 @@ import (
"testing"
"github.com/digitalocean/godo"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDigitalOceanRecord_Basic(t *testing.T) {
var record godo.DomainRecord
+ domain := fmt.Sprintf("foobar-test-terraform-%s.com", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -19,14 +21,14 @@ func TestAccDigitalOceanRecord_Basic(t *testing.T) {
CheckDestroy: testAccCheckDigitalOceanRecordDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccCheckDigitalOceanRecordConfig_basic,
+ Config: fmt.Sprintf(testAccCheckDigitalOceanRecordConfig_basic, domain),
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record),
testAccCheckDigitalOceanRecordAttributes(&record),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "name", "terraform"),
resource.TestCheckResourceAttr(
- "digitalocean_record.foobar", "domain", "foobar-test-terraform.com"),
+ "digitalocean_record.foobar", "domain", domain),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "value", "192.168.0.10"),
),
@@ -37,6 +39,7 @@ func TestAccDigitalOceanRecord_Basic(t *testing.T) {
func TestAccDigitalOceanRecord_Updated(t *testing.T) {
var record godo.DomainRecord
+ domain := fmt.Sprintf("foobar-test-terraform-%s.com", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -44,14 +47,14 @@ func TestAccDigitalOceanRecord_Updated(t *testing.T) {
CheckDestroy: testAccCheckDigitalOceanRecordDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccCheckDigitalOceanRecordConfig_basic,
+ Config: fmt.Sprintf(testAccCheckDigitalOceanRecordConfig_basic, domain),
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record),
testAccCheckDigitalOceanRecordAttributes(&record),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "name", "terraform"),
resource.TestCheckResourceAttr(
- "digitalocean_record.foobar", "domain", "foobar-test-terraform.com"),
+ "digitalocean_record.foobar", "domain", domain),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "value", "192.168.0.10"),
resource.TestCheckResourceAttr(
@@ -59,14 +62,15 @@ func TestAccDigitalOceanRecord_Updated(t *testing.T) {
),
},
resource.TestStep{
- Config: testAccCheckDigitalOceanRecordConfig_new_value,
+ Config: fmt.Sprintf(
+ testAccCheckDigitalOceanRecordConfig_new_value, domain),
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record),
testAccCheckDigitalOceanRecordAttributesUpdated(&record),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "name", "terraform"),
resource.TestCheckResourceAttr(
- "digitalocean_record.foobar", "domain", "foobar-test-terraform.com"),
+ "digitalocean_record.foobar", "domain", domain),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "value", "192.168.0.11"),
resource.TestCheckResourceAttr(
@@ -79,6 +83,7 @@ func TestAccDigitalOceanRecord_Updated(t *testing.T) {
func TestAccDigitalOceanRecord_HostnameValue(t *testing.T) {
var record godo.DomainRecord
+ domain := fmt.Sprintf("foobar-test-terraform-%s.com", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -86,14 +91,15 @@ func TestAccDigitalOceanRecord_HostnameValue(t *testing.T) {
CheckDestroy: testAccCheckDigitalOceanRecordDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccCheckDigitalOceanRecordConfig_cname,
+ Config: fmt.Sprintf(
+ testAccCheckDigitalOceanRecordConfig_cname, domain),
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record),
- testAccCheckDigitalOceanRecordAttributesHostname("a", &record),
+ testAccCheckDigitalOceanRecordAttributesHostname("a.foobar-test-terraform.com", &record),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "name", "terraform"),
resource.TestCheckResourceAttr(
- "digitalocean_record.foobar", "domain", "foobar-test-terraform.com"),
+ "digitalocean_record.foobar", "domain", domain),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "value", "a.foobar-test-terraform.com."),
resource.TestCheckResourceAttr(
@@ -106,6 +112,7 @@ func TestAccDigitalOceanRecord_HostnameValue(t *testing.T) {
func TestAccDigitalOceanRecord_ExternalHostnameValue(t *testing.T) {
var record godo.DomainRecord
+ domain := fmt.Sprintf("foobar-test-terraform-%s.com", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -113,14 +120,15 @@ func TestAccDigitalOceanRecord_ExternalHostnameValue(t *testing.T) {
CheckDestroy: testAccCheckDigitalOceanRecordDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccCheckDigitalOceanRecordConfig_external_cname,
+ Config: fmt.Sprintf(
+ testAccCheckDigitalOceanRecordConfig_external_cname, domain),
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanRecordExists("digitalocean_record.foobar", &record),
testAccCheckDigitalOceanRecordAttributesHostname("a.foobar-test-terraform.net", &record),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "name", "terraform"),
resource.TestCheckResourceAttr(
- "digitalocean_record.foobar", "domain", "foobar-test-terraform.com"),
+ "digitalocean_record.foobar", "domain", domain),
resource.TestCheckResourceAttr(
"digitalocean_record.foobar", "value", "a.foobar-test-terraform.net."),
resource.TestCheckResourceAttr(
@@ -225,70 +233,56 @@ func testAccCheckDigitalOceanRecordAttributesHostname(data string, record *godo.
const testAccCheckDigitalOceanRecordConfig_basic = `
resource "digitalocean_domain" "foobar" {
- name = "foobar-test-terraform.com"
- ip_address = "192.168.0.10"
+ name = "%s"
+ ip_address = "192.168.0.10"
}
resource "digitalocean_record" "foobar" {
- domain = "${digitalocean_domain.foobar.name}"
+ domain = "${digitalocean_domain.foobar.name}"
- name = "terraform"
- value = "192.168.0.10"
- type = "A"
+ name = "terraform"
+ value = "192.168.0.10"
+ type = "A"
}`
const testAccCheckDigitalOceanRecordConfig_new_value = `
resource "digitalocean_domain" "foobar" {
- name = "foobar-test-terraform.com"
- ip_address = "192.168.0.10"
+ name = "%s"
+ ip_address = "192.168.0.10"
}
resource "digitalocean_record" "foobar" {
- domain = "${digitalocean_domain.foobar.name}"
+ domain = "${digitalocean_domain.foobar.name}"
- name = "terraform"
- value = "192.168.0.11"
- type = "A"
+ name = "terraform"
+ value = "192.168.0.11"
+ type = "A"
}`
const testAccCheckDigitalOceanRecordConfig_cname = `
resource "digitalocean_domain" "foobar" {
- name = "foobar-test-terraform.com"
- ip_address = "192.168.0.10"
+ name = "%s"
+ ip_address = "192.168.0.10"
}
resource "digitalocean_record" "foobar" {
- domain = "${digitalocean_domain.foobar.name}"
+ domain = "${digitalocean_domain.foobar.name}"
- name = "terraform"
- value = "a.foobar-test-terraform.com."
- type = "CNAME"
-}`
-
-const testAccCheckDigitalOceanRecordConfig_relative_cname = `
-resource "digitalocean_domain" "foobar" {
- name = "foobar-test-terraform.com"
- ip_address = "192.168.0.10"
-}
-
-resource "digitalocean_record" "foobar" {
- domain = "${digitalocean_domain.foobar.name}"
-
- name = "terraform"
- value = "a.b"
- type = "CNAME"
+ name = "terraform"
+ value = "a.foobar-test-terraform.com."
+ type = "CNAME"
}`
const testAccCheckDigitalOceanRecordConfig_external_cname = `
resource "digitalocean_domain" "foobar" {
- name = "foobar-test-terraform.com"
- ip_address = "192.168.0.10"
+ name = "%s"
+ ip_address = "192.168.0.10"
}
resource "digitalocean_record" "foobar" {
- domain = "${digitalocean_domain.foobar.name}"
+ domain = "${digitalocean_domain.foobar.name}"
- name = "terraform"
- value = "a.foobar-test-terraform.net."
- type = "CNAME"
+ name = "terraform"
+ value = "a.foobar-test-terraform.net."
+ type = "CNAME"
}`
diff --git a/helper/acctest/acctest.go b/helper/acctest/acctest.go
new file mode 100644
index 0000000000..9d31031a47
--- /dev/null
+++ b/helper/acctest/acctest.go
@@ -0,0 +1,2 @@
+// Package acctest contains helpers for Terraform acceptance tests
+package acctest
diff --git a/helper/acctest/random.go b/helper/acctest/random.go
new file mode 100644
index 0000000000..5317a58b4e
--- /dev/null
+++ b/helper/acctest/random.go
@@ -0,0 +1,35 @@
+package acctest
+
+import (
+ "math/rand"
+ "time"
+)
+
+// Helpers for generating random tidbits for use in identifiers to prevent
+// collisions in acceptance tests.
+
+// RandString generates a random alphanumeric string of the length specified
+func RandString(strlen int) string {
+ return RandStringFromCharSet(strlen, CharSetAlphaNum)
+}
+
+// RandStringFromCharSet generates a random string by selecting characters from
+// the charset provided
+func RandStringFromCharSet(strlen int, charSet string) string {
+ rand.Seed(time.Now().UTC().UnixNano())
+ result := make([]byte, strlen)
+ for i := 0; i < strlen; i++ {
+ result[i] = charSet[rand.Intn(len(charSet))]
+ }
+ return string(result)
+}
+
+const (
+ // CharSetAlphaNum is the alphanumeric character set for use with
+ // RandStringFromCharSet
+	CharSetAlphaNum = "abcdefghijklmnopqrstuvwxyz0123456789"
+
+ // CharSetAlpha is the alphabetical character set for use with
+ // RandStringFromCharSet
+ CharSetAlpha = "abcdefghijklmnopqrstuvwxyz"
+)
From 81779aa1d4f094a45ffd1d45986d4925970e8c08 Mon Sep 17 00:00:00 2001
From: Jakub Janczak
Date: Sun, 14 Dec 2014 18:54:01 +0100
Subject: [PATCH 392/664] Fixing the situation where you have an organization
app and want to have it in a private area instead
---
.../providers/heroku/resource_heroku_app.go | 87 +++++++++++++++----
1 file changed, 70 insertions(+), 17 deletions(-)
diff --git a/builtin/providers/heroku/resource_heroku_app.go b/builtin/providers/heroku/resource_heroku_app.go
index 4c2f3bf97a..a6672bd0df 100644
--- a/builtin/providers/heroku/resource_heroku_app.go
+++ b/builtin/providers/heroku/resource_heroku_app.go
@@ -9,13 +9,24 @@ import (
"github.com/hashicorp/terraform/helper/schema"
)
+type herokuApplication struct {
+ Name string
+ Region string
+ Stack string
+ GitURL string
+ WebURL string
+ OrganizationName string
+ Locked bool
+}
+
// type application is used to store all the details of a heroku app
type application struct {
Id string // Id of the resource
- App *heroku.App // The heroku application
- Client *heroku.Service // Client to interact with the heroku API
- Vars map[string]string // The vars on the application
+ App *herokuApplication // The heroku application
+ Client *heroku.Service // Client to interact with the heroku API
+ Vars map[string]string // The vars on the application
+ Organization bool // is the application organization app
}
// Updates the application to have the latest from remote
@@ -23,9 +34,37 @@ func (a *application) Update() error {
var errs []error
var err error
- a.App, err = a.Client.AppInfo(a.Id)
- if err != nil {
- errs = append(errs, err)
+ if !a.Organization {
+ app, err := a.Client.AppInfo(a.Id)
+ if err != nil {
+ errs = append(errs, err)
+ } else {
+ a.App = &herokuApplication{}
+ a.App.Name = app.Name
+ a.App.Region = app.Region.Name
+ a.App.Stack = app.Stack.Name
+ a.App.GitURL = app.GitURL
+ a.App.WebURL = app.WebURL
+ }
+ } else {
+ app, err := a.Client.OrganizationAppInfo(a.Id)
+ if err != nil {
+ errs = append(errs, err)
+ } else {
+ // No inheritance between OrganizationApp and App is killing it :/
+ a.App = &herokuApplication{}
+ a.App.Name = app.Name
+ a.App.Region = app.Region.Name
+ a.App.Stack = app.Stack.Name
+ a.App.GitURL = app.GitURL
+ a.App.WebURL = app.WebURL
+ if app.Organization != nil {
+ a.App.OrganizationName = app.Organization.Name
+ } else {
+				log.Println("[DEBUG] Expected organization information for an organization app, but none was returned")
+ }
+ a.App.Locked = app.Locked
+ }
}
a.Vars, err = retrieveConfigVars(a.Id, a.Client)
@@ -122,13 +161,18 @@ func resourceHerokuApp() *schema.Resource {
}
}
+func isOrganizationApp(d *schema.ResourceData) bool {
+ _, ok := d.GetOk("organization.0.name")
+ return ok
+}
+
func switchHerokuAppCreate(d *schema.ResourceData, meta interface{}) error {
orgCount := d.Get("organization.#").(int)
if orgCount > 1 {
return fmt.Errorf("Error Creating Heroku App: Only 1 Heroku Organization is permitted")
}
- if _, ok := d.GetOk("organization.0.name"); ok {
+ if isOrganizationApp(d) {
return resourceHerokuOrgAppCreate(d, meta)
} else {
return resourceHerokuAppCreate(d, meta)
@@ -236,13 +280,7 @@ func resourceHerokuOrgAppCreate(d *schema.ResourceData, meta interface{}) error
func resourceHerokuAppRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*heroku.Service)
- app, err := resourceHerokuAppRetrieve(d.Id(), client)
- if err != nil {
- return err
- }
- // Only set the config_vars that we have set in the configuration.
- // The "all_config_vars" field has all of them.
configVars := make(map[string]string)
care := make(map[string]struct{})
for _, v := range d.Get("config_vars").([]interface{}) {
@@ -250,6 +288,15 @@ func resourceHerokuAppRead(d *schema.ResourceData, meta interface{}) error {
care[k] = struct{}{}
}
}
+
+ _, organizationApp := d.GetOk("organization.0.name")
+ // Only set the config_vars that we have set in the configuration.
+ // The "all_config_vars" field has all of them.
+ app, err := resourceHerokuAppRetrieve(d.Id(), organizationApp, client)
+ if err != nil {
+ return err
+ }
+
for k, v := range app.Vars {
if _, ok := care[k]; ok {
configVars[k] = v
@@ -261,12 +308,18 @@ func resourceHerokuAppRead(d *schema.ResourceData, meta interface{}) error {
}
d.Set("name", app.App.Name)
- d.Set("stack", app.App.Stack.Name)
- d.Set("region", app.App.Region.Name)
+ d.Set("stack", app.App.Stack)
+ d.Set("region", app.App.Region)
d.Set("git_url", app.App.GitURL)
d.Set("web_url", app.App.WebURL)
d.Set("config_vars", configVarsValue)
d.Set("all_config_vars", app.Vars)
+ if organizationApp {
+ d.Set("organization.#", "1")
+ d.Set("organization.0.name", app.App.OrganizationName)
+ d.Set("organization.0.locked", app.App.Locked)
+ d.Set("organization.0.private", false)
+ }
// We know that the hostname on heroku will be the name+herokuapp.com
// You need this to do things like create DNS CNAME records
@@ -327,8 +380,8 @@ func resourceHerokuAppDelete(d *schema.ResourceData, meta interface{}) error {
return nil
}
-func resourceHerokuAppRetrieve(id string, client *heroku.Service) (*application, error) {
- app := application{Id: id, Client: client}
+func resourceHerokuAppRetrieve(id string, organization bool, client *heroku.Service) (*application, error) {
+ app := application{Id: id, Client: client, Organization: organization}
err := app.Update()
From c52765417abeba770bf0ba9fd60370bf3443ae38 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 4 Jan 2016 12:46:52 -0600
Subject: [PATCH 393/664] provider/heroku: add acctest covering orgs; fixup
issues
Switching up ResourceData interaction to not reach into the internal
dot-notation nesting.
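
Sketch of the pattern (mirroring the diff below): read the nested block as a
list instead of indexing into the flattened dot-notation keys.

    // Before: reaching into the internal flatmap representation.
    _, isOrg := d.GetOk("organization.0.name")

    // After: treat the block as a list and inspect its first element.
    v := d.Get("organization").([]interface{})
    isOrg = len(v) > 0 && v[0] != nil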
---
.../providers/heroku/resource_heroku_app.go | 51 ++++---
.../heroku/resource_heroku_app_test.go | 129 ++++++++++++++++--
2 files changed, 146 insertions(+), 34 deletions(-)
diff --git a/builtin/providers/heroku/resource_heroku_app.go b/builtin/providers/heroku/resource_heroku_app.go
index a6672bd0df..d8d5b316b3 100644
--- a/builtin/providers/heroku/resource_heroku_app.go
+++ b/builtin/providers/heroku/resource_heroku_app.go
@@ -9,6 +9,9 @@ import (
"github.com/hashicorp/terraform/helper/schema"
)
+// herokuApplication is a value type used to hold the details of an
+// application. We use this for common storage of values needed for the
+// heroku.App and heroku.OrganizationApp types
type herokuApplication struct {
Name string
Region string
@@ -134,10 +137,9 @@ func resourceHerokuApp() *schema.Resource {
},
"organization": &schema.Schema{
- Description: "Name of Organization to create application in. Leave blank for personal apps.",
- Type: schema.TypeList,
- Optional: true,
- ForceNew: true,
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
@@ -162,21 +164,16 @@ func resourceHerokuApp() *schema.Resource {
}
func isOrganizationApp(d *schema.ResourceData) bool {
- _, ok := d.GetOk("organization.0.name")
- return ok
+ v := d.Get("organization").([]interface{})
+ return len(v) > 0 && v[0] != nil
}
func switchHerokuAppCreate(d *schema.ResourceData, meta interface{}) error {
- orgCount := d.Get("organization.#").(int)
- if orgCount > 1 {
- return fmt.Errorf("Error Creating Heroku App: Only 1 Heroku Organization is permitted")
- }
-
if isOrganizationApp(d) {
return resourceHerokuOrgAppCreate(d, meta)
- } else {
- return resourceHerokuAppCreate(d, meta)
}
+
+ return resourceHerokuAppCreate(d, meta)
}
func resourceHerokuAppCreate(d *schema.ResourceData, meta interface{}) error {
@@ -225,19 +222,25 @@ func resourceHerokuOrgAppCreate(d *schema.ResourceData, meta interface{}) error
// Build up our creation options
opts := heroku.OrganizationAppCreateOpts{}
- if v := d.Get("organization.0.name"); v != nil {
+ v := d.Get("organization").([]interface{})
+ if len(v) > 1 {
+ return fmt.Errorf("Error Creating Heroku App: Only 1 Heroku Organization is permitted")
+ }
+ orgDetails := v[0].(map[string]interface{})
+
+ if v := orgDetails["name"]; v != nil {
vs := v.(string)
log.Printf("[DEBUG] Organization name: %s", vs)
opts.Organization = &vs
}
- if v := d.Get("organization.0.personal"); v != nil {
+ if v := orgDetails["personal"]; v != nil {
vs := v.(bool)
log.Printf("[DEBUG] Organization Personal: %t", vs)
opts.Personal = &vs
}
- if v := d.Get("organization.0.locked"); v != nil {
+ if v := orgDetails["locked"]; v != nil {
vs := v.(bool)
log.Printf("[DEBUG] Organization locked: %t", vs)
opts.Locked = &vs
@@ -289,7 +292,8 @@ func resourceHerokuAppRead(d *schema.ResourceData, meta interface{}) error {
}
}
- _, organizationApp := d.GetOk("organization.0.name")
+ organizationApp := isOrganizationApp(d)
+
// Only set the config_vars that we have set in the configuration.
// The "all_config_vars" field has all of them.
app, err := resourceHerokuAppRetrieve(d.Id(), organizationApp, client)
@@ -315,10 +319,15 @@ func resourceHerokuAppRead(d *schema.ResourceData, meta interface{}) error {
d.Set("config_vars", configVarsValue)
d.Set("all_config_vars", app.Vars)
if organizationApp {
- d.Set("organization.#", "1")
- d.Set("organization.0.name", app.App.OrganizationName)
- d.Set("organization.0.locked", app.App.Locked)
- d.Set("organization.0.private", false)
+ orgDetails := map[string]interface{}{
+ "name": app.App.OrganizationName,
+ "locked": app.App.Locked,
+ "private": false,
+ }
+ err := d.Set("organization", []interface{}{orgDetails})
+ if err != nil {
+ return err
+ }
}
// We know that the hostname on heroku will be the name+herokuapp.com
diff --git a/builtin/providers/heroku/resource_heroku_app_test.go b/builtin/providers/heroku/resource_heroku_app_test.go
index 185d4b7d70..17bb7afd14 100644
--- a/builtin/providers/heroku/resource_heroku_app_test.go
+++ b/builtin/providers/heroku/resource_heroku_app_test.go
@@ -2,6 +2,7 @@ package heroku
import (
"fmt"
+ "os"
"testing"
"github.com/cyberdelia/heroku-go/v3"
@@ -102,6 +103,31 @@ func TestAccHerokuApp_NukeVars(t *testing.T) {
})
}
+func TestAccHerokuApp_Organization(t *testing.T) {
+ var app heroku.OrganizationApp
+ org := os.Getenv("HEROKU_ORGANIZATION")
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() {
+ testAccPreCheck(t)
+ if org == "" {
+ t.Skip("HEROKU_ORGANIZATION is not set; skipping test.")
+ }
+ },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckHerokuAppDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: fmt.Sprintf(testAccCheckHerokuAppConfig_organization, org),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckHerokuAppExistsOrg("heroku_app.foobar", &app),
+ testAccCheckHerokuAppAttributesOrg(&app, org),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckHerokuAppDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*heroku.Service)
@@ -197,6 +223,39 @@ func testAccCheckHerokuAppAttributesNoVars(app *heroku.App) resource.TestCheckFu
}
}
+func testAccCheckHerokuAppAttributesOrg(app *heroku.OrganizationApp, org string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ client := testAccProvider.Meta().(*heroku.Service)
+
+ if app.Region.Name != "us" {
+ return fmt.Errorf("Bad region: %s", app.Region.Name)
+ }
+
+ if app.Stack.Name != "cedar-14" {
+ return fmt.Errorf("Bad stack: %s", app.Stack.Name)
+ }
+
+ if app.Name != "terraform-test-app" {
+ return fmt.Errorf("Bad name: %s", app.Name)
+ }
+
+ if app.Organization == nil || app.Organization.Name != org {
+ return fmt.Errorf("Bad org: %v", app.Organization)
+ }
+
+ vars, err := client.ConfigVarInfo(app.Name)
+ if err != nil {
+ return err
+ }
+
+ if vars["FOO"] != "bar" {
+ return fmt.Errorf("Bad config vars: %v", vars)
+ }
+
+ return nil
+ }
+}
+
func testAccCheckHerokuAppExists(n string, app *heroku.App) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
@@ -227,29 +286,73 @@ func testAccCheckHerokuAppExists(n string, app *heroku.App) resource.TestCheckFu
}
}
+func testAccCheckHerokuAppExistsOrg(n string, app *heroku.OrganizationApp) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No App Name is set")
+ }
+
+ client := testAccProvider.Meta().(*heroku.Service)
+
+ foundApp, err := client.OrganizationAppInfo(rs.Primary.ID)
+
+ if err != nil {
+ return err
+ }
+
+ if foundApp.Name != rs.Primary.ID {
+ return fmt.Errorf("App not found")
+ }
+
+ *app = *foundApp
+
+ return nil
+ }
+}
+
const testAccCheckHerokuAppConfig_basic = `
resource "heroku_app" "foobar" {
- name = "terraform-test-app"
- region = "us"
+ name = "terraform-test-app"
+ region = "us"
- config_vars {
- FOO = "bar"
- }
+ config_vars {
+ FOO = "bar"
+ }
}`
const testAccCheckHerokuAppConfig_updated = `
resource "heroku_app" "foobar" {
- name = "terraform-test-renamed"
- region = "us"
+ name = "terraform-test-renamed"
+ region = "us"
- config_vars {
- FOO = "bing"
- BAZ = "bar"
- }
+ config_vars {
+ FOO = "bing"
+ BAZ = "bar"
+ }
}`
const testAccCheckHerokuAppConfig_no_vars = `
resource "heroku_app" "foobar" {
- name = "terraform-test-app"
- region = "us"
+ name = "terraform-test-app"
+ region = "us"
+}`
+
+const testAccCheckHerokuAppConfig_organization = `
+resource "heroku_app" "foobar" {
+ name = "terraform-test-app"
+ region = "us"
+
+ organization {
+ name = "%s"
+ }
+
+ config_vars {
+ FOO = "bar"
+ }
}`
From 983b34291d048069c90dd6f24b32a34894f9bbce Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 4 Jan 2016 16:29:31 -0600
Subject: [PATCH 394/664] provider/google: fix InstanceGroupManager
CheckDestroy in tests
The nil check was just backwards. Vetted by comparing against other tests
with similar CheckDestroy implementations.
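For context, a minimal sketch of the CheckDestroy convention this restores (the function below is a hypothetical stand-in, not code from this patch): after destroy, a successful lookup is the failure case, so the check must return an error when the Get call succeeds.
```
package google

import "fmt"

// checkManagerGone is a hypothetical illustration of the CheckDestroy
// shape: the lookup is expected to fail once the resource is destroyed,
// so a nil error from the lookup means the resource still exists.
func checkManagerGone(lookup func() error) error {
	if err := lookup(); err == nil {
		return fmt.Errorf("InstanceGroupManager still exists")
	}
	return nil
}
```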
---
.../google/resource_compute_instance_group_manager_test.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/builtin/providers/google/resource_compute_instance_group_manager_test.go b/builtin/providers/google/resource_compute_instance_group_manager_test.go
index 4d5bd7c131..5bdb116518 100644
--- a/builtin/providers/google/resource_compute_instance_group_manager_test.go
+++ b/builtin/providers/google/resource_compute_instance_group_manager_test.go
@@ -69,7 +69,7 @@ func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error {
}
_, err := config.clientCompute.InstanceGroupManagers.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
- if err != nil {
+ if err == nil {
return fmt.Errorf("InstanceGroupManager still exists")
}
}
From adf4280aff833e025441df4d104a505101eeeb02 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 4 Jan 2016 16:34:31 -0600
Subject: [PATCH 395/664] provider/digitalocean: prevent collision on domain
acctest
---
.../resource_digitalocean_domain_test.go | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/builtin/providers/digitalocean/resource_digitalocean_domain_test.go b/builtin/providers/digitalocean/resource_digitalocean_domain_test.go
index 2801414ee7..a5484c1e10 100644
--- a/builtin/providers/digitalocean/resource_digitalocean_domain_test.go
+++ b/builtin/providers/digitalocean/resource_digitalocean_domain_test.go
@@ -5,12 +5,14 @@ import (
"testing"
"github.com/digitalocean/godo"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDigitalOceanDomain_Basic(t *testing.T) {
var domain godo.Domain
+ domainName := fmt.Sprintf("foobar-test-terraform-%s.com", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -18,12 +20,12 @@ func TestAccDigitalOceanDomain_Basic(t *testing.T) {
CheckDestroy: testAccCheckDigitalOceanDomainDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccCheckDigitalOceanDomainConfig_basic,
+ Config: fmt.Sprintf(testAccCheckDigitalOceanDomainConfig_basic, domainName),
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanDomainExists("digitalocean_domain.foobar", &domain),
- testAccCheckDigitalOceanDomainAttributes(&domain),
+ testAccCheckDigitalOceanDomainAttributes(&domain, domainName),
resource.TestCheckResourceAttr(
- "digitalocean_domain.foobar", "name", "foobar-test-terraform.com"),
+ "digitalocean_domain.foobar", "name", domainName),
resource.TestCheckResourceAttr(
"digitalocean_domain.foobar", "ip_address", "192.168.0.10"),
),
@@ -51,10 +53,10 @@ func testAccCheckDigitalOceanDomainDestroy(s *terraform.State) error {
return nil
}
-func testAccCheckDigitalOceanDomainAttributes(domain *godo.Domain) resource.TestCheckFunc {
+func testAccCheckDigitalOceanDomainAttributes(domain *godo.Domain, name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
- if domain.Name != "foobar-test-terraform.com" {
+ if domain.Name != name {
return fmt.Errorf("Bad name: %s", domain.Name)
}
@@ -94,6 +96,6 @@ func testAccCheckDigitalOceanDomainExists(n string, domain *godo.Domain) resourc
const testAccCheckDigitalOceanDomainConfig_basic = `
resource "digitalocean_domain" "foobar" {
- name = "foobar-test-terraform.com"
- ip_address = "192.168.0.10"
+ name = "%s"
+ ip_address = "192.168.0.10"
}`
From dd3a2aa4e937ebc3db7aa9dfe098a9e2177f2839 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Mon, 4 Jan 2016 16:56:26 -0600
Subject: [PATCH 396/664] provider/aws: Dynamo DB test/destroy updates
---
.../aws/resource_aws_dynamodb_table.go | 32 +++++++++++++++++++
.../aws/resource_aws_dynamodb_table_test.go | 13 +++++---
2 files changed, 40 insertions(+), 5 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go
index 0606cde2e8..7c9fd43348 100644
--- a/builtin/providers/aws/resource_aws_dynamodb_table.go
+++ b/builtin/providers/aws/resource_aws_dynamodb_table.go
@@ -7,6 +7,7 @@ import (
"strings"
"time"
+ "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/aws/aws-sdk-go/aws"
@@ -660,6 +661,37 @@ func resourceAwsDynamoDbTableDelete(d *schema.ResourceData, meta interface{}) er
if err != nil {
return err
}
+
+ params := &dynamodb.DescribeTableInput{
+ TableName: aws.String(d.Id()),
+ }
+
+ err = resource.Retry(10*time.Minute, func() error {
+ t, err := dynamodbconn.DescribeTable(params)
+ if err != nil {
+ if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" {
+ return nil
+ }
+ // Didn't recognize the error, so shouldn't retry.
+ return resource.RetryError{Err: err}
+ }
+
+ if t != nil {
+ if t.Table.TableStatus != nil && strings.ToLower(*t.Table.TableStatus) == "deleting" {
+ log.Printf("[DEBUG] AWS Dynamo DB table (%s) is still deleting", d.Id())
+ return fmt.Errorf("still deleting")
+ }
+ }
+
+ // we should be not found or deleting, so error here
+ return resource.RetryError{Err: fmt.Errorf("[ERR] Error deleting Dynamo DB table, unexpected state: %s", t)}
+ })
+
+ // check error from retry
+ if err != nil {
+ return err
+ }
+
return nil
}
diff --git a/builtin/providers/aws/resource_aws_dynamodb_table_test.go b/builtin/providers/aws/resource_aws_dynamodb_table_test.go
index 425cd204f9..114837ce38 100644
--- a/builtin/providers/aws/resource_aws_dynamodb_table_test.go
+++ b/builtin/providers/aws/resource_aws_dynamodb_table_test.go
@@ -2,6 +2,7 @@ package aws
import (
"fmt"
+ "log"
"testing"
"github.com/aws/aws-sdk-go/aws"
@@ -11,7 +12,7 @@ import (
"github.com/hashicorp/terraform/terraform"
)
-func TestAccAWSDynamoDbTable(t *testing.T) {
+func TestAccAWSDynamoDbTable_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@@ -101,21 +102,23 @@ func testAccCheckAWSDynamoDbTableDestroy(s *terraform.State) error {
continue
}
- fmt.Printf("[DEBUG] Checking if DynamoDB table %s exists", rs.Primary.ID)
+ log.Printf("[DEBUG] Checking if DynamoDB table %s exists", rs.Primary.ID)
// Check if queue exists by checking for its attributes
params := &dynamodb.DescribeTableInput{
TableName: aws.String(rs.Primary.ID),
}
+
_, err := conn.DescribeTable(params)
if err == nil {
return fmt.Errorf("DynamoDB table %s still exists. Failing!", rs.Primary.ID)
}
// Verify the error is what we want
- _, ok := err.(awserr.Error)
- if !ok {
- return err
+ if dbErr, ok := err.(awserr.Error); ok && dbErr.Code() == "ResourceNotFoundException" {
+ return nil
}
+
+ return err
}
return nil
From 4c6c5f57986c2ee6883a7076f06ffc30b798ed9a Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 4 Jan 2016 18:04:55 -0600
Subject: [PATCH 397/664] provider/google: Fix collisions in SQL instance
acctests
---
.../resource_sql_database_instance_test.go | 36 +++++++++++--------
1 file changed, 22 insertions(+), 14 deletions(-)
diff --git a/builtin/providers/google/resource_sql_database_instance_test.go b/builtin/providers/google/resource_sql_database_instance_test.go
index c8c32fc6b5..e31d43192d 100644
--- a/builtin/providers/google/resource_sql_database_instance_test.go
+++ b/builtin/providers/google/resource_sql_database_instance_test.go
@@ -20,6 +20,7 @@ import (
func TestAccGoogleSqlDatabaseInstance_basic(t *testing.T) {
var instance sqladmin.DatabaseInstance
+ databaseID := genRandInt()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -27,7 +28,8 @@ func TestAccGoogleSqlDatabaseInstance_basic(t *testing.T) {
CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleSqlDatabaseInstance_basic,
+ Config: fmt.Sprintf(
+ testGoogleSqlDatabaseInstance_basic, databaseID),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
@@ -41,6 +43,7 @@ func TestAccGoogleSqlDatabaseInstance_basic(t *testing.T) {
func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) {
var instance sqladmin.DatabaseInstance
+ databaseID := genRandInt()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -48,7 +51,8 @@ func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) {
CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleSqlDatabaseInstance_settings,
+ Config: fmt.Sprintf(
+ testGoogleSqlDatabaseInstance_settings, databaseID),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
@@ -62,6 +66,7 @@ func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) {
func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) {
var instance sqladmin.DatabaseInstance
+ databaseID := genRandInt()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -69,7 +74,8 @@ func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) {
CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleSqlDatabaseInstance_basic,
+ Config: fmt.Sprintf(
+ testGoogleSqlDatabaseInstance_basic, databaseID),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
@@ -78,7 +84,8 @@ func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) {
),
},
resource.TestStep{
- Config: testGoogleSqlDatabaseInstance_settings,
+ Config: fmt.Sprintf(
+ testGoogleSqlDatabaseInstance_settings, databaseID),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
@@ -92,6 +99,7 @@ func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) {
func TestAccGoogleSqlDatabaseInstance_settings_downgrade(t *testing.T) {
var instance sqladmin.DatabaseInstance
+ databaseID := genRandInt()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -99,7 +107,8 @@ func TestAccGoogleSqlDatabaseInstance_settings_downgrade(t *testing.T) {
CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleSqlDatabaseInstance_settings,
+ Config: fmt.Sprintf(
+ testGoogleSqlDatabaseInstance_settings, databaseID),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
@@ -108,7 +117,8 @@ func TestAccGoogleSqlDatabaseInstance_settings_downgrade(t *testing.T) {
),
},
resource.TestStep{
- Config: testGoogleSqlDatabaseInstance_basic,
+ Config: fmt.Sprintf(
+ testGoogleSqlDatabaseInstance_basic, databaseID),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleSqlDatabaseInstanceExists(
"google_sql_database_instance.instance", &instance),
@@ -319,9 +329,7 @@ func testAccGoogleSqlDatabaseInstanceDestroy(s *terraform.State) error {
return nil
}
-var databaseId = genRandInt()
-
-var testGoogleSqlDatabaseInstance_basic = fmt.Sprintf(`
+var testGoogleSqlDatabaseInstance_basic = `
resource "google_sql_database_instance" "instance" {
name = "tf-lw-%d"
region = "us-central"
@@ -330,9 +338,9 @@ resource "google_sql_database_instance" "instance" {
crash_safe_replication = false
}
}
-`, databaseId)
+`
-var testGoogleSqlDatabaseInstance_settings = fmt.Sprintf(`
+var testGoogleSqlDatabaseInstance_settings = `
resource "google_sql_database_instance" "instance" {
name = "tf-lw-%d"
region = "us-central"
@@ -361,11 +369,11 @@ resource "google_sql_database_instance" "instance" {
activation_policy = "ON_DEMAND"
}
}
-`, databaseId)
+`
// Note - this test is not feasible to run unless we generate
// backups first.
-var testGoogleSqlDatabaseInstance_replica = fmt.Sprintf(`
+var testGoogleSqlDatabaseInstance_replica = `
resource "google_sql_database_instance" "instance_master" {
name = "tf-lw-%d"
database_version = "MYSQL_5_6"
@@ -406,4 +414,4 @@ resource "google_sql_database_instance" "instance" {
verify_server_certificate = false
}
}
-`, genRandInt(), genRandInt())
+`
From c57aae34c157a5538b91e409be1983c114cbab39 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Mon, 4 Jan 2016 19:00:53 -0600
Subject: [PATCH 398/664] provider/google: skip failing test so build can go
green
Failure reason filed as
https://github.com/hashicorp/terraform/issues/4504; the fixing PR can
unskip this test once it resolves the underlying issue.
---
.../google/resource_compute_project_metadata_test.go | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/builtin/providers/google/resource_compute_project_metadata_test.go b/builtin/providers/google/resource_compute_project_metadata_test.go
index 2644433864..d4a9f07d28 100644
--- a/builtin/providers/google/resource_compute_project_metadata_test.go
+++ b/builtin/providers/google/resource_compute_project_metadata_test.go
@@ -13,6 +13,8 @@ import (
func TestAccComputeProjectMetadata_basic(t *testing.T) {
var project compute.Project
+ t.Skip("See https://github.com/hashicorp/terraform/issues/4504")
+
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@@ -193,7 +195,7 @@ resource "google_compute_project_metadata" "fizzbuzz" {
const testAccComputeProject_basic1_metadata = `
resource "google_compute_project_metadata" "fizzbuzz" {
metadata {
- kiwi = "papaya"
+ kiwi = "papaya"
finches = "darwinism"
}
}`
@@ -201,7 +203,7 @@ resource "google_compute_project_metadata" "fizzbuzz" {
const testAccComputeProject_modify0_metadata = `
resource "google_compute_project_metadata" "fizzbuzz" {
metadata {
- paper = "pen"
+ paper = "pen"
genghis_khan = "french bread"
happy = "smiling"
}
@@ -210,7 +212,7 @@ resource "google_compute_project_metadata" "fizzbuzz" {
const testAccComputeProject_modify1_metadata = `
resource "google_compute_project_metadata" "fizzbuzz" {
metadata {
- paper = "pen"
+ paper = "pen"
paris = "french bread"
happy = "laughing"
}
From f72322ca3339c6500f78ac1726d565ef2281807a Mon Sep 17 00:00:00 2001
From: Colin Hebert
Date: Tue, 5 Jan 2016 03:46:24 +0100
Subject: [PATCH 399/664] Add Elem and Set to the network set
---
builtin/providers/docker/resource_docker_container.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/builtin/providers/docker/resource_docker_container.go b/builtin/providers/docker/resource_docker_container.go
index f20ff43f08..6e5bc07ddf 100644
--- a/builtin/providers/docker/resource_docker_container.go
+++ b/builtin/providers/docker/resource_docker_container.go
@@ -243,6 +243,8 @@ func resourceDockerContainer() *schema.Resource {
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: stringSetHash,
},
},
}
From 82d8e48a27077d28890acacd480f26a520e6568f Mon Sep 17 00:00:00 2001
From: Garrett Barboza
Date: Mon, 4 Jan 2016 21:07:52 -0600
Subject: [PATCH 400/664] Add iam_server_certificate nuances to docs.
AWS does some funky stuff to handle all the variations in certificates that CAs like to hand out to users. This commit adds a note about this and details how to avoid issues. See #3837 for more information.
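As a hedged, illustrative sketch only (not part of this patch or of Terraform itself), this is roughly the kind of pre-processing the new note suggests: normalize DOS line breaks and keep only the leaf certificate for `certificate_body`, moving the rest of the bundle into `certificate_chain`.
```
package certsplit

import (
	"bytes"
	"encoding/pem"
	"io/ioutil"
)

// splitBundle is a hypothetical sketch: it normalizes DOS line breaks
// and splits a CA-issued bundle into the leaf certificate (for
// certificate_body) and the remaining chain (for certificate_chain).
func splitBundle(path string) (body, chain []byte, err error) {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, nil, err
	}
	raw = bytes.Replace(raw, []byte("\r\n"), []byte("\n"), -1)

	first := true
	rest := raw
	for {
		var block *pem.Block
		block, rest = pem.Decode(rest)
		if block == nil {
			break
		}
		enc := pem.EncodeToMemory(block)
		if first {
			body = enc
			first = false
		} else {
			chain = append(chain, enc...)
		}
	}
	return body, chain, nil
}
```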
---
.../docs/providers/aws/r/iam_server_certificate.html.markdown | 2 ++
1 file changed, 2 insertions(+)
diff --git a/website/source/docs/providers/aws/r/iam_server_certificate.html.markdown b/website/source/docs/providers/aws/r/iam_server_certificate.html.markdown
index 820bc6f896..8fe189887d 100644
--- a/website/source/docs/providers/aws/r/iam_server_certificate.html.markdown
+++ b/website/source/docs/providers/aws/r/iam_server_certificate.html.markdown
@@ -91,6 +91,8 @@ The following arguments are supported:
AWS CloudFront, the path must be in format `/cloudfront/your_path_here`.
See [IAM Identifiers][1] for more details on IAM Paths.
+~> **NOTE:** AWS performs behind-the-scenes modifications to some certificate files if they do not adhere to a specific format. These modifications will result in terraform forever believing that it needs to update the resources since the local and AWS file contents will not match after these modifications occur. In order to prevent this from happening, you must ensure that all your PEM-encoded files use UNIX line-breaks and that `certificate_body` contains only one certificate. All other certificates should go in `certificate_chain`. It is common for some Certificate Authorities to issue certificate files that have DOS line-breaks and that are actually multiple certificates concatenated together in order to form a full certificate chain.
+
## Attributes Reference
* `id` - The unique Server Certificate name
From 5c6304ed57c192eef3a252001172dcd662f09922 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 5 Jan 2016 08:29:59 -0600
Subject: [PATCH 401/664] provider/google: skip remainder of metadata tests
Any of the tests can fail due to
https://github.com/hashicorp/terraform/issues/4504
---
.../google/resource_compute_project_metadata_test.go | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/builtin/providers/google/resource_compute_project_metadata_test.go b/builtin/providers/google/resource_compute_project_metadata_test.go
index d4a9f07d28..cb0145d8d3 100644
--- a/builtin/providers/google/resource_compute_project_metadata_test.go
+++ b/builtin/providers/google/resource_compute_project_metadata_test.go
@@ -38,6 +38,8 @@ func TestAccComputeProjectMetadata_basic(t *testing.T) {
func TestAccComputeProjectMetadata_modify_1(t *testing.T) {
var project compute.Project
+ t.Skip("See https://github.com/hashicorp/terraform/issues/4504")
+
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@@ -74,6 +76,8 @@ func TestAccComputeProjectMetadata_modify_1(t *testing.T) {
func TestAccComputeProjectMetadata_modify_2(t *testing.T) {
var project compute.Project
+ t.Skip("See https://github.com/hashicorp/terraform/issues/4504")
+
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
From 094e380bb1feea4ef754327b19c07a0cfcf19a3d Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 5 Jan 2016 09:53:19 -0500
Subject: [PATCH 402/664] Add Solaris builds of Terraform
---
scripts/build.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/build.sh b/scripts/build.sh
index 222e1879e4..a4cf523919 100755
--- a/scripts/build.sh
+++ b/scripts/build.sh
@@ -16,7 +16,7 @@ GIT_DIRTY=$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)
# Determine the arch/os combos we're building for
XC_ARCH=${XC_ARCH:-"386 amd64 arm"}
-XC_OS=${XC_OS:-linux darwin windows freebsd openbsd}
+XC_OS=${XC_OS:-linux darwin windows freebsd openbsd solaris}
# Get dependencies unless running in quick mode
From e916bd152759ebb90bd3219d05cd2de0fd93b1be Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 5 Jan 2016 09:06:32 -0600
Subject: [PATCH 403/664] provider/google: enhance storage acctests to avoid
collisions
Generate bucket names and object names per test instead of once at the
top level. This should help avoid failures like this one:
https://travis-ci.org/hashicorp/terraform/jobs/100254008
All storage tests were run against this commit (a sketch of the per-test
naming pattern follows the output below):
```
TF_ACC=1 go test -v ./builtin/providers/google -run TestAccGoogleStorage
=== RUN TestAccGoogleStorageBucketAcl_basic
--- PASS: TestAccGoogleStorageBucketAcl_basic (8.90s)
=== RUN TestAccGoogleStorageBucketAcl_upgrade
--- PASS: TestAccGoogleStorageBucketAcl_upgrade (14.18s)
=== RUN TestAccGoogleStorageBucketAcl_downgrade
--- PASS: TestAccGoogleStorageBucketAcl_downgrade (12.83s)
=== RUN TestAccGoogleStorageBucketAcl_predefined
--- PASS: TestAccGoogleStorageBucketAcl_predefined (4.51s)
=== RUN TestAccGoogleStorageObject_basic
--- PASS: TestAccGoogleStorageObject_basic (3.77s)
=== RUN TestAccGoogleStorageObjectAcl_basic
--- PASS: TestAccGoogleStorageObjectAcl_basic (4.85s)
=== RUN TestAccGoogleStorageObjectAcl_upgrade
--- PASS: TestAccGoogleStorageObjectAcl_upgrade (7.68s)
=== RUN TestAccGoogleStorageObjectAcl_downgrade
--- PASS: TestAccGoogleStorageObjectAcl_downgrade (7.37s)
=== RUN TestAccGoogleStorageObjectAcl_predefined
--- PASS: TestAccGoogleStorageObjectAcl_predefined (4.16s)
PASS
ok github.com/hashicorp/terraform/builtin/providers/google 68.275s
```
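A rough sketch of the per-test naming pattern introduced here (the helper name is hypothetical; the real code defines similar per-resource helpers): each test builds its own randomized name and interpolates it into the config, so parallel runs no longer collide on a shared package-level value.
```
package google

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/acctest"
)

// newAclBucketName is a hypothetical illustration of per-test naming:
// every call yields a fresh randomized bucket name instead of reusing
// one value computed at package init.
func newAclBucketName() string {
	return fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt())
}
```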
---
.../resource_storage_bucket_acl_test.go | 86 +++++++-----
.../google/resource_storage_bucket_test.go | 73 +++++-----
.../resource_storage_object_acl_test.go | 131 ++++++++++--------
helper/acctest/random.go | 13 +-
4 files changed, 178 insertions(+), 125 deletions(-)
diff --git a/builtin/providers/google/resource_storage_bucket_acl_test.go b/builtin/providers/google/resource_storage_bucket_acl_test.go
index 6f23d1882e..a8b11e8f62 100644
--- a/builtin/providers/google/resource_storage_bucket_acl_test.go
+++ b/builtin/providers/google/resource_storage_bucket_acl_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -18,19 +19,22 @@ var roleEntityBasic3_owner = "OWNER:user-yetanotheremail@gmail.com"
var roleEntityBasic3_reader = "READER:user-yetanotheremail@gmail.com"
-var testAclBucketName = fmt.Sprintf("%s-%d", "tf-test-acl-bucket", genRandInt())
+func testAclBucketName() string {
+ return fmt.Sprintf("%s-%d", "tf-test-acl-bucket", acctest.RandInt())
+}
func TestAccGoogleStorageBucketAcl_basic(t *testing.T) {
+ bucketName := testAclBucketName()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccGoogleStorageBucketAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleStorageBucketsAclBasic1,
+ Config: testGoogleStorageBucketsAclBasic1(bucketName),
Check: resource.ComposeTestCheckFunc(
- testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic1),
- testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2),
+ testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic1),
+ testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2),
),
},
},
@@ -38,33 +42,34 @@ func TestAccGoogleStorageBucketAcl_basic(t *testing.T) {
}
func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) {
+ bucketName := testAclBucketName()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccGoogleStorageBucketAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleStorageBucketsAclBasic1,
+ Config: testGoogleStorageBucketsAclBasic1(bucketName),
Check: resource.ComposeTestCheckFunc(
- testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic1),
- testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2),
+ testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic1),
+ testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2),
),
},
resource.TestStep{
- Config: testGoogleStorageBucketsAclBasic2,
+ Config: testGoogleStorageBucketsAclBasic2(bucketName),
Check: resource.ComposeTestCheckFunc(
- testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2),
- testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_owner),
+ testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2),
+ testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic3_owner),
),
},
resource.TestStep{
- Config: testGoogleStorageBucketsAclBasicDelete,
+ Config: testGoogleStorageBucketsAclBasicDelete(bucketName),
Check: resource.ComposeTestCheckFunc(
- testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic1),
- testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic2),
- testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic3_owner),
+ testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic1),
+ testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic2),
+ testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic3_owner),
),
},
},
@@ -72,33 +77,34 @@ func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) {
}
func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) {
+ bucketName := testAclBucketName()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccGoogleStorageBucketAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleStorageBucketsAclBasic2,
+ Config: testGoogleStorageBucketsAclBasic2(bucketName),
Check: resource.ComposeTestCheckFunc(
- testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2),
- testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_owner),
+ testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2),
+ testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic3_owner),
),
},
resource.TestStep{
- Config: testGoogleStorageBucketsAclBasic3,
+ Config: testGoogleStorageBucketsAclBasic3(bucketName),
Check: resource.ComposeTestCheckFunc(
- testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2),
- testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_reader),
+ testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2),
+ testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic3_reader),
),
},
resource.TestStep{
- Config: testGoogleStorageBucketsAclBasicDelete,
+ Config: testGoogleStorageBucketsAclBasicDelete(bucketName),
Check: resource.ComposeTestCheckFunc(
- testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic1),
- testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic2),
- testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic3_owner),
+ testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic1),
+ testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic2),
+ testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic3_owner),
),
},
},
@@ -112,7 +118,7 @@ func TestAccGoogleStorageBucketAcl_predefined(t *testing.T) {
CheckDestroy: testAccGoogleStorageBucketAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleStorageBucketsAclPredefined,
+ Config: testGoogleStorageBucketsAclPredefined(bucketName),
},
},
})
@@ -172,7 +178,8 @@ func testAccGoogleStorageBucketAclDestroy(s *terraform.State) error {
return nil
}
-var testGoogleStorageBucketsAclBasic1 = fmt.Sprintf(`
+func testGoogleStorageBucketsAclBasic1(bucketName string) string {
+ return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
}
@@ -181,9 +188,11 @@ resource "google_storage_bucket_acl" "acl" {
bucket = "${google_storage_bucket.bucket.name}"
role_entity = ["%s", "%s"]
}
-`, testAclBucketName, roleEntityBasic1, roleEntityBasic2)
+`, bucketName, roleEntityBasic1, roleEntityBasic2)
+}
-var testGoogleStorageBucketsAclBasic2 = fmt.Sprintf(`
+func testGoogleStorageBucketsAclBasic2(bucketName string) string {
+ return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
}
@@ -192,9 +201,11 @@ resource "google_storage_bucket_acl" "acl" {
bucket = "${google_storage_bucket.bucket.name}"
role_entity = ["%s", "%s"]
}
-`, testAclBucketName, roleEntityBasic2, roleEntityBasic3_owner)
+`, bucketName, roleEntityBasic2, roleEntityBasic3_owner)
+}
-var testGoogleStorageBucketsAclBasicDelete = fmt.Sprintf(`
+func testGoogleStorageBucketsAclBasicDelete(bucketName string) string {
+ return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
}
@@ -203,9 +214,11 @@ resource "google_storage_bucket_acl" "acl" {
bucket = "${google_storage_bucket.bucket.name}"
role_entity = []
}
-`, testAclBucketName)
+`, bucketName)
+}
-var testGoogleStorageBucketsAclBasic3 = fmt.Sprintf(`
+func testGoogleStorageBucketsAclBasic3(bucketName string) string {
+ return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
}
@@ -214,9 +227,11 @@ resource "google_storage_bucket_acl" "acl" {
bucket = "${google_storage_bucket.bucket.name}"
role_entity = ["%s", "%s"]
}
-`, testAclBucketName, roleEntityBasic2, roleEntityBasic3_reader)
+`, bucketName, roleEntityBasic2, roleEntityBasic3_reader)
+}
-var testGoogleStorageBucketsAclPredefined = fmt.Sprintf(`
+func testGoogleStorageBucketsAclPredefined(bucketName string) string {
+ return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
}
@@ -226,4 +241,5 @@ resource "google_storage_bucket_acl" "acl" {
predefined_acl = "projectPrivate"
default_acl = "projectPrivate"
}
-`, testAclBucketName)
+`, bucketName)
+}
diff --git a/builtin/providers/google/resource_storage_bucket_test.go b/builtin/providers/google/resource_storage_bucket_test.go
index a5e7ea6361..35fc8f3081 100644
--- a/builtin/providers/google/resource_storage_bucket_test.go
+++ b/builtin/providers/google/resource_storage_bucket_test.go
@@ -5,6 +5,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -13,7 +14,7 @@ import (
)
func TestAccStorage_basic(t *testing.T) {
- var bucketName string
+ bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt())
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -21,10 +22,10 @@ func TestAccStorage_basic(t *testing.T) {
CheckDestroy: testAccGoogleStorageDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleStorageBucketsReaderDefaults,
+ Config: testGoogleStorageBucketsReaderDefaults(bucketName),
Check: resource.ComposeTestCheckFunc(
testAccCheckCloudStorageBucketExists(
- "google_storage_bucket.bucket", &bucketName),
+ "google_storage_bucket.bucket", bucketName),
resource.TestCheckResourceAttr(
"google_storage_bucket.bucket", "location", "US"),
resource.TestCheckResourceAttr(
@@ -36,7 +37,7 @@ func TestAccStorage_basic(t *testing.T) {
}
func TestAccStorageCustomAttributes(t *testing.T) {
- var bucketName string
+ bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt())
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -44,10 +45,10 @@ func TestAccStorageCustomAttributes(t *testing.T) {
CheckDestroy: testAccGoogleStorageDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleStorageBucketsReaderCustomAttributes,
+ Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName),
Check: resource.ComposeTestCheckFunc(
testAccCheckCloudStorageBucketExists(
- "google_storage_bucket.bucket", &bucketName),
+ "google_storage_bucket.bucket", bucketName),
resource.TestCheckResourceAttr(
"google_storage_bucket.bucket", "location", "EU"),
resource.TestCheckResourceAttr(
@@ -59,7 +60,7 @@ func TestAccStorageCustomAttributes(t *testing.T) {
}
func TestAccStorageBucketUpdate(t *testing.T) {
- var bucketName string
+ bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt())
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -67,10 +68,10 @@ func TestAccStorageBucketUpdate(t *testing.T) {
CheckDestroy: testAccGoogleStorageDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleStorageBucketsReaderDefaults,
+ Config: testGoogleStorageBucketsReaderDefaults(bucketName),
Check: resource.ComposeTestCheckFunc(
testAccCheckCloudStorageBucketExists(
- "google_storage_bucket.bucket", &bucketName),
+ "google_storage_bucket.bucket", bucketName),
resource.TestCheckResourceAttr(
"google_storage_bucket.bucket", "location", "US"),
resource.TestCheckResourceAttr(
@@ -78,10 +79,10 @@ func TestAccStorageBucketUpdate(t *testing.T) {
),
},
resource.TestStep{
- Config: testGoogleStorageBucketsReaderCustomAttributes,
+ Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName),
Check: resource.ComposeTestCheckFunc(
testAccCheckCloudStorageBucketExists(
- "google_storage_bucket.bucket", &bucketName),
+ "google_storage_bucket.bucket", bucketName),
resource.TestCheckResourceAttr(
"google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"),
resource.TestCheckResourceAttr(
@@ -95,7 +96,7 @@ func TestAccStorageBucketUpdate(t *testing.T) {
}
func TestAccStorageForceDestroy(t *testing.T) {
- var bucketName string
+ bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt())
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -103,29 +104,29 @@ func TestAccStorageForceDestroy(t *testing.T) {
CheckDestroy: testAccGoogleStorageDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleStorageBucketsReaderCustomAttributes,
+ Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName),
Check: resource.ComposeTestCheckFunc(
testAccCheckCloudStorageBucketExists(
- "google_storage_bucket.bucket", &bucketName),
+ "google_storage_bucket.bucket", bucketName),
),
},
resource.TestStep{
- Config: testGoogleStorageBucketsReaderCustomAttributes,
+ Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName),
Check: resource.ComposeTestCheckFunc(
- testAccCheckCloudStorageBucketPutItem(&bucketName),
+ testAccCheckCloudStorageBucketPutItem(bucketName),
),
},
resource.TestStep{
Config: "",
Check: resource.ComposeTestCheckFunc(
- testAccCheckCloudStorageBucketMissing(&bucketName),
+ testAccCheckCloudStorageBucketMissing(bucketName),
),
},
},
})
}
-func testAccCheckCloudStorageBucketExists(n string, bucketName *string) resource.TestCheckFunc {
+func testAccCheckCloudStorageBucketExists(n string, bucketName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
@@ -147,12 +148,14 @@ func testAccCheckCloudStorageBucketExists(n string, bucketName *string) resource
return fmt.Errorf("Bucket not found")
}
- *bucketName = found.Name
+ if found.Name != bucketName {
+ return fmt.Errorf("expected name %s, got %s", bucketName, found.Name)
+ }
return nil
}
}
-func testAccCheckCloudStorageBucketPutItem(bucketName *string) resource.TestCheckFunc {
+func testAccCheckCloudStorageBucketPutItem(bucketName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
@@ -161,7 +164,7 @@ func testAccCheckCloudStorageBucketPutItem(bucketName *string) resource.TestChec
object := &storage.Object{Name: "bucketDestroyTestFile"}
// This needs to use Media(io.Reader) call, otherwise it does not go to /upload API and fails
- if res, err := config.clientStorage.Objects.Insert(*bucketName, object).Media(dataReader).Do(); err == nil {
+ if res, err := config.clientStorage.Objects.Insert(bucketName, object).Media(dataReader).Do(); err == nil {
fmt.Printf("Created object %v at location %v\n\n", res.Name, res.SelfLink)
} else {
return fmt.Errorf("Objects.Insert failed: %v", err)
@@ -171,20 +174,20 @@ func testAccCheckCloudStorageBucketPutItem(bucketName *string) resource.TestChec
}
}
-func testAccCheckCloudStorageBucketMissing(bucketName *string) resource.TestCheckFunc {
+func testAccCheckCloudStorageBucketMissing(bucketName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
- _, err := config.clientStorage.Buckets.Get(*bucketName).Do()
+ _, err := config.clientStorage.Buckets.Get(bucketName).Do()
if err == nil {
- return fmt.Errorf("Found %s", *bucketName)
+ return fmt.Errorf("Found %s", bucketName)
}
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
return nil
- } else {
- return err
}
+
+ return err
}
}
@@ -205,19 +208,21 @@ func testAccGoogleStorageDestroy(s *terraform.State) error {
return nil
}
-var randInt = genRandInt()
-
-var testGoogleStorageBucketsReaderDefaults = fmt.Sprintf(`
+func testGoogleStorageBucketsReaderDefaults(bucketName string) string {
+ return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
- name = "tf-test-bucket-%d"
+ name = "%s"
+}
+`, bucketName)
}
-`, randInt)
-var testGoogleStorageBucketsReaderCustomAttributes = fmt.Sprintf(`
+func testGoogleStorageBucketsReaderCustomAttributes(bucketName string) string {
+ return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
- name = "tf-test-bucket-%d"
+ name = "%s"
predefined_acl = "publicReadWrite"
location = "EU"
force_destroy = "true"
}
-`, randInt)
+`, bucketName)
+}
diff --git a/builtin/providers/google/resource_storage_object_acl_test.go b/builtin/providers/google/resource_storage_object_acl_test.go
index ff14f683c8..5cac86a14b 100644
--- a/builtin/providers/google/resource_storage_object_acl_test.go
+++ b/builtin/providers/google/resource_storage_object_acl_test.go
@@ -14,10 +14,15 @@ import (
)
var tfObjectAcl, errObjectAcl = ioutil.TempFile("", "tf-gce-test")
-var testAclObjectName = fmt.Sprintf("%s-%d", "tf-test-acl-object",
- rand.New(rand.NewSource(time.Now().UnixNano())).Int())
+
+func testAclObjectName() string {
+ return fmt.Sprintf("%s-%d", "tf-test-acl-object",
+ rand.New(rand.NewSource(time.Now().UnixNano())).Int())
+}
func TestAccGoogleStorageObjectAcl_basic(t *testing.T) {
+ bucketName := testAclBucketName()
+ objectName := testAclObjectName()
objectData := []byte("data data data")
ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644)
resource.Test(t, resource.TestCase{
@@ -31,12 +36,12 @@ func TestAccGoogleStorageObjectAcl_basic(t *testing.T) {
CheckDestroy: testAccGoogleStorageObjectAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleStorageObjectsAclBasic1,
+ Config: testGoogleStorageObjectsAclBasic1(bucketName, objectName),
Check: resource.ComposeTestCheckFunc(
- testAccCheckGoogleStorageObjectAcl(testAclBucketName,
- testAclObjectName, roleEntityBasic1),
- testAccCheckGoogleStorageObjectAcl(testAclBucketName,
- testAclObjectName, roleEntityBasic2),
+ testAccCheckGoogleStorageObjectAcl(bucketName,
+ objectName, roleEntityBasic1),
+ testAccCheckGoogleStorageObjectAcl(bucketName,
+ objectName, roleEntityBasic2),
),
},
},
@@ -44,6 +49,8 @@ func TestAccGoogleStorageObjectAcl_basic(t *testing.T) {
}
func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) {
+ bucketName := testAclBucketName()
+ objectName := testAclObjectName()
objectData := []byte("data data data")
ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644)
resource.Test(t, resource.TestCase{
@@ -57,34 +64,34 @@ func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) {
CheckDestroy: testAccGoogleStorageObjectAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleStorageObjectsAclBasic1,
+ Config: testGoogleStorageObjectsAclBasic1(bucketName, objectName),
Check: resource.ComposeTestCheckFunc(
- testAccCheckGoogleStorageObjectAcl(testAclBucketName,
- testAclObjectName, roleEntityBasic1),
- testAccCheckGoogleStorageObjectAcl(testAclBucketName,
- testAclObjectName, roleEntityBasic2),
+ testAccCheckGoogleStorageObjectAcl(bucketName,
+ objectName, roleEntityBasic1),
+ testAccCheckGoogleStorageObjectAcl(bucketName,
+ objectName, roleEntityBasic2),
),
},
resource.TestStep{
- Config: testGoogleStorageObjectsAclBasic2,
+ Config: testGoogleStorageObjectsAclBasic2(bucketName, objectName),
Check: resource.ComposeTestCheckFunc(
- testAccCheckGoogleStorageObjectAcl(testAclBucketName,
- testAclObjectName, roleEntityBasic2),
- testAccCheckGoogleStorageObjectAcl(testAclBucketName,
- testAclObjectName, roleEntityBasic3_owner),
+ testAccCheckGoogleStorageObjectAcl(bucketName,
+ objectName, roleEntityBasic2),
+ testAccCheckGoogleStorageObjectAcl(bucketName,
+ objectName, roleEntityBasic3_owner),
),
},
resource.TestStep{
- Config: testGoogleStorageObjectsAclBasicDelete,
+ Config: testGoogleStorageObjectsAclBasicDelete(bucketName, objectName),
Check: resource.ComposeTestCheckFunc(
- testAccCheckGoogleStorageObjectAclDelete(testAclBucketName,
- testAclObjectName, roleEntityBasic1),
- testAccCheckGoogleStorageObjectAclDelete(testAclBucketName,
- testAclObjectName, roleEntityBasic2),
- testAccCheckGoogleStorageObjectAclDelete(testAclBucketName,
- testAclObjectName, roleEntityBasic3_reader),
+ testAccCheckGoogleStorageObjectAclDelete(bucketName,
+ objectName, roleEntityBasic1),
+ testAccCheckGoogleStorageObjectAclDelete(bucketName,
+ objectName, roleEntityBasic2),
+ testAccCheckGoogleStorageObjectAclDelete(bucketName,
+ objectName, roleEntityBasic3_reader),
),
},
},
@@ -92,6 +99,8 @@ func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) {
}
func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) {
+ bucketName := testAclBucketName()
+ objectName := testAclObjectName()
objectData := []byte("data data data")
ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644)
resource.Test(t, resource.TestCase{
@@ -105,34 +114,34 @@ func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) {
CheckDestroy: testAccGoogleStorageObjectAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleStorageObjectsAclBasic2,
+ Config: testGoogleStorageObjectsAclBasic2(bucketName, objectName),
Check: resource.ComposeTestCheckFunc(
- testAccCheckGoogleStorageObjectAcl(testAclBucketName,
- testAclObjectName, roleEntityBasic2),
- testAccCheckGoogleStorageObjectAcl(testAclBucketName,
- testAclObjectName, roleEntityBasic3_owner),
+ testAccCheckGoogleStorageObjectAcl(bucketName,
+ objectName, roleEntityBasic2),
+ testAccCheckGoogleStorageObjectAcl(bucketName,
+ objectName, roleEntityBasic3_owner),
),
},
resource.TestStep{
- Config: testGoogleStorageObjectsAclBasic3,
+ Config: testGoogleStorageObjectsAclBasic3(bucketName, objectName),
Check: resource.ComposeTestCheckFunc(
- testAccCheckGoogleStorageObjectAcl(testAclBucketName,
- testAclObjectName, roleEntityBasic2),
- testAccCheckGoogleStorageObjectAcl(testAclBucketName,
- testAclObjectName, roleEntityBasic3_reader),
+ testAccCheckGoogleStorageObjectAcl(bucketName,
+ objectName, roleEntityBasic2),
+ testAccCheckGoogleStorageObjectAcl(bucketName,
+ objectName, roleEntityBasic3_reader),
),
},
resource.TestStep{
- Config: testGoogleStorageObjectsAclBasicDelete,
+ Config: testGoogleStorageObjectsAclBasicDelete(bucketName, objectName),
Check: resource.ComposeTestCheckFunc(
- testAccCheckGoogleStorageObjectAclDelete(testAclBucketName,
- testAclObjectName, roleEntityBasic1),
- testAccCheckGoogleStorageObjectAclDelete(testAclBucketName,
- testAclObjectName, roleEntityBasic2),
- testAccCheckGoogleStorageObjectAclDelete(testAclBucketName,
- testAclObjectName, roleEntityBasic3_reader),
+ testAccCheckGoogleStorageObjectAclDelete(bucketName,
+ objectName, roleEntityBasic1),
+ testAccCheckGoogleStorageObjectAclDelete(bucketName,
+ objectName, roleEntityBasic2),
+ testAccCheckGoogleStorageObjectAclDelete(bucketName,
+ objectName, roleEntityBasic3_reader),
),
},
},
@@ -140,6 +149,8 @@ func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) {
}
func TestAccGoogleStorageObjectAcl_predefined(t *testing.T) {
+ bucketName := testAclBucketName()
+ objectName := testAclObjectName()
objectData := []byte("data data data")
ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644)
resource.Test(t, resource.TestCase{
@@ -153,7 +164,7 @@ func TestAccGoogleStorageObjectAcl_predefined(t *testing.T) {
CheckDestroy: testAccGoogleStorageObjectAclDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testGoogleStorageObjectsAclPredefined,
+ Config: testGoogleStorageObjectsAclPredefined(bucketName, objectName),
},
},
})
@@ -216,7 +227,8 @@ func testAccGoogleStorageObjectAclDestroy(s *terraform.State) error {
return nil
}
-var testGoogleStorageObjectsAclBasicDelete = fmt.Sprintf(`
+func testGoogleStorageObjectsAclBasicDelete(bucketName string, objectName string) string {
+ return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
}
@@ -232,9 +244,11 @@ resource "google_storage_object_acl" "acl" {
bucket = "${google_storage_bucket.bucket.name}"
role_entity = []
}
-`, testAclBucketName, testAclObjectName, tfObjectAcl.Name())
+`, bucketName, objectName, tfObjectAcl.Name())
+}
-var testGoogleStorageObjectsAclBasic1 = fmt.Sprintf(`
+func testGoogleStorageObjectsAclBasic1(bucketName string, objectName string) string {
+ return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
}
@@ -250,10 +264,12 @@ resource "google_storage_object_acl" "acl" {
bucket = "${google_storage_bucket.bucket.name}"
role_entity = ["%s", "%s"]
}
-`, testAclBucketName, testAclObjectName, tfObjectAcl.Name(),
- roleEntityBasic1, roleEntityBasic2)
+`, bucketName, objectName, tfObjectAcl.Name(),
+ roleEntityBasic1, roleEntityBasic2)
+}
-var testGoogleStorageObjectsAclBasic2 = fmt.Sprintf(`
+func testGoogleStorageObjectsAclBasic2(bucketName string, objectName string) string {
+ return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
}
@@ -269,10 +285,12 @@ resource "google_storage_object_acl" "acl" {
bucket = "${google_storage_bucket.bucket.name}"
role_entity = ["%s", "%s"]
}
-`, testAclBucketName, testAclObjectName, tfObjectAcl.Name(),
- roleEntityBasic2, roleEntityBasic3_owner)
+`, bucketName, objectName, tfObjectAcl.Name(),
+ roleEntityBasic2, roleEntityBasic3_owner)
+}
-var testGoogleStorageObjectsAclBasic3 = fmt.Sprintf(`
+func testGoogleStorageObjectsAclBasic3(bucketName string, objectName string) string {
+ return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
}
@@ -288,10 +306,12 @@ resource "google_storage_object_acl" "acl" {
bucket = "${google_storage_bucket.bucket.name}"
role_entity = ["%s", "%s"]
}
-`, testAclBucketName, testAclObjectName, tfObjectAcl.Name(),
- roleEntityBasic2, roleEntityBasic3_reader)
+`, bucketName, objectName, tfObjectAcl.Name(),
+ roleEntityBasic2, roleEntityBasic3_reader)
+}
-var testGoogleStorageObjectsAclPredefined = fmt.Sprintf(`
+func testGoogleStorageObjectsAclPredefined(bucketName string, objectName string) string {
+ return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
}
@@ -307,4 +327,5 @@ resource "google_storage_object_acl" "acl" {
bucket = "${google_storage_bucket.bucket.name}"
predefined_acl = "projectPrivate"
}
-`, testAclBucketName, testAclObjectName, tfObjectAcl.Name())
+`, bucketName, objectName, tfObjectAcl.Name())
+}
diff --git a/helper/acctest/random.go b/helper/acctest/random.go
index 5317a58b4e..fbc4428d79 100644
--- a/helper/acctest/random.go
+++ b/helper/acctest/random.go
@@ -8,6 +8,12 @@ import (
// Helpers for generating random tidbits for use in identifiers to prevent
// collisions in acceptance tests.
+// RandInt generates a random integer
+func RandInt() int {
+ reseed()
+ return rand.New(rand.NewSource(time.Now().UnixNano())).Int()
+}
+
// RandString generates a random alphanumeric string of the length specified
func RandString(strlen int) string {
return RandStringFromCharSet(strlen, CharSetAlphaNum)
@@ -16,7 +22,7 @@ func RandString(strlen int) string {
// RandStringFromCharSet generates a random string by selecting characters from
// the charset provided
func RandStringFromCharSet(strlen int, charSet string) string {
- rand.Seed(time.Now().UTC().UnixNano())
+ reseed()
result := make([]byte, strlen)
for i := 0; i < strlen; i++ {
result[i] = charSet[rand.Intn(len(charSet))]
@@ -24,6 +30,11 @@ func RandStringFromCharSet(strlen int, charSet string) string {
return string(result)
}
+// Seeds random with current timestamp
+func reseed() {
+ rand.Seed(time.Now().UTC().UnixNano())
+}
+
const (
// CharSetAlphaNum is the alphanumeric character set for use with
// RandStringFromCharSet
From 1510277f45eec6121f265b1c24ddee543c5e65da Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 5 Jan 2016 09:33:34 -0600
Subject: [PATCH 404/664] update AWS Directory Service delete method and test
---
...esource_aws_directory_service_directory.go | 12 ++++++--
...ce_aws_directory_service_directory_test.go | 30 ++++++++++++++++---
2 files changed, 35 insertions(+), 7 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_directory_service_directory.go b/builtin/providers/aws/resource_aws_directory_service_directory.go
index 1fdb9491ee..e5065e378e 100644
--- a/builtin/providers/aws/resource_aws_directory_service_directory.go
+++ b/builtin/providers/aws/resource_aws_directory_service_directory.go
@@ -8,6 +8,7 @@ import (
"github.com/hashicorp/terraform/helper/schema"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/directoryservice"
"github.com/hashicorp/terraform/helper/resource"
)
@@ -252,6 +253,8 @@ func resourceAwsDirectoryServiceDirectoryDelete(d *schema.ResourceData, meta int
input := directoryservice.DeleteDirectoryInput{
DirectoryId: aws.String(d.Id()),
}
+
+ log.Printf("[DEBUG] Delete Directory input: %s", input)
_, err := dsconn.DeleteDirectory(&input)
if err != nil {
return err
@@ -261,17 +264,20 @@ func resourceAwsDirectoryServiceDirectoryDelete(d *schema.ResourceData, meta int
log.Printf("[DEBUG] Waiting for DS (%q) to be deleted", d.Id())
stateConf := &resource.StateChangeConf{
Pending: []string{"Deleting"},
- Target: "",
+ Target: "Deleted",
Refresh: func() (interface{}, string, error) {
resp, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{
DirectoryIds: []*string{aws.String(d.Id())},
})
if err != nil {
- return nil, "", err
+ if dserr, ok := err.(awserr.Error); ok && dserr.Code() == "EntityDoesNotExistException" {
+ return 42, "Deleted", nil
+ }
+ return nil, "error", err
}
if len(resp.DirectoryDescriptions) == 0 {
- return nil, "", nil
+ return 42, "Deleted", nil
}
ds := resp.DirectoryDescriptions[0]
diff --git a/builtin/providers/aws/resource_aws_directory_service_directory_test.go b/builtin/providers/aws/resource_aws_directory_service_directory_test.go
index b10174bdb0..fefdeb751f 100644
--- a/builtin/providers/aws/resource_aws_directory_service_directory_test.go
+++ b/builtin/providers/aws/resource_aws_directory_service_directory_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/directoryservice"
"github.com/hashicorp/terraform/helper/resource"
@@ -65,12 +66,33 @@ func TestAccAWSDirectoryServiceDirectory_withAliasAndSso(t *testing.T) {
}
func testAccCheckDirectoryServiceDirectoryDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v",
- s.RootModule().Resources)
+ dsconn := testAccProvider.Meta().(*AWSClient).dsconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_directory_service_directory" {
+ continue
+ }
+
+ input := directoryservice.DescribeDirectoriesInput{
+ DirectoryIds: []*string{aws.String(rs.Primary.ID)},
+ }
+ out, err := dsconn.DescribeDirectories(&input)
+ if err != nil {
+ // EntityDoesNotExistException means it's gone, this is good
+ if dserr, ok := err.(awserr.Error); ok && dserr.Code() == "EntityDoesNotExistException" {
+ return nil
+ }
+ return err
+ }
+
+ if out != nil && len(out.DirectoryDescriptions) > 0 {
+ return fmt.Errorf("Expected AWS Directory Service Directory to be gone, but was still found")
+ }
+
+ return nil
}
- return nil
+ return fmt.Errorf("Default error in Service Directory Test")
}
func testAccCheckServiceDirectoryExists(name string) resource.TestCheckFunc {
From f0d1193f8f6bb1885f2b0624a2852bc328387c44 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 5 Jan 2016 10:17:20 -0600
Subject: [PATCH 405/664] provider/aws: Update Lambda create error handling to
be more flexible
---
builtin/providers/aws/resource_aws_lambda_function.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/builtin/providers/aws/resource_aws_lambda_function.go b/builtin/providers/aws/resource_aws_lambda_function.go
index 324016455e..a450182767 100644
--- a/builtin/providers/aws/resource_aws_lambda_function.go
+++ b/builtin/providers/aws/resource_aws_lambda_function.go
@@ -157,7 +157,7 @@ func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) e
// IAM profiles can take ~10 seconds to propagate in AWS:
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
// Error creating Lambda function: InvalidParameterValueException: The role defined for the task cannot be assumed by Lambda.
- if awsErr.Code() == "InvalidParameterValueException" && strings.Contains(awsErr.Message(), "The role defined for the task cannot be assumed by Lambda.") {
+ if awsErr.Code() == "InvalidParameterValueException" && strings.Contains(awsErr.Message(), "cannot be assumed by Lambda.") {
log.Printf("[DEBUG] Invalid IAM Instance Profile referenced, retrying...")
time.Sleep(2 * time.Second)
continue
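The one-line change above loosens the retry condition so any InvalidParameterValueException whose message contains the stable "cannot be assumed by Lambda." fragment is treated as an IAM-propagation delay. A small sketch of that check, factored into a hypothetical helper named isRetryableLambdaRoleError:

package aws

import (
	"strings"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// isRetryableLambdaRoleError reports whether err looks like the transient
// "role not yet assumable" error: only the error code and the stable message
// suffix are inspected, so variations in the rest of the message still match.
func isRetryableLambdaRoleError(err error) bool {
	awsErr, ok := err.(awserr.Error)
	if !ok {
		return false
	}
	return awsErr.Code() == "InvalidParameterValueException" &&
		strings.Contains(awsErr.Message(), "cannot be assumed by Lambda.")
}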
From a006a6a399b944d1a48ac8545b4e92abd5e91085 Mon Sep 17 00:00:00 2001
From: Lars Wander
Date: Tue, 5 Jan 2016 11:37:52 -0500
Subject: [PATCH 406/664] provider/google: Prevent project metadata sshKeys from
 showing up

---
builtin/providers/google/metadata.go | 6 ++++--
builtin/providers/google/resource_compute_instance.go | 2 +-
.../providers/google/resource_compute_project_metadata.go | 2 +-
.../google/resource_compute_project_metadata_test.go | 6 ------
4 files changed, 6 insertions(+), 10 deletions(-)
diff --git a/builtin/providers/google/metadata.go b/builtin/providers/google/metadata.go
index e75c450228..e2ebd18a3d 100644
--- a/builtin/providers/google/metadata.go
+++ b/builtin/providers/google/metadata.go
@@ -60,11 +60,13 @@ func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interfa
}
// Format metadata from the server data format -> schema data format
-func MetadataFormatSchema(md *compute.Metadata) map[string]interface{} {
+func MetadataFormatSchema(curMDMap map[string]interface{}, md *compute.Metadata) map[string]interface{} {
newMD := make(map[string]interface{})
for _, kv := range md.Items {
- newMD[kv.Key] = *kv.Value
+ if _, ok := curMDMap[kv.Key]; ok {
+ newMD[kv.Key] = *kv.Value
+ }
}
return newMD
diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go
index 8ca7664853..66e0b5e850 100644
--- a/builtin/providers/google/resource_compute_instance.go
+++ b/builtin/providers/google/resource_compute_instance.go
@@ -562,7 +562,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
// Synch metadata
md := instance.Metadata
- _md := MetadataFormatSchema(md)
+ _md := MetadataFormatSchema(d.Get("metadata").(map[string]interface{}), md)
delete(_md, "startup-script")
if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists {
diff --git a/builtin/providers/google/resource_compute_project_metadata.go b/builtin/providers/google/resource_compute_project_metadata.go
index c2f8a4a5fa..c549415c22 100644
--- a/builtin/providers/google/resource_compute_project_metadata.go
+++ b/builtin/providers/google/resource_compute_project_metadata.go
@@ -90,7 +90,7 @@ func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}
md := project.CommonInstanceMetadata
- if err = d.Set("metadata", MetadataFormatSchema(md)); err != nil {
+ if err = d.Set("metadata", MetadataFormatSchema(d.Get("metadata").(map[string]interface{}), md)); err != nil {
return fmt.Errorf("Error setting metadata: %s", err)
}
diff --git a/builtin/providers/google/resource_compute_project_metadata_test.go b/builtin/providers/google/resource_compute_project_metadata_test.go
index cb0145d8d3..7be3dfb263 100644
--- a/builtin/providers/google/resource_compute_project_metadata_test.go
+++ b/builtin/providers/google/resource_compute_project_metadata_test.go
@@ -13,8 +13,6 @@ import (
func TestAccComputeProjectMetadata_basic(t *testing.T) {
var project compute.Project
- t.Skip("See https://github.com/hashicorp/terraform/issues/4504")
-
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@@ -38,8 +36,6 @@ func TestAccComputeProjectMetadata_basic(t *testing.T) {
func TestAccComputeProjectMetadata_modify_1(t *testing.T) {
var project compute.Project
- t.Skip("See https://github.com/hashicorp/terraform/issues/4504")
-
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
@@ -76,8 +72,6 @@ func TestAccComputeProjectMetadata_modify_1(t *testing.T) {
func TestAccComputeProjectMetadata_modify_2(t *testing.T) {
var project compute.Project
- t.Skip("See https://github.com/hashicorp/terraform/issues/4504")
-
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
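The key change in this patch is that MetadataFormatSchema now receives the metadata map the configuration already tracks and only copies matching keys from the API response, so project-level entries the user never declared (notably sshKeys) stay out of state and stop producing spurious diffs. A minimal standalone sketch of that filtering step; filterKnownMetadata is a hypothetical name used for illustration.

package google

import (
	"google.golang.org/api/compute/v1"
)

// filterKnownMetadata keeps only API-returned items whose keys already exist in
// the configured metadata map, mirroring the filter added to MetadataFormatSchema.
func filterKnownMetadata(curMDMap map[string]interface{}, md *compute.Metadata) map[string]interface{} {
	newMD := make(map[string]interface{})
	for _, kv := range md.Items {
		if _, ok := curMDMap[kv.Key]; ok {
			newMD[kv.Key] = *kv.Value
		}
	}
	return newMD
}

The trade-off, which appears intentional here, is that metadata keys added outside Terraform are simply ignored rather than reported as drift.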
From 65fb52a38e196f4462e1b380e7fe82e69efe1cb3 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 5 Jan 2016 11:14:03 -0600
Subject: [PATCH 407/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bd13a1274f..0d2158a4c9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -74,6 +74,7 @@ BUG FIXES:
* provider/aws: Use body or URL for all CloudFormation stack updates [GH-4370]
* provider/azure: Update for [breaking change to upstream client library](https://github.com/Azure/azure-sdk-for-go/commit/68d50cb53a73edfeb7f17f5e86cdc8eb359a9528). [GH-4300]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
+ * provider/google: Prevent project metadata sshKeys from showing up and causing unnecessary diffs [GH-4512]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
* provider/rundeck: Tolerate Rundeck server not returning project name when reading a job [GH-4301]
* provider/vsphere: Create and attach additional disks before bootup [GH-4196]
From 6b733a09eb5c6f209d082505927856876a5d1bb0 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 5 Jan 2016 11:22:57 -0600
Subject: [PATCH 408/664] provider/aws: more retrying with Lambda
---
...esource_aws_lambda_event_source_mapping.go | 17 ++++++++++-
.../aws/resource_aws_lambda_function.go | 30 ++++++++++---------
2 files changed, 32 insertions(+), 15 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go b/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go
index 4adb3f0448..055c991cc0 100644
--- a/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go
+++ b/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go
@@ -179,7 +179,22 @@ func resourceAwsLambdaEventSourceMappingUpdate(d *schema.ResourceData, meta inte
Enabled: aws.Bool(d.Get("enabled").(bool)),
}
- _, err := conn.UpdateEventSourceMapping(params)
+ err := resource.Retry(1*time.Minute, func() error {
+ _, err := conn.UpdateEventSourceMapping(params)
+ if err != nil {
+ if awserr, ok := err.(awserr.Error); ok {
+ if awserr.Code() == "InvalidParameterValueException" {
+ // Retryable
+ return awserr
+ }
+ }
+ // Not retryable
+ return resource.RetryError{Err: err}
+ }
+ // No error
+ return nil
+ })
+
if err != nil {
return fmt.Errorf("Error updating Lambda event source mapping: %s", err)
}
diff --git a/builtin/providers/aws/resource_aws_lambda_function.go b/builtin/providers/aws/resource_aws_lambda_function.go
index a450182767..b47a6a5a0f 100644
--- a/builtin/providers/aws/resource_aws_lambda_function.go
+++ b/builtin/providers/aws/resource_aws_lambda_function.go
@@ -5,7 +5,6 @@ import (
"fmt"
"io/ioutil"
"log"
- "strings"
"time"
"github.com/aws/aws-sdk-go/aws"
@@ -15,6 +14,7 @@ import (
"errors"
+ "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
@@ -149,22 +149,24 @@ func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) e
Timeout: aws.Int64(int64(d.Get("timeout").(int))),
}
- var err error
- for i := 0; i < 5; i++ {
+ // IAM profiles can take ~10 seconds to propagate in AWS:
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
+ // Error creating Lambda function: InvalidParameterValueException: The role defined for the task cannot be assumed by Lambda.
+ err := resource.Retry(1*time.Minute, func() error {
_, err = conn.CreateFunction(params)
- if awsErr, ok := err.(awserr.Error); ok {
-
- // IAM profiles can take ~10 seconds to propagate in AWS:
- // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
- // Error creating Lambda function: InvalidParameterValueException: The role defined for the task cannot be assumed by Lambda.
- if awsErr.Code() == "InvalidParameterValueException" && strings.Contains(awsErr.Message(), "cannot be assumed by Lambda.") {
- log.Printf("[DEBUG] Invalid IAM Instance Profile referenced, retrying...")
- time.Sleep(2 * time.Second)
- continue
+ if err != nil {
+ if awserr, ok := err.(awserr.Error); ok {
+ if awserr.Code() == "InvalidParameterValueException" {
+ // Retryable
+ return awserr
+ }
}
+ // Not retryable
+ return resource.RetryError{Err: err}
}
- break
- }
+ // No error
+ return nil
+ })
if err != nil {
return fmt.Errorf("Error creating Lambda function: %s", err)
}
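Both Lambda resources now share the same retry shape: under the era's resource.Retry API, returning a plain error from the closure means "try again until the timeout", while wrapping it in resource.RetryError aborts immediately. A compact sketch of that shape, with createFunctionWithRetry as a hypothetical wrapper name:

package aws

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/lambda"
	"github.com/hashicorp/terraform/helper/resource"
)

// createFunctionWithRetry retries CreateFunction while IAM propagation errors
// persist and aborts immediately on any other failure.
func createFunctionWithRetry(conn *lambda.Lambda, params *lambda.CreateFunctionInput) error {
	return resource.Retry(1*time.Minute, func() error {
		_, err := conn.CreateFunction(params)
		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "InvalidParameterValueException" {
				// IAM role still propagating; plain error means retryable.
				return awsErr
			}
			// Anything else is fatal.
			return resource.RetryError{Err: err}
		}
		return nil
	})
}

The event source mapping update in the same patch follows the identical pattern around conn.UpdateEventSourceMapping.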
From 312f2dd6e329cf7a8691fc21df5c0f808e66bd92 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 5 Jan 2016 11:27:49 -0600
Subject: [PATCH 409/664] document why we retry in lambda source mapping
---
.../aws/resource_aws_lambda_event_source_mapping.go | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go b/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go
index 055c991cc0..053b2adafa 100644
--- a/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go
+++ b/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go
@@ -91,6 +91,13 @@ func resourceAwsLambdaEventSourceMappingCreate(d *schema.ResourceData, meta inte
Enabled: aws.Bool(d.Get("enabled").(bool)),
}
+ // IAM profiles and roles can take some time to propagate in AWS:
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
+ // Error creating Lambda function: InvalidParameterValueException: The
+ // role defined for the task cannot be assumed by Lambda.
+ //
+ // The role may exist, but the permissions may not have propagated, so we
+ // retry
err := resource.Retry(1*time.Minute, func() error {
eventSourceMappingConfiguration, err := conn.CreateEventSourceMapping(params)
if err != nil {
From 449ffe027f250a494e4b5f4a16a5bafccf34d2de Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 5 Jan 2016 11:35:21 -0600
Subject: [PATCH 410/664] fix error with undefined err
---
builtin/providers/aws/resource_aws_lambda_function.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/builtin/providers/aws/resource_aws_lambda_function.go b/builtin/providers/aws/resource_aws_lambda_function.go
index b47a6a5a0f..4cce32f4d9 100644
--- a/builtin/providers/aws/resource_aws_lambda_function.go
+++ b/builtin/providers/aws/resource_aws_lambda_function.go
@@ -153,7 +153,7 @@ func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) e
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
// Error creating Lambda function: InvalidParameterValueException: The role defined for the task cannot be assumed by Lambda.
err := resource.Retry(1*time.Minute, func() error {
- _, err = conn.CreateFunction(params)
+ _, err := conn.CreateFunction(params)
if err != nil {
if awserr, ok := err.(awserr.Error); ok {
if awserr.Code() == "InvalidParameterValueException" {
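The fix is a single character but worth spelling out: the previous commit removed the enclosing var err error, so plain assignment inside the retry closure referenced a variable that no longer exists, and the short declaration creates a closure-local err instead. A generic, non-Terraform sketch of the same situation; call and retry are stand-ins, not real APIs.

package main

import "fmt"

// call stands in for an API call such as conn.CreateFunction.
func call() (int, error) { return 0, nil }

func main() {
	// retry stands in for resource.Retry; here it just invokes the closure once.
	retry := func(f func() error) error { return f() }

	err := retry(func() error {
		// "_, err = call()" would not compile: no err is in scope out here.
		_, err := call() // ":=" declares a new err local to this closure
		return err
	})
	fmt.Println(err)
}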
From c4aff4a585d85a22e40b7cba38b091d2fa04dffe Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 5 Jan 2016 12:26:44 -0600
Subject: [PATCH 411/664] provider/google: Some more collision avoidance test
tweaks
---
.../google/resource_compute_disk_test.go | 12 ++--
.../resource_compute_forwarding_rule_test.go | 58 +++++++++++--------
2 files changed, 42 insertions(+), 28 deletions(-)
diff --git a/builtin/providers/google/resource_compute_disk_test.go b/builtin/providers/google/resource_compute_disk_test.go
index 659affff8e..c4f5c4daeb 100644
--- a/builtin/providers/google/resource_compute_disk_test.go
+++ b/builtin/providers/google/resource_compute_disk_test.go
@@ -4,12 +4,14 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
)
func TestAccComputeDisk_basic(t *testing.T) {
+ diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
var disk compute.Disk
resource.Test(t, resource.TestCase{
@@ -18,7 +20,7 @@ func TestAccComputeDisk_basic(t *testing.T) {
CheckDestroy: testAccCheckComputeDiskDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeDisk_basic,
+ Config: testAccComputeDisk_basic(diskName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeDiskExists(
"google_compute_disk.foobar", &disk),
@@ -75,11 +77,13 @@ func testAccCheckComputeDiskExists(n string, disk *compute.Disk) resource.TestCh
}
}
-const testAccComputeDisk_basic = `
+func testAccComputeDisk_basic(diskName string) string {
+ return fmt.Sprintf(`
resource "google_compute_disk" "foobar" {
- name = "terraform-test"
+ name = "%s"
image = "debian-7-wheezy-v20140814"
size = 50
type = "pd-ssd"
zone = "us-central1-a"
-}`
+}`, diskName)
+}
diff --git a/builtin/providers/google/resource_compute_forwarding_rule_test.go b/builtin/providers/google/resource_compute_forwarding_rule_test.go
index ee0a000568..08e9fa51e9 100644
--- a/builtin/providers/google/resource_compute_forwarding_rule_test.go
+++ b/builtin/providers/google/resource_compute_forwarding_rule_test.go
@@ -4,11 +4,14 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccComputeForwardingRule_basic(t *testing.T) {
+ poolName := fmt.Sprintf("tf-%s", acctest.RandString(10))
+ ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -16,7 +19,7 @@ func TestAccComputeForwardingRule_basic(t *testing.T) {
CheckDestroy: testAccCheckComputeForwardingRuleDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeForwardingRule_basic,
+ Config: testAccComputeForwardingRule_basic(poolName, ruleName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeForwardingRuleExists(
"google_compute_forwarding_rule.foobar"),
@@ -27,6 +30,9 @@ func TestAccComputeForwardingRule_basic(t *testing.T) {
}
func TestAccComputeForwardingRule_ip(t *testing.T) {
+ addrName := fmt.Sprintf("tf-%s", acctest.RandString(10))
+ poolName := fmt.Sprintf("tf-%s", acctest.RandString(10))
+ ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -34,7 +40,7 @@ func TestAccComputeForwardingRule_ip(t *testing.T) {
CheckDestroy: testAccCheckComputeForwardingRuleDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeForwardingRule_ip,
+ Config: testAccComputeForwardingRule_ip(addrName, poolName, ruleName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeForwardingRuleExists(
"google_compute_forwarding_rule.foobar"),
@@ -89,36 +95,40 @@ func testAccCheckComputeForwardingRuleExists(n string) resource.TestCheckFunc {
}
}
-const testAccComputeForwardingRule_basic = `
+func testAccComputeForwardingRule_basic(poolName, ruleName string) string {
+ return fmt.Sprintf(`
resource "google_compute_target_pool" "foobar-tp" {
- description = "Resource created for Terraform acceptance testing"
- instances = ["us-central1-a/foo", "us-central1-b/bar"]
- name = "terraform-test"
+ description = "Resource created for Terraform acceptance testing"
+ instances = ["us-central1-a/foo", "us-central1-b/bar"]
+ name = "%s"
}
resource "google_compute_forwarding_rule" "foobar" {
- description = "Resource created for Terraform acceptance testing"
- ip_protocol = "UDP"
- name = "terraform-test"
- port_range = "80-81"
- target = "${google_compute_target_pool.foobar-tp.self_link}"
+ description = "Resource created for Terraform acceptance testing"
+ ip_protocol = "UDP"
+ name = "%s"
+ port_range = "80-81"
+ target = "${google_compute_target_pool.foobar-tp.self_link}"
+}
+`, poolName, ruleName)
}
-`
-const testAccComputeForwardingRule_ip = `
+func testAccComputeForwardingRule_ip(addrName, poolName, ruleName string) string {
+ return fmt.Sprintf(`
resource "google_compute_address" "foo" {
- name = "foo"
+ name = "%s"
}
resource "google_compute_target_pool" "foobar-tp" {
- description = "Resource created for Terraform acceptance testing"
- instances = ["us-central1-a/foo", "us-central1-b/bar"]
- name = "terraform-test"
+ description = "Resource created for Terraform acceptance testing"
+ instances = ["us-central1-a/foo", "us-central1-b/bar"]
+ name = "%s"
}
resource "google_compute_forwarding_rule" "foobar" {
- description = "Resource created for Terraform acceptance testing"
- ip_address = "${google_compute_address.foo.address}"
- ip_protocol = "TCP"
- name = "terraform-test"
- port_range = "80-81"
- target = "${google_compute_target_pool.foobar-tp.self_link}"
+ description = "Resource created for Terraform acceptance testing"
+ ip_address = "${google_compute_address.foo.address}"
+ ip_protocol = "TCP"
+ name = "%s"
+ port_range = "80-81"
+ target = "${google_compute_target_pool.foobar-tp.self_link}"
+}
+`, addrName, poolName, ruleName)
}
-`
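The pattern repeated throughout this patch (and the backend service patch that follows) is the same: hard-coded fixture names like "terraform-test" collide when several acceptance-test runs share a project, so each fixture becomes a function that interpolates a random suffix from helper/acctest. A minimal sketch with a hypothetical fixture name testAccExampleAddress:

package google

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/acctest"
)

// testAccExampleAddress builds a config whose resource name is unique per run,
// so concurrent acceptance tests against the same project cannot collide.
func testAccExampleAddress() string {
	name := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	return fmt.Sprintf(`
resource "google_compute_address" "example" {
  name = "%s"
}`, name)
}

Note that patch 414 later takes a slightly different route, baking the random suffix into package-level vars with fmt.Sprintf; that is simpler, but related _basic and _update fixtures then get independent random names, whereas generating the name in the test and passing it into the fixture (as here and in this patch) keeps it stable across test steps.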
From 8677f8eea7d8fe3355bd75df372519c22d3a0ae2 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 5 Jan 2016 12:39:30 -0600
Subject: [PATCH 412/664] provider/google: Collision fixes in compute backend
service tests
---
.../resource_compute_backend_service_test.go | 109 ++++++++++--------
1 file changed, 63 insertions(+), 46 deletions(-)
diff --git a/builtin/providers/google/resource_compute_backend_service_test.go b/builtin/providers/google/resource_compute_backend_service_test.go
index 70b420ba4f..174aa3e621 100644
--- a/builtin/providers/google/resource_compute_backend_service_test.go
+++ b/builtin/providers/google/resource_compute_backend_service_test.go
@@ -4,12 +4,16 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
)
func TestAccComputeBackendService_basic(t *testing.T) {
+ serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+ checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+ extraCheckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
var svc compute.BackendService
resource.Test(t, resource.TestCase{
@@ -18,14 +22,15 @@ func TestAccComputeBackendService_basic(t *testing.T) {
CheckDestroy: testAccCheckComputeBackendServiceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeBackendService_basic,
+ Config: testAccComputeBackendService_basic(serviceName, checkName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeBackendServiceExists(
"google_compute_backend_service.foobar", &svc),
),
},
resource.TestStep{
- Config: testAccComputeBackendService_basicModified,
+ Config: testAccComputeBackendService_basicModified(
+ serviceName, checkName, extraCheckName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeBackendServiceExists(
"google_compute_backend_service.foobar", &svc),
@@ -36,6 +41,10 @@ func TestAccComputeBackendService_basic(t *testing.T) {
}
func TestAccComputeBackendService_withBackend(t *testing.T) {
+ serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+ igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+ itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+ checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
var svc compute.BackendService
resource.Test(t, resource.TestCase{
@@ -44,7 +53,8 @@ func TestAccComputeBackendService_withBackend(t *testing.T) {
CheckDestroy: testAccCheckComputeBackendServiceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeBackendService_withBackend,
+ Config: testAccComputeBackendService_withBackend(
+ serviceName, igName, itName, checkName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeBackendServiceExists(
"google_compute_backend_service.lipsum", &svc),
@@ -111,83 +121,90 @@ func testAccCheckComputeBackendServiceExists(n string, svc *compute.BackendServi
}
}
-const testAccComputeBackendService_basic = `
+func testAccComputeBackendService_basic(serviceName, checkName string) string {
+ return fmt.Sprintf(`
resource "google_compute_backend_service" "foobar" {
- name = "blablah"
- health_checks = ["${google_compute_http_health_check.zero.self_link}"]
+ name = "%s"
+ health_checks = ["${google_compute_http_health_check.zero.self_link}"]
}
resource "google_compute_http_health_check" "zero" {
- name = "tf-test-zero"
- request_path = "/"
- check_interval_sec = 1
- timeout_sec = 1
+ name = "%s"
+ request_path = "/"
+ check_interval_sec = 1
+ timeout_sec = 1
+}
+`, serviceName, checkName)
}
-`
-const testAccComputeBackendService_basicModified = `
+func testAccComputeBackendService_basicModified(serviceName, checkOne, checkTwo string) string {
+ return fmt.Sprintf(`
resource "google_compute_backend_service" "foobar" {
- name = "blablah"
+ name = "%s"
health_checks = ["${google_compute_http_health_check.one.self_link}"]
}
resource "google_compute_http_health_check" "zero" {
- name = "tf-test-zero"
+ name = "%s"
request_path = "/"
check_interval_sec = 1
timeout_sec = 1
}
resource "google_compute_http_health_check" "one" {
- name = "tf-test-one"
+ name = "%s"
request_path = "/one"
check_interval_sec = 30
timeout_sec = 30
}
-`
+`, serviceName, checkOne, checkTwo)
+}
-const testAccComputeBackendService_withBackend = `
+func testAccComputeBackendService_withBackend(
+ serviceName, igName, itName, checkName string) string {
+ return fmt.Sprintf(`
resource "google_compute_backend_service" "lipsum" {
- name = "hello-world-bs"
- description = "Hello World 1234"
- port_name = "http"
- protocol = "HTTP"
- timeout_sec = 10
+ name = "%s"
+ description = "Hello World 1234"
+ port_name = "http"
+ protocol = "HTTP"
+ timeout_sec = 10
- backend {
- group = "${google_compute_instance_group_manager.foobar.instance_group}"
- }
+ backend {
+ group = "${google_compute_instance_group_manager.foobar.instance_group}"
+ }
- health_checks = ["${google_compute_http_health_check.default.self_link}"]
+ health_checks = ["${google_compute_http_health_check.default.self_link}"]
}
resource "google_compute_instance_group_manager" "foobar" {
- name = "terraform-test"
- instance_template = "${google_compute_instance_template.foobar.self_link}"
- base_instance_name = "foobar"
- zone = "us-central1-f"
- target_size = 1
+ name = "%s"
+ instance_template = "${google_compute_instance_template.foobar.self_link}"
+ base_instance_name = "foobar"
+ zone = "us-central1-f"
+ target_size = 1
}
resource "google_compute_instance_template" "foobar" {
- name = "terraform-test"
- machine_type = "n1-standard-1"
+ name = "%s"
+ machine_type = "n1-standard-1"
- network_interface {
- network = "default"
- }
+ network_interface {
+ network = "default"
+ }
- disk {
- source_image = "debian-7-wheezy-v20140814"
- auto_delete = true
- boot = true
- }
+ disk {
+ source_image = "debian-7-wheezy-v20140814"
+ auto_delete = true
+ boot = true
+ }
}
resource "google_compute_http_health_check" "default" {
- name = "test2"
- request_path = "/"
- check_interval_sec = 1
- timeout_sec = 1
+ name = "%s"
+ request_path = "/"
+ check_interval_sec = 1
+ timeout_sec = 1
+}
+`, serviceName, igName, itName, checkName)
}
-`
From a48e713fe01072eb8abb662c48e0c792a0bdacd5 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 5 Jan 2016 16:43:52 -0500
Subject: [PATCH 413/664] provider/azurerm: Register needed Azure providers
---
builtin/providers/azurerm/config.go | 6 ++++++
builtin/providers/azurerm/provider.go | 30 +++++++++++++++++++++++++++
2 files changed, 36 insertions(+)
diff --git a/builtin/providers/azurerm/config.go b/builtin/providers/azurerm/config.go
index 12911512b1..0d39ac8ef7 100644
--- a/builtin/providers/azurerm/config.go
+++ b/builtin/providers/azurerm/config.go
@@ -36,6 +36,7 @@ type ArmClient struct {
vnetGatewayClient network.VirtualNetworkGatewaysClient
vnetClient network.VirtualNetworksClient
+ providers resources.ProvidersClient
resourceGroupClient resources.GroupsClient
tagsClient resources.TagsClient
@@ -160,6 +161,11 @@ func (c *Config) getArmClient() (*ArmClient, error) {
rgc.Sender = autorest.CreateSender(withRequestLogging())
client.resourceGroupClient = rgc
+ pc := resources.NewProvidersClient(c.SubscriptionID)
+ pc.Authorizer = spt
+ pc.Sender = autorest.CreateSender(withRequestLogging())
+ client.providers = pc
+
tc := resources.NewTagsClient(c.SubscriptionID)
tc.Authorizer = spt
tc.Sender = autorest.CreateSender(withRequestLogging())
diff --git a/builtin/providers/azurerm/provider.go b/builtin/providers/azurerm/provider.go
index 9f6476c53a..c64d151483 100644
--- a/builtin/providers/azurerm/provider.go
+++ b/builtin/providers/azurerm/provider.go
@@ -1,6 +1,8 @@
package azurerm
import (
+ "fmt"
+ "net/http"
"strings"
"github.com/hashicorp/terraform/helper/schema"
@@ -70,9 +72,37 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
return nil, err
}
+ err = registerAzureResourceProvidersWithSubscription(&config, client)
+ if err != nil {
+ return nil, err
+ }
+
return client, nil
}
+// registerAzureResourceProvidersWithSubscription uses the providers client to register
+// all Azure resource providers which the Terraform provider may require (regardless of
+// whether they are actually used by the configuration or not). It was confirmed by Microsoft
+// that this is the approach their own internal tools also take.
+func registerAzureResourceProvidersWithSubscription(config *Config, client *ArmClient) error {
+ providerClient := client.providers
+
+ providers := []string{"Microsoft.Network"}
+
+ for _, v := range providers {
+ res, err := providerClient.Register(v)
+ if err != nil {
+ return err
+ }
+
+ if res.StatusCode != http.StatusOK {
+ return fmt.Errorf("Error registering provider %q with subscription %q", v, config.SubscriptionID)
+ }
+ }
+
+ return nil
+}
+
func azureRMNormalizeLocation(location interface{}) string {
input := location.(string)
return strings.Replace(strings.ToLower(input), " ", "", -1)
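As the comment in the patch explains, the provider registers every Azure resource provider namespace it may need at configure time, regardless of whether the configuration uses it, and fails fast on a non-OK response. Below is a sketch of how the loop generalizes, assuming the arm/resources package layout the provider used at the time; registerNamespaces is a hypothetical variant that takes the namespace list as a parameter, and any namespaces beyond Microsoft.Network are illustrative only, not taken from the patch.

package azurerm

import (
	"fmt"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/arm/resources"
)

// registerNamespaces registers each resource provider namespace with the
// subscription and surfaces any non-OK response as an error.
func registerNamespaces(client resources.ProvidersClient, subscriptionID string, namespaces []string) error {
	for _, namespace := range namespaces {
		res, err := client.Register(namespace)
		if err != nil {
			return err
		}
		if res.StatusCode != http.StatusOK {
			return fmt.Errorf("Error registering provider %q with subscription %q", namespace, subscriptionID)
		}
	}
	return nil
}

A caller would pass client.providers and config.SubscriptionID from getArmClient, with a slice such as []string{"Microsoft.Network"} matching the patch.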
From fb80ec8d33ffc5388fb7df64281ee260eb89047e Mon Sep 17 00:00:00 2001
From: Lars Wander
Date: Tue, 5 Jan 2016 16:47:10 -0500
Subject: [PATCH 414/664] provider/google: remove conflicting names from
acceptance tests
---
.../google/resource_compute_address_test.go | 7 +-
.../resource_compute_autoscaler_test.go | 25 +++---
.../google/resource_compute_firewall_test.go | 17 ++--
.../resource_compute_global_address_test.go | 7 +-
...rce_compute_global_forwarding_rule_test.go | 35 +++++----
...resource_compute_http_health_check_test.go | 19 ++---
...esource_compute_https_health_check_test.go | 19 ++---
...rce_compute_instance_group_manager_test.go | 35 +++++----
...resource_compute_instance_template_test.go | 23 +++---
.../google/resource_compute_instance_test.go | 77 ++++++++++---------
.../google/resource_compute_network_test.go | 7 +-
.../google/resource_compute_route_test.go | 9 ++-
.../resource_compute_ssl_certificate_test.go | 7 +-
...resource_compute_target_http_proxy_test.go | 29 +++----
...esource_compute_target_https_proxy_test.go | 35 +++++----
.../resource_compute_target_pool_test.go | 7 +-
.../google/resource_compute_url_map_test.go | 41 +++++-----
.../resource_compute_vpn_gateway_test.go | 9 ++-
.../resource_compute_vpn_tunnel_test.go | 21 ++---
.../google/resource_container_cluster_test.go | 13 ++--
.../google/resource_dns_managed_zone_test.go | 7 +-
.../google/resource_dns_record_set_test.go | 7 +-
.../resource_pubsub_subscription_test.go | 9 ++-
.../google/resource_pubsub_topic_test.go | 7 +-
.../google/resource_sql_database_test.go | 7 +-
25 files changed, 255 insertions(+), 224 deletions(-)
diff --git a/builtin/providers/google/resource_compute_address_test.go b/builtin/providers/google/resource_compute_address_test.go
index 90988bb2ce..e15d11dcf5 100644
--- a/builtin/providers/google/resource_compute_address_test.go
+++ b/builtin/providers/google/resource_compute_address_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
@@ -75,7 +76,7 @@ func testAccCheckComputeAddressExists(n string, addr *compute.Address) resource.
}
}
-const testAccComputeAddress_basic = `
+var testAccComputeAddress_basic = fmt.Sprintf(`
resource "google_compute_address" "foobar" {
- name = "terraform-test"
-}`
+ name = "address-test-%s"
+}`, acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_autoscaler_test.go b/builtin/providers/google/resource_compute_autoscaler_test.go
index 7dba5520db..4cdaa90198 100644
--- a/builtin/providers/google/resource_compute_autoscaler_test.go
+++ b/builtin/providers/google/resource_compute_autoscaler_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
@@ -130,9 +131,9 @@ func testAccCheckAutoscalerUpdated(n string, max int64) resource.TestCheckFunc {
}
}
-const testAccAutoscaler_basic = `
+var testAccAutoscaler_basic = fmt.Sprintf(`
resource "google_compute_instance_template" "foobar" {
- name = "terraform-test-template-foobar"
+ name = "ascaler-test-%s"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
@@ -158,13 +159,13 @@ resource "google_compute_instance_template" "foobar" {
resource "google_compute_target_pool" "foobar" {
description = "Resource created for Terraform acceptance testing"
- name = "terraform-test-tpool-foobar"
+ name = "ascaler-test-%s"
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_compute_instance_group_manager" "foobar" {
description = "Terraform test instance group manager"
- name = "terraform-test-groupmanager"
+ name = "ascaler-test-%s"
instance_template = "${google_compute_instance_template.foobar.self_link}"
target_pools = ["${google_compute_target_pool.foobar.self_link}"]
base_instance_name = "foobar"
@@ -173,7 +174,7 @@ resource "google_compute_instance_group_manager" "foobar" {
resource "google_compute_autoscaler" "foobar" {
description = "Resource created for Terraform acceptance testing"
- name = "terraform-test-ascaler"
+ name = "ascaler-test-%s"
zone = "us-central1-a"
target = "${google_compute_instance_group_manager.foobar.self_link}"
autoscaling_policy = {
@@ -185,11 +186,11 @@ resource "google_compute_autoscaler" "foobar" {
}
}
-}`
+}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
-const testAccAutoscaler_update = `
+var testAccAutoscaler_update = fmt.Sprintf(`
resource "google_compute_instance_template" "foobar" {
- name = "terraform-test-template-foobar"
+ name = "ascaler-test-%s"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
@@ -215,13 +216,13 @@ resource "google_compute_instance_template" "foobar" {
resource "google_compute_target_pool" "foobar" {
description = "Resource created for Terraform acceptance testing"
- name = "terraform-test-tpool-foobar"
+ name = "ascaler-test-%s"
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_compute_instance_group_manager" "foobar" {
description = "Terraform test instance group manager"
- name = "terraform-test-groupmanager"
+ name = "ascaler-test-%s"
instance_template = "${google_compute_instance_template.foobar.self_link}"
target_pools = ["${google_compute_target_pool.foobar.self_link}"]
base_instance_name = "foobar"
@@ -230,7 +231,7 @@ resource "google_compute_instance_group_manager" "foobar" {
resource "google_compute_autoscaler" "foobar" {
description = "Resource created for Terraform acceptance testing"
- name = "terraform-test-ascaler"
+ name = "ascaler-test-%s"
zone = "us-central1-a"
target = "${google_compute_instance_group_manager.foobar.self_link}"
autoscaling_policy = {
@@ -242,4 +243,4 @@ resource "google_compute_autoscaler" "foobar" {
}
}
-}`
+}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_firewall_test.go b/builtin/providers/google/resource_compute_firewall_test.go
index a4a489fba1..8edab92606 100644
--- a/builtin/providers/google/resource_compute_firewall_test.go
+++ b/builtin/providers/google/resource_compute_firewall_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
@@ -118,14 +119,14 @@ func testAccCheckComputeFirewallPorts(
}
}
-const testAccComputeFirewall_basic = `
+var testAccComputeFirewall_basic = fmt.Sprintf(`
resource "google_compute_network" "foobar" {
- name = "terraform-test"
+ name = "firewall-test-%s"
ipv4_range = "10.0.0.0/16"
}
resource "google_compute_firewall" "foobar" {
- name = "terraform-test"
+ name = "firewall-test-%s"
description = "Resource created for Terraform acceptance testing"
network = "${google_compute_network.foobar.name}"
source_tags = ["foo"]
@@ -133,16 +134,16 @@ resource "google_compute_firewall" "foobar" {
allow {
protocol = "icmp"
}
-}`
+}`, acctest.RandString(10), acctest.RandString(10))
-const testAccComputeFirewall_update = `
+var testAccComputeFirewall_update = fmt.Sprintf(`
resource "google_compute_network" "foobar" {
- name = "terraform-test"
+ name = "firewall-test-%s"
ipv4_range = "10.0.0.0/16"
}
resource "google_compute_firewall" "foobar" {
- name = "terraform-test"
+ name = "firewall-test-%s"
description = "Resource created for Terraform acceptance testing"
network = "${google_compute_network.foobar.name}"
source_tags = ["foo"]
@@ -151,4 +152,4 @@ resource "google_compute_firewall" "foobar" {
protocol = "tcp"
ports = ["80-255"]
}
-}`
+}`, acctest.RandString(10), acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_global_address_test.go b/builtin/providers/google/resource_compute_global_address_test.go
index 2ef7b97ea7..9ed49d836d 100644
--- a/builtin/providers/google/resource_compute_global_address_test.go
+++ b/builtin/providers/google/resource_compute_global_address_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
@@ -75,7 +76,7 @@ func testAccCheckComputeGlobalAddressExists(n string, addr *compute.Address) res
}
}
-const testAccComputeGlobalAddress_basic = `
+var testAccComputeGlobalAddress_basic = fmt.Sprintf(`
resource "google_compute_global_address" "foobar" {
- name = "terraform-test"
-}`
+ name = "address-test-%s"
+}`, acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_global_forwarding_rule_test.go b/builtin/providers/google/resource_compute_global_forwarding_rule_test.go
index 58f65c25d4..cadae7feb4 100644
--- a/builtin/providers/google/resource_compute_global_forwarding_rule_test.go
+++ b/builtin/providers/google/resource_compute_global_forwarding_rule_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -95,41 +96,41 @@ func testAccCheckComputeGlobalForwardingRuleExists(n string) resource.TestCheckF
}
}
-const testAccComputeGlobalForwardingRule_basic1 = `
+var testAccComputeGlobalForwardingRule_basic1 = fmt.Sprintf(`
resource "google_compute_global_forwarding_rule" "foobar" {
description = "Resource created for Terraform acceptance testing"
ip_protocol = "TCP"
- name = "terraform-test"
+ name = "gforward-test-%s"
port_range = "80"
target = "${google_compute_target_http_proxy.foobar1.self_link}"
}
resource "google_compute_target_http_proxy" "foobar1" {
description = "Resource created for Terraform acceptance testing"
- name = "terraform-test1"
+ name = "gforward-test-%s"
url_map = "${google_compute_url_map.foobar.self_link}"
}
resource "google_compute_target_http_proxy" "foobar2" {
description = "Resource created for Terraform acceptance testing"
- name = "terraform-test2"
+ name = "gforward-test-%s"
url_map = "${google_compute_url_map.foobar.self_link}"
}
resource "google_compute_backend_service" "foobar" {
- name = "service"
+ name = "gforward-test-%s"
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
}
resource "google_compute_http_health_check" "zero" {
- name = "tf-test-zero"
+ name = "gforward-test-%s"
request_path = "/"
check_interval_sec = 1
timeout_sec = 1
}
resource "google_compute_url_map" "foobar" {
- name = "myurlmap"
+ name = "gforward-test-%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
host_rule {
hosts = ["mysite.com", "myothersite.com"]
@@ -149,43 +150,44 @@ resource "google_compute_url_map" "foobar" {
service = "${google_compute_backend_service.foobar.self_link}"
}
}
-`
+`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10),
+ acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
-const testAccComputeGlobalForwardingRule_basic2 = `
+var testAccComputeGlobalForwardingRule_basic2 = fmt.Sprintf(`
resource "google_compute_global_forwarding_rule" "foobar" {
description = "Resource created for Terraform acceptance testing"
ip_protocol = "TCP"
- name = "terraform-test"
+ name = "gforward-test-%s"
port_range = "80"
target = "${google_compute_target_http_proxy.foobar2.self_link}"
}
resource "google_compute_target_http_proxy" "foobar1" {
description = "Resource created for Terraform acceptance testing"
- name = "terraform-test1"
+ name = "gforward-test-%s"
url_map = "${google_compute_url_map.foobar.self_link}"
}
resource "google_compute_target_http_proxy" "foobar2" {
description = "Resource created for Terraform acceptance testing"
- name = "terraform-test2"
+ name = "gforward-test-%s"
url_map = "${google_compute_url_map.foobar.self_link}"
}
resource "google_compute_backend_service" "foobar" {
- name = "service"
+ name = "gforward-test-%s"
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
}
resource "google_compute_http_health_check" "zero" {
- name = "tf-test-zero"
+ name = "gforward-test-%s"
request_path = "/"
check_interval_sec = 1
timeout_sec = 1
}
resource "google_compute_url_map" "foobar" {
- name = "myurlmap"
+ name = "gforward-test-%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
host_rule {
hosts = ["mysite.com", "myothersite.com"]
@@ -205,4 +207,5 @@ resource "google_compute_url_map" "foobar" {
service = "${google_compute_backend_service.foobar.self_link}"
}
}
-`
+`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10),
+ acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_http_health_check_test.go b/builtin/providers/google/resource_compute_http_health_check_test.go
index c37c770bb1..7734ab28f4 100644
--- a/builtin/providers/google/resource_compute_http_health_check_test.go
+++ b/builtin/providers/google/resource_compute_http_health_check_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
@@ -137,35 +138,35 @@ func testAccCheckComputeHttpHealthCheckThresholds(healthy, unhealthy int64, heal
}
}
-const testAccComputeHttpHealthCheck_basic = `
+var testAccComputeHttpHealthCheck_basic = fmt.Sprintf(`
resource "google_compute_http_health_check" "foobar" {
check_interval_sec = 3
description = "Resource created for Terraform acceptance testing"
healthy_threshold = 3
host = "foobar"
- name = "terraform-test"
+ name = "httphealth-test-%s"
port = "80"
request_path = "/health_check"
timeout_sec = 2
unhealthy_threshold = 3
}
-`
+`, acctest.RandString(10))
-const testAccComputeHttpHealthCheck_update1 = `
+var testAccComputeHttpHealthCheck_update1 = fmt.Sprintf(`
resource "google_compute_http_health_check" "foobar" {
- name = "terraform-test"
+ name = "httphealth-test-%s"
description = "Resource created for Terraform acceptance testing"
request_path = "/not_default"
}
-`
+`, acctest.RandString(10))
/* Change description, restore request_path to default, and change
* thresholds from defaults */
-const testAccComputeHttpHealthCheck_update2 = `
+var testAccComputeHttpHealthCheck_update2 = fmt.Sprintf(`
resource "google_compute_http_health_check" "foobar" {
- name = "terraform-test"
+ name = "httphealth-test-%s"
description = "Resource updated for Terraform acceptance testing"
healthy_threshold = 10
unhealthy_threshold = 10
}
-`
+`, acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_https_health_check_test.go b/builtin/providers/google/resource_compute_https_health_check_test.go
index d263bfd881..c7510c325c 100644
--- a/builtin/providers/google/resource_compute_https_health_check_test.go
+++ b/builtin/providers/google/resource_compute_https_health_check_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
@@ -137,35 +138,35 @@ func testAccCheckComputeHttpsHealthCheckThresholds(healthy, unhealthy int64, hea
}
}
-const testAccComputeHttpsHealthCheck_basic = `
+var testAccComputeHttpsHealthCheck_basic = fmt.Sprintf(`
resource "google_compute_https_health_check" "foobar" {
check_interval_sec = 3
description = "Resource created for Terraform acceptance testing"
healthy_threshold = 3
host = "foobar"
- name = "terraform-test"
+ name = "httpshealth-test-%s"
port = "80"
request_path = "/health_check"
timeout_sec = 2
unhealthy_threshold = 3
}
-`
+`, acctest.RandString(10))
-const testAccComputeHttpsHealthCheck_update1 = `
+var testAccComputeHttpsHealthCheck_update1 = fmt.Sprintf(`
resource "google_compute_https_health_check" "foobar" {
- name = "terraform-test"
+ name = "httpshealth-test-%s"
description = "Resource created for Terraform acceptance testing"
request_path = "/not_default"
}
-`
+`, acctest.RandString(10))
/* Change description, restore request_path to default, and change
* thresholds from defaults */
-const testAccComputeHttpsHealthCheck_update2 = `
+var testAccComputeHttpsHealthCheck_update2 = fmt.Sprintf(`
resource "google_compute_https_health_check" "foobar" {
- name = "terraform-test"
+ name = "httpshealth-test-%s"
description = "Resource updated for Terraform acceptance testing"
healthy_threshold = 10
unhealthy_threshold = 10
}
-`
+`, acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_instance_group_manager_test.go b/builtin/providers/google/resource_compute_instance_group_manager_test.go
index 5bdb116518..0cf4791cb7 100644
--- a/builtin/providers/google/resource_compute_instance_group_manager_test.go
+++ b/builtin/providers/google/resource_compute_instance_group_manager_test.go
@@ -6,6 +6,7 @@ import (
"google.golang.org/api/compute/v1"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -146,9 +147,9 @@ func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool st
}
}
-const testAccInstanceGroupManager_basic = `
+var testAccInstanceGroupManager_basic = fmt.Sprintf(`
resource "google_compute_instance_template" "igm-basic" {
- name = "terraform-test-igm-basic"
+ name = "igm-test-%s"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
@@ -174,13 +175,13 @@ resource "google_compute_instance_template" "igm-basic" {
resource "google_compute_target_pool" "igm-basic" {
description = "Resource created for Terraform acceptance testing"
- name = "terraform-test-igm-basic"
+ name = "igm-test-%s"
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_compute_instance_group_manager" "igm-basic" {
description = "Terraform test instance group manager"
- name = "terraform-test-igm-basic"
+ name = "igm-test-%s"
instance_template = "${google_compute_instance_template.igm-basic.self_link}"
target_pools = ["${google_compute_target_pool.igm-basic.self_link}"]
base_instance_name = "igm-basic"
@@ -190,17 +191,17 @@ resource "google_compute_instance_group_manager" "igm-basic" {
resource "google_compute_instance_group_manager" "igm-no-tp" {
description = "Terraform test instance group manager"
- name = "terraform-test-igm-no-tp"
+ name = "igm-test-%s"
instance_template = "${google_compute_instance_template.igm-basic.self_link}"
base_instance_name = "igm-no-tp"
zone = "us-central1-c"
target_size = 2
}
-`
+`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
-const testAccInstanceGroupManager_update = `
+var testAccInstanceGroupManager_update = fmt.Sprintf(`
resource "google_compute_instance_template" "igm-update" {
- name = "terraform-test-igm-update"
+ name = "igm-test-%s"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
@@ -226,24 +227,24 @@ resource "google_compute_instance_template" "igm-update" {
resource "google_compute_target_pool" "igm-update" {
description = "Resource created for Terraform acceptance testing"
- name = "terraform-test-igm-update"
+ name = "igm-test-%s"
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_compute_instance_group_manager" "igm-update" {
description = "Terraform test instance group manager"
- name = "terraform-test-igm-update"
+ name = "igm-test-%s"
instance_template = "${google_compute_instance_template.igm-update.self_link}"
target_pools = ["${google_compute_target_pool.igm-update.self_link}"]
base_instance_name = "igm-update"
zone = "us-central1-c"
target_size = 2
-}`
+}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
// Change IGM's instance template and target size
-const testAccInstanceGroupManager_update2 = `
+var testAccInstanceGroupManager_update2 = fmt.Sprintf(`
resource "google_compute_instance_template" "igm-update" {
- name = "terraform-test-igm-update"
+ name = "igm-test-%s"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
@@ -269,12 +270,12 @@ resource "google_compute_instance_template" "igm-update" {
resource "google_compute_target_pool" "igm-update" {
description = "Resource created for Terraform acceptance testing"
- name = "terraform-test-igm-update"
+ name = "igm-test-%s"
session_affinity = "CLIENT_IP_PROTO"
}
resource "google_compute_instance_template" "igm-update2" {
- name = "terraform-test-igm-update2"
+ name = "igm-test-%s"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
@@ -300,10 +301,10 @@ resource "google_compute_instance_template" "igm-update2" {
resource "google_compute_instance_group_manager" "igm-update" {
description = "Terraform test instance group manager"
- name = "terraform-test-igm-update"
+ name = "igm-test-%s"
instance_template = "${google_compute_instance_template.igm-update2.self_link}"
target_pools = ["${google_compute_target_pool.igm-update.self_link}"]
base_instance_name = "igm-update"
zone = "us-central1-c"
target_size = 3
-}`
+}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_instance_template_test.go b/builtin/providers/google/resource_compute_instance_template_test.go
index 82f88b4ac7..a36987b2ca 100644
--- a/builtin/providers/google/resource_compute_instance_template_test.go
+++ b/builtin/providers/google/resource_compute_instance_template_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
@@ -201,9 +202,9 @@ func testAccCheckComputeInstanceTemplateTag(instanceTemplate *compute.InstanceTe
}
}
-const testAccComputeInstanceTemplate_basic = `
+var testAccComputeInstanceTemplate_basic = fmt.Sprintf(`
resource "google_compute_instance_template" "foobar" {
- name = "terraform-test"
+ name = "instancet-test-%s"
machine_type = "n1-standard-1"
can_ip_forward = false
tags = ["foo", "bar"]
@@ -230,15 +231,15 @@ resource "google_compute_instance_template" "foobar" {
service_account {
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
}
-}`
+}`, acctest.RandString(10))
-const testAccComputeInstanceTemplate_ip = `
+var testAccComputeInstanceTemplate_ip = fmt.Sprintf(`
resource "google_compute_address" "foo" {
- name = "foo"
+ name = "instancet-test-%s"
}
resource "google_compute_instance_template" "foobar" {
- name = "terraform-test"
+ name = "instancet-test-%s"
machine_type = "n1-standard-1"
tags = ["foo", "bar"]
@@ -256,11 +257,11 @@ resource "google_compute_instance_template" "foobar" {
metadata {
foo = "bar"
}
-}`
+}`, acctest.RandString(10), acctest.RandString(10))
-const testAccComputeInstanceTemplate_disks = `
+var testAccComputeInstanceTemplate_disks = fmt.Sprintf(`
resource "google_compute_disk" "foobar" {
- name = "terraform-test-foobar"
+ name = "instancet-test-%s"
image = "debian-7-wheezy-v20140814"
size = 10
type = "pd-ssd"
@@ -268,7 +269,7 @@ resource "google_compute_disk" "foobar" {
}
resource "google_compute_instance_template" "foobar" {
- name = "terraform-test"
+ name = "instancet-test-%s"
machine_type = "n1-standard-1"
disk {
@@ -291,4 +292,4 @@ resource "google_compute_instance_template" "foobar" {
metadata {
foo = "bar"
}
-}`
+}`, acctest.RandString(10), acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_instance_test.go b/builtin/providers/google/resource_compute_instance_test.go
index 4cee16a51b..a9b571a7b1 100644
--- a/builtin/providers/google/resource_compute_instance_test.go
+++ b/builtin/providers/google/resource_compute_instance_test.go
@@ -5,6 +5,7 @@ import (
"strings"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
@@ -436,9 +437,9 @@ func testAccCheckComputeInstanceServiceAccount(instance *compute.Instance, scope
}
}
-const testAccComputeInstance_basic_deprecated_network = `
+var testAccComputeInstance_basic_deprecated_network = fmt.Sprintf(`
resource "google_compute_instance" "foobar" {
- name = "terraform-test"
+ name = "instance-test-%s"
machine_type = "n1-standard-1"
zone = "us-central1-a"
can_ip_forward = false
@@ -455,11 +456,11 @@ resource "google_compute_instance" "foobar" {
metadata {
foo = "bar"
}
-}`
+}`, acctest.RandString(10))
-const testAccComputeInstance_update_deprecated_network = `
+var testAccComputeInstance_update_deprecated_network = fmt.Sprintf(`
resource "google_compute_instance" "foobar" {
- name = "terraform-test"
+ name = "instance-test-%s"
machine_type = "n1-standard-1"
zone = "us-central1-a"
tags = ["baz"]
@@ -475,11 +476,11 @@ resource "google_compute_instance" "foobar" {
metadata {
bar = "baz"
}
-}`
+}`, acctest.RandString(10))
-const testAccComputeInstance_basic = `
+var testAccComputeInstance_basic = fmt.Sprintf(`
resource "google_compute_instance" "foobar" {
- name = "terraform-test"
+ name = "instance-test-%s"
machine_type = "n1-standard-1"
zone = "us-central1-a"
can_ip_forward = false
@@ -499,11 +500,11 @@ resource "google_compute_instance" "foobar" {
}
metadata_startup_script = "echo Hello"
-}`
+}`, acctest.RandString(10))
-const testAccComputeInstance_basic2 = `
+var testAccComputeInstance_basic2 = fmt.Sprintf(`
resource "google_compute_instance" "foobar" {
- name = "terraform-test"
+ name = "instance-test-%s"
machine_type = "n1-standard-1"
zone = "us-central1-a"
can_ip_forward = false
@@ -521,11 +522,11 @@ resource "google_compute_instance" "foobar" {
metadata {
foo = "bar"
}
-}`
+}`, acctest.RandString(10))
-const testAccComputeInstance_basic3 = `
+var testAccComputeInstance_basic3 = fmt.Sprintf(`
resource "google_compute_instance" "foobar" {
- name = "terraform-test"
+ name = "instance-test-%s"
machine_type = "n1-standard-1"
zone = "us-central1-a"
can_ip_forward = false
@@ -542,13 +543,13 @@ resource "google_compute_instance" "foobar" {
metadata {
foo = "bar"
}
-}`
+}`, acctest.RandString(10))
// Update zone to ForceNew, and change metadata k/v entirely
// Generates diff mismatch
-const testAccComputeInstance_forceNewAndChangeMetadata = `
+var testAccComputeInstance_forceNewAndChangeMetadata = fmt.Sprintf(`
resource "google_compute_instance" "foobar" {
- name = "terraform-test"
+ name = "instance-test-%s"
machine_type = "n1-standard-1"
zone = "us-central1-a"
zone = "us-central1-b"
@@ -566,12 +567,12 @@ resource "google_compute_instance" "foobar" {
metadata {
qux = "true"
}
-}`
+}`, acctest.RandString(10))
// Update metadata, tags, and network_interface
-const testAccComputeInstance_update = `
+var testAccComputeInstance_update = fmt.Sprintf(`
resource "google_compute_instance" "foobar" {
- name = "terraform-test"
+ name = "instance-test-%s"
machine_type = "n1-standard-1"
zone = "us-central1-a"
tags = ["baz"]
@@ -588,15 +589,15 @@ resource "google_compute_instance" "foobar" {
metadata {
bar = "baz"
}
-}`
+}`, acctest.RandString(10))
-const testAccComputeInstance_ip = `
+var testAccComputeInstance_ip = fmt.Sprintf(`
resource "google_compute_address" "foo" {
- name = "foo"
+ name = "instance-test-%s"
}
resource "google_compute_instance" "foobar" {
- name = "terraform-test"
+ name = "instance-test-%s"
machine_type = "n1-standard-1"
zone = "us-central1-a"
tags = ["foo", "bar"]
@@ -615,18 +616,18 @@ resource "google_compute_instance" "foobar" {
metadata {
foo = "bar"
}
-}`
+}`, acctest.RandString(10), acctest.RandString(10))
-const testAccComputeInstance_disks = `
+var testAccComputeInstance_disks = fmt.Sprintf(`
resource "google_compute_disk" "foobar" {
- name = "terraform-test-disk"
+ name = "instance-test-%s"
size = 10
type = "pd-ssd"
zone = "us-central1-a"
}
resource "google_compute_instance" "foobar" {
- name = "terraform-test"
+ name = "instance-test-%s"
machine_type = "n1-standard-1"
zone = "us-central1-a"
@@ -646,11 +647,11 @@ resource "google_compute_instance" "foobar" {
metadata {
foo = "bar"
}
-}`
+}`, acctest.RandString(10), acctest.RandString(10))
-const testAccComputeInstance_local_ssd = `
+var testAccComputeInstance_local_ssd = fmt.Sprintf(`
resource "google_compute_instance" "local-ssd" {
- name = "terraform-test"
+ name = "instance-test-%s"
machine_type = "n1-standard-1"
zone = "us-central1-a"
@@ -667,11 +668,11 @@ resource "google_compute_instance" "local-ssd" {
network = "default"
}
-}`
+}`, acctest.RandString(10))
-const testAccComputeInstance_service_account = `
+var testAccComputeInstance_service_account = fmt.Sprintf(`
resource "google_compute_instance" "foobar" {
- name = "terraform-test"
+ name = "instance-test-%s"
machine_type = "n1-standard-1"
zone = "us-central1-a"
@@ -690,11 +691,11 @@ resource "google_compute_instance" "foobar" {
"storage-ro",
]
}
-}`
+}`, acctest.RandString(10))
-const testAccComputeInstance_scheduling = `
+var testAccComputeInstance_scheduling = fmt.Sprintf(`
resource "google_compute_instance" "foobar" {
- name = "terraform-test"
+ name = "instance-test-%s"
machine_type = "n1-standard-1"
zone = "us-central1-a"
@@ -708,4 +709,4 @@ resource "google_compute_instance" "foobar" {
scheduling {
}
-}`
+}`, acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_network_test.go b/builtin/providers/google/resource_compute_network_test.go
index 89827f5762..4337bf7f71 100644
--- a/builtin/providers/google/resource_compute_network_test.go
+++ b/builtin/providers/google/resource_compute_network_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
@@ -75,8 +76,8 @@ func testAccCheckComputeNetworkExists(n string, network *compute.Network) resour
}
}
-const testAccComputeNetwork_basic = `
+var testAccComputeNetwork_basic = fmt.Sprintf(`
resource "google_compute_network" "foobar" {
- name = "terraform-test"
+ name = "network-test-%s"
ipv4_range = "10.0.0.0/16"
-}`
+}`, acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_route_test.go b/builtin/providers/google/resource_compute_route_test.go
index e4b8627e93..dff2ed0037 100644
--- a/builtin/providers/google/resource_compute_route_test.go
+++ b/builtin/providers/google/resource_compute_route_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/compute/v1"
@@ -75,16 +76,16 @@ func testAccCheckComputeRouteExists(n string, route *compute.Route) resource.Tes
}
}
-const testAccComputeRoute_basic = `
+var testAccComputeRoute_basic = fmt.Sprintf(`
resource "google_compute_network" "foobar" {
- name = "terraform-test"
+ name = "route-test-%s"
ipv4_range = "10.0.0.0/16"
}
resource "google_compute_route" "foobar" {
- name = "terraform-test"
+ name = "route-test-%s"
dest_range = "15.0.0.0/24"
network = "${google_compute_network.foobar.name}"
next_hop_ip = "10.0.1.5"
priority = 100
-}`
+}`, acctest.RandString(10), acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_ssl_certificate_test.go b/builtin/providers/google/resource_compute_ssl_certificate_test.go
index a237bea165..373e0ab303 100644
--- a/builtin/providers/google/resource_compute_ssl_certificate_test.go
+++ b/builtin/providers/google/resource_compute_ssl_certificate_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -70,11 +71,11 @@ func testAccCheckComputeSslCertificateExists(n string) resource.TestCheckFunc {
}
}
-const testAccComputeSslCertificate_basic = `
+var testAccComputeSslCertificate_basic = fmt.Sprintf(`
resource "google_compute_ssl_certificate" "foobar" {
- name = "terraform-test"
+ name = "sslcert-test-%s"
description = "very descriptive"
private_key = "${file("test-fixtures/ssl_cert/test.key")}"
certificate = "${file("test-fixtures/ssl_cert/test.crt")}"
}
-`
+`, acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_target_http_proxy_test.go b/builtin/providers/google/resource_compute_target_http_proxy_test.go
index 6337ada57f..c1dd3bbe7f 100644
--- a/builtin/providers/google/resource_compute_target_http_proxy_test.go
+++ b/builtin/providers/google/resource_compute_target_http_proxy_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -97,27 +98,27 @@ func testAccCheckComputeTargetHttpProxyExists(n string) resource.TestCheckFunc {
}
}
-const testAccComputeTargetHttpProxy_basic1 = `
+var testAccComputeTargetHttpProxy_basic1 = fmt.Sprintf(`
resource "google_compute_target_http_proxy" "foobar" {
description = "Resource created for Terraform acceptance testing"
- name = "terraform-test"
+ name = "httpproxy-test-%s"
url_map = "${google_compute_url_map.foobar1.self_link}"
}
resource "google_compute_backend_service" "foobar" {
- name = "service"
+ name = "httpproxy-test-%s"
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
}
resource "google_compute_http_health_check" "zero" {
- name = "tf-test-zero"
+ name = "httpproxy-test-%s"
request_path = "/"
check_interval_sec = 1
timeout_sec = 1
}
resource "google_compute_url_map" "foobar1" {
- name = "myurlmap1"
+ name = "httpproxy-test-%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
host_rule {
hosts = ["mysite.com", "myothersite.com"]
@@ -139,7 +140,7 @@ resource "google_compute_url_map" "foobar1" {
}
resource "google_compute_url_map" "foobar2" {
- name = "myurlmap2"
+ name = "httpproxy-test-%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
host_rule {
hosts = ["mysite.com", "myothersite.com"]
@@ -159,29 +160,29 @@ resource "google_compute_url_map" "foobar2" {
service = "${google_compute_backend_service.foobar.self_link}"
}
}
-`
+`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
-const testAccComputeTargetHttpProxy_basic2 = `
+var testAccComputeTargetHttpProxy_basic2 = fmt.Sprintf(`
resource "google_compute_target_http_proxy" "foobar" {
description = "Resource created for Terraform acceptance testing"
- name = "terraform-test"
+ name = "httpproxy-test-%s"
url_map = "${google_compute_url_map.foobar2.self_link}"
}
resource "google_compute_backend_service" "foobar" {
- name = "service"
+ name = "httpproxy-test-%s"
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
}
resource "google_compute_http_health_check" "zero" {
- name = "tf-test-zero"
+ name = "httpproxy-test-%s"
request_path = "/"
check_interval_sec = 1
timeout_sec = 1
}
resource "google_compute_url_map" "foobar1" {
- name = "myurlmap1"
+ name = "httpproxy-test-%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
host_rule {
hosts = ["mysite.com", "myothersite.com"]
@@ -203,7 +204,7 @@ resource "google_compute_url_map" "foobar1" {
}
resource "google_compute_url_map" "foobar2" {
- name = "myurlmap2"
+ name = "httpproxy-test-%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
host_rule {
hosts = ["mysite.com", "myothersite.com"]
@@ -223,4 +224,4 @@ resource "google_compute_url_map" "foobar2" {
service = "${google_compute_backend_service.foobar.self_link}"
}
}
-`
+`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_target_https_proxy_test.go b/builtin/providers/google/resource_compute_target_https_proxy_test.go
index af3704d3e0..f8d731f080 100644
--- a/builtin/providers/google/resource_compute_target_https_proxy_test.go
+++ b/builtin/providers/google/resource_compute_target_https_proxy_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -97,28 +98,28 @@ func testAccCheckComputeTargetHttpsProxyExists(n string) resource.TestCheckFunc
}
}
-const testAccComputeTargetHttpsProxy_basic1 = `
+var testAccComputeTargetHttpsProxy_basic1 = fmt.Sprintf(`
resource "google_compute_target_https_proxy" "foobar" {
description = "Resource created for Terraform acceptance testing"
- name = "terraform-test"
+ name = "httpsproxy-test-%s"
url_map = "${google_compute_url_map.foobar.self_link}"
ssl_certificates = ["${google_compute_ssl_certificate.foobar1.self_link}"]
}
resource "google_compute_backend_service" "foobar" {
- name = "service"
+ name = "httpsproxy-test-%s"
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
}
resource "google_compute_http_health_check" "zero" {
- name = "tf-test-zero"
+ name = "httpsproxy-test-%s"
request_path = "/"
check_interval_sec = 1
timeout_sec = 1
}
resource "google_compute_url_map" "foobar" {
- name = "myurlmap"
+ name = "httpsproxy-test-%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
host_rule {
hosts = ["mysite.com", "myothersite.com"]
@@ -140,42 +141,43 @@ resource "google_compute_url_map" "foobar" {
}
resource "google_compute_ssl_certificate" "foobar1" {
- name = "terraform-test1"
+ name = "httpsproxy-test-%s"
description = "very descriptive"
private_key = "${file("test-fixtures/ssl_cert/test.key")}"
certificate = "${file("test-fixtures/ssl_cert/test.crt")}"
}
resource "google_compute_ssl_certificate" "foobar2" {
- name = "terraform-test2"
+ name = "httpsproxy-test-%s"
description = "very descriptive"
private_key = "${file("test-fixtures/ssl_cert/test.key")}"
certificate = "${file("test-fixtures/ssl_cert/test.crt")}"
}
-`
+`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10),
+ acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
-const testAccComputeTargetHttpsProxy_basic2 = `
+var testAccComputeTargetHttpsProxy_basic2 = fmt.Sprintf(`
resource "google_compute_target_https_proxy" "foobar" {
description = "Resource created for Terraform acceptance testing"
- name = "terraform-test"
+ name = "httpsproxy-test-%s"
url_map = "${google_compute_url_map.foobar.self_link}"
ssl_certificates = ["${google_compute_ssl_certificate.foobar1.self_link}"]
}
resource "google_compute_backend_service" "foobar" {
- name = "service"
+ name = "httpsproxy-test-%s"
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
}
resource "google_compute_http_health_check" "zero" {
- name = "tf-test-zero"
+ name = "httpsproxy-test-%s"
request_path = "/"
check_interval_sec = 1
timeout_sec = 1
}
resource "google_compute_url_map" "foobar" {
- name = "myurlmap"
+ name = "httpsproxy-test-%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
host_rule {
hosts = ["mysite.com", "myothersite.com"]
@@ -197,16 +199,17 @@ resource "google_compute_url_map" "foobar" {
}
resource "google_compute_ssl_certificate" "foobar1" {
- name = "terraform-test1"
+ name = "httpsproxy-test-%s"
description = "very descriptive"
private_key = "${file("test-fixtures/ssl_cert/test.key")}"
certificate = "${file("test-fixtures/ssl_cert/test.crt")}"
}
resource "google_compute_ssl_certificate" "foobar2" {
- name = "terraform-test2"
+ name = "httpsproxy-test-%s"
description = "very descriptive"
private_key = "${file("test-fixtures/ssl_cert/test.key")}"
certificate = "${file("test-fixtures/ssl_cert/test.crt")}"
}
-`
+`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10),
+ acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_target_pool_test.go b/builtin/providers/google/resource_compute_target_pool_test.go
index 4a65eaac65..2ab48d319c 100644
--- a/builtin/providers/google/resource_compute_target_pool_test.go
+++ b/builtin/providers/google/resource_compute_target_pool_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -71,10 +72,10 @@ func testAccCheckComputeTargetPoolExists(n string) resource.TestCheckFunc {
}
}
-const testAccComputeTargetPool_basic = `
+var testAccComputeTargetPool_basic = fmt.Sprintf(`
resource "google_compute_target_pool" "foobar" {
description = "Resource created for Terraform acceptance testing"
instances = ["us-central1-a/foo", "us-central1-b/bar"]
- name = "terraform-test"
+ name = "tpool-test-%s"
session_affinity = "CLIENT_IP_PROTO"
-}`
+}`, acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_url_map_test.go b/builtin/providers/google/resource_compute_url_map_test.go
index ac2f08b135..0f43df5f4e 100644
--- a/builtin/providers/google/resource_compute_url_map_test.go
+++ b/builtin/providers/google/resource_compute_url_map_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -119,21 +120,21 @@ func testAccCheckComputeUrlMapExists(n string) resource.TestCheckFunc {
}
}
-const testAccComputeUrlMap_basic1 = `
+var testAccComputeUrlMap_basic1 = fmt.Sprintf(`
resource "google_compute_backend_service" "foobar" {
- name = "service"
+ name = "urlmap-test-%s"
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
}
resource "google_compute_http_health_check" "zero" {
- name = "tf-test-zero"
+ name = "urlmap-test-%s"
request_path = "/"
check_interval_sec = 1
timeout_sec = 1
}
resource "google_compute_url_map" "foobar" {
- name = "myurlmap"
+ name = "urlmap-test-%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
host_rule {
@@ -156,23 +157,23 @@ resource "google_compute_url_map" "foobar" {
service = "${google_compute_backend_service.foobar.self_link}"
}
}
-`
+`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
-const testAccComputeUrlMap_basic2 = `
+var testAccComputeUrlMap_basic2 = fmt.Sprintf(`
resource "google_compute_backend_service" "foobar" {
- name = "service"
+ name = "urlmap-test-%s"
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
}
resource "google_compute_http_health_check" "zero" {
- name = "tf-test-zero"
+ name = "urlmap-test-%s"
request_path = "/"
check_interval_sec = 1
timeout_sec = 1
}
resource "google_compute_url_map" "foobar" {
- name = "myurlmap"
+ name = "urlmap-test-%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
host_rule {
@@ -195,23 +196,23 @@ resource "google_compute_url_map" "foobar" {
service = "${google_compute_backend_service.foobar.self_link}"
}
}
-`
+`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
-const testAccComputeUrlMap_advanced1 = `
+var testAccComputeUrlMap_advanced1 = fmt.Sprintf(`
resource "google_compute_backend_service" "foobar" {
- name = "service"
+ name = "urlmap-test-%s"
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
}
resource "google_compute_http_health_check" "zero" {
- name = "tf-test-zero"
+ name = "urlmap-test-%s"
request_path = "/"
check_interval_sec = 1
timeout_sec = 1
}
resource "google_compute_url_map" "foobar" {
- name = "myurlmap"
+ name = "urlmap-test-%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
host_rule {
@@ -242,23 +243,23 @@ resource "google_compute_url_map" "foobar" {
}
}
}
-`
+`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
-const testAccComputeUrlMap_advanced2 = `
+var testAccComputeUrlMap_advanced2 = fmt.Sprintf(`
resource "google_compute_backend_service" "foobar" {
- name = "service"
+ name = "urlmap-test-%s"
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
}
resource "google_compute_http_health_check" "zero" {
- name = "tf-test-zero"
+ name = "urlmap-test-%s"
request_path = "/"
check_interval_sec = 1
timeout_sec = 1
}
resource "google_compute_url_map" "foobar" {
- name = "myurlmap"
+ name = "urlmap-test-%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
host_rule {
@@ -308,4 +309,4 @@ resource "google_compute_url_map" "foobar" {
}
}
}
-`
+`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_vpn_gateway_test.go b/builtin/providers/google/resource_compute_vpn_gateway_test.go
index 1d62704239..1011808a89 100644
--- a/builtin/providers/google/resource_compute_vpn_gateway_test.go
+++ b/builtin/providers/google/resource_compute_vpn_gateway_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -79,13 +80,13 @@ func testAccCheckComputeVpnGatewayExists(n string) resource.TestCheckFunc {
}
}
-const testAccComputeVpnGateway_basic = `
+var testAccComputeVpnGateway_basic = fmt.Sprintf(`
resource "google_compute_network" "foobar" {
- name = "tf-test-network"
+ name = "gateway-test-%s"
ipv4_range = "10.0.0.0/16"
}
resource "google_compute_vpn_gateway" "foobar" {
- name = "tf-test-vpn-gateway"
+ name = "gateway-test-%s"
network = "${google_compute_network.foobar.self_link}"
region = "us-central1"
-} `
+}`, acctest.RandString(10), acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_vpn_tunnel_test.go b/builtin/providers/google/resource_compute_vpn_tunnel_test.go
index 4bb666879b..007441eeba 100644
--- a/builtin/providers/google/resource_compute_vpn_tunnel_test.go
+++ b/builtin/providers/google/resource_compute_vpn_tunnel_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -79,29 +80,29 @@ func testAccCheckComputeVpnTunnelExists(n string) resource.TestCheckFunc {
}
}
-const testAccComputeVpnTunnel_basic = `
+var testAccComputeVpnTunnel_basic = fmt.Sprintf(`
resource "google_compute_network" "foobar" {
- name = "tf-test-network"
+ name = "tunnel-test-%s"
ipv4_range = "10.0.0.0/16"
}
resource "google_compute_address" "foobar" {
- name = "tf-test-static-ip"
+ name = "tunnel-test-%s"
region = "us-central1"
}
resource "google_compute_vpn_gateway" "foobar" {
- name = "tf-test-vpn-gateway"
+ name = "tunnel-test-%s"
network = "${google_compute_network.foobar.self_link}"
region = "${google_compute_address.foobar.region}"
}
resource "google_compute_forwarding_rule" "foobar_esp" {
- name = "tf-test-fr-esp"
+ name = "tunnel-test-%s"
region = "${google_compute_vpn_gateway.foobar.region}"
ip_protocol = "ESP"
ip_address = "${google_compute_address.foobar.address}"
target = "${google_compute_vpn_gateway.foobar.self_link}"
}
resource "google_compute_forwarding_rule" "foobar_udp500" {
- name = "tf-test-fr-udp500"
+ name = "tunnel-test-%s"
region = "${google_compute_forwarding_rule.foobar_esp.region}"
ip_protocol = "UDP"
port_range = "500"
@@ -109,7 +110,7 @@ resource "google_compute_forwarding_rule" "foobar_udp500" {
target = "${google_compute_vpn_gateway.foobar.self_link}"
}
resource "google_compute_forwarding_rule" "foobar_udp4500" {
- name = "tf-test-fr-udp4500"
+ name = "tunnel-test-%s"
region = "${google_compute_forwarding_rule.foobar_udp500.region}"
ip_protocol = "UDP"
port_range = "4500"
@@ -117,9 +118,11 @@ resource "google_compute_forwarding_rule" "foobar_udp4500" {
target = "${google_compute_vpn_gateway.foobar.self_link}"
}
resource "google_compute_vpn_tunnel" "foobar" {
- name = "tf-test-vpn-tunnel"
+ name = "tunnel-test-%s"
region = "${google_compute_forwarding_rule.foobar_udp4500.region}"
target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}"
shared_secret = "unguessable"
peer_ip = "0.0.0.0"
-}`
+}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10),
+ acctest.RandString(10), acctest.RandString(10), acctest.RandString(10),
+ acctest.RandString(10))
diff --git a/builtin/providers/google/resource_container_cluster_test.go b/builtin/providers/google/resource_container_cluster_test.go
index ea4a5a597b..11cf1378e7 100644
--- a/builtin/providers/google/resource_container_cluster_test.go
+++ b/builtin/providers/google/resource_container_cluster_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -89,9 +90,9 @@ func testAccCheckContainerClusterExists(n string) resource.TestCheckFunc {
}
}
-const testAccContainerCluster_basic = `
+var testAccContainerCluster_basic = fmt.Sprintf(`
resource "google_container_cluster" "primary" {
- name = "terraform-foo-bar-test"
+ name = "cluster-test-%s"
zone = "us-central1-a"
initial_node_count = 3
@@ -99,11 +100,11 @@ resource "google_container_cluster" "primary" {
username = "mr.yoda"
password = "adoy.rm"
}
-}`
+}`, acctest.RandString(10))
-const testAccContainerCluster_withNodeConfig = `
+var testAccContainerCluster_withNodeConfig = fmt.Sprintf(`
resource "google_container_cluster" "with_node_config" {
- name = "terraform-foo-bar-with-nodeconfig"
+ name = "cluster-test-%s"
zone = "us-central1-f"
initial_node_count = 1
@@ -122,4 +123,4 @@ resource "google_container_cluster" "with_node_config" {
"https://www.googleapis.com/auth/monitoring"
]
}
-}`
+}`, acctest.RandString(10))
diff --git a/builtin/providers/google/resource_dns_managed_zone_test.go b/builtin/providers/google/resource_dns_managed_zone_test.go
index 2f91dfcc8e..b90fc8697d 100644
--- a/builtin/providers/google/resource_dns_managed_zone_test.go
+++ b/builtin/providers/google/resource_dns_managed_zone_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"google.golang.org/api/dns/v1"
@@ -75,9 +76,9 @@ func testAccCheckDnsManagedZoneExists(n string, zone *dns.ManagedZone) resource.
}
}
-const testAccDnsManagedZone_basic = `
+var testAccDnsManagedZone_basic = fmt.Sprintf(`
resource "google_dns_managed_zone" "foobar" {
- name = "terraform-test"
+ name = "mzone-test-%s"
dns_name = "terraform.test."
description = "Test Description"
-}`
+}`, acctest.RandString(10))
diff --git a/builtin/providers/google/resource_dns_record_set_test.go b/builtin/providers/google/resource_dns_record_set_test.go
index 5ff1233885..0eb331d5b7 100644
--- a/builtin/providers/google/resource_dns_record_set_test.go
+++ b/builtin/providers/google/resource_dns_record_set_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -76,9 +77,9 @@ func testAccCheckDnsRecordSetExists(name string) resource.TestCheckFunc {
}
}
-const testAccDnsRecordSet_basic = `
+var testAccDnsRecordSet_basic = fmt.Sprintf(`
resource "google_dns_managed_zone" "parent-zone" {
- name = "terraform-test-zone"
+ name = "dnsrecord-test-%s"
dns_name = "terraform.test."
description = "Test Description"
}
@@ -89,4 +90,4 @@ resource "google_dns_record_set" "foobar" {
rrdatas = ["127.0.0.1", "127.0.0.10"]
ttl = 600
}
-`
+`, acctest.RandString(10))
diff --git a/builtin/providers/google/resource_pubsub_subscription_test.go b/builtin/providers/google/resource_pubsub_subscription_test.go
index 0bbed3aed7..9cc0a218b3 100644
--- a/builtin/providers/google/resource_pubsub_subscription_test.go
+++ b/builtin/providers/google/resource_pubsub_subscription_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -62,12 +63,12 @@ func testAccPubsubSubscriptionExists(n string) resource.TestCheckFunc {
}
}
-const testAccPubsubSubscription = `
+var testAccPubsubSubscription = fmt.Sprintf(`
resource "google_pubsub_topic" "foobar_sub" {
- name = "foobar_sub"
+ name = "pssub-test-%s"
}
resource "google_pubsub_subscription" "foobar_sub" {
- name = "foobar_sub"
+ name = "pssub-test-%s"
topic = "${google_pubsub_topic.foobar_sub.name}"
-}`
+}`, acctest.RandString(10), acctest.RandString(10))
diff --git a/builtin/providers/google/resource_pubsub_topic_test.go b/builtin/providers/google/resource_pubsub_topic_test.go
index 3d6c655c7d..f81b9c21d1 100644
--- a/builtin/providers/google/resource_pubsub_topic_test.go
+++ b/builtin/providers/google/resource_pubsub_topic_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -62,7 +63,7 @@ func testAccPubsubTopicExists(n string) resource.TestCheckFunc {
}
}
-const testAccPubsubTopic = `
+var testAccPubsubTopic = fmt.Sprintf(`
resource "google_pubsub_topic" "foobar" {
- name = "foobar"
-}`
+ name = "pstopic-test-%s"
+}`, acctest.RandString(10))
diff --git a/builtin/providers/google/resource_sql_database_test.go b/builtin/providers/google/resource_sql_database_test.go
index 70d7e5f056..30b146a9c7 100644
--- a/builtin/providers/google/resource_sql_database_test.go
+++ b/builtin/providers/google/resource_sql_database_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -100,7 +101,7 @@ func testAccGoogleSqlDatabaseDestroy(s *terraform.State) error {
var testGoogleSqlDatabase_basic = fmt.Sprintf(`
resource "google_sql_database_instance" "instance" {
- name = "tf-lw-%d"
+ name = "sqldatabase-test-%s"
region = "us-central"
settings {
tier = "D0"
@@ -108,7 +109,7 @@ resource "google_sql_database_instance" "instance" {
}
resource "google_sql_database" "database" {
- name = "database1"
+ name = "sqldatabase-test-%s"
instance = "${google_sql_database_instance.instance.name}"
}
-`, genRandInt())
+`, acctest.RandString(10), acctest.RandString(10))
From 055482a9f5e3be0d0d115d59a08df55818155efa Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Tue, 5 Jan 2016 16:16:32 -0600
Subject: [PATCH 415/664] providers/aws: Update VPN Gateway test
---
.../aws/resource_aws_vpn_gateway_test.go | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_vpn_gateway_test.go b/builtin/providers/aws/resource_aws_vpn_gateway_test.go
index d6b01f3134..3a4bb17472 100644
--- a/builtin/providers/aws/resource_aws_vpn_gateway_test.go
+++ b/builtin/providers/aws/resource_aws_vpn_gateway_test.go
@@ -128,10 +128,21 @@ func testAccCheckVpnGatewayDestroy(s *terraform.State) error {
VpnGatewayIds: []*string{aws.String(rs.Primary.ID)},
})
if err == nil {
- if len(resp.VpnGateways) > 0 {
- return fmt.Errorf("still exists")
+ var v *ec2.VpnGateway
+ for _, g := range resp.VpnGateways {
+ if *g.VpnGatewayId == rs.Primary.ID {
+ v = g
+ }
}
+ if v == nil {
+ // wasn't found
+ return nil
+ }
+
+ if *v.State != "deleted" {
+ return fmt.Errorf("Expected VpnGateway to be in deleted state, but was not: %s", v)
+ }
return nil
}
From a8d2ad3ebe4f20bffb0c36edde64fbcd91e827a5 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 5 Jan 2016 17:37:54 -0600
Subject: [PATCH 416/664] refactor s3 bucket test to expect non-empty plan
pushing to master but paging @catsby for post-hoc review
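For context, a minimal usage sketch of the new knob (the field and the two check helpers are
the ones added/used in the diff below; the config constant name here is hypothetical):

	resource.TestStep{
		Config: testAccAWSS3BucketConfig_basic, // hypothetical config name
		Check: resource.ComposeTestCheckFunc(
			testAccCheckAWSS3BucketExists("aws_s3_bucket.bucket"),
			// Destroying the bucket out-of-band guarantees the follow-up
			// plan is non-empty, which would normally fail the step.
			testAccCheckAWSS3DestroyBucket("aws_s3_bucket.bucket"),
		),
		// Tell the test harness that a non-empty plan is expected here
		// instead of treating it as a perpetual-diff failure.
		ExpectNonEmptyPlan: true,
	},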
---
.../aws/resource_aws_s3_bucket_test.go | 1 +
helper/resource/testing.go | 32 ++++++++++++-------
2 files changed, 21 insertions(+), 12 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_s3_bucket_test.go b/builtin/providers/aws/resource_aws_s3_bucket_test.go
index f37ae882ac..f303243d5d 100644
--- a/builtin/providers/aws/resource_aws_s3_bucket_test.go
+++ b/builtin/providers/aws/resource_aws_s3_bucket_test.go
@@ -188,6 +188,7 @@ func TestAccAWSS3Bucket_shouldFailNotFound(t *testing.T) {
testAccCheckAWSS3BucketExists("aws_s3_bucket.bucket"),
testAccCheckAWSS3DestroyBucket("aws_s3_bucket.bucket"),
),
+ ExpectNonEmptyPlan: true,
},
},
})
diff --git a/helper/resource/testing.go b/helper/resource/testing.go
index db74d8d2ee..94e12498ab 100644
--- a/helper/resource/testing.go
+++ b/helper/resource/testing.go
@@ -82,6 +82,10 @@ type TestStep struct {
// Destroy will create a destroy plan if set to true.
Destroy bool
+
+ // ExpectNonEmptyPlan can be set to true for specific types of tests that are
+ // looking to verify that a diff occurs
+ ExpectNonEmptyPlan bool
}
// Test performs an acceptance test on a resource.
@@ -273,13 +277,13 @@ func testStep(
// Now, verify that Plan is now empty and we don't have a perpetual diff issue
// We do this with TWO plans. One without a refresh.
- if p, err := ctx.Plan(); err != nil {
+ var p *terraform.Plan
+ if p, err = ctx.Plan(); err != nil {
return state, fmt.Errorf("Error on follow-up plan: %s", err)
- } else {
- if p.Diff != nil && !p.Diff.Empty() {
- return state, fmt.Errorf(
- "After applying this step, the plan was not empty:\n\n%s", p)
- }
+ }
+ if p.Diff != nil && !p.Diff.Empty() && !step.ExpectNonEmptyPlan {
+ return state, fmt.Errorf(
+ "After applying this step, the plan was not empty:\n\n%s", p)
}
// And another after a Refresh.
@@ -288,13 +292,17 @@ func testStep(
return state, fmt.Errorf(
"Error on follow-up refresh: %s", err)
}
- if p, err := ctx.Plan(); err != nil {
+ if p, err = ctx.Plan(); err != nil {
return state, fmt.Errorf("Error on second follow-up plan: %s", err)
- } else {
- if p.Diff != nil && !p.Diff.Empty() {
- return state, fmt.Errorf(
- "After applying this step and refreshing, the plan was not empty:\n\n%s", p)
- }
+ }
+ if p.Diff != nil && !p.Diff.Empty() && !step.ExpectNonEmptyPlan {
+ return state, fmt.Errorf(
+ "After applying this step and refreshing, the plan was not empty:\n\n%s", p)
+ }
+
+ // Made it here, but expected a non-empty plan, fail!
+ if step.ExpectNonEmptyPlan && (p.Diff == nil || p.Diff.Empty()) {
+ return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!")
}
// Made it here? Good job test step!
From 9a0ecd05eb77bf3840068df142c63ea054d10f36 Mon Sep 17 00:00:00 2001
From: Lars Wander
Date: Tue, 5 Jan 2016 19:49:06 -0500
Subject: [PATCH 417/664] provider/google: limit hardcoded test resource names
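In sketch form, the pattern applied throughout the diff below (helper and config-builder
names are the ones this patch introduces):

	// Generate unique names per run so tests no longer collide on fixed,
	// hardcoded resource names left over from earlier runs.
	networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10))
	firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10))

	// Config strings become builder funcs that take the generated names.
	Config: testAccComputeFirewall_basic(networkName, firewallName),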
---
.../google/resource_compute_firewall_test.go | 72 +--
...rce_compute_global_forwarding_rule_test.go | 222 ++++----
...rce_compute_instance_group_manager_test.go | 288 +++++-----
.../google/resource_compute_instance_test.go | 522 ++++++++++--------
...resource_compute_target_http_proxy_test.go | 228 ++++----
.../google/resource_dns_record_set_test.go | 41 +-
.../google/resource_sql_database_test.go | 4 +-
7 files changed, 735 insertions(+), 642 deletions(-)
diff --git a/builtin/providers/google/resource_compute_firewall_test.go b/builtin/providers/google/resource_compute_firewall_test.go
index 8edab92606..3fa6b305b7 100644
--- a/builtin/providers/google/resource_compute_firewall_test.go
+++ b/builtin/providers/google/resource_compute_firewall_test.go
@@ -12,6 +12,8 @@ import (
func TestAccComputeFirewall_basic(t *testing.T) {
var firewall compute.Firewall
+ networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10))
+ firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -19,7 +21,7 @@ func TestAccComputeFirewall_basic(t *testing.T) {
CheckDestroy: testAccCheckComputeFirewallDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeFirewall_basic,
+ Config: testAccComputeFirewall_basic(networkName, firewallName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeFirewallExists(
"google_compute_firewall.foobar", &firewall),
@@ -31,6 +33,8 @@ func TestAccComputeFirewall_basic(t *testing.T) {
func TestAccComputeFirewall_update(t *testing.T) {
var firewall compute.Firewall
+ networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10))
+ firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -38,14 +42,14 @@ func TestAccComputeFirewall_update(t *testing.T) {
CheckDestroy: testAccCheckComputeFirewallDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeFirewall_basic,
+ Config: testAccComputeFirewall_basic(networkName, firewallName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeFirewallExists(
"google_compute_firewall.foobar", &firewall),
),
},
resource.TestStep{
- Config: testAccComputeFirewall_update,
+ Config: testAccComputeFirewall_update(networkName, firewallName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeFirewallExists(
"google_compute_firewall.foobar", &firewall),
@@ -119,37 +123,41 @@ func testAccCheckComputeFirewallPorts(
}
}
-var testAccComputeFirewall_basic = fmt.Sprintf(`
-resource "google_compute_network" "foobar" {
- name = "firewall-test-%s"
- ipv4_range = "10.0.0.0/16"
+func testAccComputeFirewall_basic(network, firewall string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_network" "foobar" {
+ name = "firewall-test-%s"
+ ipv4_range = "10.0.0.0/16"
+ }
+
+ resource "google_compute_firewall" "foobar" {
+ name = "firewall-test-%s"
+ description = "Resource created for Terraform acceptance testing"
+ network = "${google_compute_network.foobar.name}"
+ source_tags = ["foo"]
+
+ allow {
+ protocol = "icmp"
+ }
+ }`, network, firewall)
}
-resource "google_compute_firewall" "foobar" {
- name = "firewall-test-%s"
- description = "Resource created for Terraform acceptance testing"
- network = "${google_compute_network.foobar.name}"
- source_tags = ["foo"]
-
- allow {
- protocol = "icmp"
+func testAccComputeFirewall_update(network, firewall string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_network" "foobar" {
+ name = "firewall-test-%s"
+ ipv4_range = "10.0.0.0/16"
}
-}`, acctest.RandString(10), acctest.RandString(10))
-var testAccComputeFirewall_update = fmt.Sprintf(`
-resource "google_compute_network" "foobar" {
- name = "firewall-test-%s"
- ipv4_range = "10.0.0.0/16"
+ resource "google_compute_firewall" "foobar" {
+ name = "firewall-test-%s"
+ description = "Resource created for Terraform acceptance testing"
+ network = "${google_compute_network.foobar.name}"
+ source_tags = ["foo"]
+
+ allow {
+ protocol = "tcp"
+ ports = ["80-255"]
+ }
+ }`, network, firewall)
}
-
-resource "google_compute_firewall" "foobar" {
- name = "firewall-test-%s"
- description = "Resource created for Terraform acceptance testing"
- network = "${google_compute_network.foobar.name}"
- source_tags = ["foo"]
-
- allow {
- protocol = "tcp"
- ports = ["80-255"]
- }
-}`, acctest.RandString(10), acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_global_forwarding_rule_test.go b/builtin/providers/google/resource_compute_global_forwarding_rule_test.go
index cadae7feb4..f81361c7b8 100644
--- a/builtin/providers/google/resource_compute_global_forwarding_rule_test.go
+++ b/builtin/providers/google/resource_compute_global_forwarding_rule_test.go
@@ -10,13 +10,20 @@ import (
)
func TestAccComputeGlobalForwardingRule_basic(t *testing.T) {
+ fr := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+ proxy1 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+ proxy2 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+ backend := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+ hc := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+ urlmap := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeGlobalForwardingRuleDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeGlobalForwardingRule_basic1,
+ Config: testAccComputeGlobalForwardingRule_basic1(fr, proxy1, proxy2, backend, hc, urlmap),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeGlobalForwardingRuleExists(
"google_compute_global_forwarding_rule.foobar"),
@@ -27,13 +34,20 @@ func TestAccComputeGlobalForwardingRule_basic(t *testing.T) {
}
func TestAccComputeGlobalForwardingRule_update(t *testing.T) {
+ fr := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+ proxy1 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+ proxy2 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+ backend := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+ hc := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+ urlmap := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeGlobalForwardingRuleDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeGlobalForwardingRule_basic1,
+ Config: testAccComputeGlobalForwardingRule_basic1(fr, proxy1, proxy2, backend, hc, urlmap),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeGlobalForwardingRuleExists(
"google_compute_global_forwarding_rule.foobar"),
@@ -41,7 +55,7 @@ func TestAccComputeGlobalForwardingRule_update(t *testing.T) {
},
resource.TestStep{
- Config: testAccComputeGlobalForwardingRule_basic2,
+ Config: testAccComputeGlobalForwardingRule_basic2(fr, proxy1, proxy2, backend, hc, urlmap),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeGlobalForwardingRuleExists(
"google_compute_global_forwarding_rule.foobar"),
@@ -96,116 +110,116 @@ func testAccCheckComputeGlobalForwardingRuleExists(n string) resource.TestCheckF
}
}
-var testAccComputeGlobalForwardingRule_basic1 = fmt.Sprintf(`
-resource "google_compute_global_forwarding_rule" "foobar" {
- description = "Resource created for Terraform acceptance testing"
- ip_protocol = "TCP"
- name = "gforward-test-%s"
- port_range = "80"
- target = "${google_compute_target_http_proxy.foobar1.self_link}"
-}
-
-resource "google_compute_target_http_proxy" "foobar1" {
- description = "Resource created for Terraform acceptance testing"
- name = "gforward-test-%s"
- url_map = "${google_compute_url_map.foobar.self_link}"
-}
-
-resource "google_compute_target_http_proxy" "foobar2" {
- description = "Resource created for Terraform acceptance testing"
- name = "gforward-test-%s"
- url_map = "${google_compute_url_map.foobar.self_link}"
-}
-
-resource "google_compute_backend_service" "foobar" {
- name = "gforward-test-%s"
- health_checks = ["${google_compute_http_health_check.zero.self_link}"]
-}
-
-resource "google_compute_http_health_check" "zero" {
- name = "gforward-test-%s"
- request_path = "/"
- check_interval_sec = 1
- timeout_sec = 1
-}
-
-resource "google_compute_url_map" "foobar" {
- name = "gforward-test-%s"
- default_service = "${google_compute_backend_service.foobar.self_link}"
- host_rule {
- hosts = ["mysite.com", "myothersite.com"]
- path_matcher = "boop"
+func testAccComputeGlobalForwardingRule_basic1(fr, proxy1, proxy2, backend, hc, urlmap string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_global_forwarding_rule" "foobar" {
+ description = "Resource created for Terraform acceptance testing"
+ ip_protocol = "TCP"
+ name = "%s"
+ port_range = "80"
+ target = "${google_compute_target_http_proxy.foobar1.self_link}"
}
- path_matcher {
+
+ resource "google_compute_target_http_proxy" "foobar1" {
+ description = "Resource created for Terraform acceptance testing"
+ name = "%s"
+ url_map = "${google_compute_url_map.foobar.self_link}"
+ }
+
+ resource "google_compute_target_http_proxy" "foobar2" {
+ description = "Resource created for Terraform acceptance testing"
+ name = "%s"
+ url_map = "${google_compute_url_map.foobar.self_link}"
+ }
+
+ resource "google_compute_backend_service" "foobar" {
+ name = "%s"
+ health_checks = ["${google_compute_http_health_check.zero.self_link}"]
+ }
+
+ resource "google_compute_http_health_check" "zero" {
+ name = "%s"
+ request_path = "/"
+ check_interval_sec = 1
+ timeout_sec = 1
+ }
+
+ resource "google_compute_url_map" "foobar" {
+ name = "%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
- name = "boop"
- path_rule {
- paths = ["/*"]
+ host_rule {
+ hosts = ["mysite.com", "myothersite.com"]
+ path_matcher = "boop"
+ }
+ path_matcher {
+ default_service = "${google_compute_backend_service.foobar.self_link}"
+ name = "boop"
+ path_rule {
+ paths = ["/*"]
+ service = "${google_compute_backend_service.foobar.self_link}"
+ }
+ }
+ test {
+ host = "mysite.com"
+ path = "/*"
service = "${google_compute_backend_service.foobar.self_link}"
}
+ }`, fr, proxy1, proxy2, backend, hc, urlmap)
+}
+
+func testAccComputeGlobalForwardingRule_basic2(fr, proxy1, proxy2, backend, hc, urlmap string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_global_forwarding_rule" "foobar" {
+ description = "Resource created for Terraform acceptance testing"
+ ip_protocol = "TCP"
+ name = "%s"
+ port_range = "80"
+ target = "${google_compute_target_http_proxy.foobar2.self_link}"
}
- test {
- host = "mysite.com"
- path = "/*"
- service = "${google_compute_backend_service.foobar.self_link}"
+
+ resource "google_compute_target_http_proxy" "foobar1" {
+ description = "Resource created for Terraform acceptance testing"
+ name = "%s"
+ url_map = "${google_compute_url_map.foobar.self_link}"
}
-}
-`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10),
- acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
-var testAccComputeGlobalForwardingRule_basic2 = fmt.Sprintf(`
-resource "google_compute_global_forwarding_rule" "foobar" {
- description = "Resource created for Terraform acceptance testing"
- ip_protocol = "TCP"
- name = "gforward-test-%s"
- port_range = "80"
- target = "${google_compute_target_http_proxy.foobar2.self_link}"
-}
-
-resource "google_compute_target_http_proxy" "foobar1" {
- description = "Resource created for Terraform acceptance testing"
- name = "gforward-test-%s"
- url_map = "${google_compute_url_map.foobar.self_link}"
-}
-
-resource "google_compute_target_http_proxy" "foobar2" {
- description = "Resource created for Terraform acceptance testing"
- name = "gforward-test-%s"
- url_map = "${google_compute_url_map.foobar.self_link}"
-}
-
-resource "google_compute_backend_service" "foobar" {
- name = "gforward-test-%s"
- health_checks = ["${google_compute_http_health_check.zero.self_link}"]
-}
-
-resource "google_compute_http_health_check" "zero" {
- name = "gforward-test-%s"
- request_path = "/"
- check_interval_sec = 1
- timeout_sec = 1
-}
-
-resource "google_compute_url_map" "foobar" {
- name = "gforward-test-%s"
- default_service = "${google_compute_backend_service.foobar.self_link}"
- host_rule {
- hosts = ["mysite.com", "myothersite.com"]
- path_matcher = "boop"
+ resource "google_compute_target_http_proxy" "foobar2" {
+ description = "Resource created for Terraform acceptance testing"
+ name = "%s"
+ url_map = "${google_compute_url_map.foobar.self_link}"
}
- path_matcher {
+
+ resource "google_compute_backend_service" "foobar" {
+ name = "%s"
+ health_checks = ["${google_compute_http_health_check.zero.self_link}"]
+ }
+
+ resource "google_compute_http_health_check" "zero" {
+ name = "%s"
+ request_path = "/"
+ check_interval_sec = 1
+ timeout_sec = 1
+ }
+
+ resource "google_compute_url_map" "foobar" {
+ name = "%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
- name = "boop"
- path_rule {
- paths = ["/*"]
+ host_rule {
+ hosts = ["mysite.com", "myothersite.com"]
+ path_matcher = "boop"
+ }
+ path_matcher {
+ default_service = "${google_compute_backend_service.foobar.self_link}"
+ name = "boop"
+ path_rule {
+ paths = ["/*"]
+ service = "${google_compute_backend_service.foobar.self_link}"
+ }
+ }
+ test {
+ host = "mysite.com"
+ path = "/*"
service = "${google_compute_backend_service.foobar.self_link}"
}
- }
- test {
- host = "mysite.com"
- path = "/*"
- service = "${google_compute_backend_service.foobar.self_link}"
- }
+ }`, fr, proxy1, proxy2, backend, hc, urlmap)
}
-`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10),
- acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_instance_group_manager_test.go b/builtin/providers/google/resource_compute_instance_group_manager_test.go
index 0cf4791cb7..f7f2c147cc 100644
--- a/builtin/providers/google/resource_compute_instance_group_manager_test.go
+++ b/builtin/providers/google/resource_compute_instance_group_manager_test.go
@@ -14,13 +14,18 @@ import (
func TestAccInstanceGroupManager_basic(t *testing.T) {
var manager compute.InstanceGroupManager
+ template := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+ target := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+ igm1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+ igm2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccInstanceGroupManager_basic,
+ Config: testAccInstanceGroupManager_basic(template, target, igm1, igm2),
Check: resource.ComposeTestCheckFunc(
testAccCheckInstanceGroupManagerExists(
"google_compute_instance_group_manager.igm-basic", &manager),
@@ -35,26 +40,31 @@ func TestAccInstanceGroupManager_basic(t *testing.T) {
func TestAccInstanceGroupManager_update(t *testing.T) {
var manager compute.InstanceGroupManager
+ template1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+ target := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+ template2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+ igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccInstanceGroupManager_update,
+ Config: testAccInstanceGroupManager_update(template1, target, igm),
Check: resource.ComposeTestCheckFunc(
testAccCheckInstanceGroupManagerExists(
"google_compute_instance_group_manager.igm-update", &manager),
),
},
resource.TestStep{
- Config: testAccInstanceGroupManager_update2,
+ Config: testAccInstanceGroupManager_update2(template1, target, template2, igm),
Check: resource.ComposeTestCheckFunc(
testAccCheckInstanceGroupManagerExists(
"google_compute_instance_group_manager.igm-update", &manager),
testAccCheckInstanceGroupManagerUpdated(
"google_compute_instance_group_manager.igm-update", 3,
- "google_compute_target_pool.igm-update", "terraform-test-igm-update2"),
+ "google_compute_target_pool.igm-update", template2),
),
},
},
@@ -147,164 +157,170 @@ func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool st
}
}
-var testAccInstanceGroupManager_basic = fmt.Sprintf(`
-resource "google_compute_instance_template" "igm-basic" {
- name = "igm-test-%s"
- machine_type = "n1-standard-1"
- can_ip_forward = false
- tags = ["foo", "bar"]
+func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_instance_template" "igm-basic" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ can_ip_forward = false
+ tags = ["foo", "bar"]
- disk {
- source_image = "debian-cloud/debian-7-wheezy-v20140814"
- auto_delete = true
- boot = true
+ disk {
+ source_image = "debian-cloud/debian-7-wheezy-v20140814"
+ auto_delete = true
+ boot = true
+ }
+
+ network_interface {
+ network = "default"
+ }
+
+ metadata {
+ foo = "bar"
+ }
+
+ service_account {
+ scopes = ["userinfo-email", "compute-ro", "storage-ro"]
+ }
}
- network_interface {
- network = "default"
+ resource "google_compute_target_pool" "igm-basic" {
+ description = "Resource created for Terraform acceptance testing"
+ name = "%s"
+ session_affinity = "CLIENT_IP_PROTO"
}
- metadata {
- foo = "bar"
+ resource "google_compute_instance_group_manager" "igm-basic" {
+ description = "Terraform test instance group manager"
+ name = "%s"
+ instance_template = "${google_compute_instance_template.igm-basic.self_link}"
+ target_pools = ["${google_compute_target_pool.igm-basic.self_link}"]
+ base_instance_name = "igm-basic"
+ zone = "us-central1-c"
+ target_size = 2
}
- service_account {
- scopes = ["userinfo-email", "compute-ro", "storage-ro"]
+ resource "google_compute_instance_group_manager" "igm-no-tp" {
+ description = "Terraform test instance group manager"
+ name = "%s"
+ instance_template = "${google_compute_instance_template.igm-basic.self_link}"
+ base_instance_name = "igm-no-tp"
+ zone = "us-central1-c"
+ target_size = 2
}
+ `, template, target, igm1, igm2)
}
-resource "google_compute_target_pool" "igm-basic" {
- description = "Resource created for Terraform acceptance testing"
- name = "igm-test-%s"
- session_affinity = "CLIENT_IP_PROTO"
-}
+func testAccInstanceGroupManager_update(template, target, igm string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_instance_template" "igm-update" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ can_ip_forward = false
+ tags = ["foo", "bar"]
-resource "google_compute_instance_group_manager" "igm-basic" {
- description = "Terraform test instance group manager"
- name = "igm-test-%s"
- instance_template = "${google_compute_instance_template.igm-basic.self_link}"
- target_pools = ["${google_compute_target_pool.igm-basic.self_link}"]
- base_instance_name = "igm-basic"
- zone = "us-central1-c"
- target_size = 2
-}
+ disk {
+ source_image = "debian-cloud/debian-7-wheezy-v20140814"
+ auto_delete = true
+ boot = true
+ }
-resource "google_compute_instance_group_manager" "igm-no-tp" {
- description = "Terraform test instance group manager"
- name = "igm-test-%s"
- instance_template = "${google_compute_instance_template.igm-basic.self_link}"
- base_instance_name = "igm-no-tp"
- zone = "us-central1-c"
- target_size = 2
-}
-`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
+ network_interface {
+ network = "default"
+ }
-var testAccInstanceGroupManager_update = fmt.Sprintf(`
-resource "google_compute_instance_template" "igm-update" {
- name = "igm-test-%s"
- machine_type = "n1-standard-1"
- can_ip_forward = false
- tags = ["foo", "bar"]
+ metadata {
+ foo = "bar"
+ }
- disk {
- source_image = "debian-cloud/debian-7-wheezy-v20140814"
- auto_delete = true
- boot = true
+ service_account {
+ scopes = ["userinfo-email", "compute-ro", "storage-ro"]
+ }
}
- network_interface {
- network = "default"
+ resource "google_compute_target_pool" "igm-update" {
+ description = "Resource created for Terraform acceptance testing"
+ name = "%s"
+ session_affinity = "CLIENT_IP_PROTO"
}
- metadata {
- foo = "bar"
- }
-
- service_account {
- scopes = ["userinfo-email", "compute-ro", "storage-ro"]
- }
+ resource "google_compute_instance_group_manager" "igm-update" {
+ description = "Terraform test instance group manager"
+ name = "%s"
+ instance_template = "${google_compute_instance_template.igm-update.self_link}"
+ target_pools = ["${google_compute_target_pool.igm-update.self_link}"]
+ base_instance_name = "igm-update"
+ zone = "us-central1-c"
+ target_size = 2
+ }`, template, target, igm)
}
-resource "google_compute_target_pool" "igm-update" {
- description = "Resource created for Terraform acceptance testing"
- name = "igm-test-%s"
- session_affinity = "CLIENT_IP_PROTO"
-}
-
-resource "google_compute_instance_group_manager" "igm-update" {
- description = "Terraform test instance group manager"
- name = "igm-test-%s"
- instance_template = "${google_compute_instance_template.igm-update.self_link}"
- target_pools = ["${google_compute_target_pool.igm-update.self_link}"]
- base_instance_name = "igm-update"
- zone = "us-central1-c"
- target_size = 2
-}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
-
// Change IGM's instance template and target size
-var testAccInstanceGroupManager_update2 = fmt.Sprintf(`
-resource "google_compute_instance_template" "igm-update" {
- name = "igm-test-%s"
- machine_type = "n1-standard-1"
- can_ip_forward = false
- tags = ["foo", "bar"]
+func testAccInstanceGroupManager_update2(template1, target, template2, igm string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_instance_template" "igm-update" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ can_ip_forward = false
+ tags = ["foo", "bar"]
- disk {
- source_image = "debian-cloud/debian-7-wheezy-v20140814"
- auto_delete = true
- boot = true
+ disk {
+ source_image = "debian-cloud/debian-7-wheezy-v20140814"
+ auto_delete = true
+ boot = true
+ }
+
+ network_interface {
+ network = "default"
+ }
+
+ metadata {
+ foo = "bar"
+ }
+
+ service_account {
+ scopes = ["userinfo-email", "compute-ro", "storage-ro"]
+ }
}
- network_interface {
- network = "default"
+ resource "google_compute_target_pool" "igm-update" {
+ description = "Resource created for Terraform acceptance testing"
+ name = "%s"
+ session_affinity = "CLIENT_IP_PROTO"
}
- metadata {
- foo = "bar"
+ resource "google_compute_instance_template" "igm-update2" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ can_ip_forward = false
+ tags = ["foo", "bar"]
+
+ disk {
+ source_image = "debian-cloud/debian-7-wheezy-v20140814"
+ auto_delete = true
+ boot = true
+ }
+
+ network_interface {
+ network = "default"
+ }
+
+ metadata {
+ foo = "bar"
+ }
+
+ service_account {
+ scopes = ["userinfo-email", "compute-ro", "storage-ro"]
+ }
}
- service_account {
- scopes = ["userinfo-email", "compute-ro", "storage-ro"]
- }
+ resource "google_compute_instance_group_manager" "igm-update" {
+ description = "Terraform test instance group manager"
+ name = "%s"
+ instance_template = "${google_compute_instance_template.igm-update2.self_link}"
+ target_pools = ["${google_compute_target_pool.igm-update.self_link}"]
+ base_instance_name = "igm-update"
+ zone = "us-central1-c"
+ target_size = 3
+ }`, template1, target, template2, igm)
}
-
-resource "google_compute_target_pool" "igm-update" {
- description = "Resource created for Terraform acceptance testing"
- name = "igm-test-%s"
- session_affinity = "CLIENT_IP_PROTO"
-}
-
-resource "google_compute_instance_template" "igm-update2" {
- name = "igm-test-%s"
- machine_type = "n1-standard-1"
- can_ip_forward = false
- tags = ["foo", "bar"]
-
- disk {
- source_image = "debian-cloud/debian-7-wheezy-v20140814"
- auto_delete = true
- boot = true
- }
-
- network_interface {
- network = "default"
- }
-
- metadata {
- foo = "bar"
- }
-
- service_account {
- scopes = ["userinfo-email", "compute-ro", "storage-ro"]
- }
-}
-
-resource "google_compute_instance_group_manager" "igm-update" {
- description = "Terraform test instance group manager"
- name = "igm-test-%s"
- instance_template = "${google_compute_instance_template.igm-update2.self_link}"
- target_pools = ["${google_compute_target_pool.igm-update.self_link}"]
- base_instance_name = "igm-update"
- zone = "us-central1-c"
- target_size = 3
-}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
diff --git a/builtin/providers/google/resource_compute_instance_test.go b/builtin/providers/google/resource_compute_instance_test.go
index a9b571a7b1..9a2c3a7879 100644
--- a/builtin/providers/google/resource_compute_instance_test.go
+++ b/builtin/providers/google/resource_compute_instance_test.go
@@ -13,6 +13,7 @@ import (
func TestAccComputeInstance_basic_deprecated_network(t *testing.T) {
var instance compute.Instance
+ var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -20,13 +21,13 @@ func TestAccComputeInstance_basic_deprecated_network(t *testing.T) {
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeInstance_basic_deprecated_network,
+ Config: testAccComputeInstance_basic_deprecated_network(instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
testAccCheckComputeInstanceTag(&instance, "foo"),
testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"),
- testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true),
+ testAccCheckComputeInstanceDisk(&instance, instanceName, true, true),
),
},
},
@@ -35,6 +36,7 @@ func TestAccComputeInstance_basic_deprecated_network(t *testing.T) {
func TestAccComputeInstance_basic1(t *testing.T) {
var instance compute.Instance
+ var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -42,14 +44,14 @@ func TestAccComputeInstance_basic1(t *testing.T) {
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeInstance_basic,
+ Config: testAccComputeInstance_basic(instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
testAccCheckComputeInstanceTag(&instance, "foo"),
testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"),
testAccCheckComputeInstanceMetadata(&instance, "baz", "qux"),
- testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true),
+ testAccCheckComputeInstanceDisk(&instance, instanceName, true, true),
),
},
},
@@ -58,6 +60,7 @@ func TestAccComputeInstance_basic1(t *testing.T) {
func TestAccComputeInstance_basic2(t *testing.T) {
var instance compute.Instance
+ var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -65,13 +68,13 @@ func TestAccComputeInstance_basic2(t *testing.T) {
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeInstance_basic2,
+ Config: testAccComputeInstance_basic2(instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
testAccCheckComputeInstanceTag(&instance, "foo"),
testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"),
- testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true),
+ testAccCheckComputeInstanceDisk(&instance, instanceName, true, true),
),
},
},
@@ -80,6 +83,7 @@ func TestAccComputeInstance_basic2(t *testing.T) {
func TestAccComputeInstance_basic3(t *testing.T) {
var instance compute.Instance
+ var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -87,13 +91,13 @@ func TestAccComputeInstance_basic3(t *testing.T) {
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeInstance_basic3,
+ Config: testAccComputeInstance_basic3(instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
testAccCheckComputeInstanceTag(&instance, "foo"),
testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"),
- testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true),
+ testAccCheckComputeInstanceDisk(&instance, instanceName, true, true),
),
},
},
@@ -102,6 +106,8 @@ func TestAccComputeInstance_basic3(t *testing.T) {
func TestAccComputeInstance_IP(t *testing.T) {
var instance compute.Instance
+ var ipName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
+ var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -109,7 +115,7 @@ func TestAccComputeInstance_IP(t *testing.T) {
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeInstance_ip,
+ Config: testAccComputeInstance_ip(ipName, instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
@@ -122,6 +128,8 @@ func TestAccComputeInstance_IP(t *testing.T) {
func TestAccComputeInstance_disks(t *testing.T) {
var instance compute.Instance
+ var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
+ var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -129,12 +137,12 @@ func TestAccComputeInstance_disks(t *testing.T) {
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeInstance_disks,
+ Config: testAccComputeInstance_disks(diskName, instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
- testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true),
- testAccCheckComputeInstanceDisk(&instance, "terraform-test-disk", false, false),
+ testAccCheckComputeInstanceDisk(&instance, instanceName, true, true),
+ testAccCheckComputeInstanceDisk(&instance, diskName, false, false),
),
},
},
@@ -143,6 +151,7 @@ func TestAccComputeInstance_disks(t *testing.T) {
func TestAccComputeInstance_local_ssd(t *testing.T) {
var instance compute.Instance
+ var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -150,11 +159,11 @@ func TestAccComputeInstance_local_ssd(t *testing.T) {
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeInstance_local_ssd,
+ Config: testAccComputeInstance_local_ssd(instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.local-ssd", &instance),
- testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true),
+ testAccCheckComputeInstanceDisk(&instance, instanceName, true, true),
),
},
},
@@ -163,6 +172,7 @@ func TestAccComputeInstance_local_ssd(t *testing.T) {
func TestAccComputeInstance_update_deprecated_network(t *testing.T) {
var instance compute.Instance
+ var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -170,14 +180,14 @@ func TestAccComputeInstance_update_deprecated_network(t *testing.T) {
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeInstance_basic_deprecated_network,
+ Config: testAccComputeInstance_basic_deprecated_network(instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
),
},
resource.TestStep{
- Config: testAccComputeInstance_update_deprecated_network,
+ Config: testAccComputeInstance_update_deprecated_network(instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
@@ -192,6 +202,7 @@ func TestAccComputeInstance_update_deprecated_network(t *testing.T) {
func TestAccComputeInstance_forceNewAndChangeMetadata(t *testing.T) {
var instance compute.Instance
+ var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -199,14 +210,14 @@ func TestAccComputeInstance_forceNewAndChangeMetadata(t *testing.T) {
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeInstance_basic,
+ Config: testAccComputeInstance_basic(instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
),
},
resource.TestStep{
- Config: testAccComputeInstance_forceNewAndChangeMetadata,
+ Config: testAccComputeInstance_forceNewAndChangeMetadata(instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
@@ -220,6 +231,7 @@ func TestAccComputeInstance_forceNewAndChangeMetadata(t *testing.T) {
func TestAccComputeInstance_update(t *testing.T) {
var instance compute.Instance
+ var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -227,14 +239,14 @@ func TestAccComputeInstance_update(t *testing.T) {
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeInstance_basic,
+ Config: testAccComputeInstance_basic(instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
),
},
resource.TestStep{
- Config: testAccComputeInstance_update,
+ Config: testAccComputeInstance_update(instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
@@ -250,6 +262,7 @@ func TestAccComputeInstance_update(t *testing.T) {
func TestAccComputeInstance_service_account(t *testing.T) {
var instance compute.Instance
+ var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -257,7 +270,7 @@ func TestAccComputeInstance_service_account(t *testing.T) {
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeInstance_service_account,
+ Config: testAccComputeInstance_service_account(instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
@@ -275,6 +288,7 @@ func TestAccComputeInstance_service_account(t *testing.T) {
func TestAccComputeInstance_scheduling(t *testing.T) {
var instance compute.Instance
+ var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -282,7 +296,7 @@ func TestAccComputeInstance_scheduling(t *testing.T) {
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeInstance_scheduling,
+ Config: testAccComputeInstance_scheduling(instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
@@ -437,276 +451,300 @@ func testAccCheckComputeInstanceServiceAccount(instance *compute.Instance, scope
}
}
-var testAccComputeInstance_basic_deprecated_network = fmt.Sprintf(`
-resource "google_compute_instance" "foobar" {
- name = "instance-test-%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
- can_ip_forward = false
- tags = ["foo", "bar"]
+func testAccComputeInstance_basic_deprecated_network(instance string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_instance" "foobar" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ zone = "us-central1-a"
+ can_ip_forward = false
+ tags = ["foo", "bar"]
- disk {
- image = "debian-7-wheezy-v20140814"
- }
+ disk {
+ image = "debian-7-wheezy-v20140814"
+ }
- network {
- source = "default"
- }
+ network {
+ source = "default"
+ }
- metadata {
- foo = "bar"
- }
-}`, acctest.RandString(10))
+ metadata {
+ foo = "bar"
+ }
+ }`, instance)
+}
-var testAccComputeInstance_update_deprecated_network = fmt.Sprintf(`
-resource "google_compute_instance" "foobar" {
- name = "instance-test-%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
- tags = ["baz"]
+func testAccComputeInstance_update_deprecated_network(instance string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_instance" "foobar" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ zone = "us-central1-a"
+ tags = ["baz"]
- disk {
- image = "debian-7-wheezy-v20140814"
- }
+ disk {
+ image = "debian-7-wheezy-v20140814"
+ }
- network {
- source = "default"
- }
+ network {
+ source = "default"
+ }
- metadata {
- bar = "baz"
- }
-}`, acctest.RandString(10))
+ metadata {
+ bar = "baz"
+ }
+ }`, instance)
+}
-var testAccComputeInstance_basic = fmt.Sprintf(`
-resource "google_compute_instance" "foobar" {
- name = "instance-test-%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
- can_ip_forward = false
- tags = ["foo", "bar"]
+func testAccComputeInstance_basic(instance string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_instance" "foobar" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ zone = "us-central1-a"
+ can_ip_forward = false
+ tags = ["foo", "bar"]
- disk {
- image = "debian-7-wheezy-v20140814"
- }
+ disk {
+ image = "debian-7-wheezy-v20140814"
+ }
- network_interface {
- network = "default"
- }
+ network_interface {
+ network = "default"
+ }
- metadata {
- foo = "bar"
- baz = "qux"
- }
+ metadata {
+ foo = "bar"
+ baz = "qux"
+ }
- metadata_startup_script = "echo Hello"
-}`, acctest.RandString(10))
+ metadata_startup_script = "echo Hello"
+ }`, instance)
+}
-var testAccComputeInstance_basic2 = fmt.Sprintf(`
-resource "google_compute_instance" "foobar" {
- name = "instance-test-%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
- can_ip_forward = false
- tags = ["foo", "bar"]
+func testAccComputeInstance_basic2(instance string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_instance" "foobar" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ zone = "us-central1-a"
+ can_ip_forward = false
+ tags = ["foo", "bar"]
- disk {
- image = "debian-cloud/debian-7-wheezy-v20140814"
- }
+ disk {
+ image = "debian-cloud/debian-7-wheezy-v20140814"
+ }
- network_interface {
- network = "default"
- }
+ network_interface {
+ network = "default"
+ }
- metadata {
- foo = "bar"
- }
-}`, acctest.RandString(10))
+ metadata {
+ foo = "bar"
+ }
+ }`, instance)
+}
-var testAccComputeInstance_basic3 = fmt.Sprintf(`
-resource "google_compute_instance" "foobar" {
- name = "instance-test-%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
- can_ip_forward = false
- tags = ["foo", "bar"]
+func testAccComputeInstance_basic3(instance string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_instance" "foobar" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ zone = "us-central1-a"
+ can_ip_forward = false
+ tags = ["foo", "bar"]
- disk {
- image = "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814"
- }
+ disk {
+ image = "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814"
+ }
- network_interface {
- network = "default"
- }
+ network_interface {
+ network = "default"
+ }
- metadata {
- foo = "bar"
- }
-}`, acctest.RandString(10))
+ metadata {
+ foo = "bar"
+ }
+ }`, instance)
+}
// Update zone to ForceNew, and change metadata k/v entirely
// Generates diff mismatch
-var testAccComputeInstance_forceNewAndChangeMetadata = fmt.Sprintf(`
-resource "google_compute_instance" "foobar" {
- name = "instance-test-%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
- zone = "us-central1-b"
- tags = ["baz"]
+func testAccComputeInstance_forceNewAndChangeMetadata(instance string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_instance" "foobar" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ zone = "us-central1-a"
+ zone = "us-central1-b"
+ tags = ["baz"]
- disk {
- image = "debian-7-wheezy-v20140814"
- }
+ disk {
+ image = "debian-7-wheezy-v20140814"
+ }
- network_interface {
- network = "default"
- access_config { }
- }
+ network_interface {
+ network = "default"
+ access_config { }
+ }
- metadata {
- qux = "true"
- }
-}`, acctest.RandString(10))
+ metadata {
+ qux = "true"
+ }
+ }`, instance)
+}
// Update metadata, tags, and network_interface
-var testAccComputeInstance_update = fmt.Sprintf(`
-resource "google_compute_instance" "foobar" {
- name = "instance-test-%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
- tags = ["baz"]
+func testAccComputeInstance_update(instance string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_instance" "foobar" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ zone = "us-central1-a"
+ tags = ["baz"]
- disk {
- image = "debian-7-wheezy-v20140814"
- }
-
- network_interface {
- network = "default"
- access_config { }
- }
-
- metadata {
- bar = "baz"
- }
-}`, acctest.RandString(10))
-
-var testAccComputeInstance_ip = fmt.Sprintf(`
-resource "google_compute_address" "foo" {
- name = "instance-test-%s"
-}
-
-resource "google_compute_instance" "foobar" {
- name = "instance-test-%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
- tags = ["foo", "bar"]
-
- disk {
- image = "debian-7-wheezy-v20140814"
- }
-
- network_interface {
- network = "default"
- access_config {
- nat_ip = "${google_compute_address.foo.address}"
+ disk {
+ image = "debian-7-wheezy-v20140814"
}
- }
- metadata {
- foo = "bar"
- }
-}`, acctest.RandString(10), acctest.RandString(10))
+ network_interface {
+ network = "default"
+ access_config { }
+ }
-var testAccComputeInstance_disks = fmt.Sprintf(`
-resource "google_compute_disk" "foobar" {
- name = "instance-test-%s"
- size = 10
- type = "pd-ssd"
- zone = "us-central1-a"
+ metadata {
+ bar = "baz"
+ }
+ }`, instance)
}
-resource "google_compute_instance" "foobar" {
- name = "instance-test-%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
-
- disk {
- image = "debian-7-wheezy-v20140814"
+func testAccComputeInstance_ip(ip, instance string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_address" "foo" {
+ name = "%s"
}
- disk {
- disk = "${google_compute_disk.foobar.name}"
- auto_delete = false
+ resource "google_compute_instance" "foobar" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ zone = "us-central1-a"
+ tags = ["foo", "bar"]
+
+ disk {
+ image = "debian-7-wheezy-v20140814"
+ }
+
+ network_interface {
+ network = "default"
+ access_config {
+ nat_ip = "${google_compute_address.foo.address}"
+ }
+ }
+
+ metadata {
+ foo = "bar"
+ }
+ }`, ip, instance)
+}
+
+func testAccComputeInstance_disks(disk, instance string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_disk" "foobar" {
+ name = "%s"
+ size = 10
+ type = "pd-ssd"
+ zone = "us-central1-a"
}
- network_interface {
- network = "default"
- }
+ resource "google_compute_instance" "foobar" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ zone = "us-central1-a"
- metadata {
- foo = "bar"
- }
-}`, acctest.RandString(10), acctest.RandString(10))
+ disk {
+ image = "debian-7-wheezy-v20140814"
+ }
-var testAccComputeInstance_local_ssd = fmt.Sprintf(`
-resource "google_compute_instance" "local-ssd" {
- name = "instance-test-%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
+ disk {
+ disk = "${google_compute_disk.foobar.name}"
+ auto_delete = false
+ }
- disk {
- image = "debian-7-wheezy-v20140814"
- }
+ network_interface {
+ network = "default"
+ }
- disk {
- type = "local-ssd"
- scratch = true
- }
+ metadata {
+ foo = "bar"
+ }
+ }`, disk, instance)
+}
- network_interface {
- network = "default"
- }
+func testAccComputeInstance_local_ssd(instance string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_instance" "local-ssd" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ zone = "us-central1-a"
-}`, acctest.RandString(10))
+ disk {
+ image = "debian-7-wheezy-v20140814"
+ }
-var testAccComputeInstance_service_account = fmt.Sprintf(`
-resource "google_compute_instance" "foobar" {
- name = "instance-test-%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
+ disk {
+ type = "local-ssd"
+ scratch = true
+ }
- disk {
- image = "debian-7-wheezy-v20140814"
- }
+ network_interface {
+ network = "default"
+ }
- network_interface {
- network = "default"
- }
+ }`, instance)
+}
- service_account {
- scopes = [
- "userinfo-email",
- "compute-ro",
- "storage-ro",
- ]
- }
-}`, acctest.RandString(10))
+func testAccComputeInstance_service_account(instance string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_instance" "foobar" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ zone = "us-central1-a"
-var testAccComputeInstance_scheduling = fmt.Sprintf(`
-resource "google_compute_instance" "foobar" {
- name = "instance-test-%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
+ disk {
+ image = "debian-7-wheezy-v20140814"
+ }
- disk {
- image = "debian-7-wheezy-v20140814"
- }
+ network_interface {
+ network = "default"
+ }
- network_interface {
- network = "default"
- }
+ service_account {
+ scopes = [
+ "userinfo-email",
+ "compute-ro",
+ "storage-ro",
+ ]
+ }
+ }`, instance)
+}
- scheduling {
- }
-}`, acctest.RandString(10))
+func testAccComputeInstance_scheduling(instance string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_instance" "foobar" {
+ name = "%s"
+ machine_type = "n1-standard-1"
+ zone = "us-central1-a"
+
+ disk {
+ image = "debian-7-wheezy-v20140814"
+ }
+
+ network_interface {
+ network = "default"
+ }
+
+ scheduling {
+ }
+ }`, instance)
+}
diff --git a/builtin/providers/google/resource_compute_target_http_proxy_test.go b/builtin/providers/google/resource_compute_target_http_proxy_test.go
index c1dd3bbe7f..591a3eaa55 100644
--- a/builtin/providers/google/resource_compute_target_http_proxy_test.go
+++ b/builtin/providers/google/resource_compute_target_http_proxy_test.go
@@ -10,6 +10,11 @@ import (
)
func TestAccComputeTargetHttpProxy_basic(t *testing.T) {
+ target := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+ backend := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+ hc := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+ urlmap1 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+ urlmap2 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -17,7 +22,7 @@ func TestAccComputeTargetHttpProxy_basic(t *testing.T) {
CheckDestroy: testAccCheckComputeTargetHttpProxyDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeTargetHttpProxy_basic1,
+ Config: testAccComputeTargetHttpProxy_basic1(target, backend, hc, urlmap1, urlmap2),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeTargetHttpProxyExists(
"google_compute_target_http_proxy.foobar"),
@@ -28,6 +33,11 @@ func TestAccComputeTargetHttpProxy_basic(t *testing.T) {
}
func TestAccComputeTargetHttpProxy_update(t *testing.T) {
+ target := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+ backend := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+ hc := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+ urlmap1 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+ urlmap2 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -35,7 +45,7 @@ func TestAccComputeTargetHttpProxy_update(t *testing.T) {
CheckDestroy: testAccCheckComputeTargetHttpProxyDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccComputeTargetHttpProxy_basic1,
+ Config: testAccComputeTargetHttpProxy_basic1(target, backend, hc, urlmap1, urlmap2),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeTargetHttpProxyExists(
"google_compute_target_http_proxy.foobar"),
@@ -43,7 +53,7 @@ func TestAccComputeTargetHttpProxy_update(t *testing.T) {
},
resource.TestStep{
- Config: testAccComputeTargetHttpProxy_basic2,
+ Config: testAccComputeTargetHttpProxy_basic2(target, backend, hc, urlmap1, urlmap2),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeTargetHttpProxyExists(
"google_compute_target_http_proxy.foobar"),
@@ -98,130 +108,134 @@ func testAccCheckComputeTargetHttpProxyExists(n string) resource.TestCheckFunc {
}
}
-var testAccComputeTargetHttpProxy_basic1 = fmt.Sprintf(`
-resource "google_compute_target_http_proxy" "foobar" {
- description = "Resource created for Terraform acceptance testing"
- name = "httpproxy-test-%s"
- url_map = "${google_compute_url_map.foobar1.self_link}"
-}
-
-resource "google_compute_backend_service" "foobar" {
- name = "httpproxy-test-%s"
- health_checks = ["${google_compute_http_health_check.zero.self_link}"]
-}
-
-resource "google_compute_http_health_check" "zero" {
- name = "httpproxy-test-%s"
- request_path = "/"
- check_interval_sec = 1
- timeout_sec = 1
-}
-
-resource "google_compute_url_map" "foobar1" {
- name = "httpproxy-test-%s"
- default_service = "${google_compute_backend_service.foobar.self_link}"
- host_rule {
- hosts = ["mysite.com", "myothersite.com"]
- path_matcher = "boop"
+func testAccComputeTargetHttpProxy_basic1(target, backend, hc, urlmap1, urlmap2 string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_target_http_proxy" "foobar" {
+ description = "Resource created for Terraform acceptance testing"
+ name = "%s"
+ url_map = "${google_compute_url_map.foobar1.self_link}"
}
- path_matcher {
+
+ resource "google_compute_backend_service" "foobar" {
+ name = "%s"
+ health_checks = ["${google_compute_http_health_check.zero.self_link}"]
+ }
+
+ resource "google_compute_http_health_check" "zero" {
+ name = "%s"
+ request_path = "/"
+ check_interval_sec = 1
+ timeout_sec = 1
+ }
+
+ resource "google_compute_url_map" "foobar1" {
+ name = "%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
- name = "boop"
- path_rule {
- paths = ["/*"]
+ host_rule {
+ hosts = ["mysite.com", "myothersite.com"]
+ path_matcher = "boop"
+ }
+ path_matcher {
+ default_service = "${google_compute_backend_service.foobar.self_link}"
+ name = "boop"
+ path_rule {
+ paths = ["/*"]
+ service = "${google_compute_backend_service.foobar.self_link}"
+ }
+ }
+ test {
+ host = "mysite.com"
+ path = "/*"
service = "${google_compute_backend_service.foobar.self_link}"
}
}
- test {
- host = "mysite.com"
- path = "/*"
- service = "${google_compute_backend_service.foobar.self_link}"
- }
-}
-resource "google_compute_url_map" "foobar2" {
- name = "httpproxy-test-%s"
- default_service = "${google_compute_backend_service.foobar.self_link}"
- host_rule {
- hosts = ["mysite.com", "myothersite.com"]
- path_matcher = "boop"
- }
- path_matcher {
+ resource "google_compute_url_map" "foobar2" {
+ name = "%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
- name = "boop"
- path_rule {
- paths = ["/*"]
+ host_rule {
+ hosts = ["mysite.com", "myothersite.com"]
+ path_matcher = "boop"
+ }
+ path_matcher {
+ default_service = "${google_compute_backend_service.foobar.self_link}"
+ name = "boop"
+ path_rule {
+ paths = ["/*"]
+ service = "${google_compute_backend_service.foobar.self_link}"
+ }
+ }
+ test {
+ host = "mysite.com"
+ path = "/*"
service = "${google_compute_backend_service.foobar.self_link}"
}
}
- test {
- host = "mysite.com"
- path = "/*"
- service = "${google_compute_backend_service.foobar.self_link}"
+ `, target, backend, hc, urlmap1, urlmap2)
+}
+
+func testAccComputeTargetHttpProxy_basic2(target, backend, hc, urlmap1, urlmap2 string) string {
+ return fmt.Sprintf(`
+ resource "google_compute_target_http_proxy" "foobar" {
+ description = "Resource created for Terraform acceptance testing"
+ name = "%s"
+ url_map = "${google_compute_url_map.foobar2.self_link}"
}
-}
-`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
-var testAccComputeTargetHttpProxy_basic2 = fmt.Sprintf(`
-resource "google_compute_target_http_proxy" "foobar" {
- description = "Resource created for Terraform acceptance testing"
- name = "httpproxy-test-%s"
- url_map = "${google_compute_url_map.foobar2.self_link}"
-}
-
-resource "google_compute_backend_service" "foobar" {
- name = "httpproxy-test-%s"
- health_checks = ["${google_compute_http_health_check.zero.self_link}"]
-}
-
-resource "google_compute_http_health_check" "zero" {
- name = "httpproxy-test-%s"
- request_path = "/"
- check_interval_sec = 1
- timeout_sec = 1
-}
-
-resource "google_compute_url_map" "foobar1" {
- name = "httpproxy-test-%s"
- default_service = "${google_compute_backend_service.foobar.self_link}"
- host_rule {
- hosts = ["mysite.com", "myothersite.com"]
- path_matcher = "boop"
+ resource "google_compute_backend_service" "foobar" {
+ name = "%s"
+ health_checks = ["${google_compute_http_health_check.zero.self_link}"]
}
- path_matcher {
+
+ resource "google_compute_http_health_check" "zero" {
+ name = "%s"
+ request_path = "/"
+ check_interval_sec = 1
+ timeout_sec = 1
+ }
+
+ resource "google_compute_url_map" "foobar1" {
+ name = "%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
- name = "boop"
- path_rule {
- paths = ["/*"]
+ host_rule {
+ hosts = ["mysite.com", "myothersite.com"]
+ path_matcher = "boop"
+ }
+ path_matcher {
+ default_service = "${google_compute_backend_service.foobar.self_link}"
+ name = "boop"
+ path_rule {
+ paths = ["/*"]
+ service = "${google_compute_backend_service.foobar.self_link}"
+ }
+ }
+ test {
+ host = "mysite.com"
+ path = "/*"
service = "${google_compute_backend_service.foobar.self_link}"
}
}
- test {
- host = "mysite.com"
- path = "/*"
- service = "${google_compute_backend_service.foobar.self_link}"
- }
-}
-resource "google_compute_url_map" "foobar2" {
- name = "httpproxy-test-%s"
- default_service = "${google_compute_backend_service.foobar.self_link}"
- host_rule {
- hosts = ["mysite.com", "myothersite.com"]
- path_matcher = "boop"
- }
- path_matcher {
+ resource "google_compute_url_map" "foobar2" {
+ name = "%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
- name = "boop"
- path_rule {
- paths = ["/*"]
+ host_rule {
+ hosts = ["mysite.com", "myothersite.com"]
+ path_matcher = "boop"
+ }
+ path_matcher {
+ default_service = "${google_compute_backend_service.foobar.self_link}"
+ name = "boop"
+ path_rule {
+ paths = ["/*"]
+ service = "${google_compute_backend_service.foobar.self_link}"
+ }
+ }
+ test {
+ host = "mysite.com"
+ path = "/*"
service = "${google_compute_backend_service.foobar.self_link}"
}
}
- test {
- host = "mysite.com"
- path = "/*"
- service = "${google_compute_backend_service.foobar.self_link}"
- }
+ `, target, backend, hc, urlmap1, urlmap2)
}
-`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
diff --git a/builtin/providers/google/resource_dns_record_set_test.go b/builtin/providers/google/resource_dns_record_set_test.go
index 0eb331d5b7..94c7fce16b 100644
--- a/builtin/providers/google/resource_dns_record_set_test.go
+++ b/builtin/providers/google/resource_dns_record_set_test.go
@@ -10,16 +10,17 @@ import (
)
func TestAccDnsRecordSet_basic(t *testing.T) {
+ zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDnsRecordSetDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccDnsRecordSet_basic,
+ Config: testAccDnsRecordSet_basic(zoneName),
Check: resource.ComposeTestCheckFunc(
testAccCheckDnsRecordSetExists(
- "google_dns_record_set.foobar"),
+ "google_dns_record_set.foobar", zoneName),
),
},
},
@@ -43,11 +44,11 @@ func testAccCheckDnsRecordSetDestroy(s *terraform.State) error {
return nil
}
-func testAccCheckDnsRecordSetExists(name string) resource.TestCheckFunc {
+func testAccCheckDnsRecordSetExists(resourceType, resourceName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
- rs, ok := s.RootModule().Resources[name]
+ rs, ok := s.RootModule().Resources[resourceType]
if !ok {
- return fmt.Errorf("Not found: %s", name)
+ return fmt.Errorf("Not found: %s", resourceName)
}
dnsName := rs.Primary.Attributes["name"]
@@ -60,7 +61,7 @@ func testAccCheckDnsRecordSetExists(name string) resource.TestCheckFunc {
config := testAccProvider.Meta().(*Config)
resp, err := config.clientDns.ResourceRecordSets.List(
- config.Project, "terraform-test-zone").Name(dnsName).Type(dnsType).Do()
+ config.Project, resourceName).Name(dnsName).Type(dnsType).Do()
if err != nil {
return fmt.Errorf("Error confirming DNS RecordSet existence: %#v", err)
}
@@ -77,17 +78,19 @@ func testAccCheckDnsRecordSetExists(name string) resource.TestCheckFunc {
}
}
-var testAccDnsRecordSet_basic = fmt.Sprintf(`
-resource "google_dns_managed_zone" "parent-zone" {
- name = "dnsrecord-test-%s"
- dns_name = "terraform.test."
- description = "Test Description"
+func testAccDnsRecordSet_basic(zoneName string) string {
+ return fmt.Sprintf(`
+ resource "google_dns_managed_zone" "parent-zone" {
+ name = "%s"
+ dns_name = "terraform.test."
+ description = "Test Description"
+ }
+ resource "google_dns_record_set" "foobar" {
+ managed_zone = "${google_dns_managed_zone.parent-zone.name}"
+ name = "test-record.terraform.test."
+ type = "A"
+ rrdatas = ["127.0.0.1", "127.0.0.10"]
+ ttl = 600
+ }
+ `, zoneName)
}
-resource "google_dns_record_set" "foobar" {
- managed_zone = "${google_dns_managed_zone.parent-zone.name}"
- name = "test-record.terraform.test."
- type = "A"
- rrdatas = ["127.0.0.1", "127.0.0.10"]
- ttl = 600
-}
-`, acctest.RandString(10))
diff --git a/builtin/providers/google/resource_sql_database_test.go b/builtin/providers/google/resource_sql_database_test.go
index 30b146a9c7..509fa1de1f 100644
--- a/builtin/providers/google/resource_sql_database_test.go
+++ b/builtin/providers/google/resource_sql_database_test.go
@@ -101,7 +101,7 @@ func testAccGoogleSqlDatabaseDestroy(s *terraform.State) error {
var testGoogleSqlDatabase_basic = fmt.Sprintf(`
resource "google_sql_database_instance" "instance" {
- name = "sqldatabase-test-%s"
+ name = "sqldatabasetest%s"
region = "us-central"
settings {
tier = "D0"
@@ -109,7 +109,7 @@ resource "google_sql_database_instance" "instance" {
}
resource "google_sql_database" "database" {
- name = "sqldatabase-test-%s"
+ name = "sqldatabasetest%s"
instance = "${google_sql_database_instance.instance.name}"
}
`, acctest.RandString(10), acctest.RandString(10))
From 42a3800ec2586326012ae83cb957adaa751adc9d Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Tue, 5 Jan 2016 18:56:39 -0600
Subject: [PATCH 418/664] provider/azure: Fix up acctest destroy checks
Some resources can only be queried via the network configuration. If
the network configuration does not exist, these checks were failing;
however, that is a desirable state, since without a network
configuration for the subscription the resources in question cannot exist.
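As a rough illustration of the pattern applied in the three test files
below (not part of the diff itself), a destroy check of this shape treats a
missing network configuration as proof that nothing is left to clean up. The
networkConfigGetter interface, its simplified string return value, and the
function name are hypothetical stand-ins for the provider's real virtual
network client:

package azure

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/management"
	"github.com/hashicorp/terraform/terraform"
)

// networkConfigGetter is a hypothetical stand-in for the provider's virtual
// network client; only the single call used by the destroy checks is
// modelled, and the configuration is simplified to a string.
type networkConfigGetter interface {
	GetVirtualNetworkConfiguration() (string, error)
}

// exampleDestroyCheck returns a destroy check that treats a missing network
// configuration as a successful destroy rather than an error.
func exampleDestroyCheck(vnetClient networkConfigGetter, resourceType string) func(*terraform.State) error {
	return func(s *terraform.State) error {
		for _, rs := range s.RootModule().Resources {
			if rs.Type != resourceType {
				continue
			}

			netConf, err := vnetClient.GetVirtualNetworkConfiguration()
			if err != nil {
				// Without a network configuration for the subscription the
				// resources in question cannot exist, so this is fine.
				if management.IsResourceNotFoundError(err) {
					continue
				}
				return fmt.Errorf("Error retrieving networking configuration from Azure: %s", err)
			}

			// A real check would now scan netConf for leftover entries
			// belonging to rs and fail if any are found.
			_ = netConf
		}

		return nil
	}
}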
---
builtin/providers/azure/resource_azure_dns_server_test.go | 5 +++++
builtin/providers/azure/resource_azure_local_network_test.go | 5 +++++
.../providers/azure/resource_azure_virtual_network_test.go | 5 +++++
3 files changed, 15 insertions(+)
diff --git a/builtin/providers/azure/resource_azure_dns_server_test.go b/builtin/providers/azure/resource_azure_dns_server_test.go
index 8b8e335b4b..ac87ebc262 100644
--- a/builtin/providers/azure/resource_azure_dns_server_test.go
+++ b/builtin/providers/azure/resource_azure_dns_server_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/Azure/azure-sdk-for-go/management"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -98,6 +99,10 @@ func testAccCheckAzureDnsServerDestroy(s *terraform.State) error {
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
+ // This is desirable - if there is no network config there can't be any DNS Servers
+ if management.IsResourceNotFoundError(err) {
+ continue
+ }
return fmt.Errorf("Error retrieving networking configuration from Azure: %s", err)
}
diff --git a/builtin/providers/azure/resource_azure_local_network_test.go b/builtin/providers/azure/resource_azure_local_network_test.go
index 2f9f0fdda7..18e09de34c 100644
--- a/builtin/providers/azure/resource_azure_local_network_test.go
+++ b/builtin/providers/azure/resource_azure_local_network_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/Azure/azure-sdk-for-go/management"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -109,6 +110,10 @@ func testAccAzureLocalNetworkConnectionDestroyed(s *terraform.State) error {
netConf, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
+ // This is desirable - if there is no network config there can be no gateways
+ if management.IsResourceNotFoundError(err) {
+ continue
+ }
return err
}
diff --git a/builtin/providers/azure/resource_azure_virtual_network_test.go b/builtin/providers/azure/resource_azure_virtual_network_test.go
index f6d637f16c..716556bbd4 100644
--- a/builtin/providers/azure/resource_azure_virtual_network_test.go
+++ b/builtin/providers/azure/resource_azure_virtual_network_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/Azure/azure-sdk-for-go/management"
"github.com/Azure/azure-sdk-for-go/management/virtualnetwork"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
@@ -185,6 +186,10 @@ func testAccCheckAzureVirtualNetworkDestroy(s *terraform.State) error {
nc, err := vnetClient.GetVirtualNetworkConfiguration()
if err != nil {
+ if management.IsResourceNotFoundError(err) {
+ // This is desirable - no configuration = no networks
+ continue
+ }
return fmt.Errorf("Error retrieving Virtual Network Configuration: %s", err)
}
From 65567cfbdc49f58e26d9c6e0a91fda5ee22a3ded Mon Sep 17 00:00:00 2001
From: Elliot Graebert
Date: Tue, 5 Jan 2016 23:36:39 -0800
Subject: [PATCH 419/664] Added an acceptance test for launch configuration EBS encryption
---
.../resource_aws_launch_configuration_test.go | 64 +++++++++++++++++++
1 file changed, 64 insertions(+)
diff --git a/builtin/providers/aws/resource_aws_launch_configuration_test.go b/builtin/providers/aws/resource_aws_launch_configuration_test.go
index 1e914c86df..3cb5e50f4a 100644
--- a/builtin/providers/aws/resource_aws_launch_configuration_test.go
+++ b/builtin/providers/aws/resource_aws_launch_configuration_test.go
@@ -89,6 +89,52 @@ func TestAccAWSLaunchConfiguration_withSpotPrice(t *testing.T) {
})
}
+func testAccCheckAWSLaunchConfigurationWithEncryption(conf *autoscaling.LaunchConfiguration) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ // Map out the block devices by name, which should be unique.
+ blockDevices := make(map[string]*autoscaling.BlockDeviceMapping)
+ for _, blockDevice := range conf.BlockDeviceMappings {
+ blockDevices[*blockDevice.DeviceName] = blockDevice
+ }
+
+ // Check if the root block device exists.
+ if _, ok := blockDevices["/dev/sda1"]; !ok {
+ return fmt.Errorf("block device doesn't exist: /dev/sda1")
+ } else if blockDevices["/dev/sda1"].Ebs.Encrypted != nil {
+ return fmt.Errorf("root device should not include value for Encrypted")
+ }
+
+ // Check if the secondary block device exists.
+ if _, ok := blockDevices["/dev/sdb"]; !ok {
+ return fmt.Errorf("block device doesn't exist: /dev/sdb")
+ } else if !*blockDevices["/dev/sdb"].Ebs.Encrypted {
+ return fmt.Errorf("block device isn't encrypted as expected: /dev/sdb")
+ }
+
+ return nil
+ }
+}
+
+func TestAccAWSLaunchConfiguration_withEncryption(t *testing.T) {
+ var conf autoscaling.LaunchConfiguration
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSLaunchConfigurationDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSLaunchConfigurationWithEncryption,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.baz", &conf),
+
+ testAccCheckAWSLaunchConfigurationWithEncryption(&conf),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckAWSLaunchConfigurationGeneratedNamePrefix(
resource, prefix string) resource.TestCheckFunc {
return func(s *terraform.State) error {
@@ -273,3 +319,21 @@ resource "aws_launch_configuration" "baz" {
associate_public_ip_address = false
}
`
+
+const testAccAWSLaunchConfigurationWithEncryption = `
+resource "aws_launch_configuration" "baz" {
+ image_id = "ami-5189a661"
+ instance_type = "t2.micro"
+ associate_public_ip_address = false
+
+ root_block_device {
+ volume_type = "gp2"
+ volume_size = 11
+ }
+ ebs_block_device {
+ device_name = "/dev/sdb"
+ volume_size = 9
+ encrypted = true
+ }
+}
+`
From 265910c05112455bf645a3e2b27eecb63638a79c Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 6 Jan 2016 09:20:52 -0600
Subject: [PATCH 420/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0d2158a4c9..5d8014acd2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -41,6 +41,7 @@ IMPROVEMENTS:
* provider/docker: Add support for setting memory, swap and CPU shares on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting labels on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting log driver and options on `docker_container` resources [GH-3761]
+ * provider/heroku: Improve handling of Applications within an Organization [GH-4495]
* provider/vsphere: Add support for custom vm params on `vsphere_virtual_machine` [GH-3867]
* provider/vsphere: Rename vcenter_server config parameter to something clearer [GH-3718]
* provider/vsphere: Make allow_unverified_ssl a configuable on the provider [GH-3933]
From 2be03ddf064d152a5ee0142fed9b1377daf58e3e Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Wed, 6 Jan 2016 10:22:54 -0600
Subject: [PATCH 421/664] provider/aws: Update
testAccAwsVpnConnectionRouteDestroy method
---
.../aws/resource_vpn_connection_route_test.go | 55 +++++++++++++++++--
1 file changed, 51 insertions(+), 4 deletions(-)
diff --git a/builtin/providers/aws/resource_vpn_connection_route_test.go b/builtin/providers/aws/resource_vpn_connection_route_test.go
index dbe91649e5..328638a05a 100644
--- a/builtin/providers/aws/resource_vpn_connection_route_test.go
+++ b/builtin/providers/aws/resource_vpn_connection_route_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/resource"
@@ -44,11 +45,57 @@ func TestAccAWSVpnConnectionRoute_basic(t *testing.T) {
}
func testAccAwsVpnConnectionRouteDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
- }
+ conn := testAccProvider.Meta().(*AWSClient).ec2conn
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_vpn_connection_route" {
+ continue
+ }
- return nil
+ cidrBlock, vpnConnectionId := resourceAwsVpnConnectionRouteParseId(rs.Primary.ID)
+
+ routeFilters := []*ec2.Filter{
+ &ec2.Filter{
+ Name: aws.String("route.destination-cidr-block"),
+ Values: []*string{aws.String(cidrBlock)},
+ },
+ &ec2.Filter{
+ Name: aws.String("vpn-connection-id"),
+ Values: []*string{aws.String(vpnConnectionId)},
+ },
+ }
+
+ resp, err := conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{
+ Filters: routeFilters,
+ })
+ if err != nil {
+ if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" {
+ // not found, all good
+ return nil
+ }
+ return err
+ }
+
+ var vpnc *ec2.VpnConnection
+ if resp != nil {
+ // range over the connections and isolate the one we created
+ for _, v := range resp.VpnConnections {
+ if *v.VpnConnectionId == vpnConnectionId {
+ vpnc = v
+ }
+ }
+
+ if vpnc == nil {
+ // vpn connection not found, so that's good...
+ return nil
+ }
+
+ if vpnc.State != nil && *vpnc.State == "deleted" {
+ return nil
+ }
+ }
+
+ }
+ return fmt.Errorf("Fall through error, Check Destroy criteria not met")
}
func testAccAwsVpnConnectionRoute(
From 266f216a13395a68d929ef402db01e6833880979 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Wed, 6 Jan 2016 11:19:42 -0600
Subject: [PATCH 422/664] provider/aws: Update OpsWorks tests, error catching
---
.../aws/resource_aws_opsworks_stack.go | 9 ++++--
.../aws/resource_aws_opsworks_stack_test.go | 29 ++++++++++++++++---
2 files changed, 31 insertions(+), 7 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_opsworks_stack.go b/builtin/providers/aws/resource_aws_opsworks_stack.go
index 08fe2ab3e3..b3f398ace4 100644
--- a/builtin/providers/aws/resource_aws_opsworks_stack.go
+++ b/builtin/providers/aws/resource_aws_opsworks_stack.go
@@ -256,6 +256,7 @@ func resourceAwsOpsworksStackRead(d *schema.ResourceData, meta interface{}) erro
if err != nil {
if awserr, ok := err.(awserr.Error); ok {
if awserr.Code() == "ResourceNotFoundException" {
+ log.Printf("[DEBUG] OpsWorks stack (%s) not found", d.Id())
d.SetId("")
return nil
}
@@ -319,7 +320,7 @@ func resourceAwsOpsworksStackCreate(d *schema.ResourceData, meta interface{}) er
req.DefaultAvailabilityZone = aws.String(defaultAvailabilityZone.(string))
}
- log.Printf("[DEBUG] Creating OpsWorks stack: %s", *req.Name)
+ log.Printf("[DEBUG] Creating OpsWorks stack: %s", req)
var resp *opsworks.CreateStackOutput
err = resource.Retry(20*time.Minute, func() error {
@@ -336,7 +337,9 @@ func resourceAwsOpsworksStackCreate(d *schema.ResourceData, meta interface{}) er
// The full error we're looking for looks something like
// the following:
// Service Role Arn: [...] is not yet propagated, please try again in a couple of minutes
- if opserr.Code() == "ValidationException" && strings.Contains(opserr.Message(), "not yet propagated") {
+ propErr := "not yet propagated"
+ trustErr := "not the necessary trust relationship"
+ if opserr.Code() == "ValidationException" && (strings.Contains(opserr.Message(), trustErr) || strings.Contains(opserr.Message(), propErr)) {
log.Printf("[INFO] Waiting for service IAM role to propagate")
return cerr
}
@@ -411,7 +414,7 @@ func resourceAwsOpsworksStackUpdate(d *schema.ResourceData, meta interface{}) er
Version: aws.String(d.Get("configuration_manager_version").(string)),
}
- log.Printf("[DEBUG] Updating OpsWorks stack: %s", d.Id())
+ log.Printf("[DEBUG] Updating OpsWorks stack: %s", req)
_, err = client.UpdateStack(req)
if err != nil {
diff --git a/builtin/providers/aws/resource_aws_opsworks_stack_test.go b/builtin/providers/aws/resource_aws_opsworks_stack_test.go
index ab23dc879c..b745c5fdd8 100644
--- a/builtin/providers/aws/resource_aws_opsworks_stack_test.go
+++ b/builtin/providers/aws/resource_aws_opsworks_stack_test.go
@@ -8,6 +8,7 @@ import (
"github.com/hashicorp/terraform/terraform"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/opsworks"
)
@@ -358,9 +359,29 @@ func testAccAwsOpsworksCheckVpc(s *terraform.State) error {
}
func testAccCheckAwsOpsworksStackDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
- }
+ opsworksconn := testAccProvider.Meta().(*AWSClient).opsworksconn
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_opsworks_stack" {
+ continue
+ }
- return nil
+ req := &opsworks.DescribeStacksInput{
+ StackIds: []*string{
+ aws.String(rs.Primary.ID),
+ },
+ }
+
+ _, err := opsworksconn.DescribeStacks(req)
+ if err != nil {
+ if awserr, ok := err.(awserr.Error); ok {
+ if awserr.Code() == "ResourceNotFoundException" {
+ // not found, all good
+ return nil
+ }
+ }
+ return err
+ }
+
+ }
+ return fmt.Errorf("Fall through error for OpsWorks stack test")
}
From 88c3933356db8161558a720080df937e5d9f0d90 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 6 Jan 2016 14:43:20 -0600
Subject: [PATCH 425/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5d8014acd2..f43df7a209 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -22,6 +22,7 @@ IMPROVEMENTS:
* core: Support HTTP basic auth in consul remote state [GH-4166]
* core: Improve error message on resource arity mismatch [GH-4244]
* core: Add support for unary operators + and - to the interpolation syntax [GH-3621]
+ * core: Add SSH agent support for Windows [GH-4323]
* provider/aws: Add `placement_group` as an option for `aws_autoscaling_group` [GH-3704]
* provider/aws: Add support for DynamoDB Table StreamSpecifications [GH-4208]
* provider/aws: Add `name_prefix` to Security Groups [GH-4167]
From adcbe85e3b672cd9f9d2232ccd5c69839af80f1e Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Wed, 6 Jan 2016 14:44:55 -0600
Subject: [PATCH 426/664] provider/aws: Clean up OpsWorks tests to use us-east-1,
 validate destroy of custom layer
---
...resource_aws_opsworks_custom_layer_test.go | 32 +++++++++++++++++--
.../aws/resource_aws_opsworks_stack_test.go | 20 ++++++------
2 files changed, 39 insertions(+), 13 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_opsworks_custom_layer_test.go b/builtin/providers/aws/resource_aws_opsworks_custom_layer_test.go
index 477bd2b866..ed3d0fad6e 100644
--- a/builtin/providers/aws/resource_aws_opsworks_custom_layer_test.go
+++ b/builtin/providers/aws/resource_aws_opsworks_custom_layer_test.go
@@ -4,6 +4,9 @@ import (
"fmt"
"testing"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/opsworks"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -131,11 +134,30 @@ func TestAccAWSOpsworksCustomLayer(t *testing.T) {
}
func testAccCheckAwsOpsworksCustomLayerDestroy(s *terraform.State) error {
- if len(s.RootModule().Resources) > 0 {
- return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
+ opsworksconn := testAccProvider.Meta().(*AWSClient).opsworksconn
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_opsworks_custom_layer" {
+ continue
+ }
+ req := &opsworks.DescribeLayersInput{
+ LayerIds: []*string{
+ aws.String(rs.Primary.ID),
+ },
+ }
+
+ _, err := opsworksconn.DescribeLayers(req)
+ if err != nil {
+ if awserr, ok := err.(awserr.Error); ok {
+ if awserr.Code() == "ResourceNotFoundException" {
+ // not found, good to go
+ return nil
+ }
+ }
+ return err
+ }
}
- return nil
+ return fmt.Errorf("Fall through error on OpsWorks custom layer test")
}
var testAccAwsOpsworksCustomLayerSecurityGroups = `
@@ -160,6 +182,10 @@ resource "aws_security_group" "tf-ops-acc-layer2" {
`
var testAccAwsOpsworksCustomLayerConfigCreate = testAccAwsOpsworksStackConfigNoVpcCreate + testAccAwsOpsworksCustomLayerSecurityGroups + `
+provider "aws" {
+ region = "us-east-1"
+}
+
resource "aws_opsworks_custom_layer" "tf-acc" {
stack_id = "${aws_opsworks_stack.tf-acc.id}"
name = "tf-ops-acc-custom-layer"
diff --git a/builtin/providers/aws/resource_aws_opsworks_stack_test.go b/builtin/providers/aws/resource_aws_opsworks_stack_test.go
index b745c5fdd8..97efcdd66a 100644
--- a/builtin/providers/aws/resource_aws_opsworks_stack_test.go
+++ b/builtin/providers/aws/resource_aws_opsworks_stack_test.go
@@ -91,10 +91,10 @@ resource "aws_iam_instance_profile" "opsworks_instance" {
var testAccAwsOpsworksStackConfigNoVpcCreate = testAccAwsOpsworksStackIamConfig + `
resource "aws_opsworks_stack" "tf-acc" {
name = "tf-opsworks-acc"
- region = "us-west-2"
+ region = "us-east-1"
service_role_arn = "${aws_iam_role.opsworks_service.arn}"
default_instance_profile_arn = "${aws_iam_instance_profile.opsworks_instance.arn}"
- default_availability_zone = "us-west-2a"
+ default_availability_zone = "us-east-1c"
default_os = "Amazon Linux 2014.09"
default_root_device_type = "ebs"
custom_json = "{\"key\": \"value\"}"
@@ -105,10 +105,10 @@ resource "aws_opsworks_stack" "tf-acc" {
var testAccAWSOpsworksStackConfigNoVpcUpdate = testAccAwsOpsworksStackIamConfig + `
resource "aws_opsworks_stack" "tf-acc" {
name = "tf-opsworks-acc"
- region = "us-west-2"
+ region = "us-east-1"
service_role_arn = "${aws_iam_role.opsworks_service.arn}"
default_instance_profile_arn = "${aws_iam_instance_profile.opsworks_instance.arn}"
- default_availability_zone = "us-west-2a"
+ default_availability_zone = "us-east-1c"
default_os = "Amazon Linux 2014.09"
default_root_device_type = "ebs"
custom_json = "{\"key\": \"value\"}"
@@ -153,11 +153,11 @@ resource "aws_vpc" "tf-acc" {
resource "aws_subnet" "tf-acc" {
vpc_id = "${aws_vpc.tf-acc.id}"
cidr_block = "${aws_vpc.tf-acc.cidr_block}"
- availability_zone = "us-west-2a"
+ availability_zone = "us-east-1c"
}
resource "aws_opsworks_stack" "tf-acc" {
name = "tf-opsworks-acc"
- region = "us-west-2"
+ region = "us-east-1"
vpc_id = "${aws_vpc.tf-acc.id}"
default_subnet_id = "${aws_subnet.tf-acc.id}"
service_role_arn = "${aws_iam_role.opsworks_service.arn}"
@@ -177,11 +177,11 @@ resource "aws_vpc" "tf-acc" {
resource "aws_subnet" "tf-acc" {
vpc_id = "${aws_vpc.tf-acc.id}"
cidr_block = "${aws_vpc.tf-acc.cidr_block}"
- availability_zone = "us-west-2a"
+ availability_zone = "us-east-1c"
}
resource "aws_opsworks_stack" "tf-acc" {
name = "tf-opsworks-acc"
- region = "us-west-2"
+ region = "us-east-1"
vpc_id = "${aws_vpc.tf-acc.id}"
default_subnet_id = "${aws_subnet.tf-acc.id}"
service_role_arn = "${aws_iam_role.opsworks_service.arn}"
@@ -235,7 +235,7 @@ var testAccAwsOpsworksStackCheckResourceAttrsCreate = resource.ComposeTestCheckF
resource.TestCheckResourceAttr(
"aws_opsworks_stack.tf-acc",
"default_availability_zone",
- "us-west-2a",
+ "us-east-1c",
),
resource.TestCheckResourceAttr(
"aws_opsworks_stack.tf-acc",
@@ -273,7 +273,7 @@ var testAccAwsOpsworksStackCheckResourceAttrsUpdate = resource.ComposeTestCheckF
resource.TestCheckResourceAttr(
"aws_opsworks_stack.tf-acc",
"default_availability_zone",
- "us-west-2a",
+ "us-east-1c",
),
resource.TestCheckResourceAttr(
"aws_opsworks_stack.tf-acc",
From e1b62c76ada7a808b456fafb7e2e234622c9822b Mon Sep 17 00:00:00 2001
From: Joseph Kordish
Date: Sun, 27 Dec 2015 16:28:56 -0600
Subject: [PATCH 427/664] add sha1 interpolation
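As a quick sanity check on the expected value used in the new test case
below, the digest for the string "test" can be reproduced with plain Go (an
illustrative snippet, not part of this patch):

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

func main() {
	h := sha1.New()
	h.Write([]byte("test"))
	// Prints a94a8fe5ccb19ba61c4c0873d391e987982fbbd3, the value expected by
	// TestInterpolateFuncSha1 for `${sha1("test")}`.
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}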
---
config/interpolate_funcs.go | 17 +++++++++++++++++
config/interpolate_funcs_test.go | 12 ++++++++++++
2 files changed, 29 insertions(+)
diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go
index 5538763c0c..0ca16b56cd 100644
--- a/config/interpolate_funcs.go
+++ b/config/interpolate_funcs.go
@@ -2,7 +2,9 @@ package config
import (
"bytes"
+ "crypto/sha1"
"encoding/base64"
+ "encoding/hex"
"errors"
"fmt"
"io/ioutil"
@@ -38,6 +40,7 @@ func init() {
"lower": interpolationFuncLower(),
"replace": interpolationFuncReplace(),
"split": interpolationFuncSplit(),
+ "sha1": interpolationFuncSha1(),
"base64encode": interpolationFuncBase64Encode(),
"base64decode": interpolationFuncBase64Decode(),
"upper": interpolationFuncUpper(),
@@ -586,3 +589,17 @@ func interpolationFuncUpper() ast.Function {
},
}
}
+
+func interpolationFuncSha1() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ h := sha1.New()
+ h.Write([]byte(s))
+ hash := hex.EncodeToString(h.Sum(nil))
+ return hash, nil
+ },
+ }
+}
diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go
index 3aeb50db17..8c633361d9 100644
--- a/config/interpolate_funcs_test.go
+++ b/config/interpolate_funcs_test.go
@@ -834,6 +834,18 @@ func TestInterpolateFuncUpper(t *testing.T) {
})
}
+func TestInterpolateFuncSha1(t *testing.T) {
+ testFunction(t, testFunctionConfig{
+ Cases: []testFunctionCase{
+ {
+ `${sha1("test")}`,
+ "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3",
+ false,
+ },
+ },
+ })
+}
+
type testFunctionConfig struct {
Cases []testFunctionCase
Vars map[string]ast.Variable
From 21fe576cb5eca56c5ca9d1527db61b3c2c5aa26b Mon Sep 17 00:00:00 2001
From: Joseph Kordish
Date: Wed, 6 Jan 2016 15:19:54 -0600
Subject: [PATCH 428/664] added the sha1 interpolation function to the docs
---
website/source/docs/configuration/interpolation.html.md | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md
index bcabe58ff1..2fc97bd312 100644
--- a/website/source/docs/configuration/interpolation.html.md
+++ b/website/source/docs/configuration/interpolation.html.md
@@ -80,6 +80,10 @@ The supported built-in functions are:
* `base64encode(string)` - Returns a base64-encoded representation of the
given string.
+ * `sha1(string)` - Returns a sha1 hash representation of the
+ given string.
+ Example: `"${sha1(concat(aws_vpc.default.tags.customer, "-s3-bucket"))}"`
+
* `cidrhost(iprange, hostnum)` - Takes an IP address range in CIDR notation
and creates an IP address with the given host number. For example,
``cidrhost("10.0.0.0/8", 2)`` returns ``10.0.0.2``.
@@ -95,7 +99,7 @@ The supported built-in functions are:
CIDR notation (like ``10.0.0.0/8``) and extends its prefix to include an
additional subnet number. For example,
``cidrsubnet("10.0.0.0/8", 8, 2)`` returns ``10.2.0.0/16``.
-
+
* `coalesce(string1, string2, ...)` - Returns the first non-empty value from
the given arguments. At least two arguments must be provided.
From 3af8ee45b00f95072b4a41f32fb2a923e88c14ad Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 6 Jan 2016 15:56:11 -0600
Subject: [PATCH 429/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f43df7a209..32132ab220 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -36,6 +36,7 @@ IMPROVEMENTS:
* provider/aws: Allow changing private IPs for ENIs [GH-4307]
* provider/aws: Retry MalformedPolicy errors due to newly created principals in S3 Buckets [GH-4315]
* provider/aws: Validate `name` on `db_subnet_group` against AWS requirements [GH-4340]
+ * provider/aws: wait for ASG capacity on update [GH-3947]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting the restart policy on `docker_container` resources [GH-3761]
From aee43165d5a13095dfcd2884be57858c8e0c52a1 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 6 Jan 2016 15:59:09 -0600
Subject: [PATCH 430/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 32132ab220..7729c5d0a4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,6 +23,7 @@ IMPROVEMENTS:
* core: Improve error message on resource arity mismatch [GH-4244]
* core: Add support for unary operators + and - to the interpolation syntax [GH-3621]
* core: Add SSH agent support for Windows [GH-4323]
+ * core: Add `sha1()` interpolation function [GH-4450]
* provider/aws: Add `placement_group` as an option for `aws_autoscaling_group` [GH-3704]
* provider/aws: Add support for DynamoDB Table StreamSpecifications [GH-4208]
* provider/aws: Add `name_prefix` to Security Groups [GH-4167]
From 41e9416ab29766ea1f1be8a60a63915cbcbda47e Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Wed, 6 Jan 2016 14:07:15 -0800
Subject: [PATCH 431/664] provider/azurerm: Set user agent
---
builtin/providers/azurerm/config.go | 39 ++++++++++++++++++++++++++++-
1 file changed, 38 insertions(+), 1 deletion(-)
diff --git a/builtin/providers/azurerm/config.go b/builtin/providers/azurerm/config.go
index 0d39ac8ef7..ab68d1ff69 100644
--- a/builtin/providers/azurerm/config.go
+++ b/builtin/providers/azurerm/config.go
@@ -1,16 +1,18 @@
package azurerm
import (
+ "fmt"
"log"
"net/http"
+ "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/Azure/azure-sdk-for-go/arm/resources"
"github.com/Azure/azure-sdk-for-go/arm/scheduler"
"github.com/Azure/azure-sdk-for-go/arm/storage"
- "github.com/Azure/go-autorest/autorest"
+ "github.com/hashicorp/terraform/terraform"
)
// ArmClient contains the handles to all the specific Azure Resource Manager
@@ -58,6 +60,17 @@ func withRequestLogging() autorest.SendDecorator {
}
}
+func setUserAgent(client *autorest.Client) {
+ var version string
+ if terraform.VersionPrerelease != "" {
+ version = fmt.Sprintf("%s-%s", terraform.Version, terraform.VersionPrerelease)
+ } else {
+ version = terraform.Version
+ }
+
+ client.UserAgent = fmt.Sprintf("HashiCorp-Terraform-v%s", version)
+}
+
// getArmClient is a helper method which returns a fully instantiated
// *ArmClient based on the Config's current settings.
func (c *Config) getArmClient() (*ArmClient, error) {
@@ -72,121 +85,145 @@ func (c *Config) getArmClient() (*ArmClient, error) {
// NOTE: these declarations should be left separate for clarity should the
// clients need to be configured with custom Responders/PollingModes etc...
asc := compute.NewAvailabilitySetsClient(c.SubscriptionID)
+ setUserAgent(&asc.Client)
asc.Authorizer = spt
asc.Sender = autorest.CreateSender(withRequestLogging())
client.availSetClient = asc
uoc := compute.NewUsageOperationsClient(c.SubscriptionID)
+ setUserAgent(&uoc.Client)
uoc.Authorizer = spt
uoc.Sender = autorest.CreateSender(withRequestLogging())
client.usageOpsClient = uoc
vmeic := compute.NewVirtualMachineExtensionImagesClient(c.SubscriptionID)
+ setUserAgent(&vmeic.Client)
vmeic.Authorizer = spt
vmeic.Sender = autorest.CreateSender(withRequestLogging())
client.vmExtensionImageClient = vmeic
vmec := compute.NewVirtualMachineExtensionsClient(c.SubscriptionID)
+ setUserAgent(&vmec.Client)
vmec.Authorizer = spt
vmec.Sender = autorest.CreateSender(withRequestLogging())
client.vmExtensionClient = vmec
vmic := compute.NewVirtualMachineImagesClient(c.SubscriptionID)
+ setUserAgent(&vmic.Client)
vmic.Authorizer = spt
vmic.Sender = autorest.CreateSender(withRequestLogging())
client.vmImageClient = vmic
vmc := compute.NewVirtualMachinesClient(c.SubscriptionID)
+ setUserAgent(&vmc.Client)
vmc.Authorizer = spt
vmc.Sender = autorest.CreateSender(withRequestLogging())
client.vmClient = vmc
agc := network.NewApplicationGatewaysClient(c.SubscriptionID)
+ setUserAgent(&agc.Client)
agc.Authorizer = spt
agc.Sender = autorest.CreateSender(withRequestLogging())
client.appGatewayClient = agc
ifc := network.NewInterfacesClient(c.SubscriptionID)
+ setUserAgent(&ifc.Client)
ifc.Authorizer = spt
ifc.Sender = autorest.CreateSender(withRequestLogging())
client.ifaceClient = ifc
lbc := network.NewLoadBalancersClient(c.SubscriptionID)
+ setUserAgent(&lbc.Client)
lbc.Authorizer = spt
lbc.Sender = autorest.CreateSender(withRequestLogging())
client.loadBalancerClient = lbc
lgc := network.NewLocalNetworkGatewaysClient(c.SubscriptionID)
+ setUserAgent(&lgc.Client)
lgc.Authorizer = spt
lgc.Sender = autorest.CreateSender(withRequestLogging())
client.localNetConnClient = lgc
pipc := network.NewPublicIPAddressesClient(c.SubscriptionID)
+ setUserAgent(&pipc.Client)
pipc.Authorizer = spt
pipc.Sender = autorest.CreateSender(withRequestLogging())
client.publicIPClient = pipc
sgc := network.NewSecurityGroupsClient(c.SubscriptionID)
+ setUserAgent(&sgc.Client)
sgc.Authorizer = spt
sgc.Sender = autorest.CreateSender(withRequestLogging())
client.secGroupClient = sgc
src := network.NewSecurityRulesClient(c.SubscriptionID)
+ setUserAgent(&src.Client)
src.Authorizer = spt
src.Sender = autorest.CreateSender(withRequestLogging())
client.secRuleClient = src
snc := network.NewSubnetsClient(c.SubscriptionID)
+ setUserAgent(&snc.Client)
snc.Authorizer = spt
snc.Sender = autorest.CreateSender(withRequestLogging())
client.subnetClient = snc
vgcc := network.NewVirtualNetworkGatewayConnectionsClient(c.SubscriptionID)
+ setUserAgent(&vgcc.Client)
vgcc.Authorizer = spt
vgcc.Sender = autorest.CreateSender(withRequestLogging())
client.vnetGatewayConnectionsClient = vgcc
vgc := network.NewVirtualNetworkGatewaysClient(c.SubscriptionID)
+ setUserAgent(&vgc.Client)
vgc.Authorizer = spt
vgc.Sender = autorest.CreateSender(withRequestLogging())
client.vnetGatewayClient = vgc
vnc := network.NewVirtualNetworksClient(c.SubscriptionID)
+ setUserAgent(&vnc.Client)
vnc.Authorizer = spt
vnc.Sender = autorest.CreateSender(withRequestLogging())
client.vnetClient = vnc
rgc := resources.NewGroupsClient(c.SubscriptionID)
+ setUserAgent(&rgc.Client)
rgc.Authorizer = spt
rgc.Sender = autorest.CreateSender(withRequestLogging())
client.resourceGroupClient = rgc
pc := resources.NewProvidersClient(c.SubscriptionID)
+ setUserAgent(&pc.Client)
pc.Authorizer = spt
pc.Sender = autorest.CreateSender(withRequestLogging())
client.providers = pc
tc := resources.NewTagsClient(c.SubscriptionID)
+ setUserAgent(&tc.Client)
tc.Authorizer = spt
tc.Sender = autorest.CreateSender(withRequestLogging())
client.tagsClient = tc
jc := scheduler.NewJobsClient(c.SubscriptionID)
+ setUserAgent(&jc.Client)
jc.Authorizer = spt
jc.Sender = autorest.CreateSender(withRequestLogging())
client.jobsClient = jc
jcc := scheduler.NewJobCollectionsClient(c.SubscriptionID)
+ setUserAgent(&jcc.Client)
jcc.Authorizer = spt
jcc.Sender = autorest.CreateSender(withRequestLogging())
client.jobsCollectionsClient = jcc
ssc := storage.NewAccountsClient(c.SubscriptionID)
+ setUserAgent(&ssc.Client)
ssc.Authorizer = spt
ssc.Sender = autorest.CreateSender(withRequestLogging())
client.storageServiceClient = ssc
suc := storage.NewUsageOperationsClient(c.SubscriptionID)
+ setUserAgent(&suc.Client)
suc.Authorizer = spt
suc.Sender = autorest.CreateSender(withRequestLogging())
client.storageUsageClient = suc
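As a quick illustration of what `setUserAgent` produces on each client above, a hypothetical test-style sketch (the vendored autorest import path mirrors the one used in the diff; this test is not part of the patch):

```go
package azurerm

import (
	"strings"
	"testing"

	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
)

// TestSetUserAgent checks only the prefix that setUserAgent is expected to
// set; the version suffix depends on the build.
func TestSetUserAgent(t *testing.T) {
	var client autorest.Client
	setUserAgent(&client)

	if !strings.HasPrefix(client.UserAgent, "HashiCorp-Terraform-v") {
		t.Fatalf("unexpected User-Agent: %q", client.UserAgent)
	}
}
```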
From 9346355ed593488b5c999c62c28f6b958a7edba8 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Wed, 6 Jan 2016 16:11:45 -0800
Subject: [PATCH 432/664] provider/azure: Wait longer for storage blob to go
---
builtin/providers/azure/resource_azure_instance.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/builtin/providers/azure/resource_azure_instance.go b/builtin/providers/azure/resource_azure_instance.go
index 097b210f54..1ad82c1f62 100644
--- a/builtin/providers/azure/resource_azure_instance.go
+++ b/builtin/providers/azure/resource_azure_instance.go
@@ -622,7 +622,7 @@ func resourceAzureInstanceDelete(d *schema.ResourceData, meta interface{}) error
return err
}
- err = resource.Retry(5*time.Minute, func() error {
+ err = resource.Retry(15*time.Minute, func() error {
exists, err := blobClient.BlobExists(
storageContainterName, fmt.Sprintf(osDiskBlobNameFormat, name),
)
From 72d3d7ed9b83985959298e26751007b67b0d5787 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Thu, 7 Jan 2016 11:44:49 +0000
Subject: [PATCH 433/664] provider/aws: Fix template_url/template_body conflict
---
.../aws/resource_aws_cloudformation_stack.go | 6 +-
.../resource_aws_cloudformation_stack_test.go | 81 +++++++++++++++++++
.../cloudformation-template.json | 19 +++++
3 files changed, 103 insertions(+), 3 deletions(-)
create mode 100644 builtin/providers/aws/test-fixtures/cloudformation-template.json
diff --git a/builtin/providers/aws/resource_aws_cloudformation_stack.go b/builtin/providers/aws/resource_aws_cloudformation_stack.go
index d59b393292..ded8ee25a5 100644
--- a/builtin/providers/aws/resource_aws_cloudformation_stack.go
+++ b/builtin/providers/aws/resource_aws_cloudformation_stack.go
@@ -269,12 +269,12 @@ func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface
}
// Either TemplateBody or TemplateURL are required for each change
- if v, ok := d.GetOk("template_body"); ok {
- input.TemplateBody = aws.String(normalizeJson(v.(string)))
- }
if v, ok := d.GetOk("template_url"); ok {
input.TemplateURL = aws.String(v.(string))
}
+ if v, ok := d.GetOk("template_body"); ok && input.TemplateURL == nil {
+ input.TemplateBody = aws.String(normalizeJson(v.(string)))
+ }
if d.HasChange("capabilities") {
input.Capabilities = expandStringList(d.Get("capabilities").(*schema.Set).List())
diff --git a/builtin/providers/aws/resource_aws_cloudformation_stack_test.go b/builtin/providers/aws/resource_aws_cloudformation_stack_test.go
index 192995685b..f4d21dae87 100644
--- a/builtin/providers/aws/resource_aws_cloudformation_stack_test.go
+++ b/builtin/providers/aws/resource_aws_cloudformation_stack_test.go
@@ -2,7 +2,9 @@ package aws
import (
"fmt"
+ "math/rand"
"testing"
+ "time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudformation"
@@ -89,6 +91,31 @@ func TestAccAWSCloudFormation_withParams(t *testing.T) {
})
}
+// Regression for https://github.com/hashicorp/terraform/issues/4534
+func TestAccAWSCloudFormation_withUrl_withParams(t *testing.T) {
+ var stack cloudformation.Stack
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSCloudFormationDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSCloudFormationConfig_templateUrl_withParams,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params", &stack),
+ ),
+ },
+ resource.TestStep{
+ Config: testAccAWSCloudFormationConfig_templateUrl_withParams_modified,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params", &stack),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckCloudFormationStackExists(n string, stack *cloudformation.Stack) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
@@ -293,3 +320,57 @@ var testAccAWSCloudFormationConfig_withParams = fmt.Sprintf(
var testAccAWSCloudFormationConfig_withParams_modified = fmt.Sprintf(
tpl_testAccAWSCloudFormationConfig_withParams,
"12.0.0.0/16")
+
+var tpl_testAccAWSCloudFormationConfig_templateUrl_withParams = `
+resource "aws_s3_bucket" "b" {
+ bucket = "%s"
+ acl = "public-read"
+ policy = <
Date: Thu, 7 Jan 2016 01:27:24 +0000
Subject: [PATCH 434/664] Azure RM Availability Sets. Adds Schema, CRUD,
Acceptance Tests and Documentation
---
builtin/providers/azurerm/provider.go | 3 +-
.../azurerm/resource_arm_availability_set.go | 140 ++++++++++++++++++
.../resource_arm_availability_set_test.go | 136 +++++++++++++++++
.../azurerm/r/availability_set.html.markdown | 48 ++++++
website/source/layouts/azurerm.erb | 4 +
5 files changed, 330 insertions(+), 1 deletion(-)
create mode 100644 builtin/providers/azurerm/resource_arm_availability_set.go
create mode 100644 builtin/providers/azurerm/resource_arm_availability_set_test.go
create mode 100644 website/source/docs/providers/azurerm/r/availability_set.html.markdown
diff --git a/builtin/providers/azurerm/provider.go b/builtin/providers/azurerm/provider.go
index c64d151483..612109fc32 100644
--- a/builtin/providers/azurerm/provider.go
+++ b/builtin/providers/azurerm/provider.go
@@ -42,6 +42,7 @@ func Provider() terraform.ResourceProvider {
"azurerm_resource_group": resourceArmResourceGroup(),
"azurerm_virtual_network": resourceArmVirtualNetwork(),
"azurerm_local_network_gateway": resourceArmLocalNetworkGateway(),
+ "azurerm_availability_set": resourceArmAvailabilitySet(),
},
ConfigureFunc: providerConfigure,
@@ -87,7 +88,7 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
func registerAzureResourceProvidersWithSubscription(config *Config, client *ArmClient) error {
providerClient := client.providers
- providers := []string{"Microsoft.Network"}
+ providers := []string{"Microsoft.Network", "Microsoft.Compute"}
for _, v := range providers {
res, err := providerClient.Register(v)
diff --git a/builtin/providers/azurerm/resource_arm_availability_set.go b/builtin/providers/azurerm/resource_arm_availability_set.go
new file mode 100644
index 0000000000..3209d70080
--- /dev/null
+++ b/builtin/providers/azurerm/resource_arm_availability_set.go
@@ -0,0 +1,140 @@
+package azurerm
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/arm/compute"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceArmAvailabilitySet() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceArmAvailabilitySetCreate,
+ Read: resourceArmAvailabilitySetRead,
+ Update: resourceArmAvailabilitySetCreate,
+ Delete: resourceArmAvailabilitySetDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "location": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ StateFunc: azureRMNormalizeLocation,
+ },
+
+ "platform_update_domain_count": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 5,
+ ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(int)
+ if value > 20 {
+ errors = append(errors, fmt.Errorf(
+ "Maximum value for `platform_update_domain_count` is 20"))
+ }
+ return
+ },
+ },
+
+ "platform_fault_domain_count": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 3,
+ ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(int)
+ if value > 3 {
+ errors = append(errors, fmt.Errorf(
+ "Maximum value for `platform_fault_domain_count` is 3", k))
+ }
+ return
+ },
+ },
+
+ "resource_group_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceArmAvailabilitySetCreate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*ArmClient)
+ availSetClient := client.availSetClient
+
+ log.Printf("[INFO] preparing arguments for Azure ARM Availability Set creation.")
+
+ name := d.Get("name").(string)
+ location := d.Get("location").(string)
+ resGroup := d.Get("resource_group_name").(string)
+ updateDomainCount := d.Get("platform_update_domain_count").(int)
+ faultDomainCount := d.Get("platform_fault_domain_count").(int)
+
+ availSet := compute.AvailabilitySet{
+ Name: &name,
+ Location: &location,
+ Properties: &compute.AvailabilitySetProperties{
+ PlatformFaultDomainCount: &faultDomainCount,
+ PlatformUpdateDomainCount: &updateDomainCount,
+ },
+ }
+
+ resp, err := availSetClient.CreateOrUpdate(resGroup, name, availSet)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(*resp.ID)
+
+ return resourceArmAvailabilitySetRead(d, meta)
+}
+
+func resourceArmAvailabilitySetRead(d *schema.ResourceData, meta interface{}) error {
+ availSetClient := meta.(*ArmClient).availSetClient
+
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+ resGroup := id.ResourceGroup
+ name := id.Path["availabilitySets"]
+
+ resp, err := availSetClient.Get(resGroup, name)
+ if resp.StatusCode == http.StatusNotFound {
+ d.SetId("")
+ return nil
+ }
+ if err != nil {
+ return fmt.Errorf("Error making Read request on Azure Availability Set %s: %s", name, err)
+ }
+
+ availSet := *resp.Properties
+ d.Set("platform_update_domain_count", availSet.PlatformUpdateDomainCount)
+ d.Set("platform_fault_domain_count", availSet.PlatformFaultDomainCount)
+
+ return nil
+}
+
+func resourceArmAvailabilitySetDelete(d *schema.ResourceData, meta interface{}) error {
+ availSetClient := meta.(*ArmClient).availSetClient
+
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+ resGroup := id.ResourceGroup
+ name := id.Path["availabilitySets"]
+
+ _, err = availSetClient.Delete(resGroup, name)
+
+ return err
+}
diff --git a/builtin/providers/azurerm/resource_arm_availability_set_test.go b/builtin/providers/azurerm/resource_arm_availability_set_test.go
new file mode 100644
index 0000000000..867b7484ed
--- /dev/null
+++ b/builtin/providers/azurerm/resource_arm_availability_set_test.go
@@ -0,0 +1,136 @@
+package azurerm
+
+import (
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAzureRMAvailabilitySet_basic(t *testing.T) {
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMAvailabilitySetDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureRMVAvailabilitySet_basic,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMAvailabilitySetExists("azurerm_availability_set.test"),
+ resource.TestCheckResourceAttr(
+ "azurerm_availability_set.test", "name", "acceptanceTestAvailabilitySet1"),
+ resource.TestCheckResourceAttr(
+ "azurerm_availability_set.test", "platform_update_domain_count", "5"),
+ resource.TestCheckResourceAttr(
+ "azurerm_availability_set.test", "platform_fault_domain_count", "3"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAzureRMAvailabilitySet_withDomainCounts(t *testing.T) {
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMAvailabilitySetDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureRMVAvailabilitySet_withDomainCounts,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMAvailabilitySetExists("azurerm_availability_set.test"),
+ resource.TestCheckResourceAttr(
+ "azurerm_availability_set.test", "name", "acceptanceTestAvailabilitySet1"),
+ resource.TestCheckResourceAttr(
+ "azurerm_availability_set.test", "platform_update_domain_count", "10"),
+ resource.TestCheckResourceAttr(
+ "azurerm_availability_set.test", "platform_fault_domain_count", "1"),
+ ),
+ },
+ },
+ })
+}
+
+func testCheckAzureRMAvailabilitySetExists(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ // Ensure we have enough information in state to look up in API
+ rs, ok := s.RootModule().Resources[name]
+ if !ok {
+ return fmt.Errorf("Not found: %s", name)
+ }
+
+ availSetName := rs.Primary.Attributes["name"]
+ resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"]
+ if !hasResourceGroup {
+ return fmt.Errorf("Bad: no resource group found in state for availability set: %s", availSetName)
+ }
+
+ conn := testAccProvider.Meta().(*ArmClient).availSetClient
+
+ resp, err := conn.Get(resourceGroup, availSetName)
+ if err != nil {
+ return fmt.Errorf("Bad: Get on availSetClient: %s", err)
+ }
+
+ if resp.StatusCode == http.StatusNotFound {
+ return fmt.Errorf("Bad: Availability Set %q (resource group: %q) does not exist", name, resourceGroup)
+ }
+
+ return nil
+ }
+}
+
+func testCheckAzureRMAvailabilitySetDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*ArmClient).availSetClient
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "azurerm_availability_set" {
+ continue
+ }
+
+ name := rs.Primary.Attributes["name"]
+ resourceGroup := rs.Primary.Attributes["resource_group_name"]
+
+ resp, err := conn.Get(resourceGroup, name)
+
+ if err != nil {
+ return nil
+ }
+
+ if resp.StatusCode != http.StatusNotFound {
+ return fmt.Errorf("Availability Set still exists:\n%#v", resp.Properties)
+ }
+ }
+
+ return nil
+}
+
+var testAccAzureRMVAvailabilitySet_basic = `
+resource "azurerm_resource_group" "test" {
+ name = "acceptanceTestResourceGroup1"
+ location = "West US"
+}
+resource "azurerm_availability_set" "test" {
+ name = "acceptanceTestAvailabilitySet1"
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+`
+
+var testAccAzureRMVAvailabilitySet_withDomainCounts = `
+resource "azurerm_resource_group" "test" {
+ name = "acceptanceTestResourceGroup1"
+ location = "West US"
+}
+resource "azurerm_availability_set" "test" {
+ name = "acceptanceTestAvailabilitySet1"
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ platform_update_domain_count = 10
+ platform_fault_domain_count = 1
+}
+`
diff --git a/website/source/docs/providers/azurerm/r/availability_set.html.markdown b/website/source/docs/providers/azurerm/r/availability_set.html.markdown
new file mode 100644
index 0000000000..651c39b2ee
--- /dev/null
+++ b/website/source/docs/providers/azurerm/r/availability_set.html.markdown
@@ -0,0 +1,48 @@
+---
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_availability_set"
+sidebar_current: "docs-azurerm-resource-availability-set"
+description: |-
+ Create an availability set for virtual machines.
+---
+
+# azurerm\_availability\_set
+
+Create an availability set for virtual machines.
+
+## Example Usage
+
+```
+resource "azurerm_resource_group" "test" {
+ name = "resourceGroup1"
+ location = "West US"
+}
+
+resource "azurerm_availability_set" "test" {
+ name = "acceptanceTestAvailabilitySet1"
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) Specifies the name of the availability set. Changing this forces a
+ new resource to be created.
+
+* `resource_group_name` - (Required) The name of the resource group in which to
+ create the availability set.
+
+* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
+
+* `platform_update_domain_count` - (Optional) Specifies the number of update domains that are used. Defaults to 5.
+
+* `platform_fault_domain_count` - (Optional) Specifies the number of fault domains that are used. Defaults to 3.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The availability set ID.
\ No newline at end of file
diff --git a/website/source/layouts/azurerm.erb b/website/source/layouts/azurerm.erb
index fe00f7939d..cbac83abfc 100644
--- a/website/source/layouts/azurerm.erb
+++ b/website/source/layouts/azurerm.erb
@@ -25,6 +25,10 @@
azurerm_local_network_gateway
+ >
+ azurerm_availability_set
+
+
From f0ce107971a7ce83eb202b49f20a130a7ba305da Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Thu, 7 Jan 2016 06:52:28 -0800
Subject: [PATCH 435/664] provider/azure: Fix up destroy checks
---
.../resource_azure_sql_database_server_firewall_rule_test.go | 5 +++++
.../azure/resource_azure_sql_database_service_test.go | 5 +++++
2 files changed, 10 insertions(+)
diff --git a/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule_test.go b/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule_test.go
index 9202be7e10..cdd0ab4578 100644
--- a/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule_test.go
+++ b/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule_test.go
@@ -2,6 +2,7 @@ package azure
import (
"fmt"
+ "strings"
"testing"
"github.com/hashicorp/terraform/helper/resource"
@@ -149,6 +150,10 @@ func testAccAzureDatabaseServerFirewallRuleDeleted(servers []string) resource.Te
for _, server := range servers {
rules, err := sqlClient.ListFirewallRules(server)
if err != nil {
+ // ¯\_(ツ)_/¯
+ if strings.Contains(err.Error(), "Cannot open server") {
+ return nil
+ }
return fmt.Errorf("Error listing Azure Database Server Firewall Rules for Server %q: %s", server, err)
}
diff --git a/builtin/providers/azure/resource_azure_sql_database_service_test.go b/builtin/providers/azure/resource_azure_sql_database_service_test.go
index 24d8657748..31ea8990e6 100644
--- a/builtin/providers/azure/resource_azure_sql_database_service_test.go
+++ b/builtin/providers/azure/resource_azure_sql_database_service_test.go
@@ -2,6 +2,7 @@ package azure
import (
"fmt"
+ "strings"
"testing"
"github.com/hashicorp/terraform/helper/resource"
@@ -146,6 +147,10 @@ func testAccCheckAzureSqlDatabaseServiceDeleted(s *terraform.State) error {
sqlClient := testAccProvider.Meta().(*Client).sqlClient
dbs, err := sqlClient.ListDatabases(*testAccAzureSqlServerName)
if err != nil {
+ // ¯\_(ツ)_/¯
+ if strings.Contains(err.Error(), "Cannot open server") {
+ return nil
+ }
return fmt.Errorf("Error issuing Azure SQL Service list request: %s", err)
}
From 1de2fde147558fc10600638c29e9340651424d14 Mon Sep 17 00:00:00 2001
From: Cliff Pracht
Date: Thu, 7 Jan 2016 09:55:43 -0500
Subject: [PATCH 436/664] Fix to not put fixed_ip in request if not defined
---
.../openstack/resource_openstack_networking_port_v2.go | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/openstack/resource_openstack_networking_port_v2.go b/builtin/providers/openstack/resource_openstack_networking_port_v2.go
index 0b8d33ad5a..46565d26d9 100644
--- a/builtin/providers/openstack/resource_openstack_networking_port_v2.go
+++ b/builtin/providers/openstack/resource_openstack_networking_port_v2.go
@@ -245,8 +245,13 @@ func resourcePortSecurityGroupsV2(d *schema.ResourceData) []string {
return groups
}
-func resourcePortFixedIpsV2(d *schema.ResourceData) []ports.IP {
+func resourcePortFixedIpsV2(d *schema.ResourceData) interface{} {
rawIP := d.Get("fixed_ip").([]interface{})
+
+ if len(rawIP) == 0 {
+ return nil
+ }
+
ip := make([]ports.IP, len(rawIP))
for i, raw := range rawIP {
rawMap := raw.(map[string]interface{})
@@ -255,8 +260,8 @@ func resourcePortFixedIpsV2(d *schema.ResourceData) []ports.IP {
IPAddress: rawMap["ip_address"].(string),
}
}
-
return ip
+
}
func resourcePortAdminStateUpV2(d *schema.ResourceData) *bool {
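The point of returning `interface{}` here is that a nil interface lets the port create options drop `fixed_ips` from the request entirely, while a typed empty slice would still be serialized. A self-contained sketch of that distinction using `encoding/json` and `omitempty` (the struct is illustrative, not the actual gophercloud options type):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// portOpts stands in for the create-options struct; only the omitempty
// behaviour of the interface-typed field matters for this illustration.
type portOpts struct {
	FixedIPs interface{} `json:"fixed_ips,omitempty"`
}

func main() {
	withNil, _ := json.Marshal(portOpts{FixedIPs: nil})
	withEmpty, _ := json.Marshal(portOpts{FixedIPs: []string{}})

	fmt.Println(string(withNil))   // {} (field omitted)
	fmt.Println(string(withEmpty)) // {"fixed_ips":[]} (field still sent)
}
```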
From d450476d63764c72101e5327c9821c111fadb373 Mon Sep 17 00:00:00 2001
From: Anthony Stanton
Date: Thu, 7 Jan 2016 16:17:52 +0100
Subject: [PATCH 437/664] Update doc - missing lambda protocol
The last update missed listing `lambda` in the list of allowed protocols.
---
.../docs/providers/aws/r/sns_topic_subscription.html.markdown | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown b/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown
index eeb504ae8b..7dfb992538 100644
--- a/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown
+++ b/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown
@@ -49,7 +49,7 @@ resource "aws_sns_topic_subscription" "user_updates_sqs_target" {
The following arguments are supported:
* `topic_arn` - (Required) The ARN of the SNS topic to subscribe to
-* `protocol` - (Required) The protocol to use. The possible values for this are: `sqs`, `http`, `https`, `sms`, or `application`. (`email` is an option but unsupported, see below)
+* `protocol` - (Required) The protocol to use. The possible values for this are: `sqs`, `http`, `https`, `lambda`, `sms`, or `application`. (`email` is an option but unsupported, see below)
* `endpoint` - (Required) The endpoint to send data to, the contents will vary with the protocol. (see below for more information)
* `raw_message_delivery` - (Optional) Boolean indicating whether or not to enable raw message delivery (the original message is directly passed, not wrapped in JSON with the original message in the message property).
From cef05894987ef1bdd332b509e16e92c8b809a7be Mon Sep 17 00:00:00 2001
From: Lars Wander
Date: Fri, 13 Nov 2015 15:36:03 -0500
Subject: [PATCH 438/664] provider/google: Updated Read(..) behavior to handle
deleted resources
---
builtin/providers/google/resource_compute_address.go | 1 +
.../providers/google/resource_compute_autoscaler.go | 1 +
.../google/resource_compute_backend_service.go | 1 +
builtin/providers/google/resource_compute_disk.go | 1 +
.../providers/google/resource_compute_firewall.go | 2 ++
.../google/resource_compute_forwarding_rule.go | 1 +
.../google/resource_compute_global_address.go | 1 +
.../resource_compute_global_forwarding_rule.go | 1 +
.../google/resource_compute_http_health_check.go | 1 +
.../google/resource_compute_https_health_check.go | 1 +
.../providers/google/resource_compute_instance.go | 1 +
.../resource_compute_instance_group_manager.go | 1 +
.../google/resource_compute_instance_template.go | 2 ++
builtin/providers/google/resource_compute_network.go | 1 +
.../google/resource_compute_project_metadata.go | 11 +++++++++--
builtin/providers/google/resource_compute_route.go | 1 +
.../google/resource_compute_ssl_certificate.go | 2 ++
.../google/resource_compute_target_http_proxy.go | 1 +
.../google/resource_compute_target_https_proxy.go | 1 +
.../providers/google/resource_compute_target_pool.go | 1 +
builtin/providers/google/resource_compute_url_map.go | 10 ++++++++++
.../providers/google/resource_compute_vpn_gateway.go | 10 ++++++++++
.../providers/google/resource_compute_vpn_tunnel.go | 10 ++++++++++
.../providers/google/resource_container_cluster.go | 9 +++++++++
.../providers/google/resource_dns_managed_zone.go | 1 +
builtin/providers/google/resource_dns_record_set.go | 9 +++++++++
builtin/providers/google/resource_sql_database.go | 10 ++++++++++
.../google/resource_sql_database_instance.go | 10 ++++++++++
builtin/providers/google/resource_storage_bucket.go | 12 ++++++++++--
.../providers/google/resource_storage_bucket_acl.go | 9 +++++++++
.../google/resource_storage_bucket_object.go | 10 ++++++++++
.../providers/google/resource_storage_object_acl.go | 9 +++++++++
32 files changed, 138 insertions(+), 4 deletions(-)
diff --git a/builtin/providers/google/resource_compute_address.go b/builtin/providers/google/resource_compute_address.go
index 0027df230f..15fa132723 100644
--- a/builtin/providers/google/resource_compute_address.go
+++ b/builtin/providers/google/resource_compute_address.go
@@ -82,6 +82,7 @@ func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
// The resource doesn't exist anymore
+ log.Printf("[WARN] Removing Address %q because it's gone", d.Get("name").(string))
d.SetId("")
return nil
diff --git a/builtin/providers/google/resource_compute_autoscaler.go b/builtin/providers/google/resource_compute_autoscaler.go
index 8539c62b30..89cc41b075 100644
--- a/builtin/providers/google/resource_compute_autoscaler.go
+++ b/builtin/providers/google/resource_compute_autoscaler.go
@@ -240,6 +240,7 @@ func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) err
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
// The resource doesn't exist anymore
+ log.Printf("[WARN] Removing Autoscalar %q because it's gone", d.Get("name").(string))
d.SetId("")
return nil
diff --git a/builtin/providers/google/resource_compute_backend_service.go b/builtin/providers/google/resource_compute_backend_service.go
index ead6e24023..e4c1586d7c 100644
--- a/builtin/providers/google/resource_compute_backend_service.go
+++ b/builtin/providers/google/resource_compute_backend_service.go
@@ -186,6 +186,7 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{})
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
// The resource doesn't exist anymore
+ log.Printf("[WARN] Removing Backend Service %q because it's gone", d.Get("name").(string))
d.SetId("")
return nil
diff --git a/builtin/providers/google/resource_compute_disk.go b/builtin/providers/google/resource_compute_disk.go
index 1118702d6c..1df66b9bb9 100644
--- a/builtin/providers/google/resource_compute_disk.go
+++ b/builtin/providers/google/resource_compute_disk.go
@@ -141,6 +141,7 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error {
config.Project, d.Get("zone").(string), d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Disk %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
diff --git a/builtin/providers/google/resource_compute_firewall.go b/builtin/providers/google/resource_compute_firewall.go
index 1cec2c8265..f2f4fa73d2 100644
--- a/builtin/providers/google/resource_compute_firewall.go
+++ b/builtin/providers/google/resource_compute_firewall.go
@@ -3,6 +3,7 @@ package google
import (
"bytes"
"fmt"
+ "log"
"sort"
"github.com/hashicorp/terraform/helper/hashcode"
@@ -150,6 +151,7 @@ func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
// The resource doesn't exist anymore
+ log.Printf("[WARN] Removing Firewall %q because it's gone", d.Get("name").(string))
d.SetId("")
return nil
diff --git a/builtin/providers/google/resource_compute_forwarding_rule.go b/builtin/providers/google/resource_compute_forwarding_rule.go
index ac4851e51b..e1cbdc46c9 100644
--- a/builtin/providers/google/resource_compute_forwarding_rule.go
+++ b/builtin/providers/google/resource_compute_forwarding_rule.go
@@ -139,6 +139,7 @@ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{})
config.Project, region, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Forwarding Rule %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
diff --git a/builtin/providers/google/resource_compute_global_address.go b/builtin/providers/google/resource_compute_global_address.go
index 74c0633cdd..58d3f5e8e7 100644
--- a/builtin/providers/google/resource_compute_global_address.go
+++ b/builtin/providers/google/resource_compute_global_address.go
@@ -64,6 +64,7 @@ func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{})
config.Project, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Global Address %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
diff --git a/builtin/providers/google/resource_compute_global_forwarding_rule.go b/builtin/providers/google/resource_compute_global_forwarding_rule.go
index f4d3c21bfb..ce987f7165 100644
--- a/builtin/providers/google/resource_compute_global_forwarding_rule.go
+++ b/builtin/providers/google/resource_compute_global_forwarding_rule.go
@@ -131,6 +131,7 @@ func resourceComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interf
config.Project, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Global Forwarding Rule %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
diff --git a/builtin/providers/google/resource_compute_http_health_check.go b/builtin/providers/google/resource_compute_http_health_check.go
index c53267afda..8ddae0b70f 100644
--- a/builtin/providers/google/resource_compute_http_health_check.go
+++ b/builtin/providers/google/resource_compute_http_health_check.go
@@ -187,6 +187,7 @@ func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{}
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
// The resource doesn't exist anymore
+ log.Printf("[WARN] Removing HTTP Health Check %q because it's gone", d.Get("name").(string))
d.SetId("")
return nil
diff --git a/builtin/providers/google/resource_compute_https_health_check.go b/builtin/providers/google/resource_compute_https_health_check.go
index 32a8dfb381..46affdd9e3 100644
--- a/builtin/providers/google/resource_compute_https_health_check.go
+++ b/builtin/providers/google/resource_compute_https_health_check.go
@@ -186,6 +186,7 @@ func resourceComputeHttpsHealthCheckRead(d *schema.ResourceData, meta interface{
config.Project, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing HTTPS Health Check %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go
index 66e0b5e850..56026d3b3d 100644
--- a/builtin/providers/google/resource_compute_instance.go
+++ b/builtin/providers/google/resource_compute_instance.go
@@ -285,6 +285,7 @@ func getInstance(config *Config, d *schema.ResourceData) (*compute.Instance, err
config.Project, d.Get("zone").(string), d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Instance %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
id := d.Id()
d.SetId("")
diff --git a/builtin/providers/google/resource_compute_instance_group_manager.go b/builtin/providers/google/resource_compute_instance_group_manager.go
index e8e6b33a54..25a1ced507 100644
--- a/builtin/providers/google/resource_compute_instance_group_manager.go
+++ b/builtin/providers/google/resource_compute_instance_group_manager.go
@@ -149,6 +149,7 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf
config.Project, d.Get("zone").(string), d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
diff --git a/builtin/providers/google/resource_compute_instance_template.go b/builtin/providers/google/resource_compute_instance_template.go
index 48be445cbb..07bcb5f4c0 100644
--- a/builtin/providers/google/resource_compute_instance_template.go
+++ b/builtin/providers/google/resource_compute_instance_template.go
@@ -2,6 +2,7 @@ package google
import (
"fmt"
+ "log"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
@@ -466,6 +467,7 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{
config.Project, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Instance Template %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
diff --git a/builtin/providers/google/resource_compute_network.go b/builtin/providers/google/resource_compute_network.go
index 5a61f2ad65..a3c72aa114 100644
--- a/builtin/providers/google/resource_compute_network.go
+++ b/builtin/providers/google/resource_compute_network.go
@@ -74,6 +74,7 @@ func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error
config.Project, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Network %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
diff --git a/builtin/providers/google/resource_compute_project_metadata.go b/builtin/providers/google/resource_compute_project_metadata.go
index c549415c22..c2508c8f31 100644
--- a/builtin/providers/google/resource_compute_project_metadata.go
+++ b/builtin/providers/google/resource_compute_project_metadata.go
@@ -4,10 +4,9 @@ import (
"fmt"
"log"
- // "github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
- // "google.golang.org/api/googleapi"
+ "google.golang.org/api/googleapi"
)
func resourceComputeProjectMetadata() *schema.Resource {
@@ -85,6 +84,14 @@ func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}
log.Printf("[DEBUG] Loading project service: %s", config.Project)
project, err := config.clientCompute.Projects.Get(config.Project).Do()
if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Project Metadata because it's gone")
+ // The resource doesn't exist anymore
+ d.SetId("")
+
+ return nil
+ }
+
return fmt.Errorf("Error loading project '%s': %s", config.Project, err)
}
diff --git a/builtin/providers/google/resource_compute_route.go b/builtin/providers/google/resource_compute_route.go
index 82b43d3580..9b5b5292fa 100644
--- a/builtin/providers/google/resource_compute_route.go
+++ b/builtin/providers/google/resource_compute_route.go
@@ -185,6 +185,7 @@ func resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error {
config.Project, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Route %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
diff --git a/builtin/providers/google/resource_compute_ssl_certificate.go b/builtin/providers/google/resource_compute_ssl_certificate.go
index 05de350fac..a80bc2fb24 100644
--- a/builtin/providers/google/resource_compute_ssl_certificate.go
+++ b/builtin/providers/google/resource_compute_ssl_certificate.go
@@ -2,6 +2,7 @@ package google
import (
"fmt"
+ "log"
"strconv"
"github.com/hashicorp/terraform/helper/schema"
@@ -91,6 +92,7 @@ func resourceComputeSslCertificateRead(d *schema.ResourceData, meta interface{})
config.Project, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing SSL Certificate %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
diff --git a/builtin/providers/google/resource_compute_target_http_proxy.go b/builtin/providers/google/resource_compute_target_http_proxy.go
index 6cf2ccf5d0..72644fb017 100644
--- a/builtin/providers/google/resource_compute_target_http_proxy.go
+++ b/builtin/providers/google/resource_compute_target_http_proxy.go
@@ -111,6 +111,7 @@ func resourceComputeTargetHttpProxyRead(d *schema.ResourceData, meta interface{}
config.Project, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Target HTTP Proxy %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
diff --git a/builtin/providers/google/resource_compute_target_https_proxy.go b/builtin/providers/google/resource_compute_target_https_proxy.go
index 1ea8444414..b30fd1eab8 100644
--- a/builtin/providers/google/resource_compute_target_https_proxy.go
+++ b/builtin/providers/google/resource_compute_target_https_proxy.go
@@ -186,6 +186,7 @@ func resourceComputeTargetHttpsProxyRead(d *schema.ResourceData, meta interface{
config.Project, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Target HTTPS Proxy %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
diff --git a/builtin/providers/google/resource_compute_target_pool.go b/builtin/providers/google/resource_compute_target_pool.go
index 91e83a46aa..fa25a1b720 100644
--- a/builtin/providers/google/resource_compute_target_pool.go
+++ b/builtin/providers/google/resource_compute_target_pool.go
@@ -330,6 +330,7 @@ func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) err
config.Project, region, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Target Pool %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
diff --git a/builtin/providers/google/resource_compute_url_map.go b/builtin/providers/google/resource_compute_url_map.go
index 4b29c4360d..47a38431fd 100644
--- a/builtin/providers/google/resource_compute_url_map.go
+++ b/builtin/providers/google/resource_compute_url_map.go
@@ -2,10 +2,12 @@ package google
import (
"fmt"
+ "log"
"strconv"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
+ "google.golang.org/api/googleapi"
)
func resourceComputeUrlMap() *schema.Resource {
@@ -292,6 +294,14 @@ func resourceComputeUrlMapRead(d *schema.ResourceData, meta interface{}) error {
urlMap, err := config.clientCompute.UrlMaps.Get(config.Project, name).Do()
if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing URL Map %q because it's gone", d.Get("name").(string))
+ // The resource doesn't exist anymore
+ d.SetId("")
+
+ return nil
+ }
+
return fmt.Errorf("Error, failed to get Url Map %s: %s", name, err)
}
diff --git a/builtin/providers/google/resource_compute_vpn_gateway.go b/builtin/providers/google/resource_compute_vpn_gateway.go
index bd5350b9c3..697ec8b649 100644
--- a/builtin/providers/google/resource_compute_vpn_gateway.go
+++ b/builtin/providers/google/resource_compute_vpn_gateway.go
@@ -2,10 +2,12 @@ package google
import (
"fmt"
+ "log"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
+ "google.golang.org/api/googleapi"
)
func resourceComputeVpnGateway() *schema.Resource {
@@ -88,6 +90,14 @@ func resourceComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) err
vpnGateway, err := vpnGatewaysService.Get(project, region, name).Do()
if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing VPN Gateway %q because it's gone", d.Get("name").(string))
+ // The resource doesn't exist anymore
+ d.SetId("")
+
+ return nil
+ }
+
return fmt.Errorf("Error Reading VPN Gateway %s: %s", name, err)
}
diff --git a/builtin/providers/google/resource_compute_vpn_tunnel.go b/builtin/providers/google/resource_compute_vpn_tunnel.go
index 172f96a907..f6290504b8 100644
--- a/builtin/providers/google/resource_compute_vpn_tunnel.go
+++ b/builtin/providers/google/resource_compute_vpn_tunnel.go
@@ -2,10 +2,12 @@ package google
import (
"fmt"
+ "log"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/compute/v1"
+ "google.golang.org/api/googleapi"
)
func resourceComputeVpnTunnel() *schema.Resource {
@@ -118,6 +120,14 @@ func resourceComputeVpnTunnelRead(d *schema.ResourceData, meta interface{}) erro
vpnTunnel, err := vpnTunnelsService.Get(project, region, name).Do()
if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing VPN Tunnel %q because it's gone", d.Get("name").(string))
+ // The resource doesn't exist anymore
+ d.SetId("")
+
+ return nil
+ }
+
return fmt.Errorf("Error Reading VPN Tunnel %s: %s", name, err)
}
diff --git a/builtin/providers/google/resource_container_cluster.go b/builtin/providers/google/resource_container_cluster.go
index 68c0b96ad0..447583b9e2 100644
--- a/builtin/providers/google/resource_container_cluster.go
+++ b/builtin/providers/google/resource_container_cluster.go
@@ -10,6 +10,7 @@ import (
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/container/v1"
+ "google.golang.org/api/googleapi"
)
func resourceContainerCluster() *schema.Resource {
@@ -312,6 +313,14 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
cluster, err := config.clientContainer.Projects.Zones.Clusters.Get(
config.Project, zoneName, d.Get("name").(string)).Do()
if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Container Cluster %q because it's gone", d.Get("name").(string))
+ // The resource doesn't exist anymore
+ d.SetId("")
+
+ return nil
+ }
+
return err
}
diff --git a/builtin/providers/google/resource_dns_managed_zone.go b/builtin/providers/google/resource_dns_managed_zone.go
index 7253297e60..6d76c0c442 100644
--- a/builtin/providers/google/resource_dns_managed_zone.go
+++ b/builtin/providers/google/resource_dns_managed_zone.go
@@ -81,6 +81,7 @@ func resourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error
config.Project, d.Id()).Do()
if err != nil {
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing DNS Managed Zone %q because it's gone", d.Get("name").(string))
// The resource doesn't exist anymore
d.SetId("")
diff --git a/builtin/providers/google/resource_dns_record_set.go b/builtin/providers/google/resource_dns_record_set.go
index 05fa547f72..49b1fce71b 100644
--- a/builtin/providers/google/resource_dns_record_set.go
+++ b/builtin/providers/google/resource_dns_record_set.go
@@ -7,6 +7,7 @@ import (
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/dns/v1"
+ "google.golang.org/api/googleapi"
)
func resourceDnsRecordSet() *schema.Resource {
@@ -114,6 +115,14 @@ func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error {
resp, err := config.clientDns.ResourceRecordSets.List(
config.Project, zone).Name(name).Type(dnsType).Do()
if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing DNS Record Set %q because it's gone", d.Get("name").(string))
+ // The resource doesn't exist anymore
+ d.SetId("")
+
+ return nil
+ }
+
return fmt.Errorf("Error reading DNS RecordSet: %#v", err)
}
if len(resp.Rrsets) == 0 {
diff --git a/builtin/providers/google/resource_sql_database.go b/builtin/providers/google/resource_sql_database.go
index e8715f9b0c..f66d3c5845 100644
--- a/builtin/providers/google/resource_sql_database.go
+++ b/builtin/providers/google/resource_sql_database.go
@@ -2,9 +2,11 @@ package google
import (
"fmt"
+ "log"
"github.com/hashicorp/terraform/helper/schema"
+ "google.golang.org/api/googleapi"
"google.golang.org/api/sqladmin/v1beta4"
)
@@ -75,6 +77,14 @@ func resourceSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error {
database_name).Do()
if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing SQL Database %q because it's gone", d.Get("name").(string))
+ // The resource doesn't exist anymore
+ d.SetId("")
+
+ return nil
+ }
+
return fmt.Errorf("Error, failed to get"+
"database %s in instance %s: %s", database_name,
instance_name, err)
diff --git a/builtin/providers/google/resource_sql_database_instance.go b/builtin/providers/google/resource_sql_database_instance.go
index d684839283..ff8529944a 100644
--- a/builtin/providers/google/resource_sql_database_instance.go
+++ b/builtin/providers/google/resource_sql_database_instance.go
@@ -2,9 +2,11 @@ package google
import (
"fmt"
+ "log"
"github.com/hashicorp/terraform/helper/schema"
+ "google.golang.org/api/googleapi"
"google.golang.org/api/sqladmin/v1beta4"
)
@@ -462,6 +464,14 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e
d.Get("name").(string)).Do()
if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing SQL Database %q because it's gone", d.Get("name").(string))
+ // The resource doesn't exist anymore
+ d.SetId("")
+
+ return nil
+ }
+
return fmt.Errorf("Error retrieving instance %s: %s",
d.Get("name").(string), err)
}
diff --git a/builtin/providers/google/resource_storage_bucket.go b/builtin/providers/google/resource_storage_bucket.go
index 9118119a8f..c4e64244fb 100644
--- a/builtin/providers/google/resource_storage_bucket.go
+++ b/builtin/providers/google/resource_storage_bucket.go
@@ -7,6 +7,7 @@ import (
"github.com/hashicorp/terraform/helper/schema"
+ "google.golang.org/api/googleapi"
"google.golang.org/api/storage/v1"
)
@@ -174,8 +175,15 @@ func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error {
res, err := config.clientStorage.Buckets.Get(bucket).Do()
if err != nil {
- fmt.Printf("Error reading bucket %s: %v", bucket, err)
- return err
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Bucket %q because it's gone", d.Get("name").(string))
+ // The resource doesn't exist anymore
+ d.SetId("")
+
+ return nil
+ }
+
+ return fmt.Errorf("Error reading bucket %s: %v", bucket, err)
}
log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink)
diff --git a/builtin/providers/google/resource_storage_bucket_acl.go b/builtin/providers/google/resource_storage_bucket_acl.go
index 3b866e0ad2..488fd85f45 100644
--- a/builtin/providers/google/resource_storage_bucket_acl.go
+++ b/builtin/providers/google/resource_storage_bucket_acl.go
@@ -7,6 +7,7 @@ import (
"github.com/hashicorp/terraform/helper/schema"
+ "google.golang.org/api/googleapi"
"google.golang.org/api/storage/v1"
)
@@ -166,6 +167,14 @@ func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) erro
res, err := config.clientStorage.BucketAccessControls.List(bucket).Do()
if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Bucket ACL for bucket %q because it's gone", d.Get("bucket").(string))
+ // The resource doesn't exist anymore
+ d.SetId("")
+
+ return nil
+ }
+
return err
}
diff --git a/builtin/providers/google/resource_storage_bucket_object.go b/builtin/providers/google/resource_storage_bucket_object.go
index 231153a85c..198d7b6850 100644
--- a/builtin/providers/google/resource_storage_bucket_object.go
+++ b/builtin/providers/google/resource_storage_bucket_object.go
@@ -2,10 +2,12 @@ package google
import (
"fmt"
+ "log"
"os"
"github.com/hashicorp/terraform/helper/schema"
+ "google.golang.org/api/googleapi"
"google.golang.org/api/storage/v1"
)
@@ -96,6 +98,14 @@ func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) e
res, err := getCall.Do()
if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Bucket Object %q because it's gone", d.Get("name").(string))
+ // The resource doesn't exist anymore
+ d.SetId("")
+
+ return nil
+ }
+
return fmt.Errorf("Error retrieving contents of object %s: %s", name, err)
}
diff --git a/builtin/providers/google/resource_storage_object_acl.go b/builtin/providers/google/resource_storage_object_acl.go
index 5212f81db2..e4968265f7 100644
--- a/builtin/providers/google/resource_storage_object_acl.go
+++ b/builtin/providers/google/resource_storage_object_acl.go
@@ -6,6 +6,7 @@ import (
"github.com/hashicorp/terraform/helper/schema"
+ "google.golang.org/api/googleapi"
"google.golang.org/api/storage/v1"
)
@@ -134,6 +135,14 @@ func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) erro
res, err := config.clientStorage.ObjectAccessControls.List(bucket, object).Do()
if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+ log.Printf("[WARN] Removing Storage Object ACL for Bucket %q because it's gone", d.Get("bucket").(string))
+ // The resource doesn't exist anymore
+ d.SetId("")
+
+ return nil
+ }
+
return err
}
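Every Read function touched above repeats the same 404 check. A hedged sketch of how that pattern could be collected into one helper (`handleNotFoundError` is a hypothetical name, not something this patch adds):

```go
package google

import (
	"log"

	"github.com/hashicorp/terraform/helper/schema"
	"google.golang.org/api/googleapi"
)

// handleNotFoundError clears the resource from state when the API reports
// a 404 and passes any other error through unchanged. Illustrative only.
func handleNotFoundError(err error, d *schema.ResourceData, resource string) error {
	if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
		log.Printf("[WARN] Removing %s because it's gone", resource)
		d.SetId("")
		return nil
	}
	return err
}
```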
From 775360734ae533f3a6b7b1772022cbe763851f03 Mon Sep 17 00:00:00 2001
From: Lars Wander
Date: Thu, 7 Jan 2016 10:58:30 -0500
Subject: [PATCH 439/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7729c5d0a4..d8a23d63ab 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -83,6 +83,7 @@ BUG FIXES:
* provider/rundeck: Tolerate Rundeck server not returning project name when reading a job [GH-4301]
* provider/vsphere: Create and attach additional disks before bootup [GH-4196]
* provider/openstack: Convert block_device from a Set to a List [GH-4288]
+ * provider/google: Terraform identifies deleted resources and handles them appropriately on Read [GH-3913]
## 0.6.8 (December 2, 2015)
From 96043979c872faf9a0fa4ae7a27f34a8e43261a4 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Thu, 7 Jan 2016 10:29:16 -0600
Subject: [PATCH 440/664] add a small section on contributing
providers/resources
---
CONTRIBUTING.md | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f5554557f5..75fcac377d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -47,6 +47,24 @@ it raises the chances we can quickly merge or address your contributions.
the issue tracker clean. The issue is still indexed and available for
future viewers, or can be re-opened if necessary.
+# Contributing to Terraform
+
+Thank you for contributing! We do have some requests that we ask you to include
+in your contribution.
+
+## Providers or Resources
+
+Contributions to Providers or their Resources need to be documented and include
+relevant acceptance tests. Information on setting up the terraform.io site
+locally can be found in the [website folder][1]
+of this repository, in the README.
+
+Instructions on how to run acceptance tests can be found in our [README][2]
+in the root of this project.
+
+If you have questions about this process, please check out our [mailing list][3]
+or #terraform-tool on Freenode.
+
## Setting up Go to work on Terraform
If you have never worked with Go before, you will have to complete the
@@ -71,3 +89,7 @@ use the Vagrantfile in this repo to stand up a dev VM).
7. If everything works well and the tests pass, run `go fmt` on your code
before submitting a pull request.
+
+[1]: https://github.com/hashicorp/terraform/tree/master/website
+[2]: https://github.com/hashicorp/terraform#acceptance-tests
+[3]: https://groups.google.com/group/terraform-tool
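For contributors unsure what the "relevant acceptance tests" mentioned above look like, the skeleton below is a rough sketch in the style used throughout this series. The resource name, attribute, and config are hypothetical placeholders; `testAccPreCheck` and `testAccProviders` are the helpers each provider package already defines in its `provider_test.go`. Acceptance tests are run through the Makefile target documented in the README, e.g. `make testacc TEST=./builtin/providers/aws`.

```go
package example

import (
	"testing"

	"github.com/hashicorp/terraform/helper/resource"
)

// Hypothetical config; "example_thing" is a placeholder resource, not part of
// any real provider in this repository.
const testAccExampleThing_basic = `
resource "example_thing" "test" {
    name = "acceptance-test-thing"
}
`

func TestAccExampleThing_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccExampleThing_basic,
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr(
						"example_thing.test", "name", "acceptance-test-thing"),
				),
			},
		},
	})
}
```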
From 3c222b32ce865f485f1cb5b0d2a466397814f3f4 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Thu, 7 Jan 2016 08:35:19 -0800
Subject: [PATCH 441/664] provider/azure: Retry checking DB server existence
---
..._sql_database_server_firewall_rule_test.go | 26 ++++++++++++++-----
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule_test.go b/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule_test.go
index cdd0ab4578..2c764cdb7c 100644
--- a/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule_test.go
+++ b/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule_test.go
@@ -4,7 +4,9 @@ import (
"fmt"
"strings"
"testing"
+ "time"
+ "github.com/Azure/azure-sdk-for-go/management/sql"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@@ -101,32 +103,42 @@ func TestAccAzureSqlDatabaseServerFirewallRuleUpdate(t *testing.T) {
func testAccAzureDatabaseServerFirewallRuleExists(name string, servers []string) resource.TestCheckFunc {
return func(s *terraform.State) error {
- resource, ok := s.RootModule().Resources[name]
+ res, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("Azure Database Server Firewall Rule %q doesn't exist.", name)
}
- if resource.Primary.ID == "" {
- return fmt.Errorf("Azure Database Server Firewall Rule %q resource ID not set.", name)
+ if res.Primary.ID == "" {
+ return fmt.Errorf("Azure Database Server Firewall Rule %q res ID not set.", name)
}
sqlClient := testAccProvider.Meta().(*Client).sqlClient
for _, server := range servers {
- rules, err := sqlClient.ListFirewallRules(server)
+ var rules sql.ListFirewallRulesResponse
+
+ err := resource.Retry(10*time.Minute, func() error {
+ var erri error
+ rules, erri = sqlClient.ListFirewallRules(server)
+ if erri != nil {
+ return fmt.Errorf("Error listing Azure Database Server Firewall Rules for Server %q: %s", server, erri)
+ }
+
+ return nil
+ })
if err != nil {
- return fmt.Errorf("Error listing Azure Database Server Firewall Rules for Server %q: %s", server, err)
+ return err
}
var found bool
for _, rule := range rules.FirewallRules {
- if rule.Name == resource.Primary.ID {
+ if rule.Name == res.Primary.ID {
found = true
break
}
}
if !found {
- return fmt.Errorf("Azure Database Server Firewall Rule %q doesn't exists on server %q.", resource.Primary.ID, server)
+ return fmt.Errorf("Azure Database Server Firewall Rule %q doesn't exist on server %q.", res.Primary.ID, server)
}
}
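The interesting detail in this change is that `rules` is declared outside the retry closure and assigned inside it, so the response is still available once `resource.Retry` returns. The sketch below isolates that capture pattern, using the same `func() error` callback signature the retry helper has in this diff; the `listWithRetry` name and the function-argument shape are chosen here for illustration only.

```go
package azure

import (
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/management/sql"
	"github.com/hashicorp/terraform/helper/resource"
)

// listWithRetry retries a flaky list call for up to ten minutes and returns
// the last successful response. The result variable is declared outside the
// closure so the value assigned on the successful attempt survives the retries.
func listWithRetry(list func() (sql.ListFirewallRulesResponse, error)) (sql.ListFirewallRulesResponse, error) {
	var rules sql.ListFirewallRulesResponse
	err := resource.Retry(10*time.Minute, func() error {
		var inner error
		rules, inner = list()
		if inner != nil {
			return fmt.Errorf("Error listing firewall rules: %s", inner)
		}
		return nil
	})
	return rules, err
}
```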
From 64f19c0dc3925a967345802a05771459233cd6ec Mon Sep 17 00:00:00 2001
From: Petr Artamonov
Date: Thu, 7 Jan 2016 18:13:30 +0100
Subject: [PATCH 442/664] enable reporting flag
---
builtin/provisioners/chef/resource_provisioner.go | 3 +++
1 file changed, 3 insertions(+)
diff --git a/builtin/provisioners/chef/resource_provisioner.go b/builtin/provisioners/chef/resource_provisioner.go
index 68ae6256a4..14b57a3e2e 100644
--- a/builtin/provisioners/chef/resource_provisioner.go
+++ b/builtin/provisioners/chef/resource_provisioner.go
@@ -62,6 +62,8 @@ ENV['HTTPS_PROXY'] = "{{ .HTTPSProxy }}"
{{ if .NOProxy }}no_proxy "{{ join .NOProxy "," }}"{{ end }}
{{ if .SSLVerifyMode }}ssl_verify_mode {{ .SSLVerifyMode }}{{ end }}
+
+{{ if .EnableReporting }}enable_reporting {{ .EnableReporting }}{{ end }}
`
// Provisioner represents a specifically configured chef provisioner
@@ -84,6 +86,7 @@ type Provisioner struct {
ServerURL string `mapstructure:"server_url"`
SkipInstall bool `mapstructure:"skip_install"`
SSLVerifyMode string `mapstructure:"ssl_verify_mode"`
+ EnableReporting string `mapstructure:"enable_reporting"`
ValidationClientName string `mapstructure:"validation_client_name"`
ValidationKey string `mapstructure:"validation_key"`
Version string `mapstructure:"version"`
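A standalone sketch (not the provisioner's actual rendering path) of how the new template guard behaves: because `EnableReporting` is a string, the `{{ if }}` block fires for any non-empty value and the rendered `client.rb` gains an `enable_reporting` line.

```go
package main

import (
	"os"
	"text/template"
)

// Only the line added to the client.rb template above; the full template in
// the provisioner contains many more directives.
const snippet = `{{ if .EnableReporting }}enable_reporting {{ .EnableReporting }}{{ end }}`

func main() {
	t := template.Must(template.New("client.rb").Parse(snippet))
	// Prints: enable_reporting true
	if err := t.Execute(os.Stdout, struct{ EnableReporting string }{EnableReporting: "true"}); err != nil {
		panic(err)
	}
}
```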
From e2a7d4d98bc0d00f2b62e2852742208fffd08a84 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Thu, 7 Jan 2016 11:48:53 -0600
Subject: [PATCH 443/664] provider/aws: Update
testAccCheckAWSVpcPeeringConnectionDestroy to correctly check the destroyed
state
---
...esource_aws_vpc_peering_connection_test.go | 26 ++++++++++++++++---
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go b/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go
index 7e85659f2e..6393d4564c 100644
--- a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go
+++ b/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go
@@ -70,14 +70,32 @@ func testAccCheckAWSVpcPeeringConnectionDestroy(s *terraform.State) error {
VpcPeeringConnectionIds: []*string{aws.String(rs.Primary.ID)},
})
- if err == nil {
- if len(describe.VpcPeeringConnections) != 0 {
- return fmt.Errorf("vpc peering connection still exists")
+ if err != nil {
+ return err
+ }
+
+ var pc *ec2.VpcPeeringConnection
+ for _, c := range describe.VpcPeeringConnections {
+ if rs.Primary.ID == *c.VpcPeeringConnectionId {
+ pc = c
}
}
+
+ if pc == nil {
+ // not found
+ return nil
+ }
+
+ if pc.Status != nil {
+ if *pc.Status.Code == "deleted" {
+ return nil
+ }
+ return fmt.Errorf("Found vpc peering connection in unexpected state: %s", pc)
+ }
+
}
- return nil
+ return fmt.Errorf("Fall through error for testAccCheckAWSVpcPeeringConnectionDestroy")
}
func testAccCheckAWSVpcPeeringConnectionExists(n string, connection *ec2.VpcPeeringConnection) resource.TestCheckFunc {
From a52c4bce6689b5b5255fbd1ea905cd525169e3c0 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Thu, 7 Jan 2016 11:00:57 -0800
Subject: [PATCH 444/664] provider/azure: Don't delete firewall rules on
non-existent servers
---
.../azure/resource_azure_sql_database_server_firewall_rule.go | 3 +++
1 file changed, 3 insertions(+)
diff --git a/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule.go b/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule.go
index a5cb0b2147..06df80ce14 100644
--- a/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule.go
+++ b/builtin/providers/azure/resource_azure_sql_database_server_firewall_rule.go
@@ -209,6 +209,9 @@ func resourceAzureSqlDatabaseServerFirewallRuleDelete(d *schema.ResourceData, me
// go ahead and delete the rule:
log.Printf("[INFO] Issuing deletion of Azure Database Server Firewall Rule %q in Server %q.", name, serverName)
if err := sqlClient.DeleteFirewallRule(serverName, name); err != nil {
+ if strings.Contains(err.Error(), "Cannot open server") {
+ break
+ }
return fmt.Errorf("Error deleting Azure Database Server Firewall Rule %q for Server %q: %s", name, serverName, err)
}
From 523eb2107d4ccdffa13c7651e32057ee135aa473 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Thu, 7 Jan 2016 11:09:40 -0800
Subject: [PATCH 445/664] provider/azure: Don't reuse names in tests
---
builtin/providers/azure/resource_azure_instance_test.go | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/builtin/providers/azure/resource_azure_instance_test.go b/builtin/providers/azure/resource_azure_instance_test.go
index 1ed9fffb83..7593f376bd 100644
--- a/builtin/providers/azure/resource_azure_instance_test.go
+++ b/builtin/providers/azure/resource_azure_instance_test.go
@@ -411,7 +411,7 @@ resource "azure_instance" "foo" {
var testAccAzureInstance_advanced = fmt.Sprintf(`
resource "azure_virtual_network" "foo" {
- name = "terraform-vnet"
+ name = "terraform-vnet-advanced-test"
address_space = ["10.1.2.0/24"]
location = "West US"
@@ -467,7 +467,7 @@ resource "azure_instance" "foo" {
var testAccAzureInstance_update = fmt.Sprintf(`
resource "azure_virtual_network" "foo" {
- name = "terraform-vnet"
+ name = "terraform-vnet-update-test"
address_space = ["10.1.2.0/24"]
location = "West US"
@@ -501,7 +501,7 @@ resource "azure_security_group_rule" "foo" {
}
resource "azure_security_group" "bar" {
- name = "terraform-security-group2"
+ name = "terraform-security-update-group2"
location = "West US"
}
From dcce2aa4791b0d393b067ce85595db62ef6ff6e1 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Thu, 7 Jan 2016 14:16:41 -0600
Subject: [PATCH 446/664] providers/aws: Update OpsWorks tests to inject the
 expected availability zone, based on whether we are testing a VPC or not
---
.../aws/resource_aws_opsworks_stack_test.go | 225 +++++++++---------
1 file changed, 117 insertions(+), 108 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_opsworks_stack_test.go b/builtin/providers/aws/resource_aws_opsworks_stack_test.go
index 97efcdd66a..26a72a769a 100644
--- a/builtin/providers/aws/resource_aws_opsworks_stack_test.go
+++ b/builtin/providers/aws/resource_aws_opsworks_stack_test.go
@@ -2,6 +2,7 @@ package aws
import (
"fmt"
+ "log"
"testing"
"github.com/hashicorp/terraform/helper/resource"
@@ -132,11 +133,11 @@ func TestAccAWSOpsworksStackNoVpc(t *testing.T) {
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAwsOpsworksStackConfigNoVpcCreate,
- Check: testAccAwsOpsworksStackCheckResourceAttrsCreate,
+ Check: testAccAwsOpsworksStackCheckResourceAttrsCreate("us-east-1c"),
},
resource.TestStep{
Config: testAccAWSOpsworksStackConfigNoVpcUpdate,
- Check: testAccAwsOpsworksStackCheckResourceAttrsUpdate,
+ Check: testAccAwsOpsworksStackCheckResourceAttrsUpdate("us-east-1c"),
},
},
})
@@ -153,11 +154,11 @@ resource "aws_vpc" "tf-acc" {
resource "aws_subnet" "tf-acc" {
vpc_id = "${aws_vpc.tf-acc.id}"
cidr_block = "${aws_vpc.tf-acc.cidr_block}"
- availability_zone = "us-east-1c"
+ availability_zone = "us-west-2a"
}
resource "aws_opsworks_stack" "tf-acc" {
name = "tf-opsworks-acc"
- region = "us-east-1"
+ region = "us-west-2"
vpc_id = "${aws_vpc.tf-acc.id}"
default_subnet_id = "${aws_subnet.tf-acc.id}"
service_role_arn = "${aws_iam_role.opsworks_service.arn}"
@@ -177,11 +178,11 @@ resource "aws_vpc" "tf-acc" {
resource "aws_subnet" "tf-acc" {
vpc_id = "${aws_vpc.tf-acc.id}"
cidr_block = "${aws_vpc.tf-acc.cidr_block}"
- availability_zone = "us-east-1c"
+ availability_zone = "us-west-2a"
}
resource "aws_opsworks_stack" "tf-acc" {
name = "tf-opsworks-acc"
- region = "us-east-1"
+ region = "us-west-2"
vpc_id = "${aws_vpc.tf-acc.id}"
default_subnet_id = "${aws_subnet.tf-acc.id}"
service_role_arn = "${aws_iam_role.opsworks_service.arn}"
@@ -209,12 +210,12 @@ func TestAccAWSOpsworksStackVpc(t *testing.T) {
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAwsOpsworksStackConfigVpcCreate,
- Check: testAccAwsOpsworksStackCheckResourceAttrsCreate,
+ Check: testAccAwsOpsworksStackCheckResourceAttrsCreate("us-west-2a"),
},
resource.TestStep{
Config: testAccAWSOpsworksStackConfigVpcUpdate,
Check: resource.ComposeTestCheckFunc(
- testAccAwsOpsworksStackCheckResourceAttrsUpdate,
+ testAccAwsOpsworksStackCheckResourceAttrsUpdate("us-west-2a"),
testAccAwsOpsworksCheckVpc,
),
},
@@ -226,106 +227,110 @@ func TestAccAWSOpsworksStackVpc(t *testing.T) {
//// Checkers and Utilities
////////////////////////////
-var testAccAwsOpsworksStackCheckResourceAttrsCreate = resource.ComposeTestCheckFunc(
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "name",
- "tf-opsworks-acc",
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "default_availability_zone",
- "us-east-1c",
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "default_os",
- "Amazon Linux 2014.09",
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "default_root_device_type",
- "ebs",
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "custom_json",
- `{"key": "value"}`,
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "configuration_manager_version",
- "11.10",
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "use_opsworks_security_groups",
- "false",
- ),
-)
+func testAccAwsOpsworksStackCheckResourceAttrsCreate(zone string) resource.TestCheckFunc {
+ return resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "name",
+ "tf-opsworks-acc",
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "default_availability_zone",
+ zone,
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "default_os",
+ "Amazon Linux 2014.09",
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "default_root_device_type",
+ "ebs",
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "custom_json",
+ `{"key": "value"}`,
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "configuration_manager_version",
+ "11.10",
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "use_opsworks_security_groups",
+ "false",
+ ),
+ )
+}
-var testAccAwsOpsworksStackCheckResourceAttrsUpdate = resource.ComposeTestCheckFunc(
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "name",
- "tf-opsworks-acc",
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "default_availability_zone",
- "us-east-1c",
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "default_os",
- "Amazon Linux 2014.09",
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "default_root_device_type",
- "ebs",
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "custom_json",
- `{"key": "value"}`,
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "configuration_manager_version",
- "11.10",
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "use_opsworks_security_groups",
- "false",
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "use_custom_cookbooks",
- "true",
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "manage_berkshelf",
- "true",
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "custom_cookbooks_source.0.type",
- "git",
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "custom_cookbooks_source.0.revision",
- "master",
- ),
- resource.TestCheckResourceAttr(
- "aws_opsworks_stack.tf-acc",
- "custom_cookbooks_source.0.url",
- "https://github.com/aws/opsworks-example-cookbooks.git",
- ),
-)
+func testAccAwsOpsworksStackCheckResourceAttrsUpdate(zone string) resource.TestCheckFunc {
+ return resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "name",
+ "tf-opsworks-acc",
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "default_availability_zone",
+ zone,
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "default_os",
+ "Amazon Linux 2014.09",
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "default_root_device_type",
+ "ebs",
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "custom_json",
+ `{"key": "value"}`,
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "configuration_manager_version",
+ "11.10",
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "use_opsworks_security_groups",
+ "false",
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "use_custom_cookbooks",
+ "true",
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "manage_berkshelf",
+ "true",
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "custom_cookbooks_source.0.type",
+ "git",
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "custom_cookbooks_source.0.revision",
+ "master",
+ ),
+ resource.TestCheckResourceAttr(
+ "aws_opsworks_stack.tf-acc",
+ "custom_cookbooks_source.0.url",
+ "https://github.com/aws/opsworks-example-cookbooks.git",
+ ),
+ )
+}
func testAccAwsOpsworksCheckVpc(s *terraform.State) error {
rs, ok := s.RootModule().Resources["aws_opsworks_stack.tf-acc"]
@@ -371,7 +376,7 @@ func testAccCheckAwsOpsworksStackDestroy(s *terraform.State) error {
},
}
- _, err := opsworksconn.DescribeStacks(req)
+ r, err := opsworksconn.DescribeStacks(req)
if err != nil {
if awserr, ok := err.(awserr.Error); ok {
if awserr.Code() == "ResourceNotFoundException" {
@@ -382,6 +387,10 @@ func testAccCheckAwsOpsworksStackDestroy(s *terraform.State) error {
return err
}
+ if r != nil {
+ log.Printf("\n---\nStack response: %s\n---\n", r)
+ }
+
}
return fmt.Errorf("Fall through error for OpsWorks stack test")
}
From 9a4f0a06b3494284bad1aeff8e72fcc471c7bec0 Mon Sep 17 00:00:00 2001
From: clint shryock
Date: Thu, 7 Jan 2016 15:00:55 -0600
Subject: [PATCH 447/664] clean up debugging
---
builtin/providers/aws/resource_aws_opsworks_stack_test.go | 8 +-------
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_opsworks_stack_test.go b/builtin/providers/aws/resource_aws_opsworks_stack_test.go
index 26a72a769a..ba34663d4f 100644
--- a/builtin/providers/aws/resource_aws_opsworks_stack_test.go
+++ b/builtin/providers/aws/resource_aws_opsworks_stack_test.go
@@ -2,7 +2,6 @@ package aws
import (
"fmt"
- "log"
"testing"
"github.com/hashicorp/terraform/helper/resource"
@@ -376,7 +375,7 @@ func testAccCheckAwsOpsworksStackDestroy(s *terraform.State) error {
},
}
- r, err := opsworksconn.DescribeStacks(req)
+ _, err := opsworksconn.DescribeStacks(req)
if err != nil {
if awserr, ok := err.(awserr.Error); ok {
if awserr.Code() == "ResourceNotFoundException" {
@@ -386,11 +385,6 @@ func testAccCheckAwsOpsworksStackDestroy(s *terraform.State) error {
}
return err
}
-
- if r != nil {
- log.Printf("\n---\nStack response: %s\n---\n", r)
- }
-
}
return fmt.Errorf("Fall through error for OpsWorks stack test")
}
From c4c5a0c7d41b4cf33b13171cf59d38c476e1937e Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Thu, 7 Jan 2016 13:02:04 -0800
Subject: [PATCH 448/664] Increase acceptance test timeout to 120m from 90m
---
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index dfb3010343..b114c8cdcf 100644
--- a/Makefile
+++ b/Makefile
@@ -39,7 +39,7 @@ testacc: fmtcheck generate
echo " make testacc TEST=./builtin/providers/aws"; \
exit 1; \
fi
- TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 90m
+ TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 120m
# testrace runs the race checker
testrace: fmtcheck generate
From 43760d46704f366c580d62200cdf1e312a00f780 Mon Sep 17 00:00:00 2001
From: stack72
Date: Thu, 7 Jan 2016 12:23:08 +0000
Subject: [PATCH 449/664] Scaffolding for the AzureRM Network Security Groups
---
builtin/providers/azurerm/provider.go | 1 +
.../azurerm/resource_arm_security_group.go | 310 ++++++++++++++++++
.../resource_arm_security_group_test.go | 283 ++++++++++++++++
.../azurerm/r/security_group.html.markdown | 83 +++++
website/source/layouts/azurerm.erb | 4 +
5 files changed, 681 insertions(+)
create mode 100644 builtin/providers/azurerm/resource_arm_security_group.go
create mode 100644 builtin/providers/azurerm/resource_arm_security_group_test.go
create mode 100644 website/source/docs/providers/azurerm/r/security_group.html.markdown
diff --git a/builtin/providers/azurerm/provider.go b/builtin/providers/azurerm/provider.go
index 612109fc32..1642c64d35 100644
--- a/builtin/providers/azurerm/provider.go
+++ b/builtin/providers/azurerm/provider.go
@@ -43,6 +43,7 @@ func Provider() terraform.ResourceProvider {
"azurerm_virtual_network": resourceArmVirtualNetwork(),
"azurerm_local_network_gateway": resourceArmLocalNetworkGateway(),
"azurerm_availability_set": resourceArmAvailabilitySet(),
+ "azurerm_security_group": resourceArmSecurityGroup(),
},
ConfigureFunc: providerConfigure,
diff --git a/builtin/providers/azurerm/resource_arm_security_group.go b/builtin/providers/azurerm/resource_arm_security_group.go
new file mode 100644
index 0000000000..e523ea2f69
--- /dev/null
+++ b/builtin/providers/azurerm/resource_arm_security_group.go
@@ -0,0 +1,310 @@
+package azurerm
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "net/http"
+ "time"
+
+ "strings"
+
+ "github.com/Azure/azure-sdk-for-go/arm/network"
+ "github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceArmSecurityGroup() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceArmSecurityGroupCreate,
+ Read: resourceArmSecurityGroupRead,
+ Update: resourceArmSecurityGroupCreate,
+ Delete: resourceArmSecurityGroupDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "location": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ StateFunc: azureRMNormalizeLocation,
+ },
+
+ "resource_group_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "security_rule": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ Computed: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ if len(value) > 140 {
+ errors = append(errors, fmt.Errorf(
+ "The security rule description can be no longer than 140 chars"))
+ }
+ return
+ },
+ },
+
+ "protocol": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ValidateFunc: validateSecurityRuleProtocol,
+ },
+
+ "source_port_range": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "destination_port_range": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "source_address_prefix": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "destination_address_prefix": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "access": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ValidateFunc: validateSecurityRuleAccess,
+ },
+
+ "priority": &schema.Schema{
+ Type: schema.TypeInt,
+ Required: true,
+ ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(int)
+ if value < 100 || value > 4096 {
+ errors = append(errors, fmt.Errorf(
+ "The `priority` can only be between 100 and 4096"))
+ }
+ return
+ },
+ },
+
+ "direction": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ValidateFunc: validateSecurityRuleDirection,
+ },
+ },
+ },
+ Set: resourceArmSecurityGroupRuleHash,
+ },
+ },
+ }
+}
+
+func resourceArmSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*ArmClient)
+ secClient := client.secGroupClient
+
+ name := d.Get("name").(string)
+ location := d.Get("location").(string)
+ resGroup := d.Get("resource_group_name").(string)
+
+ sgRules, sgErr := expandAzureRmSecurityGroupRules(d)
+ if sgErr != nil {
+ return fmt.Errorf("Error Building list of Security Group Rules: %s", sgErr)
+ }
+
+ sg := network.SecurityGroup{
+ Name: &name,
+ Location: &location,
+ Properties: &network.SecurityGroupPropertiesFormat{
+ SecurityRules: &sgRules,
+ },
+ }
+
+ resp, err := secClient.CreateOrUpdate(resGroup, name, sg)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(*resp.ID)
+
+ log.Printf("[DEBUG] Waiting for Security Group (%s) to become available", name)
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"Accepted", "Updating"},
+ Target: "Succeeded",
+ Refresh: securityGroupStateRefreshFunc(client, resGroup, name),
+ Timeout: 10 * time.Minute,
+ }
+ if _, err := stateConf.WaitForState(); err != nil {
+ return fmt.Errorf("Error waiting for Security Group (%s) to become available: %s", name, err)
+ }
+
+ return resourceArmSecurityGroupRead(d, meta)
+}
+
+func resourceArmSecurityGroupRead(d *schema.ResourceData, meta interface{}) error {
+ secGroupClient := meta.(*ArmClient).secGroupClient
+
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+ resGroup := id.ResourceGroup
+ name := id.Path["networkSecurityGroups"]
+
+ resp, err := secGroupClient.Get(resGroup, name)
+ if resp.StatusCode == http.StatusNotFound {
+ d.SetId("")
+ return nil
+ }
+ if err != nil {
+ return fmt.Errorf("Error making Read request on Azure Security Group %s: %s", name, err)
+ }
+
+ return nil
+}
+
+func resourceArmSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {
+ secGroupClient := meta.(*ArmClient).secGroupClient
+
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+ resGroup := id.ResourceGroup
+ name := id.Path["networkSecurityGroups"]
+
+ _, err = secGroupClient.Delete(resGroup, name)
+
+ return err
+}
+
+func resourceArmSecurityGroupRuleHash(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+ buf.WriteString(fmt.Sprintf("%s-", m["protocol"].(string)))
+ buf.WriteString(fmt.Sprintf("%s-", m["source_port_range"].(string)))
+ buf.WriteString(fmt.Sprintf("%s-", m["destination_port_range"].(string)))
+ buf.WriteString(fmt.Sprintf("%s-", m["source_address_prefix"].(string)))
+ buf.WriteString(fmt.Sprintf("%s-", m["destination_address_prefix"].(string)))
+ buf.WriteString(fmt.Sprintf("%s-", m["access"].(string)))
+ buf.WriteString(fmt.Sprintf("%d-", m["priority"].(int)))
+ buf.WriteString(fmt.Sprintf("%s-", m["direction"].(string)))
+
+ return hashcode.String(buf.String())
+}
+
+func securityGroupStateRefreshFunc(client *ArmClient, resourceGroupName string, securityGroupName string) resource.StateRefreshFunc {
+ return func() (interface{}, string, error) {
+ res, err := client.secGroupClient.Get(resourceGroupName, securityGroupName)
+ if err != nil {
+ return nil, "", fmt.Errorf("Error issuing read request in securityGroupStateRefreshFunc to Azure ARM for security group '%s' (RG: '%s'): %s", securityGroupName, resourceGroupName, err)
+ }
+
+ return res, *res.Properties.ProvisioningState, nil
+ }
+}
+
+func expandAzureRmSecurityGroupRules(d *schema.ResourceData) ([]network.SecurityRule, error) {
+ sgRules := d.Get("security_rule").(*schema.Set).List()
+ rules := make([]network.SecurityRule, 0, len(sgRules))
+
+ for _, sgRaw := range sgRules {
+ data := sgRaw.(map[string]interface{})
+
+ source_port_range := data["source_port_range"].(string)
+ destination_port_range := data["destination_port_range"].(string)
+ source_address_prefix := data["source_address_prefix"].(string)
+ destination_address_prefix := data["destination_address_prefix"].(string)
+ priority := data["priority"].(int)
+
+ properties := network.SecurityRulePropertiesFormat{
+ SourcePortRange: &source_port_range,
+ DestinationPortRange: &destination_port_range,
+ SourceAddressPrefix: &source_address_prefix,
+ DestinationAddressPrefix: &destination_address_prefix,
+ Priority: &priority,
+ Access: network.SecurityRuleAccess(data["access"].(string)),
+ Direction: network.SecurityRuleDirection(data["direction"].(string)),
+ Protocol: network.SecurityRuleProtocol(data["protocol"].(string)),
+ }
+
+ if v := data["description"].(string); v != "" {
+ properties.Description = &v
+ }
+
+ name := data["name"].(string)
+ rule := network.SecurityRule{
+ Name: &name,
+ Properties: &properties,
+ }
+
+ rules = append(rules, rule)
+ }
+
+ return rules, nil
+}
+
+func validateSecurityRuleProtocol(v interface{}, k string) (ws []string, errors []error) {
+ value := strings.ToLower(v.(string))
+ viewTypes := map[string]bool{
+ "tcp": true,
+ "udp": true,
+ "*": true,
+ }
+
+ if !viewTypes[value] {
+ errors = append(errors, fmt.Errorf("Security Rule Protocol can only be Tcp, Udp or *"))
+ }
+ return
+}
+
+func validateSecurityRuleAccess(v interface{}, k string) (ws []string, errors []error) {
+ value := strings.ToLower(v.(string))
+ viewTypes := map[string]bool{
+ "allow": true,
+ "deny": true,
+ }
+
+ if !viewTypes[value] {
+ errors = append(errors, fmt.Errorf("Security Rule Access can only be Allow or Deny"))
+ }
+ return
+}
+
+func validateSecurityRuleDirection(v interface{}, k string) (ws []string, errors []error) {
+ value := strings.ToLower(v.(string))
+ viewTypes := map[string]bool{
+ "inbound": true,
+ "outbound": true,
+ }
+
+ if !viewTypes[value] {
+ errors = append(errors, fmt.Errorf("Security Rule Directions can only be Inbound or Outbound"))
+ }
+ return
+}
diff --git a/builtin/providers/azurerm/resource_arm_security_group_test.go b/builtin/providers/azurerm/resource_arm_security_group_test.go
new file mode 100644
index 0000000000..8431399e04
--- /dev/null
+++ b/builtin/providers/azurerm/resource_arm_security_group_test.go
@@ -0,0 +1,283 @@
+package azurerm
+
+import (
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestResourceAzureRMSecurityGroupProtocol_validation(t *testing.T) {
+ cases := []struct {
+ Value string
+ ErrCount int
+ }{
+ {
+ Value: "Random",
+ ErrCount: 1,
+ },
+ {
+ Value: "tcp",
+ ErrCount: 0,
+ },
+ {
+ Value: "TCP",
+ ErrCount: 0,
+ },
+ {
+ Value: "*",
+ ErrCount: 0,
+ },
+ {
+ Value: "Udp",
+ ErrCount: 0,
+ },
+ {
+ Value: "Tcp",
+ ErrCount: 0,
+ },
+ }
+
+ for _, tc := range cases {
+ _, errors := validateSecurityRuleProtocol(tc.Value, "azurerm_security_group")
+
+ if len(errors) != tc.ErrCount {
+ t.Fatalf("Expected the Azure RM Security Group protocol to trigger a validation error")
+ }
+ }
+}
+
+func TestResourceAzureRMSecurityGroupAccess_validation(t *testing.T) {
+ cases := []struct {
+ Value string
+ ErrCount int
+ }{
+ {
+ Value: "Random",
+ ErrCount: 1,
+ },
+ {
+ Value: "Allow",
+ ErrCount: 0,
+ },
+ {
+ Value: "Deny",
+ ErrCount: 0,
+ },
+ {
+ Value: "ALLOW",
+ ErrCount: 0,
+ },
+ {
+ Value: "deny",
+ ErrCount: 0,
+ },
+ }
+
+ for _, tc := range cases {
+ _, errors := validateSecurityRuleAccess(tc.Value, "azurerm_security_group")
+
+ if len(errors) != tc.ErrCount {
+ t.Fatalf("Expected the Azure RM Security Group access to trigger a validation error")
+ }
+ }
+}
+
+func TestResourceAzureRMSecurityGroupDirection_validation(t *testing.T) {
+ cases := []struct {
+ Value string
+ ErrCount int
+ }{
+ {
+ Value: "Random",
+ ErrCount: 1,
+ },
+ {
+ Value: "Inbound",
+ ErrCount: 0,
+ },
+ {
+ Value: "Outbound",
+ ErrCount: 0,
+ },
+ {
+ Value: "INBOUND",
+ ErrCount: 0,
+ },
+ {
+ Value: "outbound",
+ ErrCount: 0,
+ },
+ }
+
+ for _, tc := range cases {
+ _, errors := validateSecurityRuleDirection(tc.Value, "azurerm_security_group")
+
+ if len(errors) != tc.ErrCount {
+ t.Fatalf("Expected the Azure RM Security Group direction to trigger a validation error")
+ }
+ }
+}
+
+func TestAccAzureRMSecurityGroup_basic(t *testing.T) {
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMSecurityGroupDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureRMSecurityGroup_basic,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMSecurityGroupExists("azurerm_security_group.test"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAzureRMSecurityGroup_addingExtraRules(t *testing.T) {
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMSecurityGroupDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureRMSecurityGroup_basic,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMSecurityGroupExists("azurerm_security_group.test"),
+ resource.TestCheckResourceAttr(
+ "azurerm_security_group.test", "security_rule.#", "1"),
+ ),
+ },
+
+ resource.TestStep{
+ Config: testAccAzureRMSecurityGroup_anotherRule,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMSecurityGroupExists("azurerm_security_group.test"),
+ resource.TestCheckResourceAttr(
+ "azurerm_security_group.test", "security_rule.#", "2"),
+ ),
+ },
+ },
+ })
+}
+
+func testCheckAzureRMSecurityGroupExists(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+
+ rs, ok := s.RootModule().Resources[name]
+ if !ok {
+ return fmt.Errorf("Not found: %s", name)
+ }
+
+ sgName := rs.Primary.Attributes["name"]
+ resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"]
+ if !hasResourceGroup {
+ return fmt.Errorf("Bad: no resource group found in state for security group: %s", sgName)
+ }
+
+ conn := testAccProvider.Meta().(*ArmClient).secGroupClient
+
+ resp, err := conn.Get(resourceGroup, sgName)
+ if err != nil {
+ return fmt.Errorf("Bad: Get on secGroupClient: %s", err)
+ }
+
+ if resp.StatusCode == http.StatusNotFound {
+ return fmt.Errorf("Bad: Security Group %q (resource group: %q) does not exist", name, resourceGroup)
+ }
+
+ return nil
+ }
+}
+
+func testCheckAzureRMSecurityGroupDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*ArmClient).secGroupClient
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "azurerm_security_group" {
+ continue
+ }
+
+ name := rs.Primary.Attributes["name"]
+ resourceGroup := rs.Primary.Attributes["resource_group_name"]
+
+ resp, err := conn.Get(resourceGroup, name)
+
+ if err != nil {
+ return nil
+ }
+
+ if resp.StatusCode != http.StatusNotFound {
+ return fmt.Errorf("Security Group still exists:\n%#v", resp.Properties)
+ }
+ }
+
+ return nil
+}
+
+var testAccAzureRMSecurityGroup_basic = `
+resource "azurerm_resource_group" "test" {
+ name = "acceptanceTestResourceGroup1"
+ location = "West US"
+}
+
+resource "azurerm_security_group" "test" {
+ name = "acceptanceTestSecurityGroup1"
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+
+ security_rule {
+ name = "test123"
+ priority = 100
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "*"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+}
+`
+
+var testAccAzureRMSecurityGroup_anotherRule = `
+resource "azurerm_resource_group" "test" {
+ name = "acceptanceTestResourceGroup1"
+ location = "West US"
+}
+
+resource "azurerm_security_group" "test" {
+ name = "acceptanceTestSecurityGroup1"
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+
+ security_rule {
+ name = "test123"
+ priority = 100
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "*"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+
+ security_rule {
+ name = "testDeny"
+ priority = 101
+ direction = "Inbound"
+ access = "Deny"
+ protocol = "Udp"
+ source_port_range = "*"
+ destination_port_range = "*"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+}
+`
diff --git a/website/source/docs/providers/azurerm/r/security_group.html.markdown b/website/source/docs/providers/azurerm/r/security_group.html.markdown
new file mode 100644
index 0000000000..f138f4c122
--- /dev/null
+++ b/website/source/docs/providers/azurerm/r/security_group.html.markdown
@@ -0,0 +1,83 @@
+---
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_security_group"
+sidebar_current: "docs-azurerm-resource-security-group"
+description: |-
+ Create a network security group that contains a list of network security rules. Network security groups control whether inbound or outbound traffic is allowed or denied.
+---
+
+# azurerm\_security\_group
+
+Create a network security group that contains a list of network security rules.
+
+## Example Usage
+
+```
+resource "azurerm_resource_group" "test" {
+ name = "acceptanceTestResourceGroup1"
+ location = "West US"
+}
+
+resource "azurerm_security_group" "test" {
+ name = "acceptanceTestSecurityGroup1"
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+
+ security_rule {
+ name = "test123"
+ priority = 100
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "*"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) Specifies the name of the security group. Changing this forces a
+ new resource to be created.
+
+* `resource_group_name` - (Required) The name of the resource group in which to
+ create the security group.
+
+* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
+
+* `security_rule` - (Optional) Can be specified multiple times to define multiple
+ security rules. Each `security_rule` block supports fields documented below.
+
+
+The `security_rule` block supports:
+
+* `name` - (Required) The name of the security rule.
+
+* `description` - (Optional) A description for this rule. Restricted to 140 characters.
+
+* `protocol` - (Required) Network protocol this rule applies to. Can be Tcp, Udp or * to match both.
+
+* `source_port_range` - (Required) Source Port or Range. Integer or range between 0 and 65535 or * to match any.
+
+* `destination_port_range` - (Required) Destination Port or Range. Integer or range between 0 and 65535 or * to match any.
+
+* `source_address_prefix` - (Required) CIDR or source IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used.
+
+* `destination_address_prefix` - (Required) CIDR or destination IP range or * to match any IP. Tags such as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used.
+
+* `access` - (Required) Specifies whether network traffic is allowed or denied. Possible values are “Allow” and “Deny”.
+
+* `priority` - (Required) Specifies the priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
+
+* `direction` - (Required) The direction specifies whether the rule will be evaluated on incoming or outgoing traffic. Possible values are “Inbound” and “Outbound”.
+
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The network security group ID.
\ No newline at end of file
diff --git a/website/source/layouts/azurerm.erb b/website/source/layouts/azurerm.erb
index cbac83abfc..77a0987a4b 100644
--- a/website/source/layouts/azurerm.erb
+++ b/website/source/layouts/azurerm.erb
@@ -29,6 +29,10 @@
      <a href="/docs/providers/azurerm/r/availability_set.html">azurerm_availability_set</a>
+    <li<%= sidebar_current("docs-azurerm-resource-security-group") %>>
+      <a href="/docs/providers/azurerm/r/security_group.html">azurerm_security_group</a>
+    </li>
+
From eb444d12a4dc82f67fc5507ec569f2b17000eb46 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Thu, 7 Jan 2016 21:15:48 +0000
Subject: [PATCH 450/664] Update CHANGELOG.md
---
CHANGELOG.md | 3 +++
1 file changed, 3 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d8a23d63ab..fc6f36aa10 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -76,6 +76,9 @@ BUG FIXES:
* provider/aws: Fix issue with nil parameter group value causing panic in `aws_db_parameter_group` [GH-4318]
* provider/aws: Fix issue with Elastic IPs not recognizing when they have been unassigned manually [GH-4387]
* provider/aws: Use body or URL for all CloudFormation stack updates [GH-4370]
+ * provider/aws: Fix template_url/template_body conflict [GH-4540]
+ * provider/aws: Add validation for ECR repository name [GH-4431]
+ * provider/aws: Fix bug w/ changing ECS svc/ELB association [GH-4366]
* provider/azure: Update for [breaking change to upstream client library](https://github.com/Azure/azure-sdk-for-go/commit/68d50cb53a73edfeb7f17f5e86cdc8eb359a9528). [GH-4300]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/google: Fix project metadata sshKeys from showing up and causing unnecessary diffs [GH-4512]
From e2e16ceca492567e3794eb68ef11bc6cd3153509 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Thu, 7 Jan 2016 21:16:32 +0000
Subject: [PATCH 451/664] Update CHANGELOG.md
---
CHANGELOG.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fc6f36aa10..268856dbb5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,7 @@ IMPROVEMENTS:
* provider/aws: Retry MalformedPolicy errors due to newly created principals in S3 Buckets [GH-4315]
* provider/aws: Validate `name` on `db_subnet_group` against AWS requirements [GH-4340]
* provider/aws: wait for ASG capacity on update [GH-3947]
+ * provider/aws: Add validation for ECR repository name [GH-4431]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting the restart policy on `docker_container` resources [GH-3761]
@@ -77,7 +78,6 @@ BUG FIXES:
* provider/aws: Fix issue with Elastic IPs not recognizing when they have been unassigned manually [GH-4387]
* provider/aws: Use body or URL for all CloudFormation stack updates [GH-4370]
* provider/aws: Fix template_url/template_body conflict [GH-4540]
- * provider/aws: Add validation for ECR repository name [GH-4431]
* provider/aws: Fix bug w/ changing ECS svc/ELB association [GH-4366]
* provider/azure: Update for [breaking change to upstream client library](https://github.com/Azure/azure-sdk-for-go/commit/68d50cb53a73edfeb7f17f5e86cdc8eb359a9528). [GH-4300]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
From f79d951524c804437a8df9def44f1aa1bed5426e Mon Sep 17 00:00:00 2001
From: stack72
Date: Thu, 7 Jan 2016 22:32:49 +0000
Subject: [PATCH 452/664] Rename the AzureRM Security Group to AzureRM Network
Security Group
---
builtin/providers/azurerm/provider.go | 10 +--
...=> resource_arm_network_security_group.go} | 52 ++++++++--------
...source_arm_network_security_group_test.go} | 62 +++++++++----------
...n => network_security_group.html.markdown} | 6 +-
website/source/layouts/azurerm.erb | 4 +-
5 files changed, 67 insertions(+), 67 deletions(-)
rename builtin/providers/azurerm/{resource_arm_security_group.go => resource_arm_network_security_group.go} (77%)
rename builtin/providers/azurerm/{resource_arm_security_group_test.go => resource_arm_network_security_group_test.go} (65%)
rename website/source/docs/providers/azurerm/r/{security_group.html.markdown => network_security_group.html.markdown} (93%)
diff --git a/builtin/providers/azurerm/provider.go b/builtin/providers/azurerm/provider.go
index 1642c64d35..0e989a15ce 100644
--- a/builtin/providers/azurerm/provider.go
+++ b/builtin/providers/azurerm/provider.go
@@ -39,11 +39,11 @@ func Provider() terraform.ResourceProvider {
},
ResourcesMap: map[string]*schema.Resource{
- "azurerm_resource_group": resourceArmResourceGroup(),
- "azurerm_virtual_network": resourceArmVirtualNetwork(),
- "azurerm_local_network_gateway": resourceArmLocalNetworkGateway(),
- "azurerm_availability_set": resourceArmAvailabilitySet(),
- "azurerm_security_group": resourceArmSecurityGroup(),
+ "azurerm_resource_group": resourceArmResourceGroup(),
+ "azurerm_virtual_network": resourceArmVirtualNetwork(),
+ "azurerm_local_network_gateway": resourceArmLocalNetworkGateway(),
+ "azurerm_availability_set": resourceArmAvailabilitySet(),
+ "azurerm_network_security_group": resourceArmNetworkSecurityGroup(),
},
ConfigureFunc: providerConfigure,
diff --git a/builtin/providers/azurerm/resource_arm_security_group.go b/builtin/providers/azurerm/resource_arm_network_security_group.go
similarity index 77%
rename from builtin/providers/azurerm/resource_arm_security_group.go
rename to builtin/providers/azurerm/resource_arm_network_security_group.go
index e523ea2f69..c70522313e 100644
--- a/builtin/providers/azurerm/resource_arm_security_group.go
+++ b/builtin/providers/azurerm/resource_arm_network_security_group.go
@@ -15,12 +15,12 @@ import (
"github.com/hashicorp/terraform/helper/schema"
)
-func resourceArmSecurityGroup() *schema.Resource {
+func resourceArmNetworkSecurityGroup() *schema.Resource {
return &schema.Resource{
- Create: resourceArmSecurityGroupCreate,
- Read: resourceArmSecurityGroupRead,
- Update: resourceArmSecurityGroupCreate,
- Delete: resourceArmSecurityGroupDelete,
+ Create: resourceArmNetworkSecurityGroupCreate,
+ Read: resourceArmNetworkSecurityGroupRead,
+ Update: resourceArmNetworkSecurityGroupCreate,
+ Delete: resourceArmNetworkSecurityGroupDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
@@ -60,7 +60,7 @@ func resourceArmSecurityGroup() *schema.Resource {
value := v.(string)
if len(value) > 140 {
errors = append(errors, fmt.Errorf(
- "The security rule description can be no longer than 140 chars"))
+ "The network security rule description can be no longer than 140 chars"))
}
return
},
@@ -69,7 +69,7 @@ func resourceArmSecurityGroup() *schema.Resource {
"protocol": &schema.Schema{
Type: schema.TypeString,
Required: true,
- ValidateFunc: validateSecurityRuleProtocol,
+ ValidateFunc: validateNetworkSecurityRuleProtocol,
},
"source_port_range": &schema.Schema{
@@ -95,7 +95,7 @@ func resourceArmSecurityGroup() *schema.Resource {
"access": &schema.Schema{
Type: schema.TypeString,
Required: true,
- ValidateFunc: validateSecurityRuleAccess,
+ ValidateFunc: validateNetworkSecurityRuleAccess,
},
"priority": &schema.Schema{
@@ -114,17 +114,17 @@ func resourceArmSecurityGroup() *schema.Resource {
"direction": &schema.Schema{
Type: schema.TypeString,
Required: true,
- ValidateFunc: validateSecurityRuleDirection,
+ ValidateFunc: validateNetworkSecurityRuleDirection,
},
},
},
- Set: resourceArmSecurityGroupRuleHash,
+ Set: resourceArmNetworkSecurityGroupRuleHash,
},
},
}
}
-func resourceArmSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {
+func resourceArmNetworkSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
secClient := client.secGroupClient
@@ -134,7 +134,7 @@ func resourceArmSecurityGroupCreate(d *schema.ResourceData, meta interface{}) er
sgRules, sgErr := expandAzureRmSecurityGroupRules(d)
if sgErr != nil {
- return fmt.Errorf("Error Building list of Security Group Rules: %s", sgErr)
+ return fmt.Errorf("Error Building list of Network Security Group Rules: %s", sgErr)
}
sg := network.SecurityGroup{
@@ -152,7 +152,7 @@ func resourceArmSecurityGroupCreate(d *schema.ResourceData, meta interface{}) er
d.SetId(*resp.ID)
- log.Printf("[DEBUG] Waiting for Security Group (%s) to become available", name)
+ log.Printf("[DEBUG] Waiting for Network Security Group (%s) to become available", name)
stateConf := &resource.StateChangeConf{
Pending: []string{"Accepted", "Updating"},
Target: "Succeeded",
@@ -160,13 +160,13 @@ func resourceArmSecurityGroupCreate(d *schema.ResourceData, meta interface{}) er
Timeout: 10 * time.Minute,
}
if _, err := stateConf.WaitForState(); err != nil {
- return fmt.Errorf("Error waiting for Security Group (%s) to become available: %s", name, err)
+ return fmt.Errorf("Error waiting for Network Security Group (%s) to become available: %s", name, err)
}
- return resourceArmSecurityGroupRead(d, meta)
+ return resourceArmNetworkSecurityGroupRead(d, meta)
}
-func resourceArmSecurityGroupRead(d *schema.ResourceData, meta interface{}) error {
+func resourceArmNetworkSecurityGroupRead(d *schema.ResourceData, meta interface{}) error {
secGroupClient := meta.(*ArmClient).secGroupClient
id, err := parseAzureResourceID(d.Id())
@@ -182,13 +182,13 @@ func resourceArmSecurityGroupRead(d *schema.ResourceData, meta interface{}) erro
return nil
}
if err != nil {
- return fmt.Errorf("Error making Read request on Azure Security Group %s: %s", name, err)
+ return fmt.Errorf("Error making Read request on Azure Network Security Group %s: %s", name, err)
}
return nil
}
-func resourceArmSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {
+func resourceArmNetworkSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {
secGroupClient := meta.(*ArmClient).secGroupClient
id, err := parseAzureResourceID(d.Id())
@@ -203,7 +203,7 @@ func resourceArmSecurityGroupDelete(d *schema.ResourceData, meta interface{}) er
return err
}
-func resourceArmSecurityGroupRuleHash(v interface{}) int {
+func resourceArmNetworkSecurityGroupRuleHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", m["protocol"].(string)))
@@ -222,7 +222,7 @@ func securityGroupStateRefreshFunc(client *ArmClient, resourceGroupName string,
return func() (interface{}, string, error) {
res, err := client.secGroupClient.Get(resourceGroupName, securityGroupName)
if err != nil {
- return nil, "", fmt.Errorf("Error issuing read request in securityGroupStateRefreshFunc to Azure ARM for security group '%s' (RG: '%s'): %s", securityGroupName, resourceGroupName, err)
+ return nil, "", fmt.Errorf("Error issuing read request in securityGroupStateRefreshFunc to Azure ARM for network security group '%s' (RG: '%s'): %s", securityGroupName, resourceGroupName, err)
}
return res, *res.Properties.ProvisioningState, nil
@@ -269,7 +269,7 @@ func expandAzureRmSecurityGroupRules(d *schema.ResourceData) ([]network.Security
return rules, nil
}
-func validateSecurityRuleProtocol(v interface{}, k string) (ws []string, errors []error) {
+func validateNetworkSecurityRuleProtocol(v interface{}, k string) (ws []string, errors []error) {
value := strings.ToLower(v.(string))
viewTypes := map[string]bool{
"tcp": true,
@@ -278,12 +278,12 @@ func validateSecurityRuleProtocol(v interface{}, k string) (ws []string, errors
}
if !viewTypes[value] {
- errors = append(errors, fmt.Errorf("Security Rule Protocol can only be Tcp, Udp or *"))
+ errors = append(errors, fmt.Errorf("Network Security Rule Protocol can only be Tcp, Udp or *"))
}
return
}
-func validateSecurityRuleAccess(v interface{}, k string) (ws []string, errors []error) {
+func validateNetworkSecurityRuleAccess(v interface{}, k string) (ws []string, errors []error) {
value := strings.ToLower(v.(string))
viewTypes := map[string]bool{
"allow": true,
@@ -291,12 +291,12 @@ func validateSecurityRuleAccess(v interface{}, k string) (ws []string, errors []
}
if !viewTypes[value] {
- errors = append(errors, fmt.Errorf("Security Rule Access can only be Allow or Deny"))
+ errors = append(errors, fmt.Errorf("Network Security Rule Access can only be Allow or Deny"))
}
return
}
-func validateSecurityRuleDirection(v interface{}, k string) (ws []string, errors []error) {
+func validateNetworkSecurityRuleDirection(v interface{}, k string) (ws []string, errors []error) {
value := strings.ToLower(v.(string))
viewTypes := map[string]bool{
"inbound": true,
@@ -304,7 +304,7 @@ func validateSecurityRuleDirection(v interface{}, k string) (ws []string, errors
}
if !viewTypes[value] {
- errors = append(errors, fmt.Errorf("Security Rule Directions can only be Inbound or Outbound"))
+ errors = append(errors, fmt.Errorf("Network Security Rule Directions can only be Inbound or Outbound"))
}
return
}
diff --git a/builtin/providers/azurerm/resource_arm_security_group_test.go b/builtin/providers/azurerm/resource_arm_network_security_group_test.go
similarity index 65%
rename from builtin/providers/azurerm/resource_arm_security_group_test.go
rename to builtin/providers/azurerm/resource_arm_network_security_group_test.go
index 8431399e04..11dd75d7c8 100644
--- a/builtin/providers/azurerm/resource_arm_security_group_test.go
+++ b/builtin/providers/azurerm/resource_arm_network_security_group_test.go
@@ -9,7 +9,7 @@ import (
"github.com/hashicorp/terraform/terraform"
)
-func TestResourceAzureRMSecurityGroupProtocol_validation(t *testing.T) {
+func TestResourceAzureRMNetworkSecurityGroupProtocol_validation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
@@ -41,15 +41,15 @@ func TestResourceAzureRMSecurityGroupProtocol_validation(t *testing.T) {
}
for _, tc := range cases {
- _, errors := validateSecurityRuleProtocol(tc.Value, "azurerm_security_group")
+ _, errors := validateNetworkSecurityRuleProtocol(tc.Value, "azurerm_network_security_group")
if len(errors) != tc.ErrCount {
- t.Fatalf("Expected the Azure RM Security Group protocol to trigger a validation error")
+ t.Fatalf("Expected the Azure RM Network Security Group protocol to trigger a validation error")
}
}
}
-func TestResourceAzureRMSecurityGroupAccess_validation(t *testing.T) {
+func TestResourceAzureRMNetworkSecurityGroupAccess_validation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
@@ -77,15 +77,15 @@ func TestResourceAzureRMSecurityGroupAccess_validation(t *testing.T) {
}
for _, tc := range cases {
- _, errors := validateSecurityRuleAccess(tc.Value, "azurerm_security_group")
+ _, errors := validateNetworkSecurityRuleAccess(tc.Value, "azurerm_network_security_group")
if len(errors) != tc.ErrCount {
- t.Fatalf("Expected the Azure RM Security Group access to trigger a validation error")
+ t.Fatalf("Expected the Azure RM Network Security Group access to trigger a validation error")
}
}
}
-func TestResourceAzureRMSecurityGroupDirection_validation(t *testing.T) {
+func TestResourceAzureRMNetworkSecurityGroupDirection_validation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
@@ -113,60 +113,60 @@ func TestResourceAzureRMSecurityGroupDirection_validation(t *testing.T) {
}
for _, tc := range cases {
- _, errors := validateSecurityRuleDirection(tc.Value, "azurerm_security_group")
+ _, errors := validateNetworkSecurityRuleDirection(tc.Value, "azurerm_network_security_group")
if len(errors) != tc.ErrCount {
- t.Fatalf("Expected the Azure RM Security Group direction to trigger a validation error")
+ t.Fatalf("Expected the Azure RM Network Security Group direction to trigger a validation error")
}
}
}
-func TestAccAzureRMSecurityGroup_basic(t *testing.T) {
+func TestAccAzureRMNetworkSecurityGroup_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
- CheckDestroy: testCheckAzureRMSecurityGroupDestroy,
+ CheckDestroy: testCheckAzureRMNetworkSecurityGroupDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccAzureRMSecurityGroup_basic,
+ Config: testAccAzureRMNetworkSecurityGroup_basic,
Check: resource.ComposeTestCheckFunc(
- testCheckAzureRMSecurityGroupExists("azurerm_security_group.test"),
+ testCheckAzureRMNetworkSecurityGroupExists("azurerm_network_security_group.test"),
),
},
},
})
}
-func TestAccAzureRMSecurityGroup_addingExtraRules(t *testing.T) {
+func TestAccAzureRMNetworkSecurityGroup_addingExtraRules(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
- CheckDestroy: testCheckAzureRMSecurityGroupDestroy,
+ CheckDestroy: testCheckAzureRMNetworkSecurityGroupDestroy,
Steps: []resource.TestStep{
resource.TestStep{
- Config: testAccAzureRMSecurityGroup_basic,
+ Config: testAccAzureRMNetworkSecurityGroup_basic,
Check: resource.ComposeTestCheckFunc(
- testCheckAzureRMSecurityGroupExists("azurerm_security_group.test"),
+ testCheckAzureRMNetworkSecurityGroupExists("azurerm_network_security_group.test"),
resource.TestCheckResourceAttr(
- "azurerm_security_group.test", "security_rule.#", "1"),
+ "azurerm_network_security_group.test", "security_rule.#", "1"),
),
},
resource.TestStep{
- Config: testAccAzureRMSecurityGroup_anotherRule,
+ Config: testAccAzureRMNetworkSecurityGroup_anotherRule,
Check: resource.ComposeTestCheckFunc(
- testCheckAzureRMSecurityGroupExists("azurerm_security_group.test"),
+ testCheckAzureRMNetworkSecurityGroupExists("azurerm_network_security_group.test"),
resource.TestCheckResourceAttr(
- "azurerm_security_group.test", "security_rule.#", "2"),
+ "azurerm_network_security_group.test", "security_rule.#", "2"),
),
},
},
})
}
-func testCheckAzureRMSecurityGroupExists(name string) resource.TestCheckFunc {
+func testCheckAzureRMNetworkSecurityGroupExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[name]
@@ -177,7 +177,7 @@ func testCheckAzureRMSecurityGroupExists(name string) resource.TestCheckFunc {
sgName := rs.Primary.Attributes["name"]
resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"]
if !hasResourceGroup {
- return fmt.Errorf("Bad: no resource group found in state for security group: %s", sgName)
+ return fmt.Errorf("Bad: no resource group found in state for network security group: %s", sgName)
}
conn := testAccProvider.Meta().(*ArmClient).secGroupClient
@@ -188,18 +188,18 @@ func testCheckAzureRMSecurityGroupExists(name string) resource.TestCheckFunc {
}
if resp.StatusCode == http.StatusNotFound {
- return fmt.Errorf("Bad: Security Group %q (resource group: %q) does not exist", name, resourceGroup)
+ return fmt.Errorf("Bad: Network Security Group %q (resource group: %q) does not exist", name, resourceGroup)
}
return nil
}
}
-func testCheckAzureRMSecurityGroupDestroy(s *terraform.State) error {
+func testCheckAzureRMNetworkSecurityGroupDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*ArmClient).secGroupClient
for _, rs := range s.RootModule().Resources {
- if rs.Type != "azurerm_security_group" {
+ if rs.Type != "azurerm_network_security_group" {
continue
}
@@ -213,20 +213,20 @@ func testCheckAzureRMSecurityGroupDestroy(s *terraform.State) error {
}
if resp.StatusCode != http.StatusNotFound {
- return fmt.Errorf("Security Group still exists:\n%#v", resp.Properties)
+ return fmt.Errorf("Network Security Group still exists:\n%#v", resp.Properties)
}
}
return nil
}
-var testAccAzureRMSecurityGroup_basic = `
+var testAccAzureRMNetworkSecurityGroup_basic = `
resource "azurerm_resource_group" "test" {
name = "acceptanceTestResourceGroup1"
location = "West US"
}
-resource "azurerm_security_group" "test" {
+resource "azurerm_network_security_group" "test" {
name = "acceptanceTestSecurityGroup1"
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
@@ -245,13 +245,13 @@ resource "azurerm_security_group" "test" {
}
`
-var testAccAzureRMSecurityGroup_anotherRule = `
+var testAccAzureRMNetworkSecurityGroup_anotherRule = `
resource "azurerm_resource_group" "test" {
name = "acceptanceTestResourceGroup1"
location = "West US"
}
-resource "azurerm_security_group" "test" {
+resource "azurerm_network_security_group" "test" {
name = "acceptanceTestSecurityGroup1"
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
diff --git a/website/source/docs/providers/azurerm/r/security_group.html.markdown b/website/source/docs/providers/azurerm/r/network_security_group.html.markdown
similarity index 93%
rename from website/source/docs/providers/azurerm/r/security_group.html.markdown
rename to website/source/docs/providers/azurerm/r/network_security_group.html.markdown
index f138f4c122..8a5a29e1f9 100644
--- a/website/source/docs/providers/azurerm/r/security_group.html.markdown
+++ b/website/source/docs/providers/azurerm/r/network_security_group.html.markdown
@@ -1,7 +1,7 @@
---
layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_security_group"
-sidebar_current: "docs-azurerm-resource-security-group"
+page_title: "Azure Resource Manager: azurerm_network_security_group"
+sidebar_current: "docs-azurerm-resource-network-security-group"
description: |-
Create a network security group that contains a list of network security rules. Network security groups enable inbound or outbound traffic to be enabled or denied.
---
@@ -18,7 +18,7 @@ resource "azurerm_resource_group" "test" {
location = "West US"
}
-resource "azurerm_security_group" "test" {
+resource "azurerm_network_security_group" "test" {
name = "acceptanceTestSecurityGroup1"
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
diff --git a/website/source/layouts/azurerm.erb b/website/source/layouts/azurerm.erb
index 77a0987a4b..90cb8fcbeb 100644
--- a/website/source/layouts/azurerm.erb
+++ b/website/source/layouts/azurerm.erb
@@ -29,8 +29,8 @@
azurerm_availability_set
- >
- azurerm_security_group
+ >
+ azurerm_network_security_group
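Editor's note: the renamed test above exercises `validateNetworkSecurityRuleDirection`, whose body is not included in this patch. A minimal sketch of what such a ValidateFunc could look like, assuming the accepted directions are "Inbound" and "Outbound" compared case-insensitively (an illustration, not the provider's actual code):

```go
package azurerm

import (
	"fmt"
	"strings"
)

// validateNetworkSecurityRuleDirection is a sketch of the ValidateFunc the
// renamed test above calls. The accepted values ("Inbound"/"Outbound",
// compared case-insensitively) are an assumption; the real implementation
// is not part of this patch.
func validateNetworkSecurityRuleDirection(v interface{}, k string) (ws []string, errors []error) {
	value := strings.ToLower(v.(string))

	if value != "inbound" && value != "outbound" {
		errors = append(errors, fmt.Errorf(
			"%q must be either Inbound or Outbound", k))
	}
	return
}
```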
From 98baf4784080a25b1f2247e4b81815c2b289c04f Mon Sep 17 00:00:00 2001
From: Clint
Date: Thu, 7 Jan 2016 16:52:30 -0600
Subject: [PATCH 453/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 268856dbb5..1b908890bd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -34,6 +34,7 @@ IMPROVEMENTS:
* provider/aws: Fix issue with updated route ids for VPC Endpoints [GH-4264]
* provider/aws: Validate IOPs for EBS Volumes [GH-4146]
* provider/aws: DB Subnet group arn output [GH-4261]
+ * provider/aws: Get full Kinesis streams view with pagination [GH-4368]
* provider/aws: Allow changing private IPs for ENIs [GH-4307]
* provider/aws: Retry MalformedPolicy errors due to newly created principals in S3 Buckets [GH-4315]
* provider/aws: Validate `name` on `db_subnet_group` against AWS requirements [GH-4340]
From 1dd1efa05be7ef575f324e9ff7742c8aa3bc9092 Mon Sep 17 00:00:00 2001
From: Clint
Date: Thu, 7 Jan 2016 17:07:38 -0600
Subject: [PATCH 454/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1b908890bd..8af052e5fd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -32,6 +32,7 @@ IMPROVEMENTS:
* provider/aws: Adding support for Tags to DB SecurityGroup [GH-4260]
* provider/aws: Adding Tag support for DB Param Groups [GH-4259]
* provider/aws: Fix issue with updated route ids for VPC Endpoints [GH-4264]
+ * provider/aws: Added measure_latency option to Route 53 Health Check resource [GH-3688]
* provider/aws: Validate IOPs for EBS Volumes [GH-4146]
* provider/aws: DB Subnet group arn output [GH-4261]
* provider/aws: Get full Kinesis streams view with pagination [GH-4368]
From 172faca0526413361018f4d69a4476a26ba71485 Mon Sep 17 00:00:00 2001
From: stack72
Date: Tue, 3 Nov 2015 22:34:06 +0000
Subject: [PATCH 455/664] Adding support to Route53 HealthCheck for
measure_latency and inverting healthcheck
---
.../aws/resource_aws_route53_health_check.go | 35 +++++++++++++++++--
.../resource_aws_route53_health_check_test.go | 7 ++++
.../aws/r/route53_health_check.html.markdown | 2 ++
3 files changed, 42 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_route53_health_check.go b/builtin/providers/aws/resource_aws_route53_health_check.go
index 3f4a2ae6f2..95859dc0be 100644
--- a/builtin/providers/aws/resource_aws_route53_health_check.go
+++ b/builtin/providers/aws/resource_aws_route53_health_check.go
@@ -47,10 +47,16 @@ func resourceAwsRoute53HealthCheck() *schema.Resource {
Optional: true,
},
+ "invert_healthcheck": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ },
+
"resource_path": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
+
"search_string": &schema.Schema{
Type: schema.TypeString,
Optional: true,
@@ -61,6 +67,7 @@ func resourceAwsRoute53HealthCheck() *schema.Resource {
Default: false,
ForceNew: true,
},
+
"tags": tagsSchema(),
},
}
@@ -89,8 +96,8 @@ func resourceAwsRoute53HealthCheckUpdate(d *schema.ResourceData, meta interface{
updateHealthCheck.ResourcePath = aws.String(d.Get("resource_path").(string))
}
- if d.HasChange("search_string") {
- updateHealthCheck.SearchString = aws.String(d.Get("search_string").(string))
+ if d.HasChange("invert_healthcheck") {
+ updateHealthCheck.Inverted = aws.Bool(d.Get("invert_healthcheck").(bool))
}
_, err := conn.UpdateHealthCheck(updateHealthCheck)
@@ -140,6 +147,20 @@ func resourceAwsRoute53HealthCheckCreate(d *schema.ResourceData, meta interface{
}
}
+ if v, ok := d.GetOk("invert_healthcheck"); ok {
+ healthConfig.Inverted = aws.Bool(v.(bool))
+ }
+
+ if *healthConfig.Type == route53.HealthCheckTypeCalculated {
+ if v, ok := d.GetOk("child_healthchecks"); ok {
+ healthConfig.ChildHealthChecks = expandStringList(v.(*schema.Set).List())
+ }
+
+ if v, ok := d.GetOk("child_health_threshold"); ok {
+ healthConfig.HealthThreshold = aws.Int64(int64(v.(int)))
+ }
+ }
+
input := &route53.CreateHealthCheckInput{
CallerReference: aws.String(time.Now().Format(time.RFC3339Nano)),
HealthCheckConfig: healthConfig,
@@ -187,6 +208,7 @@ func resourceAwsRoute53HealthCheckRead(d *schema.ResourceData, meta interface{})
d.Set("port", updated.Port)
d.Set("resource_path", updated.ResourcePath)
d.Set("measure_latency", updated.MeasureLatency)
+ d.Set("invent_healthcheck", updated.Inverted)
// read the tags
req := &route53.ListTagsForResourceInput{
@@ -222,3 +244,12 @@ func resourceAwsRoute53HealthCheckDelete(d *schema.ResourceData, meta interface{
return nil
}
+
+func createChildHealthCheckList(s *schema.Set) (nl []*string) {
+ l := s.List()
+ for _, n := range l {
+ nl = append(nl, aws.String(n.(string)))
+ }
+
+ return nl
+}
diff --git a/builtin/providers/aws/resource_aws_route53_health_check_test.go b/builtin/providers/aws/resource_aws_route53_health_check_test.go
index f6f837c926..d199f4e9b5 100644
--- a/builtin/providers/aws/resource_aws_route53_health_check_test.go
+++ b/builtin/providers/aws/resource_aws_route53_health_check_test.go
@@ -22,6 +22,8 @@ func TestAccAWSRoute53HealthCheck_basic(t *testing.T) {
testAccCheckRoute53HealthCheckExists("aws_route53_health_check.foo"),
resource.TestCheckResourceAttr(
"aws_route53_health_check.foo", "measure_latency", "true"),
+ resource.TestCheckResourceAttr(
+ "aws_route53_health_check.foo", "invert_healthcheck", "true"),
),
},
resource.TestStep{
@@ -30,6 +32,8 @@ func TestAccAWSRoute53HealthCheck_basic(t *testing.T) {
testAccCheckRoute53HealthCheckExists("aws_route53_health_check.foo"),
resource.TestCheckResourceAttr(
"aws_route53_health_check.foo", "failure_threshold", "5"),
+ resource.TestCheckResourceAttr(
+ "aws_route53_health_check.foo", "invert_healthcheck", "false"),
),
},
},
@@ -127,6 +131,7 @@ resource "aws_route53_health_check" "foo" {
failure_threshold = "2"
request_interval = "30"
measure_latency = true
+ invert_healthcheck = true
tags = {
Name = "tf-test-health-check"
@@ -142,6 +147,8 @@ resource "aws_route53_health_check" "foo" {
resource_path = "/"
failure_threshold = "5"
request_interval = "30"
+ measure_latency = true
+ invert_healthcheck = false
tags = {
Name = "tf-test-health-check"
diff --git a/website/source/docs/providers/aws/r/route53_health_check.html.markdown b/website/source/docs/providers/aws/r/route53_health_check.html.markdown
index 3456bcb112..dd16e15e18 100644
--- a/website/source/docs/providers/aws/r/route53_health_check.html.markdown
+++ b/website/source/docs/providers/aws/r/route53_health_check.html.markdown
@@ -36,6 +36,8 @@ The following arguments are supported:
* `request_interval` - (Required) The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request.
* `resource_path` - (Optional) The path that you want Amazon Route 53 to request when performing health checks.
* `search_string` - (Optional) String searched in the response body for the check to be considered healthy.
+* `measure_latency` - (Optional) A Boolean value that indicates whether you want Route 53 to measure the latency between health checkers in multiple AWS regions and your endpoint and to display CloudWatch latency graphs in the Route 53 console.
+* `invert_healthcheck` - (Optional) A boolean value that indicates whether the status of the health check should be inverted. For example, if a health check is healthy but Inverted is True, then Route 53 considers the health check to be unhealthy.
* `tags` - (Optional) A mapping of tags to assign to the health check.
At least one of either `fqdn` or `ip_address` must be specified.
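Editor's note: the `invert_healthcheck` flag documented above flips how Route 53 interprets the measured endpoint status. A minimal, self-contained sketch of that documented behaviour (illustrative only, not provider or Route 53 code; the function name is made up for the example):

```go
package main

import "fmt"

// effectiveStatus illustrates the invert_healthcheck semantics documented
// above: when a check is inverted, Route 53 reports the opposite of the
// measured endpoint status.
func effectiveStatus(endpointHealthy, inverted bool) bool {
	if inverted {
		return !endpointHealthy
	}
	return endpointHealthy
}

func main() {
	fmt.Println(effectiveStatus(true, false)) // true: healthy endpoint, not inverted
	fmt.Println(effectiveStatus(true, true))  // false: healthy endpoint, but inverted
	fmt.Println(effectiveStatus(false, true)) // true: unhealthy endpoint, inverted
}
```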
From 3ebbb62bb0f0d0638eb7805d70c8ffbdad878a02 Mon Sep 17 00:00:00 2001
From: stack72
Date: Tue, 3 Nov 2015 23:28:47 +0000
Subject: [PATCH 456/664] Adding child_healthchecks to the Route 53 HealthCheck
resource
---
.../aws/resource_aws_route53_health_check.go | 46 +++++++++++++++++--
.../resource_aws_route53_health_check_test.go | 37 +++++++++++++++
.../aws/r/route53_health_check.html.markdown | 14 +++++-
3 files changed, 91 insertions(+), 6 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_route53_health_check.go b/builtin/providers/aws/resource_aws_route53_health_check.go
index 95859dc0be..b9db3050f1 100644
--- a/builtin/providers/aws/resource_aws_route53_health_check.go
+++ b/builtin/providers/aws/resource_aws_route53_health_check.go
@@ -1,6 +1,7 @@
package aws
import (
+ "fmt"
"log"
"time"
@@ -26,11 +27,11 @@ func resourceAwsRoute53HealthCheck() *schema.Resource {
},
"failure_threshold": &schema.Schema{
Type: schema.TypeInt,
- Required: true,
+ Optional: true,
},
"request_interval": &schema.Schema{
Type: schema.TypeInt,
- Required: true,
+ Optional: true,
ForceNew: true, // todo this should be updateable but the awslabs route53 service doesn't have the ability
},
"ip_address": &schema.Schema{
@@ -68,6 +69,25 @@ func resourceAwsRoute53HealthCheck() *schema.Resource {
ForceNew: true,
},
+ "child_healthchecks": &schema.Schema{
+ Type: schema.TypeSet,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Optional: true,
+ Set: schema.HashString,
+ },
+ "child_health_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+ value := v.(int)
+ if value > 256 {
+ es = append(es, fmt.Errorf(
+ "Child HealthThreshold cannot be more than 256"))
+ }
+ return
+ },
+ },
+
"tags": tagsSchema(),
},
}
@@ -100,6 +120,14 @@ func resourceAwsRoute53HealthCheckUpdate(d *schema.ResourceData, meta interface{
updateHealthCheck.Inverted = aws.Bool(d.Get("invert_healthcheck").(bool))
}
+ if d.HasChange("child_healthchecks") {
+ updateHealthCheck.ChildHealthChecks = expandStringList(d.Get("child_healthchecks").(*schema.Set).List())
+
+ }
+ if d.HasChange("child_health_threshold") {
+ updateHealthCheck.HealthThreshold = aws.Int64(int64(d.Get("child_health_threshold").(int)))
+ }
+
_, err := conn.UpdateHealthCheck(updateHealthCheck)
if err != nil {
return err
@@ -116,9 +144,15 @@ func resourceAwsRoute53HealthCheckCreate(d *schema.ResourceData, meta interface{
conn := meta.(*AWSClient).r53conn
healthConfig := &route53.HealthCheckConfig{
- Type: aws.String(d.Get("type").(string)),
- FailureThreshold: aws.Int64(int64(d.Get("failure_threshold").(int))),
- RequestInterval: aws.Int64(int64(d.Get("request_interval").(int))),
+ Type: aws.String(d.Get("type").(string)),
+ }
+
+ if v, ok := d.GetOk("request_interval"); ok {
+ healthConfig.RequestInterval = aws.Int64(int64(v.(int)))
+ }
+
+ if v, ok := d.GetOk("failure_threshold"); ok {
+ healthConfig.FailureThreshold = aws.Int64(int64(v.(int)))
}
if v, ok := d.GetOk("fqdn"); ok {
@@ -209,6 +243,8 @@ func resourceAwsRoute53HealthCheckRead(d *schema.ResourceData, meta interface{})
d.Set("resource_path", updated.ResourcePath)
d.Set("measure_latency", updated.MeasureLatency)
d.Set("invent_healthcheck", updated.Inverted)
+ d.Set("child_healthchecks", updated.ChildHealthChecks)
+ d.Set("child_health_threshold", updated.HealthThreshold)
// read the tags
req := &route53.ListTagsForResourceInput{
diff --git a/builtin/providers/aws/resource_aws_route53_health_check_test.go b/builtin/providers/aws/resource_aws_route53_health_check_test.go
index d199f4e9b5..c3f81de6a6 100644
--- a/builtin/providers/aws/resource_aws_route53_health_check_test.go
+++ b/builtin/providers/aws/resource_aws_route53_health_check_test.go
@@ -40,6 +40,22 @@ func TestAccAWSRoute53HealthCheck_basic(t *testing.T) {
})
}
+func TestAccAWSRoute53HealthCheck_withChildHealthChecks(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckRoute53HealthCheckDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccRoute53HealthCheckConfig_withChildHealthChecks,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckRoute53HealthCheckExists("aws_route53_health_check.foo"),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAWSRoute53HealthCheck_IpConfig(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -170,3 +186,24 @@ resource "aws_route53_health_check" "bar" {
}
}
`
+
+const testAccRoute53HealthCheckConfig_withChildHealthChecks = `
+resource "aws_route53_health_check" "child1" {
+ fqdn = "child1.notexample.com"
+ port = 80
+ type = "HTTP"
+ resource_path = "/"
+ failure_threshold = "2"
+ request_interval = "30"
+}
+
+resource "aws_route53_health_check" "foo" {
+ type = "CALCULATED"
+ child_health_threshold = 1
+ child_healthchecks = ["${aws_route53_health_check.child1.id}"]
+
+ tags = {
+ Name = "tf-test-calculated-health-check"
+ }
+}
+`
diff --git a/website/source/docs/providers/aws/r/route53_health_check.html.markdown b/website/source/docs/providers/aws/r/route53_health_check.html.markdown
index dd16e15e18..ad2221726e 100644
--- a/website/source/docs/providers/aws/r/route53_health_check.html.markdown
+++ b/website/source/docs/providers/aws/r/route53_health_check.html.markdown
@@ -12,7 +12,7 @@ Provides a Route53 health check.
## Example Usage
```
-resource "aws_route53_health_check" "foo" {
+resource "aws_route53_health_check" "child1" {
fqdn = "foobar.terraform.com"
port = 80
type = "HTTP"
@@ -24,6 +24,16 @@ resource "aws_route53_health_check" "foo" {
Name = "tf-test-health-check"
}
}
+
+resource "aws_route53_health_check" "foo" {
+ type = "CALCULATED"
+ child_health_threshold = 1
+ child_healthchecks = ["${aws_route53_health_check.child1.id}"]
+
+ tags = {
+ Name = "tf-test-calculated-health-check"
+ }
+}
```
## Argument Reference
@@ -38,6 +48,8 @@ The following arguments are supported:
* `search_string` - (Optional) String searched in the response body for the check to be considered healthy.
* `measure_latency` - (Optional) A Boolean value that indicates whether you want Route 53 to measure the latency between health checkers in multiple AWS regions and your endpoint and to display CloudWatch latency graphs in the Route 53 console.
* `invert_healthcheck` - (Optional) A boolean value that indicates whether the status of the health check should be inverted. For example, if a health check is healthy but Inverted is True, then Route 53 considers the health check to be unhealthy.
+* `child_healthchecks` - (Optional) For a specified parent health check, a list of HealthCheckId values for the associated child health checks.
+* `child_health_threshold` - (Optional) The minimum number of child health checks that must be healthy for Route 53 to consider the parent health check to be healthy. Valid values are integers between 0 and 256, inclusive.
* `tags` - (Optional) A mapping of tags to assign to the health check.
At least one of either `fqdn` or `ip_address` must be specified.
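Editor's note: the documentation above states that `child_health_threshold` accepts integers between 0 and 256, while the ValidateFunc added in this patch only rejects values greater than 256. A sketch of a validator covering the full documented range (an illustration under that assumption, not the provider's actual validation; the function name is made up):

```go
package aws

import "fmt"

// validateChildHealthThreshold enforces the documented 0-256 range for
// child_health_threshold. The hunk above only rejects values above 256;
// the lower bound here illustrates the documented constraint.
func validateChildHealthThreshold(v interface{}, k string) (ws []string, es []error) {
	value := v.(int)
	if value < 0 || value > 256 {
		es = append(es, fmt.Errorf(
			"%q must be an integer between 0 and 256 inclusive, got %d", k, value))
	}
	return
}
```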
From e635878b110da4404e8b354e5a0a69b02b521a2a Mon Sep 17 00:00:00 2001
From: stack72
Date: Tue, 3 Nov 2015 23:29:53 +0000
Subject: [PATCH 457/664] Forcing all Route 53 Healthcheck Types to be
 Uppercase to avoid showing unnecessary diffs
---
.../aws/resource_aws_route53_health_check.go | 4 +++
.../resource_aws_route53_health_check_test.go | 26 +++++++++----------
2 files changed, 17 insertions(+), 13 deletions(-)
diff --git a/builtin/providers/aws/resource_aws_route53_health_check.go b/builtin/providers/aws/resource_aws_route53_health_check.go
index b9db3050f1..4034996a9a 100644
--- a/builtin/providers/aws/resource_aws_route53_health_check.go
+++ b/builtin/providers/aws/resource_aws_route53_health_check.go
@@ -3,6 +3,7 @@ package aws
import (
"fmt"
"log"
+ "strings"
"time"
"github.com/hashicorp/terraform/helper/schema"
@@ -24,6 +25,9 @@ func resourceAwsRoute53HealthCheck() *schema.Resource {
Type: schema.TypeString,
Required: true,
ForceNew: true,
+ StateFunc: func(val interface{}) string {
+ return strings.ToUpper(val.(string))
+ },
},
"failure_threshold": &schema.Schema{
Type: schema.TypeInt,
diff --git a/builtin/providers/aws/resource_aws_route53_health_check_test.go b/builtin/providers/aws/resource_aws_route53_health_check_test.go
index c3f81de6a6..3e27bc1023 100644
--- a/builtin/providers/aws/resource_aws_route53_health_check_test.go
+++ b/builtin/providers/aws/resource_aws_route53_health_check_test.go
@@ -41,19 +41,19 @@ func TestAccAWSRoute53HealthCheck_basic(t *testing.T) {
}
func TestAccAWSRoute53HealthCheck_withChildHealthChecks(t *testing.T) {
- resource.Test(t, resource.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
- Providers: testAccProviders,
- CheckDestroy: testAccCheckRoute53HealthCheckDestroy,
- Steps: []resource.TestStep{
- resource.TestStep{
- Config: testAccRoute53HealthCheckConfig_withChildHealthChecks,
- Check: resource.ComposeTestCheckFunc(
- testAccCheckRoute53HealthCheckExists("aws_route53_health_check.foo"),
- ),
- },
- },
- })
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckRoute53HealthCheckDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccRoute53HealthCheckConfig_withChildHealthChecks,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckRoute53HealthCheckExists("aws_route53_health_check.foo"),
+ ),
+ },
+ },
+ })
}
func TestAccAWSRoute53HealthCheck_IpConfig(t *testing.T) {
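Editor's note: the `StateFunc` added above normalizes the `type` value before it is written to state, so `type = "http"` and `type = "HTTP"` no longer differ from the uppercase value the API returns. A self-contained sketch of the same normalization idea (illustrative; only the closure body mirrors the hunk above):

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeType mirrors the StateFunc added in the hunk above: whatever
// case the user writes in configuration, the value kept in state is
// uppercased, so comparing state against the uppercase value returned by
// the Route 53 API no longer produces a spurious diff.
func normalizeType(val interface{}) string {
	return strings.ToUpper(val.(string))
}

func main() {
	for _, v := range []string{"http", "Http", "CALCULATED"} {
		fmt.Printf("%q -> %q\n", v, normalizeType(v))
	}
}
```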
From ad897b9ea230793023e9869719c32b0a5e210104 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Thu, 7 Jan 2016 16:07:56 -0800
Subject: [PATCH 458/664] Update CHANGELOG.md
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8af052e5fd..2080456bb0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -47,6 +47,7 @@ IMPROVEMENTS:
* provider/docker: Add support for setting memory, swap and CPU shares on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting labels on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting log driver and options on `docker_container` resources [GH-3761]
+ * provider/docker: Add support for setting network mode on `docker_container` resources [GH-4475]
* provider/heroku: Improve handling of Applications within an Organization [GH-4495]
* provider/vsphere: Add support for custom vm params on `vsphere_virtual_machine` [GH-3867]
* provider/vsphere: Rename vcenter_server config parameter to something clearer [GH-3718]
From 979586faea6ca2f2f80943404adc85453a26cef0 Mon Sep 17 00:00:00 2001
From: James Nugent
Date: Thu, 7 Jan 2016 16:23:20 -0800
Subject: [PATCH 459/664] provider/azure: Don't reuse account names in tests
---
.../providers/azure/resource_azure_storage_service_test.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/builtin/providers/azure/resource_azure_storage_service_test.go b/builtin/providers/azure/resource_azure_storage_service_test.go
index e3ac588d23..ee71b9ca57 100644
--- a/builtin/providers/azure/resource_azure_storage_service_test.go
+++ b/builtin/providers/azure/resource_azure_storage_service_test.go
@@ -20,7 +20,7 @@ func TestAccAzureStorageService(t *testing.T) {
Config: testAccAzureStorageServiceConfig,
Check: resource.ComposeTestCheckFunc(
testAccAzureStorageServiceExists(name),
- resource.TestCheckResourceAttr(name, "name", "tftesting"),
+ resource.TestCheckResourceAttr(name, "name", "tftesting_dis"),
resource.TestCheckResourceAttr(name, "location", "West US"),
resource.TestCheckResourceAttr(name, "description", "very descriptive"),
resource.TestCheckResourceAttr(name, "account_type", "Standard_LRS"),
@@ -70,7 +70,7 @@ func testAccAzureStorageServiceDestroyed(s *terraform.State) error {
var testAccAzureStorageServiceConfig = `
resource "azure_storage_service" "foo" {
# NOTE: storage service names constrained to lowercase letters only.
- name = "tftesting"
+ name = "tftesting_dis"
location = "West US"
description = "very descriptive"
account_type = "Standard_LRS"
From eab75dc8840740f79a25fc2ce7e52acc4caf4d25 Mon Sep 17 00:00:00 2001
From: stack72