diff --git a/CHANGELOG.md b/CHANGELOG.md
index d69b172999..7bfe3876a6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,8 +2,11 @@
FEATURES:
- * **New provider: `azure`** [GH-2053]
+ * **New provider: `azure`** [GH-2052, GH-2053, GH-2372, GH-2380]
* **New resource: `aws_autoscaling_notification`** [GH-2197]
+ * **New resource: `aws_autoscaling_policy`** [GH-2201]
+ * **New resource: `aws_cloudwatch_metric_alarm`** [GH-2201]
+ * **New resource: `aws_dynamodb_table`** [GH-2121]
* **New resource: `aws_ecs_cluster`** [GH-1803]
* **New resource: `aws_ecs_service`** [GH-1803]
* **New resource: `aws_ecs_task_definition`** [GH-1803]
@@ -14,7 +17,7 @@ FEATURES:
* **New remote state backend: `swift`**: You can now store remote state in
    OpenStack Swift. [GH-2254]
* command/output: support display of module outputs [GH-2102]
- * core: keys() and values() funcs for map variables [GH-2198]
+ * core: `keys()` and `values()` funcs for map variables [GH-2198]
IMPROVEMENTS:
@@ -40,6 +43,8 @@ BUG FIXES:
when non-lowercase protocol strings were used [GH-2246]
* provider/aws: corrected frankfurt S3 website region [GH-2259]
* provider/aws: `aws_elasticache_cluster` port is required [GH-2160]
+ * provider/template: fix issue causing "unknown variable" rendering errors
+ when an existing set of template variables is changed [GH-2386]
## 0.5.3 (June 1, 2015)
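
For context on the new `keys()` and `values()` interpolation functions listed above, here is a minimal sketch (the variable name and AMI IDs are illustrative, not from this patch):

```hcl
variable "amis" {
  default = {
    "us-east-1" = "ami-aa7ab6c2"
    "us-west-2" = "ami-23f78e13"
  }
}

# keys() and values() each return a list, so join() flattens them for output.
output "regions" {
  value = "${join(",", keys(var.amis))}"
}

output "images" {
  value = "${join(",", values(var.amis))}"
}
```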
diff --git a/RELEASING.md b/RELEASING.md
index d32bb87752..ca2d84319d 100644
--- a/RELEASING.md
+++ b/RELEASING.md
@@ -30,7 +30,7 @@ export VERSION="vX.Y.Z"
# Edit version.go, setting VersionPrelease to empty string
# Snapshot dependency information
-godep save
+godep save ./...
mv Godeps/Godeps.json deps/$(echo $VERSION | sed 's/\./-/g').json
rm -rf Godeps
diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go
index e3e07d659c..93f7db9190 100644
--- a/builtin/providers/aws/config.go
+++ b/builtin/providers/aws/config.go
@@ -10,6 +10,8 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/service/autoscaling"
+ "github.com/aws/aws-sdk-go/service/cloudwatch"
+ "github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/aws/aws-sdk-go/service/elasticache"
@@ -36,6 +38,8 @@ type Config struct {
}
type AWSClient struct {
+ cloudwatchconn *cloudwatch.CloudWatch
+ dynamodbconn *dynamodb.DynamoDB
ec2conn *ec2.EC2
ecsconn *ecs.ECS
elbconn *elb.ELB
@@ -88,6 +92,9 @@ func (c *Config) Client() (interface{}, error) {
MaxRetries: c.MaxRetries,
}
+ log.Println("[INFO] Initializing DynamoDB connection")
+ client.dynamodbconn = dynamodb.New(awsConfig)
+
log.Println("[INFO] Initializing ELB connection")
client.elbconn = elb.New(awsConfig)
@@ -138,6 +145,9 @@ func (c *Config) Client() (interface{}, error) {
log.Println("[INFO] Initializing Lambda Connection")
client.lambdaconn = lambda.New(awsConfig)
+
+ log.Println("[INFO] Initializing CloudWatch SDK connection")
+ client.cloudwatchconn = cloudwatch.New(awsConfig)
}
if len(errs) > 0 {
diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index 908f726e26..0e54e7b501 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -86,11 +86,14 @@ func Provider() terraform.ResourceProvider {
"aws_app_cookie_stickiness_policy": resourceAwsAppCookieStickinessPolicy(),
"aws_autoscaling_group": resourceAwsAutoscalingGroup(),
"aws_autoscaling_notification": resourceAwsAutoscalingNotification(),
+ "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(),
+ "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(),
"aws_customer_gateway": resourceAwsCustomerGateway(),
"aws_db_instance": resourceAwsDbInstance(),
"aws_db_parameter_group": resourceAwsDbParameterGroup(),
"aws_db_security_group": resourceAwsDbSecurityGroup(),
"aws_db_subnet_group": resourceAwsDbSubnetGroup(),
+ "aws_dynamodb_table": resourceAwsDynamoDbTable(),
"aws_ebs_volume": resourceAwsEbsVolume(),
"aws_ecs_cluster": resourceAwsEcsCluster(),
"aws_ecs_service": resourceAwsEcsService(),
diff --git a/builtin/providers/aws/resource_aws_autoscaling_group.go b/builtin/providers/aws/resource_aws_autoscaling_group.go
index 0fc62b4b25..bf9b30c6a2 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_group.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_group.go
@@ -408,7 +408,7 @@ func waitForASGCapacity(d *schema.ResourceData, meta interface{}) error {
}
wantELB := d.Get("min_elb_capacity").(int)
- log.Printf("[DEBUG] Wanting for capacity: %d ASG, %d ELB", wantASG, wantELB)
+ log.Printf("[DEBUG] Waiting for capacity: %d ASG, %d ELB", wantASG, wantELB)
return resource.Retry(waitForASGCapacityTimeout, func() error {
g, err := getAwsAutoscalingGroup(d, meta)
diff --git a/builtin/providers/aws/resource_aws_autoscaling_policy.go b/builtin/providers/aws/resource_aws_autoscaling_policy.go
new file mode 100644
index 0000000000..d2f6d2d47b
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_autoscaling_policy.go
@@ -0,0 +1,181 @@
+package aws
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/autoscaling"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsAutoscalingPolicy() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsAutoscalingPolicyCreate,
+ Read: resourceAwsAutoscalingPolicyRead,
+ Update: resourceAwsAutoscalingPolicyUpdate,
+ Delete: resourceAwsAutoscalingPolicyDelete,
+
+ Schema: map[string]*schema.Schema{
+ "arn": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "adjustment_type": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "autoscaling_group_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "cooldown": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+ "min_adjustment_step": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+ "scaling_adjustment": &schema.Schema{
+ Type: schema.TypeInt,
+ Required: true,
+ },
+ },
+ }
+}
+
+func resourceAwsAutoscalingPolicyCreate(d *schema.ResourceData, meta interface{}) error {
+ autoscalingconn := meta.(*AWSClient).autoscalingconn
+
+ params := getAwsAutoscalingPutScalingPolicyInput(d)
+
+ log.Printf("[DEBUG] AutoScaling PutScalingPolicy: %#v", params)
+ resp, err := autoscalingconn.PutScalingPolicy(¶ms)
+ if err != nil {
+ return fmt.Errorf("Error putting scaling policy: %s", err)
+ }
+
+ d.Set("arn", resp.PolicyARN)
+ d.SetId(d.Get("name").(string))
+ log.Printf("[INFO] AutoScaling Scaling PolicyARN: %s", d.Get("arn").(string))
+
+ return resourceAwsAutoscalingPolicyRead(d, meta)
+}
+
+func resourceAwsAutoscalingPolicyRead(d *schema.ResourceData, meta interface{}) error {
+ p, err := getAwsAutoscalingPolicy(d, meta)
+ if err != nil {
+ return err
+ }
+ if p == nil {
+ d.SetId("")
+ return nil
+ }
+
+ log.Printf("[DEBUG] Read Scaling Policy: ASG: %s, SP: %s, Obj: %#v", d.Get("autoscaling_group_name"), d.Get("name"), p)
+
+ d.Set("adjustment_type", p.AdjustmentType)
+ d.Set("autoscaling_group_name", p.AutoScalingGroupName)
+ d.Set("cooldown", p.Cooldown)
+ d.Set("min_adjustment_step", p.MinAdjustmentStep)
+ d.Set("arn", p.PolicyARN)
+ d.Set("name", p.PolicyName)
+ d.Set("scaling_adjustment", p.ScalingAdjustment)
+
+ return nil
+}
+
+func resourceAwsAutoscalingPolicyUpdate(d *schema.ResourceData, meta interface{}) error {
+ autoscalingconn := meta.(*AWSClient).autoscalingconn
+
+ params := getAwsAutoscalingPutScalingPolicyInput(d)
+
+ log.Printf("[DEBUG] Autoscaling Update Scaling Policy: %#v", params)
+ _, err := autoscalingconn.PutScalingPolicy(¶ms)
+ if err != nil {
+ return err
+ }
+
+ return resourceAwsAutoscalingPolicyRead(d, meta)
+}
+
+func resourceAwsAutoscalingPolicyDelete(d *schema.ResourceData, meta interface{}) error {
+ autoscalingconn := meta.(*AWSClient).autoscalingconn
+ p, err := getAwsAutoscalingPolicy(d, meta)
+ if err != nil {
+ return err
+ }
+ if p == nil {
+ return nil
+ }
+
+ params := autoscaling.DeletePolicyInput{
+ AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
+ PolicyName: aws.String(d.Get("name").(string)),
+ }
+ if _, err := autoscalingconn.DeletePolicy(¶ms); err != nil {
+ return fmt.Errorf("Autoscaling Scaling Policy: %s ", err)
+ }
+
+ d.SetId("")
+ return nil
+}
+
+// PutScalingPolicy seems to require all params to be resent, so create and update can share this common function
+func getAwsAutoscalingPutScalingPolicyInput(d *schema.ResourceData) autoscaling.PutScalingPolicyInput {
+ var params = autoscaling.PutScalingPolicyInput{
+ AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
+ PolicyName: aws.String(d.Get("name").(string)),
+ }
+
+ if v, ok := d.GetOk("adjustment_type"); ok {
+ params.AdjustmentType = aws.String(v.(string))
+ }
+
+ if v, ok := d.GetOk("cooldown"); ok {
+ params.Cooldown = aws.Long(int64(v.(int)))
+ }
+
+ if v, ok := d.GetOk("scaling_adjustment"); ok {
+ params.ScalingAdjustment = aws.Long(int64(v.(int)))
+ }
+
+ if v, ok := d.GetOk("min_adjustment_step"); ok {
+ params.MinAdjustmentStep = aws.Long(int64(v.(int)))
+ }
+
+ return params
+}
+
+func getAwsAutoscalingPolicy(d *schema.ResourceData, meta interface{}) (*autoscaling.ScalingPolicy, error) {
+ autoscalingconn := meta.(*AWSClient).autoscalingconn
+
+ params := autoscaling.DescribePoliciesInput{
+ AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
+ PolicyNames: []*string{aws.String(d.Get("name").(string))},
+ }
+
+ log.Printf("[DEBUG] AutoScaling Scaling Policy Describe Params: %#v", params)
+ resp, err := autoscalingconn.DescribePolicies(¶ms)
+ if err != nil {
+ return nil, fmt.Errorf("Error retrieving scaling policies: %s", err)
+ }
+
+ // find scaling policy
+ name := d.Get("name")
+ for idx, sp := range resp.ScalingPolicies {
+ if *sp.PolicyName == name {
+ return resp.ScalingPolicies[idx], nil
+ }
+ }
+
+ // policy not found
+ return nil, nil
+}
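
A minimal usage sketch for the new `aws_autoscaling_policy` resource (the group reference and values are illustrative; because create and update share the `PutScalingPolicy` input builder above, the same arguments apply on update):

```hcl
resource "aws_autoscaling_policy" "scale_up" {
  name                   = "scale-up-on-load"
  autoscaling_group_name = "${aws_autoscaling_group.example.name}"
  adjustment_type        = "ChangeInCapacity"
  scaling_adjustment     = 2
  cooldown               = 300
}
```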
diff --git a/builtin/providers/aws/resource_aws_autoscaling_policy_test.go b/builtin/providers/aws/resource_aws_autoscaling_policy_test.go
new file mode 100644
index 0000000000..0a7aeff916
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_autoscaling_policy_test.go
@@ -0,0 +1,115 @@
+package aws
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/autoscaling"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSAutoscalingPolicy_basic(t *testing.T) {
+ var policy autoscaling.ScalingPolicy
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSAutoscalingPolicyDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSAutoscalingPolicyConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckScalingPolicyExists("aws_autoscaling_policy.foobar", &policy),
+ resource.TestCheckResourceAttr("aws_autoscaling_policy.foobar", "adjustment_type", "ChangeInCapacity"),
+ resource.TestCheckResourceAttr("aws_autoscaling_policy.foobar", "cooldown", "300"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckScalingPolicyExists(n string, policy *autoscaling.ScalingPolicy) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ conn := testAccProvider.Meta().(*AWSClient).autoscalingconn
+ params := &autoscaling.DescribePoliciesInput{
+ AutoScalingGroupName: aws.String(rs.Primary.Attributes["autoscaling_group_name"]),
+ PolicyNames: []*string{aws.String(rs.Primary.ID)},
+ }
+ resp, err := conn.DescribePolicies(params)
+ if err != nil {
+ return err
+ }
+ if len(resp.ScalingPolicies) == 0 {
+ return fmt.Errorf("ScalingPolicy not found")
+ }
+ *policy = *resp.ScalingPolicies[0]
+
+ return nil
+ }
+}
+
+func testAccCheckAWSAutoscalingPolicyDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).autoscalingconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_autoscaling_group" {
+ continue
+ }
+
+ params := autoscaling.DescribePoliciesInput{
+ AutoScalingGroupName: aws.String(rs.Primary.Attributes["autoscaling_group_name"]),
+ PolicyNames: []*string{aws.String(rs.Primary.ID)},
+ }
+
+ resp, err := conn.DescribePolicies(¶ms)
+
+ if err == nil {
+ if len(resp.ScalingPolicies) != 0 &&
+ *resp.ScalingPolicies[0].PolicyName == rs.Primary.ID {
+ return fmt.Errorf("Scaling Policy Still Exists: %s", rs.Primary.ID)
+ }
+ }
+ }
+
+ return nil
+}
+
+var testAccAWSAutoscalingPolicyConfig = fmt.Sprintf(`
+resource "aws_launch_configuration" "foobar" {
+ name = "terraform-test-foobar5"
+ image_id = "ami-21f78e11"
+ instance_type = "t1.micro"
+}
+
+resource "aws_autoscaling_group" "foobar" {
+ availability_zones = ["us-west-2a"]
+ name = "terraform-test-foobar5"
+ max_size = 5
+ min_size = 2
+ health_check_grace_period = 300
+ health_check_type = "ELB"
+ force_delete = true
+ termination_policies = ["OldestInstance"]
+ launch_configuration = "${aws_launch_configuration.foobar.name}"
+ tag {
+ key = "Foo"
+ value = "foo-bar"
+ propagate_at_launch = true
+ }
+}
+
+resource "aws_autoscaling_policy" "foobar" {
+ name = "foobar"
+ scaling_adjustment = 4
+ adjustment_type = "ChangeInCapacity"
+ cooldown = 300
+ autoscaling_group_name = "${aws_autoscaling_group.foobar.name}"
+}
+`)
diff --git a/builtin/providers/aws/resource_aws_cloudwatch_metric_alarm.go b/builtin/providers/aws/resource_aws_cloudwatch_metric_alarm.go
new file mode 100644
index 0000000000..4c8b401400
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_cloudwatch_metric_alarm.go
@@ -0,0 +1,288 @@
+package aws
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/terraform/helper/schema"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/cloudwatch"
+)
+
+func resourceAwsCloudWatchMetricAlarm() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsCloudWatchMetricAlarmCreate,
+ Read: resourceAwsCloudWatchMetricAlarmRead,
+ Update: resourceAwsCloudWatchMetricAlarmUpdate,
+ Delete: resourceAwsCloudWatchMetricAlarmDelete,
+
+ Schema: map[string]*schema.Schema{
+ "alarm_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "comparison_operator": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "evaluation_periods": &schema.Schema{
+ Type: schema.TypeInt,
+ Required: true,
+ },
+ "metric_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "namespace": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "period": &schema.Schema{
+ Type: schema.TypeInt,
+ Required: true,
+ },
+ "statistic": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeFloat,
+ Required: true,
+ },
+ "actions_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: true,
+ },
+ "alarm_actions": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: func(v interface{}) int {
+ return hashcode.String(v.(string))
+ },
+ },
+ "alarm_description": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "dimensions": &schema.Schema{
+ Type: schema.TypeMap,
+ Optional: true,
+ },
+ "insufficient_data_actions": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: func(v interface{}) int {
+ return hashcode.String(v.(string))
+ },
+ },
+ "ok_actions": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: func(v interface{}) int {
+ return hashcode.String(v.(string))
+ },
+ },
+ "unit": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ },
+ }
+}
+
+func resourceAwsCloudWatchMetricAlarmCreate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).cloudwatchconn
+
+ params := getAwsCloudWatchPutMetricAlarmInput(d)
+
+ log.Printf("[DEBUG] Creating CloudWatch Metric Alarm: %#v", params)
+ _, err := conn.PutMetricAlarm(¶ms)
+ if err != nil {
+ return fmt.Errorf("Creating metric alarm failed: %s", err)
+ }
+ d.SetId(d.Get("alarm_name").(string))
+ log.Println("[INFO] CloudWatch Metric Alarm created")
+
+ return resourceAwsCloudWatchMetricAlarmRead(d, meta)
+}
+
+func resourceAwsCloudWatchMetricAlarmRead(d *schema.ResourceData, meta interface{}) error {
+ a, err := getAwsCloudWatchMetricAlarm(d, meta)
+ if err != nil {
+ return err
+ }
+ if a == nil {
+ d.SetId("")
+ return nil
+ }
+
+ log.Printf("[DEBUG] Reading CloudWatch Metric Alarm: %s", d.Get("alarm_name"))
+
+ d.Set("actions_enabled", a.ActionsEnabled)
+
+ if err := d.Set("alarm_actions", _strArrPtrToList(a.AlarmActions)); err != nil {
+ log.Printf("[WARN] Error setting Alarm Actions: %s", err)
+ }
+ d.Set("alarm_description", a.AlarmDescription)
+ d.Set("alarm_name", a.AlarmName)
+ d.Set("comparison_operator", a.ComparisonOperator)
+ d.Set("dimensions", a.Dimensions)
+ d.Set("evaluation_periods", a.EvaluationPeriods)
+
+ if err := d.Set("insufficient_data_actions", _strArrPtrToList(a.InsufficientDataActions)); err != nil {
+ log.Printf("[WARN] Error setting Insufficient Data Actions: %s", err)
+ }
+ d.Set("metric_name", a.MetricName)
+ d.Set("namespace", a.Namespace)
+
+ if err := d.Set("ok_actions", _strArrPtrToList(a.OKActions)); err != nil {
+ log.Printf("[WARN] Error setting OK Actions: %s", err)
+ }
+ d.Set("period", a.Period)
+ d.Set("statistic", a.Statistic)
+ d.Set("threshold", a.Threshold)
+ d.Set("unit", a.Unit)
+
+ return nil
+}
+
+func resourceAwsCloudWatchMetricAlarmUpdate(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).cloudwatchconn
+ params := getAwsCloudWatchPutMetricAlarmInput(d)
+
+ log.Printf("[DEBUG] Updating CloudWatch Metric Alarm: %#v", params)
+ _, err := conn.PutMetricAlarm(¶ms)
+ if err != nil {
+ return fmt.Errorf("Updating metric alarm failed: %s", err)
+ }
+ log.Println("[INFO] CloudWatch Metric Alarm updated")
+
+ return resourceAwsCloudWatchMetricAlarmRead(d, meta)
+}
+
+func resourceAwsCloudWatchMetricAlarmDelete(d *schema.ResourceData, meta interface{}) error {
+ p, err := getAwsCloudWatchMetricAlarm(d, meta)
+ if err != nil {
+ return err
+ }
+ if p == nil {
+ log.Printf("[DEBUG] CloudWatch Metric Alarm %s is already gone", d.Id())
+ return nil
+ }
+
+ log.Printf("[INFO] Deleting CloudWatch Metric Alarm: %s", d.Id())
+
+ conn := meta.(*AWSClient).cloudwatchconn
+ params := cloudwatch.DeleteAlarmsInput{
+ AlarmNames: []*string{aws.String(d.Id())},
+ }
+
+ if _, err := conn.DeleteAlarms(¶ms); err != nil {
+ return fmt.Errorf("Error deleting CloudWatch Metric Alarm: %s", err)
+ }
+ log.Println("[INFO] CloudWatch Metric Alarm deleted")
+
+ d.SetId("")
+ return nil
+}
+
+func getAwsCloudWatchPutMetricAlarmInput(d *schema.ResourceData) cloudwatch.PutMetricAlarmInput {
+ params := cloudwatch.PutMetricAlarmInput{
+ AlarmName: aws.String(d.Get("alarm_name").(string)),
+ ComparisonOperator: aws.String(d.Get("comparison_operator").(string)),
+ EvaluationPeriods: aws.Long(int64(d.Get("evaluation_periods").(int))),
+ MetricName: aws.String(d.Get("metric_name").(string)),
+ Namespace: aws.String(d.Get("namespace").(string)),
+ Period: aws.Long(int64(d.Get("period").(int))),
+ Statistic: aws.String(d.Get("statistic").(string)),
+ Threshold: aws.Double(d.Get("threshold").(float64)),
+ }
+
+ if v := d.Get("actions_enabled"); v != nil {
+ params.ActionsEnabled = aws.Boolean(v.(bool))
+ }
+
+ if v, ok := d.GetOk("alarm_description"); ok {
+ params.AlarmDescription = aws.String(v.(string))
+ }
+
+ if v, ok := d.GetOk("unit"); ok {
+ params.Unit = aws.String(v.(string))
+ }
+
+ var alarmActions []*string
+ if v := d.Get("alarm_actions"); v != nil {
+ for _, v := range v.(*schema.Set).List() {
+ str := v.(string)
+ alarmActions = append(alarmActions, aws.String(str))
+ }
+ params.AlarmActions = alarmActions
+ }
+
+ var insufficientDataActions []*string
+ if v := d.Get("insufficient_data_actions"); v != nil {
+ for _, v := range v.(*schema.Set).List() {
+ str := v.(string)
+ insufficientDataActions = append(insufficientDataActions, aws.String(str))
+ }
+ params.InsufficientDataActions = insufficientDataActions
+ }
+
+ var okActions []*string
+ if v := d.Get("ok_actions"); v != nil {
+ for _, v := range v.(*schema.Set).List() {
+ str := v.(string)
+ okActions = append(okActions, aws.String(str))
+ }
+ params.OKActions = okActions
+ }
+
+ a := d.Get("dimensions").(map[string]interface{})
+ dimensions := make([]*cloudwatch.Dimension, 0, len(a))
+ for k, v := range a {
+ dimensions = append(dimensions, &cloudwatch.Dimension{
+ Name: aws.String(k),
+ Value: aws.String(v.(string)),
+ })
+ }
+ params.Dimensions = dimensions
+
+ return params
+}
+
+func getAwsCloudWatchMetricAlarm(d *schema.ResourceData, meta interface{}) (*cloudwatch.MetricAlarm, error) {
+ conn := meta.(*AWSClient).cloudwatchconn
+
+ params := cloudwatch.DescribeAlarmsInput{
+ AlarmNames: []*string{aws.String(d.Id())},
+ }
+
+ resp, err := conn.DescribeAlarms(¶ms)
+ if err != nil {
+ return nil, err
+ }
+
+ // Find it and return it
+ for idx, ma := range resp.MetricAlarms {
+ if *ma.AlarmName == d.Id() {
+ return resp.MetricAlarms[idx], nil
+ }
+ }
+
+ return nil, nil
+}
+
+func _strArrPtrToList(strArrPtr []*string) []string {
+ var result []string
+ for _, elem := range strArrPtr {
+ result = append(result, *elem)
+ }
+ return result
+}
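
A usage sketch exercising the map- and set-typed arguments above (`dimensions`, `alarm_actions`); the referenced group and policy are illustrative:

```hcl
resource "aws_cloudwatch_metric_alarm" "cpu_high" {
  alarm_name          = "asg-cpu-high"
  comparison_operator = "GreaterThanOrEqualToThreshold"
  evaluation_periods  = "2"
  metric_name         = "CPUUtilization"
  namespace           = "AWS/EC2"
  period              = "120"
  statistic           = "Average"
  threshold           = "80"
  alarm_description   = "Average CPU >= 80% over two periods"

  # TypeMap argument: each entry becomes a cloudwatch.Dimension.
  dimensions {
    AutoScalingGroupName = "${aws_autoscaling_group.example.name}"
  }

  # TypeSet argument: each ARN is hashed by its string value.
  alarm_actions = ["${aws_autoscaling_policy.scale_up.arn}"]
}
```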
diff --git a/builtin/providers/aws/resource_aws_cloudwatch_metric_alarm_test.go b/builtin/providers/aws/resource_aws_cloudwatch_metric_alarm_test.go
new file mode 100644
index 0000000000..8d9a53c360
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_cloudwatch_metric_alarm_test.go
@@ -0,0 +1,95 @@
+package aws
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/cloudwatch"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSCloudWatchMetricAlarm_basic(t *testing.T) {
+ var alarm cloudwatch.MetricAlarm
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSCloudWatchMetricAlarmDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSCloudWatchMetricAlarmConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckCloudWatchMetricAlarmExists("aws_cloudwatch_metric_alarm.foobar", &alarm),
+ resource.TestCheckResourceAttr("aws_cloudwatch_metric_alarm.foobar", "metric_name", "CPUUtilization"),
+ resource.TestCheckResourceAttr("aws_cloudwatch_metric_alarm.foobar", "statistic", "Average"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckCloudWatchMetricAlarmExists(n string, alarm *cloudwatch.MetricAlarm) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ conn := testAccProvider.Meta().(*AWSClient).cloudwatchconn
+ params := cloudwatch.DescribeAlarmsInput{
+ AlarmNames: []*string{aws.String(rs.Primary.ID)},
+ }
+ resp, err := conn.DescribeAlarms(¶ms)
+ if err != nil {
+ return err
+ }
+ if len(resp.MetricAlarms) == 0 {
+ return fmt.Errorf("Alarm not found")
+ }
+ *alarm = *resp.MetricAlarms[0]
+
+ return nil
+ }
+}
+
+func testAccCheckAWSCloudWatchMetricAlarmDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).cloudwatchconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_cloudwatch_metric_alarm" {
+ continue
+ }
+
+ params := cloudwatch.DescribeAlarmsInput{
+ AlarmNames: []*string{aws.String(rs.Primary.ID)},
+ }
+
+ resp, err := conn.DescribeAlarms(¶ms)
+
+ if err == nil {
+ if len(resp.MetricAlarms) != 0 &&
+ *resp.MetricAlarms[0].AlarmName == rs.Primary.ID {
+ return fmt.Errorf("Alarm Still Exists: %s", rs.Primary.ID)
+ }
+ }
+ }
+
+ return nil
+}
+
+var testAccAWSCloudWatchMetricAlarmConfig = fmt.Sprintf(`
+resource "aws_cloudwatch_metric_alarm" "foobar" {
+ alarm_name = "terraform-test-foobar5"
+ comparison_operator = "GreaterThanOrEqualToThreshold"
+ evaluation_periods = "2"
+ metric_name = "CPUUtilization"
+ namespace = "AWS/EC2"
+ period = "120"
+ statistic = "Average"
+ threshold = "80"
+ alarm_description = "This metric monitor ec2 cpu utilization"
+ insufficient_data_actions = []
+}
+`)
diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go
new file mode 100644
index 0000000000..163d14d8d8
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_dynamodb_table.go
@@ -0,0 +1,704 @@
+package aws
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/hashicorp/terraform/helper/schema"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/dynamodb"
+ "github.com/hashicorp/terraform/helper/hashcode"
+)
+
+// A number of these are marked as computed because if you don't
+// provide a value, DynamoDB will provide you with defaults (which are the
+// default values specified below)
+func resourceAwsDynamoDbTable() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsDynamoDbTableCreate,
+ Read: resourceAwsDynamoDbTableRead,
+ Update: resourceAwsDynamoDbTableUpdate,
+ Delete: resourceAwsDynamoDbTableDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "hash_key": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "range_key": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "write_capacity": &schema.Schema{
+ Type: schema.TypeInt,
+ Required: true,
+ },
+ "read_capacity": &schema.Schema{
+ Type: schema.TypeInt,
+ Required: true,
+ },
+ "attribute": &schema.Schema{
+ Type: schema.TypeSet,
+ Required: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "type": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ },
+ },
+ Set: func(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+ buf.WriteString(fmt.Sprintf("%s-", m["name"].(string)))
+ return hashcode.String(buf.String())
+ },
+ },
+ "local_secondary_index": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "range_key": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "projection_type": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "non_key_attributes": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ },
+ },
+ Set: func(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+ buf.WriteString(fmt.Sprintf("%s-", m["name"].(string)))
+ return hashcode.String(buf.String())
+ },
+ },
+ "global_secondary_index": &schema.Schema{
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "write_capacity": &schema.Schema{
+ Type: schema.TypeInt,
+ Required: true,
+ },
+ "read_capacity": &schema.Schema{
+ Type: schema.TypeInt,
+ Required: true,
+ },
+ "hash_key": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "range_key": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "projection_type": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "non_key_attributes": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+ },
+ },
+ // The set hash covers the GSI name and capacities, so capacity
+ // changes surface as set changes
+ Set: func(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+ buf.WriteString(fmt.Sprintf("%s-", m["name"].(string)))
+ buf.WriteString(fmt.Sprintf("%d-", m["write_capacity"].(int)))
+ buf.WriteString(fmt.Sprintf("%d-", m["read_capacity"].(int)))
+ return hashcode.String(buf.String())
+ },
+ },
+ },
+ }
+}
+
+func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) error {
+ dynamodbconn := meta.(*AWSClient).dynamodbconn
+
+ name := d.Get("name").(string)
+
+ log.Printf("[DEBUG] DynamoDB table create: %s", name)
+
+ throughput := &dynamodb.ProvisionedThroughput{
+ ReadCapacityUnits: aws.Long(int64(d.Get("read_capacity").(int))),
+ WriteCapacityUnits: aws.Long(int64(d.Get("write_capacity").(int))),
+ }
+
+ hash_key_name := d.Get("hash_key").(string)
+ keyschema := []*dynamodb.KeySchemaElement{
+ &dynamodb.KeySchemaElement{
+ AttributeName: aws.String(hash_key_name),
+ KeyType: aws.String("HASH"),
+ },
+ }
+
+ if range_key, ok := d.GetOk("range_key"); ok {
+ range_schema_element := &dynamodb.KeySchemaElement{
+ AttributeName: aws.String(range_key.(string)),
+ KeyType: aws.String("RANGE"),
+ }
+ keyschema = append(keyschema, range_schema_element)
+ }
+
+ req := &dynamodb.CreateTableInput{
+ TableName: aws.String(name),
+ ProvisionedThroughput: throughput,
+ KeySchema: keyschema,
+ }
+
+ if attributedata, ok := d.GetOk("attribute"); ok {
+ attributes := []*dynamodb.AttributeDefinition{}
+ attributeSet := attributedata.(*schema.Set)
+ for _, attribute := range attributeSet.List() {
+ attr := attribute.(map[string]interface{})
+ attributes = append(attributes, &dynamodb.AttributeDefinition{
+ AttributeName: aws.String(attr["name"].(string)),
+ AttributeType: aws.String(attr["type"].(string)),
+ })
+ }
+
+ req.AttributeDefinitions = attributes
+ }
+
+ if lsidata, ok := d.GetOk("local_secondary_index"); ok {
+ fmt.Printf("[DEBUG] Adding LSI data to the table")
+
+ lsiSet := lsidata.(*schema.Set)
+ localSecondaryIndexes := []*dynamodb.LocalSecondaryIndex{}
+ for _, lsiObject := range lsiSet.List() {
+ lsi := lsiObject.(map[string]interface{})
+
+ projection := &dynamodb.Projection{
+ ProjectionType: aws.String(lsi["projection_type"].(string)),
+ }
+
+ if lsi["projection_type"] != "ALL" {
+ non_key_attributes := []*string{}
+ for _, attr := range lsi["non_key_attributes"].([]interface{}) {
+ non_key_attributes = append(non_key_attributes, aws.String(attr.(string)))
+ }
+ projection.NonKeyAttributes = non_key_attributes
+ }
+
+ localSecondaryIndexes = append(localSecondaryIndexes, &dynamodb.LocalSecondaryIndex{
+ IndexName: aws.String(lsi["name"].(string)),
+ KeySchema: []*dynamodb.KeySchemaElement{
+ &dynamodb.KeySchemaElement{
+ AttributeName: aws.String(hash_key_name),
+ KeyType: aws.String("HASH"),
+ },
+ &dynamodb.KeySchemaElement{
+ AttributeName: aws.String(lsi["range_key"].(string)),
+ KeyType: aws.String("RANGE"),
+ },
+ },
+ Projection: projection,
+ })
+ }
+
+ req.LocalSecondaryIndexes = localSecondaryIndexes
+
+ fmt.Printf("[DEBUG] Added %d LSI definitions", len(localSecondaryIndexes))
+ }
+
+ if gsidata, ok := d.GetOk("global_secondary_index"); ok {
+ globalSecondaryIndexes := []*dynamodb.GlobalSecondaryIndex{}
+
+ gsiSet := gsidata.(*schema.Set)
+ for _, gsiObject := range gsiSet.List() {
+ gsi := gsiObject.(map[string]interface{})
+ gsiObject := createGSIFromData(&gsi)
+ globalSecondaryIndexes = append(globalSecondaryIndexes, &gsiObject)
+ }
+ req.GlobalSecondaryIndexes = globalSecondaryIndexes
+ }
+
+ output, err := dynamodbconn.CreateTable(req)
+ if err != nil {
+ return fmt.Errorf("Error creating DynamoDB table: %s", err)
+ }
+
+ d.SetId(*output.TableDescription.TableName)
+
+ // Creation complete, nothing to re-read
+ return nil
+}
+
+func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) error {
+
+ log.Printf("[DEBUG] Updating DynamoDB table %s", d.Id())
+ dynamodbconn := meta.(*AWSClient).dynamodbconn
+
+ // Ensure table is active before trying to update
+ if err := waitForTableToBeActive(d.Id(), meta); err != nil {
+ return err
+ }
+
+ // LSI can only be done at create-time, abort if it's been changed
+ if d.HasChange("local_secondary_index") {
+ return fmt.Errorf("Local secondary indexes can only be built at creation, you cannot update them!")
+ }
+
+ if d.HasChange("hash_key") {
+ return fmt.Errorf("Hash key can only be specified at creation, you cannot modify it.")
+ }
+
+ if d.HasChange("range_key") {
+ return fmt.Errorf("Range key can only be specified at creation, you cannot modify it.")
+ }
+
+ if d.HasChange("read_capacity") || d.HasChange("write_capacity") {
+ req := &dynamodb.UpdateTableInput{
+ TableName: aws.String(d.Id()),
+ }
+
+ throughput := &dynamodb.ProvisionedThroughput{
+ ReadCapacityUnits: aws.Long(int64(d.Get("read_capacity").(int))),
+ WriteCapacityUnits: aws.Long(int64(d.Get("write_capacity").(int))),
+ }
+ req.ProvisionedThroughput = throughput
+
+ _, err := dynamodbconn.UpdateTable(req)
+
+ if err != nil {
+ return err
+ }
+
+ if err := waitForTableToBeActive(d.Id(), meta); err != nil {
+ return err
+ }
+ }
+
+ if d.HasChange("global_secondary_index") {
+ log.Printf("[DEBUG] Changed GSI data")
+ req := &dynamodb.UpdateTableInput{
+ TableName: aws.String(d.Id()),
+ }
+
+ o, n := d.GetChange("global_secondary_index")
+
+ oldSet := o.(*schema.Set)
+ newSet := n.(*schema.Set)
+
+ // Track old names so we know which indexes only need a capacity update.
+ // Terraform appears to diff only on the set hash, not the contents, so we
+ // need to make sure we don't delete any indexes for which we only want to
+ // update the capacity.
+ oldGsiNameSet := make(map[string]bool)
+ newGsiNameSet := make(map[string]bool)
+
+ for _, gsidata := range oldSet.List() {
+ gsiName := gsidata.(map[string]interface{})["name"].(string)
+ oldGsiNameSet[gsiName] = true
+ }
+
+ for _, gsidata := range newSet.List() {
+ gsiName := gsidata.(map[string]interface{})["name"].(string)
+ newGsiNameSet[gsiName] = true
+ }
+
+ // First determine what's new
+ for _, newgsidata := range newSet.List() {
+ updates := []*dynamodb.GlobalSecondaryIndexUpdate{}
+ newGsiName := newgsidata.(map[string]interface{})["name"].(string)
+ if _, exists := oldGsiNameSet[newGsiName]; !exists {
+ attributes := []*dynamodb.AttributeDefinition{}
+ gsidata := newgsidata.(map[string]interface{})
+ gsi := createGSIFromData(&gsidata)
+ log.Printf("[DEBUG] Adding GSI %s", *gsi.IndexName)
+ update := &dynamodb.GlobalSecondaryIndexUpdate{
+ Create: &dynamodb.CreateGlobalSecondaryIndexAction{
+ IndexName: gsi.IndexName,
+ KeySchema: gsi.KeySchema,
+ ProvisionedThroughput: gsi.ProvisionedThroughput,
+ Projection: gsi.Projection,
+ },
+ }
+ updates = append(updates, update)
+
+ // Hash key is required, range key isn't
+ hashkey_type, err := getAttributeType(d, *(gsi.KeySchema[0].AttributeName))
+ if err != nil {
+ return err
+ }
+
+ attributes = append(attributes, &dynamodb.AttributeDefinition{
+ AttributeName: gsi.KeySchema[0].AttributeName,
+ AttributeType: aws.String(hashkey_type),
+ })
+
+ // If there's a range key, there will be 2 elements in KeySchema
+ if len(gsi.KeySchema) == 2 {
+ rangekey_type, err := getAttributeType(d, *(gsi.KeySchema[1].AttributeName))
+ if err != nil {
+ return err
+ }
+
+ attributes = append(attributes, &dynamodb.AttributeDefinition{
+ AttributeName: gsi.KeySchema[1].AttributeName,
+ AttributeType: aws.String(rangekey_type),
+ })
+ }
+
+ req.AttributeDefinitions = attributes
+ req.GlobalSecondaryIndexUpdates = updates
+ _, err = dynamodbconn.UpdateTable(req)
+
+ if err != nil {
+ return err
+ }
+
+ if err := waitForTableToBeActive(d.Id(), meta); err != nil {
+ return err
+ }
+ if err := waitForGSIToBeActive(d.Id(), *gsi.IndexName, meta); err != nil {
+ return err
+ }
+
+ }
+ }
+
+ for _, oldgsidata := range oldSet.List() {
+ updates := []*dynamodb.GlobalSecondaryIndexUpdate{}
+ oldGsiName := oldgsidata.(map[string]interface{})["name"].(string)
+ if _, exists := newGsiNameSet[oldGsiName]; !exists {
+ gsidata := oldgsidata.(map[string]interface{})
+ log.Printf("[DEBUG] Deleting GSI %s", gsidata["name"].(string))
+ update := &dynamodb.GlobalSecondaryIndexUpdate{
+ Delete: &dynamodb.DeleteGlobalSecondaryIndexAction{
+ IndexName: aws.String(gsidata["name"].(string)),
+ },
+ }
+ updates = append(updates, update)
+
+ req.GlobalSecondaryIndexUpdates = updates
+ _, err := dynamodbconn.UpdateTable(req)
+
+ if err != nil {
+ return err
+ }
+
+ if err := waitForTableToBeActive(d.Id(), meta); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ // Update any out-of-date read / write capacity
+ if gsiObjects, ok := d.GetOk("global_secondary_index"); ok {
+ gsiSet := gsiObjects.(*schema.Set)
+ if len(gsiSet.List()) > 0 {
+ log.Printf("Updating capacity as needed!")
+
+ // We can only change throughput, but we need to make sure it's actually changed
+ tableDescription, err := dynamodbconn.DescribeTable(&dynamodb.DescribeTableInput{
+ TableName: aws.String(d.Id()),
+ })
+
+ if err != nil {
+ return err
+ }
+
+ table := tableDescription.Table
+
+ updates := []*dynamodb.GlobalSecondaryIndexUpdate{}
+
+ for _, updatedgsidata := range gsiSet.List() {
+ gsidata := updatedgsidata.(map[string]interface{})
+ gsiName := gsidata["name"].(string)
+ gsiWriteCapacity := gsidata["write_capacity"].(int)
+ gsiReadCapacity := gsidata["read_capacity"].(int)
+
+ log.Printf("[DEBUG] Updating GSI %s", gsiName)
+ gsi, err := getGlobalSecondaryIndex(gsiName, table.GlobalSecondaryIndexes)
+
+ if err != nil {
+ return err
+ }
+
+ capacityUpdated := false
+
+ if int64(gsiReadCapacity) != *(gsi.ProvisionedThroughput.ReadCapacityUnits) ||
+ int64(gsiWriteCapacity) != *(gsi.ProvisionedThroughput.WriteCapacityUnits) {
+ capacityUpdated = true
+ }
+
+ if capacityUpdated {
+ update := &dynamodb.GlobalSecondaryIndexUpdate{
+ Update: &dynamodb.UpdateGlobalSecondaryIndexAction{
+ IndexName: aws.String(gsidata["name"].(string)),
+ ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
+ WriteCapacityUnits: aws.Long(int64(gsiWriteCapacity)),
+ ReadCapacityUnits: aws.Long(int64(gsiReadCapacity)),
+ },
+ },
+ }
+ updates = append(updates, update)
+
+ }
+
+ if len(updates) > 0 {
+
+ req := &dynamodb.UpdateTableInput{
+ TableName: aws.String(d.Id()),
+ }
+
+ req.GlobalSecondaryIndexUpdates = updates
+
+ log.Printf("[DEBUG] Updating GSI read / write capacity on %s", d.Id())
+ _, err := dynamodbconn.UpdateTable(req)
+
+ if err != nil {
+ log.Printf("[DEBUG] Error updating table: %s", err)
+ return err
+ }
+ }
+ }
+ }
+
+ }
+
+ return resourceAwsDynamoDbTableRead(d, meta)
+}
+
+func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) error {
+ dynamodbconn := meta.(*AWSClient).dynamodbconn
+ log.Printf("[DEBUG] Loading data for DynamoDB table '%s'", d.Id())
+ req := &dynamodb.DescribeTableInput{
+ TableName: aws.String(d.Id()),
+ }
+
+ result, err := dynamodbconn.DescribeTable(req)
+
+ if err != nil {
+ return err
+ }
+
+ table := result.Table
+
+ d.Set("write_capacity", table.ProvisionedThroughput.WriteCapacityUnits)
+ d.Set("read_capacity", table.ProvisionedThroughput.ReadCapacityUnits)
+
+ attributes := []interface{}{}
+ for _, attrdef := range table.AttributeDefinitions {
+ attribute := map[string]string{
+ "name": *(attrdef.AttributeName),
+ "type": *(attrdef.AttributeType),
+ }
+ attributes = append(attributes, attribute)
+ log.Printf("[DEBUG] Added Attribute: %s", attribute["name"])
+ }
+
+ d.Set("attribute", attributes)
+
+ gsiList := make([]map[string]interface{}, 0, len(table.GlobalSecondaryIndexes))
+ for _, gsiObject := range table.GlobalSecondaryIndexes {
+ gsi := map[string]interface{}{
+ "write_capacity": *(gsiObject.ProvisionedThroughput.WriteCapacityUnits),
+ "read_capacity": *(gsiObject.ProvisionedThroughput.ReadCapacityUnits),
+ "name": *(gsiObject.IndexName),
+ }
+
+ for _, attribute := range gsiObject.KeySchema {
+ if *attribute.KeyType == "HASH" {
+ gsi["hash_key"] = *attribute.AttributeName
+ }
+
+ if *attribute.KeyType == "RANGE" {
+ gsi["range_key"] = *attribute.AttributeName
+ }
+ }
+
+ gsi["projection_type"] = *(gsiObject.Projection.ProjectionType)
+ gsi["non_key_attributes"] = gsiObject.Projection.NonKeyAttributes
+
+ gsiList = append(gsiList, gsi)
+ log.Printf("[DEBUG] Added GSI: %s - Read: %d / Write: %d", gsi["name"], gsi["read_capacity"], gsi["write_capacity"])
+ }
+
+ d.Set("global_secondary_index", gsiList)
+
+ return nil
+}
+
+func resourceAwsDynamoDbTableDelete(d *schema.ResourceData, meta interface{}) error {
+ dynamodbconn := meta.(*AWSClient).dynamodbconn
+
+ if err := waitForTableToBeActive(d.Id(), meta); err != nil {
+ return err
+ }
+
+ log.Printf("[DEBUG] DynamoDB delete table: %s", d.Id())
+
+ _, err := dynamodbconn.DeleteTable(&dynamodb.DeleteTableInput{
+ TableName: aws.String(d.Id()),
+ })
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func createGSIFromData(data *map[string]interface{}) dynamodb.GlobalSecondaryIndex {
+
+ projection := &dynamodb.Projection{
+ ProjectionType: aws.String((*data)["projection_type"].(string)),
+ }
+
+ if (*data)["projection_type"] != "ALL" {
+ non_key_attributes := []*string{}
+ for _, attr := range (*data)["non_key_attributes"].([]interface{}) {
+ non_key_attributes = append(non_key_attributes, aws.String(attr.(string)))
+ }
+ projection.NonKeyAttributes = non_key_attributes
+ }
+
+ writeCapacity := (*data)["write_capacity"].(int)
+ readCapacity := (*data)["read_capacity"].(int)
+
+ key_schema := []*dynamodb.KeySchemaElement{
+ &dynamodb.KeySchemaElement{
+ AttributeName: aws.String((*data)["hash_key"].(string)),
+ KeyType: aws.String("HASH"),
+ },
+ }
+
+ range_key_name := (*data)["range_key"]
+ if range_key_name != "" {
+ range_key_element := &dynamodb.KeySchemaElement{
+ AttributeName: aws.String(range_key_name.(string)),
+ KeyType: aws.String("RANGE"),
+ }
+
+ key_schema = append(key_schema, range_key_element)
+ }
+
+ return dynamodb.GlobalSecondaryIndex{
+ IndexName: aws.String((*data)["name"].(string)),
+ KeySchema: key_schema,
+ Projection: projection,
+ ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
+ WriteCapacityUnits: aws.Long(int64(writeCapacity)),
+ ReadCapacityUnits: aws.Long(int64(readCapacity)),
+ },
+ }
+}
+
+func getGlobalSecondaryIndex(indexName string, indexList []*dynamodb.GlobalSecondaryIndexDescription) (*dynamodb.GlobalSecondaryIndexDescription, error) {
+ for _, gsi := range indexList {
+ if *(gsi.IndexName) == indexName {
+ return gsi, nil
+ }
+ }
+
+ return &dynamodb.GlobalSecondaryIndexDescription{}, fmt.Errorf("Can't find a GSI by that name...")
+}
+
+func getAttributeType(d *schema.ResourceData, attributeName string) (string, error) {
+ if attributedata, ok := d.GetOk("attribute"); ok {
+ attributeSet := attributedata.(*schema.Set)
+ for _, attribute := range attributeSet.List() {
+ attr := attribute.(map[string]interface{})
+ if attr["name"] == attributeName {
+ return attr["type"].(string), nil
+ }
+ }
+ }
+
+ return "", fmt.Errorf("Unable to find an attribute named %s", attributeName)
+}
+
+func waitForGSIToBeActive(tableName string, gsiName string, meta interface{}) error {
+ dynamodbconn := meta.(*AWSClient).dynamodbconn
+ req := &dynamodb.DescribeTableInput{
+ TableName: aws.String(tableName),
+ }
+
+ activeIndex := false
+
+ for !activeIndex {
+
+ result, err := dynamodbconn.DescribeTable(req)
+
+ if err != nil {
+ return err
+ }
+
+ table := result.Table
+ var targetGSI *dynamodb.GlobalSecondaryIndexDescription
+
+ for _, gsi := range table.GlobalSecondaryIndexes {
+ if *gsi.IndexName == gsiName {
+ targetGSI = gsi
+ }
+ }
+
+ if targetGSI != nil {
+ activeIndex = *targetGSI.IndexStatus == "ACTIVE"
+
+ if !activeIndex {
+ log.Printf("[DEBUG] Sleeping for 5 seconds for %s GSI to become active", gsiName)
+ time.Sleep(5 * time.Second)
+ }
+ } else {
+ log.Printf("[DEBUG] GSI %s did not exist, giving up", gsiName)
+ break
+ }
+ }
+
+ return nil
+
+}
+
+func waitForTableToBeActive(tableName string, meta interface{}) error {
+ dynamodbconn := meta.(*AWSClient).dynamodbconn
+ req := &dynamodb.DescribeTableInput{
+ TableName: aws.String(tableName),
+ }
+
+ activeState := false
+
+ for !activeState {
+ result, err := dynamodbconn.DescribeTable(req)
+
+ if err != nil {
+ return err
+ }
+
+ activeState = *(result.Table.TableStatus) == "ACTIVE"
+
+ // Wait for a few seconds
+ if !activeState {
+ log.Printf("[DEBUG] Sleeping for 5 seconds for table to become active")
+ time.Sleep(5 * time.Second)
+ }
+ }
+
+ return nil
+
+}
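
Since the test configurations below exercise the full LSI/GSI surface, here is a minimal sketch of the new `aws_dynamodb_table` resource with only the required arguments (table and attribute names are illustrative; note that every key attribute must also be declared in an `attribute` block):

```hcl
resource "aws_dynamodb_table" "sessions" {
  name           = "Sessions"
  read_capacity  = 5
  write_capacity = 5
  hash_key       = "SessionId"

  attribute {
    name = "SessionId"
    type = "S"
  }
}
```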
diff --git a/builtin/providers/aws/resource_aws_dynamodb_table_test.go b/builtin/providers/aws/resource_aws_dynamodb_table_test.go
new file mode 100644
index 0000000000..786a946b60
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_dynamodb_table_test.go
@@ -0,0 +1,296 @@
+package aws
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/dynamodb"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSDynamoDbTable(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSDynamoDbTableDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSDynamoDbConfigInitialState,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table"),
+ ),
+ },
+ resource.TestStep{
+ Config: testAccAWSDynamoDbConfigAddSecondaryGSI,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckDynamoDbTableWasUpdated("aws_dynamodb_table.basic-dynamodb-table"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckAWSDynamoDbTableDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).dynamodbconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_dynamodb_table" {
+ continue
+ }
+
+ fmt.Printf("[DEBUG] Checking if DynamoDB table %s exists", rs.Primary.ID)
+ // Check if the table exists by describing it
+ params := &dynamodb.DescribeTableInput{
+ TableName: aws.String(rs.Primary.ID),
+ }
+ _, err := conn.DescribeTable(params)
+ if err == nil {
+ return fmt.Errorf("DynamoDB table %s still exists. Failing!", rs.Primary.ID)
+ }
+
+ // Verify the error is what we want
+ _, ok := err.(awserr.Error)
+ if !ok {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func testAccCheckInitialAWSDynamoDbTableExists(n string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ fmt.Printf("[DEBUG] Trying to create initial table state!")
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No DynamoDB table name specified!")
+ }
+
+ conn := testAccProvider.Meta().(*AWSClient).dynamodbconn
+
+ params := &dynamodb.DescribeTableInput{
+ TableName: aws.String(rs.Primary.ID),
+ }
+
+ resp, err := conn.DescribeTable(params)
+
+ if err != nil {
+ fmt.Printf("[ERROR] Problem describing table '%s': %s", rs.Primary.ID, err)
+ return err
+ }
+
+ table := resp.Table
+
+ fmt.Printf("[DEBUG] Checking on table %s", rs.Primary.ID)
+
+ if *table.ProvisionedThroughput.WriteCapacityUnits != 20 {
+ return fmt.Errorf("Provisioned write capacity was %d, not 20!", table.ProvisionedThroughput.WriteCapacityUnits)
+ }
+
+ if *table.ProvisionedThroughput.ReadCapacityUnits != 10 {
+ return fmt.Errorf("Provisioned read capacity was %d, not 10!", table.ProvisionedThroughput.ReadCapacityUnits)
+ }
+
+ attrCount := len(table.AttributeDefinitions)
+ gsiCount := len(table.GlobalSecondaryIndexes)
+ lsiCount := len(table.LocalSecondaryIndexes)
+
+ if attrCount != 4 {
+ return fmt.Errorf("There were %d attributes, not 4 like there should have been!", attrCount)
+ }
+
+ if gsiCount != 1 {
+ return fmt.Errorf("There were %d GSIs, not 1 like there should have been!", gsiCount)
+ }
+
+ if lsiCount != 1 {
+ return fmt.Errorf("There were %d LSIs, not 1 like there should have been!", lsiCount)
+ }
+
+ attrmap := dynamoDbAttributesToMap(&table.AttributeDefinitions)
+ if attrmap["TestTableHashKey"] != "S" {
+ return fmt.Errorf("Test table hash key was of type %s instead of S!", attrmap["TestTableHashKey"])
+ }
+ if attrmap["TestTableRangeKey"] != "S" {
+ return fmt.Errorf("Test table range key was of type %s instead of S!", attrmap["TestTableRangeKey"])
+ }
+ if attrmap["TestLSIRangeKey"] != "N" {
+ return fmt.Errorf("Test table LSI range key was of type %s instead of N!", attrmap["TestLSIRangeKey"])
+ }
+ if attrmap["TestGSIRangeKey"] != "S" {
+ return fmt.Errorf("Test table GSI range key was of type %s instead of S!", attrmap["TestGSIRangeKey"])
+ }
+
+ return nil
+ }
+}
+
+func testAccCheckDynamoDbTableWasUpdated(n string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No DynamoDB table name specified!")
+ }
+
+ conn := testAccProvider.Meta().(*AWSClient).dynamodbconn
+
+ params := &dynamodb.DescribeTableInput{
+ TableName: aws.String(rs.Primary.ID),
+ }
+ resp, err := conn.DescribeTable(params)
+ if err != nil {
+ return err
+ }
+
+ table := resp.Table
+
+ attrCount := len(table.AttributeDefinitions)
+ gsiCount := len(table.GlobalSecondaryIndexes)
+ lsiCount := len(table.LocalSecondaryIndexes)
+
+ if attrCount != 4 {
+ return fmt.Errorf("There were %d attributes, not 4 like there should have been!", attrCount)
+ }
+
+ if gsiCount != 1 {
+ return fmt.Errorf("There were %d GSIs, not 1 like there should have been!", gsiCount)
+ }
+
+ if lsiCount != 1 {
+ return fmt.Errorf("There were %d LSIs, not 1 like there should have been!", lsiCount)
+ }
+
+ if dynamoDbGetGSIIndex(&table.GlobalSecondaryIndexes, "ReplacementTestTableGSI") == -1 {
+ return fmt.Errorf("Could not find GSI named 'ReplacementTestTableGSI' in the table!")
+ }
+
+ if dynamoDbGetGSIIndex(&table.GlobalSecondaryIndexes, "InitialTestTableGSI") != -1 {
+ return fmt.Errorf("Should have removed 'InitialTestTableGSI' but it still exists!")
+ }
+
+ attrmap := dynamoDbAttributesToMap(&table.AttributeDefinitions)
+ if attrmap["TestTableHashKey"] != "S" {
+ return fmt.Errorf("Test table hash key was of type %s instead of S!", attrmap["TestTableHashKey"])
+ }
+ if attrmap["TestTableRangeKey"] != "S" {
+ return fmt.Errorf("Test table range key was of type %s instead of S!", attrmap["TestTableRangeKey"])
+ }
+ if attrmap["TestLSIRangeKey"] != "N" {
+ return fmt.Errorf("Test table LSI range key was of type %s instead of N!", attrmap["TestLSIRangeKey"])
+ }
+ if attrmap["ReplacementGSIRangeKey"] != "N" {
+ return fmt.Errorf("Test table replacement GSI range key was of type %s instead of N!", attrmap["ReplacementGSIRangeKey"])
+ }
+
+ return nil
+ }
+}
+
+func dynamoDbGetGSIIndex(gsiList *[]*dynamodb.GlobalSecondaryIndexDescription, target string) int {
+ for idx, gsiObject := range *gsiList {
+ if *gsiObject.IndexName == target {
+ return idx
+ }
+ }
+
+ return -1
+}
+
+func dynamoDbAttributesToMap(attributes *[]*dynamodb.AttributeDefinition) map[string]string {
+ attrmap := make(map[string]string)
+
+ for _, attrdef := range *attributes {
+ attrmap[*(attrdef.AttributeName)] = *(attrdef.AttributeType)
+ }
+
+ return attrmap
+}
+
+const testAccAWSDynamoDbConfigInitialState = `
+resource "aws_dynamodb_table" "basic-dynamodb-table" {
+ name = "TerraformTestTable"
+ read_capacity = 10
+ write_capacity = 20
+ hash_key = "TestTableHashKey"
+ range_key = "TestTableRangeKey"
+ attribute {
+ name = "TestTableHashKey"
+ type = "S"
+ }
+ attribute {
+ name = "TestTableRangeKey"
+ type = "S"
+ }
+ attribute {
+ name = "TestLSIRangeKey"
+ type = "N"
+ }
+ attribute {
+ name = "TestGSIRangeKey"
+ type = "S"
+ }
+ local_secondary_index {
+ name = "TestTableLSI"
+ range_key = "TestLSIRangeKey"
+ projection_type = "ALL"
+ }
+ global_secondary_index {
+ name = "InitialTestTableGSI"
+ hash_key = "TestTableHashKey"
+ range_key = "TestGSIRangeKey"
+ write_capacity = 10
+ read_capacity = 10
+ projection_type = "ALL"
+ }
+}
+`
+
+const testAccAWSDynamoDbConfigAddSecondaryGSI = `
+resource "aws_dynamodb_table" "basic-dynamodb-table" {
+ name = "TerraformTestTable"
+ read_capacity = 20
+ write_capacity = 20
+ hash_key = "TestTableHashKey"
+ range_key = "TestTableRangeKey"
+ attribute {
+ name = "TestTableHashKey"
+ type = "S"
+ }
+ attribute {
+ name = "TestTableRangeKey"
+ type = "S"
+ }
+ attribute {
+ name = "TestLSIRangeKey"
+ type = "N"
+ }
+ attribute {
+ name = "ReplacementGSIRangeKey"
+ type = "N"
+ }
+ local_secondary_index {
+ name = "TestTableLSI"
+ range_key = "TestLSIRangeKey"
+ projection_type = "ALL"
+ }
+ global_secondary_index {
+ name = "ReplacementTestTableGSI"
+ hash_key = "TestTableHashKey"
+ range_key = "ReplacementGSIRangeKey"
+ write_capacity = 5
+ read_capacity = 5
+ projection_type = "ALL"
+ }
+}
+`
diff --git a/builtin/providers/azure/config.go b/builtin/providers/azure/config.go
index cf02bf5571..52c69d40cb 100644
--- a/builtin/providers/azure/config.go
+++ b/builtin/providers/azure/config.go
@@ -9,6 +9,7 @@ import (
"github.com/Azure/azure-sdk-for-go/management/hostedservice"
"github.com/Azure/azure-sdk-for-go/management/networksecuritygroup"
"github.com/Azure/azure-sdk-for-go/management/osimage"
+ "github.com/Azure/azure-sdk-for-go/management/sql"
"github.com/Azure/azure-sdk-for-go/management/storageservice"
"github.com/Azure/azure-sdk-for-go/management/virtualmachine"
"github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk"
@@ -36,6 +37,8 @@ type Client struct {
osImageClient osimage.OSImageClient
+ sqlClient sql.SqlDatabaseClient
+
storageServiceClient storageservice.StorageServiceClient
vmClient virtualmachine.VirtualMachineClient
@@ -107,6 +110,7 @@ func (c *Config) NewClientFromSettingsFile() (*Client, error) {
hostedServiceClient: hostedservice.NewClient(mc),
secGroupClient: networksecuritygroup.NewClient(mc),
osImageClient: osimage.NewClient(mc),
+ sqlClient: sql.NewClient(mc),
storageServiceClient: storageservice.NewClient(mc),
vmClient: virtualmachine.NewClient(mc),
vmDiskClient: virtualmachinedisk.NewClient(mc),
@@ -129,6 +133,7 @@ func (c *Config) NewClient() (*Client, error) {
hostedServiceClient: hostedservice.NewClient(mc),
secGroupClient: networksecuritygroup.NewClient(mc),
osImageClient: osimage.NewClient(mc),
+ sqlClient: sql.NewClient(mc),
storageServiceClient: storageservice.NewClient(mc),
vmClient: virtualmachine.NewClient(mc),
vmDiskClient: virtualmachinedisk.NewClient(mc),
diff --git a/builtin/providers/azure/provider.go b/builtin/providers/azure/provider.go
index 98a7b2a097..a6be93f5ca 100644
--- a/builtin/providers/azure/provider.go
+++ b/builtin/providers/azure/provider.go
@@ -34,6 +34,8 @@ func Provider() terraform.ResourceProvider {
ResourcesMap: map[string]*schema.Resource{
"azure_instance": resourceAzureInstance(),
"azure_data_disk": resourceAzureDataDisk(),
+ "azure_sql_database_server": resourceAzureSqlDatabaseServer(),
+ "azure_sql_database_service": resourceAzureSqlDatabaseService(),
"azure_hosted_service": resourceAzureHostedService(),
"azure_storage_service": resourceAzureStorageService(),
"azure_storage_container": resourceAzureStorageContainer(),
diff --git a/builtin/providers/azure/resource_azure_sql_database_server.go b/builtin/providers/azure/resource_azure_sql_database_server.go
new file mode 100644
index 0000000000..b3412b7b3e
--- /dev/null
+++ b/builtin/providers/azure/resource_azure_sql_database_server.go
@@ -0,0 +1,118 @@
+package azure
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/Azure/azure-sdk-for-go/management/sql"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+// resourceAzureSqlDatabaseServer returns the *schema.Resource associated
+// with an SQL database server on Azure.
+func resourceAzureSqlDatabaseServer() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAzureSqlDatabaseServerCreate,
+ Read: resourceAzureSqlDatabaseServerRead,
+ Delete: resourceAzureSqlDatabaseServerDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ ForceNew: true,
+ },
+ "location": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "version": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "12.0",
+ ForceNew: true,
+ },
+ "url": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ }
+}
+
+// resourceAzureSqlDatabaseServerCreate does all the necessary API calls to
+// create an SQL database server on Azure.
+func resourceAzureSqlDatabaseServerCreate(d *schema.ResourceData, meta interface{}) error {
+ sqlClient := meta.(*Client).sqlClient
+
+ log.Println("[INFO] Began constructing SQL Server creation request.")
+ params := sql.DatabaseServerCreateParams{
+ Location: d.Get("location").(string),
+ AdministratorLogin: d.Get("username").(string),
+ AdministratorLoginPassword: d.Get("password").(string),
+ Version: d.Get("version").(string),
+ }
+
+ log.Println("[INFO] Issuing SQL Server creation request to Azure.")
+ name, err := sqlClient.CreateServer(params)
+ if err != nil {
+ return fmt.Errorf("Error creating SQL Server on Azure: %s", err)
+ }
+
+ d.Set("name", name)
+
+ d.SetId(name)
+ return resourceAzureSqlDatabaseServerRead(d, meta)
+}
+
+// resourceAzureSqlDatabaseServerRead does all the necessary API calls to
+// read the state of the SQL database server off Azure.
+func resourceAzureSqlDatabaseServerRead(d *schema.ResourceData, meta interface{}) error {
+ sqlClient := meta.(*Client).sqlClient
+
+ log.Println("[INFO] Sending SQL Servers list query to Azure.")
+ srvList, err := sqlClient.ListServers()
+ if err != nil {
+ return fmt.Errorf("Error issuing SQL Servers list query to Azure: %s", err)
+ }
+
+ // search for our particular server:
+ name := d.Get("name")
+ for _, srv := range srvList.DatabaseServers {
+ if srv.Name == name {
+ d.Set("url", srv.FullyQualifiedDomainName)
+ d.Set("state", srv.State)
+ return nil
+ }
+ }
+
+ // if we reach this point, our server doesn't exist, so we must untrack it:
+ d.SetId("")
+ return nil
+}
+
+// resourceAzureSqlDatabaseServerDelete does all the necessary API calls to
+// delete the SQL database server off Azure.
+func resourceAzureSqlDatabaseServerDelete(d *schema.ResourceData, meta interface{}) error {
+ sqlClient := meta.(*Client).sqlClient
+
+ log.Println("[INFO] Sending SQL Server deletion request to Azure.")
+ name := d.Get("name").(string)
+ err := sqlClient.DeleteServer(name)
+ if err != nil {
+ return fmt.Errorf("Error while issuing SQL Server deletion request to Azure: %s", err)
+ }
+
+ return nil
+}
diff --git a/builtin/providers/azure/resource_azure_sql_database_server_test.go b/builtin/providers/azure/resource_azure_sql_database_server_test.go
new file mode 100644
index 0000000000..8f65b39929
--- /dev/null
+++ b/builtin/providers/azure/resource_azure_sql_database_server_test.go
@@ -0,0 +1,119 @@
+package azure
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// testAccAzureSqlServerName is a helper variable in which to store
+// the randomly-generated name of the SQL Server after it is created.
+// The anonymous function is used because Go does not allow taking the address of a string literal (&"") directly.
+var testAccAzureSqlServerName *string = func(s string) *string { return &s }("")
+
+func TestAccAzureSqlDatabaseServer(t *testing.T) {
+ name := "azure_sql_database_server.foo"
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAzureSqlDatabaseServerDeleted,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureSqlDatabaseServerConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccAzureSqlDatabaseServerGetName,
+ testAccCheckAzureSqlDatabaseServerExists(name),
+ resource.TestCheckResourceAttrPtr(name, "name", testAccAzureSqlServerName),
+ resource.TestCheckResourceAttr(name, "username", "SuperUser"),
+ resource.TestCheckResourceAttr(name, "password", "SuperSEKR3T"),
+ resource.TestCheckResourceAttr(name, "version", "2.0"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckAzureSqlDatabaseServerExists(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ resource, ok := s.RootModule().Resources[name]
+ if !ok {
+ return fmt.Errorf("SQL Server %s doesn't exist.", name)
+ }
+
+ if resource.Primary.ID == "" {
+ return fmt.Errorf("SQL Server %s resource ID not set.", name)
+ }
+
+ sqlClient := testAccProvider.Meta().(*Client).sqlClient
+ servers, err := sqlClient.ListServers()
+ if err != nil {
+ return fmt.Errorf("Error issuing Azure SQL Server list request: %s", err)
+ }
+
+ for _, srv := range servers.DatabaseServers {
+ if srv.Name == resource.Primary.ID {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("SQL Server %s doesn't exist.", name)
+ }
+}
+
+func testAccCheckAzureSqlDatabaseServerDeleted(s *terraform.State) error {
+ for _, resource := range s.RootModule().Resources {
+ if resource.Type != "azure_sql_database_server" {
+ continue
+ }
+
+ if resource.Primary.ID == "" {
+ return fmt.Errorf("SQL Server resource ID not set.")
+ }
+
+ sqlClient := testAccProvider.Meta().(*Client).sqlClient
+ servers, err := sqlClient.ListServers()
+ if err != nil {
+ return fmt.Errorf("Error issuing Azure SQL Server list request: %s", err)
+ }
+
+ for _, srv := range servers.DatabaseServers {
+ if srv.Name == resource.Primary.ID {
+ fmt.Errorf("SQL Server %s still exists.", resource.Primary.ID)
+ }
+ }
+ }
+ return nil
+}
+
+// testAccAzureSqlDatabaseServerGetName is a helper function which reads the current
+// state from Terraform and sets the testAccAzureSqlServerName variable
+// to the ID (which is actually the name) of the newly created server.
+// It is modeled as a resource.TestCheckFunc so as to be easily-embeddable in
+// test cases and run live.
+func testAccAzureSqlDatabaseServerGetName(s *terraform.State) error {
+ for _, resource := range s.RootModule().Resources {
+ if resource.Type != "azure_sql_database_server" {
+ continue
+ }
+
+ if resource.Primary.ID == "" {
+ return fmt.Errorf("Azure SQL Server resource ID not set.")
+ }
+
+ *testAccAzureSqlServerName = resource.Primary.ID
+ return nil
+ }
+
+ return fmt.Errorf("No Azure SQL Servers found.")
+}
+
+const testAccAzureSqlDatabaseServerConfig = `
+resource "azure_sql_database_server" "foo" {
+ location = "West US"
+ username = "SuperUser"
+ password = "SuperSEKR3T"
+ version = "2.0"
+}
+`
diff --git a/builtin/providers/azure/resource_azure_sql_database_service.go b/builtin/providers/azure/resource_azure_sql_database_service.go
new file mode 100644
index 0000000000..29824d7439
--- /dev/null
+++ b/builtin/providers/azure/resource_azure_sql_database_service.go
@@ -0,0 +1,234 @@
+package azure
+
+import (
+ "fmt"
+ "log"
+ "strconv"
+ "strings"
+
+ "github.com/Azure/azure-sdk-for-go/management/sql"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+// resourceAzureSqlDatabaseService returns the *schema.Resource
+// associated to an SQL Database Service on Azure.
+func resourceAzureSqlDatabaseService() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAzureSqlDatabaseServiceCreate,
+ Read: resourceAzureSqlDatabaseServiceRead,
+ Update: resourceAzureSqlDatabaseServiceUpdate,
+ Exists: resourceAzureSqlDatabaseServiceExists,
+ Delete: resourceAzureSqlDatabaseServiceDelete,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "database_server_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "collation": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ForceNew: true,
+ },
+ "edition": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+ "max_size_bytes": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+ "service_level_id": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+ },
+ }
+}
+
+// resourceAzureSqlDatabaseServiceCreate does all the necessary API calls to
+// create an SQL Database Service on Azure.
+func resourceAzureSqlDatabaseServiceCreate(d *schema.ResourceData, meta interface{}) error {
+ sqlClient := meta.(*Client).sqlClient
+
+ log.Println("[INFO] Creating Azure SQL Database service creation request.")
+ name := d.Get("name").(string)
+ serverName := d.Get("database_server_name").(string)
+ params := sql.DatabaseCreateParams{
+ Name: name,
+ Edition: d.Get("edition").(string),
+ CollationName: d.Get("collation").(string),
+ ServiceObjectiveID: d.Get("service_level_id").(string),
+ }
+
+ if maxSize, ok := d.GetOk("max_size_bytes"); ok {
+ val, err := strconv.ParseInt(maxSize.(string), 10, 64)
+ if err != nil {
+ return fmt.Errorf("Provided max_size_bytes is not an integer: %s", err)
+ }
+ params.MaxSizeBytes = val
+ }
+
+ log.Println("[INFO] Sending SQL Database Service creation request to Azure.")
+ err := sqlClient.CreateDatabase(serverName, params)
+ if err != nil {
+ return fmt.Errorf("Error issuing Azure SQL Database Service creation: %s", err)
+ }
+
+ log.Println("[INFO] Beginning wait for Azure SQL Database Service creation.")
+ err = sqlClient.WaitForDatabaseCreation(serverName, name, nil)
+ if err != nil {
+ return fmt.Errorf("Error whilst waiting for Azure SQL Database Service creation: %s", err)
+ }
+
+ d.SetId(name)
+
+ return resourceAzureSqlDatabaseServiceRead(d, meta)
+}
+
+// resourceAzureSqlDatabaseServiceRead does all the necessary API calls to
+// read the state of the SQL Database Service off Azure.
+func resourceAzureSqlDatabaseServiceRead(d *schema.ResourceData, meta interface{}) error {
+ sqlClient := meta.(*Client).sqlClient
+
+ log.Println("[INFO] Issuing Azure SQL Database Services list operation.")
+ serverName := d.Get("database_server_name").(string)
+ dbs, err := sqlClient.ListDatabases(serverName)
+ if err != nil {
+ return fmt.Errorf("Error whilst listing Database Services off Azure: %s", err)
+ }
+
+ // search for our database:
+ var found bool
+ name := d.Get("name").(string)
+ for _, db := range dbs.ServiceResources {
+ if db.Name == name {
+ found = true
+ d.Set("edition", db.Edition)
+ d.Set("collation", db.CollationName)
+ d.Set("max_size_bytes", strconv.FormatInt(db.MaxSizeBytes, 10))
+ d.Set("service_level_id", db.ServiceObjectiveID)
+ break
+ }
+ }
+
+ // if not found; we must untrack the resource:
+ if !found {
+ d.SetId("")
+ }
+
+ return nil
+}
+
+// resourceAzureSqlDatabaseServiceUpdate does all the necessary API calls to
+// update the state of the SQL Database Service off Azure.
+func resourceAzureSqlDatabaseServiceUpdate(d *schema.ResourceData, meta interface{}) error {
+ azureClient := meta.(*Client)
+ mgmtClient := azureClient.mgmtClient
+ sqlClient := azureClient.sqlClient
+ serverName := d.Get("database_server_name").(string)
+
+ // changes to the name must occur separately from changes to the attributes:
+ if d.HasChange("name") {
+ oldv, newv := d.GetChange("name")
+
+ // issue the update request:
+ log.Println("[INFO] Issuing Azure Database Service name change.")
+ reqID, err := sqlClient.UpdateDatabase(serverName, oldv.(string),
+ sql.ServiceResourceUpdateParams{
+ Name: newv.(string),
+ })
+ if err != nil {
+ return fmt.Errorf("Error issuing Azure SQL Database Service name change: %s", err)
+ }
+
+ // wait for the update to occur:
+ log.Println("[INFO] Waiting for Azure SQL Database Service name change.")
+ err = mgmtClient.WaitForOperation(reqID, nil)
+ if err != nil {
+ return fmt.Errorf("Error waiting for Azure SQL Database Service name update: %s", err)
+ }
+
+ // set the new name as the ID:
+ d.SetId(newv.(string))
+ }
+
+ name := d.Get("name").(string)
+ cedition := d.HasChange("edition")
+ cmaxsize := d.HasChange("max_size_bytes")
+ clevel := d.HasChange("service_level_id")
+ if cedition || cmaxsize || clevel {
+ updateParams := sql.ServiceResourceUpdateParams{
+ // we still have to stick the name in here for good measure:
+ Name: name,
+ }
+
+ // build the update request:
+ if cedition {
+ updateParams.Edition = d.Get("edition").(string)
+ }
+ if maxSize, ok := d.GetOk("max_size_bytes"); cmaxsize && ok && maxSize.(string) != "" {
+ val, err := strconv.ParseInt(maxSize.(string), 10, 64)
+ if err != nil {
+ return fmt.Errorf("Provided max_size_bytes is not an integer: %s", err)
+ }
+ updateParams.MaxSizeBytes = val
+ }
+ if clevel {
+ updateParams.ServiceObjectiveID = d.Get("service_level_id").(string)
+ }
+
+ // issue the update:
+ log.Println("[INFO] Issuing Azure Database Service parameter update.")
+ reqID, err := sqlClient.UpdateDatabase(serverName, name, updateParams)
+ if err != nil {
+ return fmt.Errorf("Failed issuing Azure SQL Service paramater update: %s", err)
+ }
+
+ log.Println("[INFO] Waiting for Azure SQL Database Service parameter update.")
+ err = mgmtClient.WaitForOperation(reqID, nil)
+ if err != nil {
+ return fmt.Errorf("Error waiting for Azure SQL Database Service parameter update: %s", err)
+ }
+ }
+
+ return nil
+}
+
+// resourceAzureSqlDatabaseServiceExists does all the necessary API calls to
+// check for the existence of the SQL Database Service off Azure.
+func resourceAzureSqlDatabaseServiceExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ sqlClient := meta.(*Client).sqlClient
+
+ log.Println("[INFO] Issuing Azure SQL Database Service get request.")
+ name := d.Get("name").(string)
+ serverName := d.Get("database_server_name").(string)
+ _, err := sqlClient.GetDatabase(serverName, name)
+ if err != nil {
+ if strings.Contains(err.Error(), "does not exist") {
+ d.SetId("")
+ return false, nil
+ } else {
+ return false, fmt.Errorf("Error whilst getting Azure SQL Database Service info: %s", err)
+ }
+ }
+
+ return true, nil
+}
+
+// resourceAzureSqlDatabaseServiceDelete does all the necessary API calls to
+// delete the SQL Database Service off Azure.
+func resourceAzureSqlDatabaseServiceDelete(d *schema.ResourceData, meta interface{}) error {
+ sqlClient := meta.(*Client).sqlClient
+
+ log.Println("[INFO] Issuing Azure SQL Database deletion request.")
+ name := d.Get("name").(string)
+ serverName := d.Get("database_server_name").(string)
+ return sqlClient.DeleteDatabase(serverName, name)
+}
diff --git a/builtin/providers/azure/resource_azure_sql_database_service_test.go b/builtin/providers/azure/resource_azure_sql_database_service_test.go
new file mode 100644
index 0000000000..bc93fdb621
--- /dev/null
+++ b/builtin/providers/azure/resource_azure_sql_database_service_test.go
@@ -0,0 +1,189 @@
+package azure
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAzureSqlDatabaseServiceBasic(t *testing.T) {
+ name := "azure_sql_database_service.foo"
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAzureSqlDatabaseServiceDeleted,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureSqlDatabaseServiceConfigBasic,
+ Check: resource.ComposeTestCheckFunc(
+ testAccAzureSqlDatabaseServerGetName,
+ testAccCheckAzureSqlDatabaseServiceExists(name),
+ resource.TestCheckResourceAttr(name, "name", "terraform-testing-db"),
+ resource.TestCheckResourceAttrPtr(name, "database_server_name",
+ testAccAzureSqlServerName),
+ resource.TestCheckResourceAttr(name, "collation",
+ "SQL_Latin1_General_CP1_CI_AS"),
+ resource.TestCheckResourceAttr(name, "edition", "Standard"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAzureSqlDatabaseServiceAdvanced(t *testing.T) {
+ name := "azure_sql_database_service.foo"
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAzureSqlDatabaseServiceDeleted,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureSqlDatabaseServiceConfigAdvanced,
+ Check: resource.ComposeTestCheckFunc(
+ testAccAzureSqlDatabaseServerGetName,
+ testAccCheckAzureSqlDatabaseServiceExists(name),
+ resource.TestCheckResourceAttr(name, "name", "terraform-testing-db"),
+ resource.TestCheckResourceAttrPtr(name, "database_server_name",
+ testAccAzureSqlServerName),
+ resource.TestCheckResourceAttr(name, "edition", "Premium"),
+ resource.TestCheckResourceAttr(name, "collation",
+ "Arabic_BIN"),
+ resource.TestCheckResourceAttr(name, "max_size_bytes", "10737418240"),
+ resource.TestCheckResourceAttr(name, "service_level_id",
+ "7203483a-c4fb-4304-9e9f-17c71c904f5d"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAzureSqlDatabaseServiceUpdate(t *testing.T) {
+ name := "azure_sql_database_service.foo"
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAzureSqlDatabaseServiceDeleted,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureSqlDatabaseServiceConfigAdvanced,
+ Check: resource.ComposeTestCheckFunc(
+ testAccAzureSqlDatabaseServerGetName,
+ testAccCheckAzureSqlDatabaseServiceExists(name),
+ resource.TestCheckResourceAttr(name, "name", "terraform-testing-db"),
+ resource.TestCheckResourceAttrPtr(name, "database_server_name",
+ testAccAzureSqlServerName),
+ resource.TestCheckResourceAttr(name, "edition", "Premium"),
+ resource.TestCheckResourceAttr(name, "collation",
+ "Arabic_BIN"),
+ resource.TestCheckResourceAttr(name, "max_size_bytes", "10737418240"),
+ resource.TestCheckResourceAttr(name, "service_level_id",
+ "7203483a-c4fb-4304-9e9f-17c71c904f5d"),
+ ),
+ },
+ resource.TestStep{
+ Config: testAccAzureSqlDatabaseServiceConfigUpdate,
+ Check: resource.ComposeTestCheckFunc(
+ testAccAzureSqlDatabaseServerGetName,
+ testAccCheckAzureSqlDatabaseServiceExists(name),
+ resource.TestCheckResourceAttr(name, "name",
+ "terraform-testing-db-renamed"),
+ resource.TestCheckResourceAttrPtr(name, "database_server_name",
+ testAccAzureSqlServerName),
+ resource.TestCheckResourceAttr(name, "edition", "Standard"),
+ resource.TestCheckResourceAttr(name, "collation",
+ "SQL_Latin1_General_CP1_CI_AS"),
+ resource.TestCheckResourceAttr(name, "max_size_bytes", "5368709120"),
+ resource.TestCheckResourceAttr(name, "service_level_id",
+ "f1173c43-91bd-4aaa-973c-54e79e15235b"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckAzureSqlDatabaseServiceExists(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ resource, ok := s.RootModule().Resources[name]
+ if !ok {
+ return fmt.Errorf("SQL Service %s doesn't exist.", name)
+ }
+
+ if resource.Primary.ID == "" {
+ return fmt.Errorf("SQL Service %s resource ID not set.", name)
+ }
+
+ sqlClient := testAccProvider.Meta().(*Client).sqlClient
+ dbs, err := sqlClient.ListDatabases(*testAccAzureSqlServerName)
+ if err != nil {
+ return fmt.Errorf("Error issuing Azure SQL Service list request: %s", err)
+ }
+
+ for _, srv := range dbs.ServiceResources {
+ if srv.Name == resource.Primary.ID {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("SQL Service %s doesn't exist.", name)
+ }
+}
+
+func testAccCheckAzureSqlDatabaseServiceDeleted(s *terraform.State) error {
+ for _, resource := range s.RootModule().Resources {
+ if resource.Type != "azure_sql_database_service" {
+ continue
+ }
+
+ if resource.Primary.ID == "" {
+ return fmt.Errorf("SQL Service resource ID not set.")
+ }
+
+ sqlClient := testAccProvider.Meta().(*Client).sqlClient
+ dbs, err := sqlClient.ListDatabases(*testAccAzureSqlServerName)
+ if err != nil {
+ return fmt.Errorf("Error issuing Azure SQL Service list request: %s", err)
+ }
+
+ for _, srv := range dbs.ServiceResources {
+ if srv.Name == resource.Primary.ID {
+ fmt.Errorf("SQL Service %s still exists.", resource.Primary.ID)
+ }
+ }
+ }
+ return nil
+}
+
+const testAccAzureSqlDatabaseServiceConfigBasic = testAccAzureSqlDatabaseServerConfig + `
+resource "azure_sql_database_service" "foo" {
+ name = "terraform-testing-db"
+ database_server_name = "${azure_sql_database_server.foo.name}"
+ edition = "Standard"
+}
+`
+
+const testAccAzureSqlDatabaseServiceConfigAdvanced = testAccAzureSqlDatabaseServerConfig + `
+resource "azure_sql_database_service" "foo" {
+ name = "terraform-testing-db"
+ database_server_name = "${azure_sql_database_server.foo.name}"
+ edition = "Premium"
+ collation = "Arabic_BIN"
+ max_size_bytes = "10737418240"
+ service_level_id = "7203483a-c4fb-4304-9e9f-17c71c904f5d"
+}
+`
+
+const testAccAzureSqlDatabaseServiceConfigUpdate = testAccAzureSqlDatabaseServerConfig + `
+resource "azure_sql_database_service" "foo" {
+ name = "terraform-testing-db-renamed"
+ database_server_name = "${azure_sql_database_server.foo.name}"
+ edition = "Standard"
+ collation = "SQL_Latin1_General_CP1_CI_AS"
+ max_size_bytes = "5368709120"
+ service_level_id = "f1173c43-91bd-4aaa-973c-54e79e15235b"
+}
+`
diff --git a/builtin/providers/template/resource.go b/builtin/providers/template/resource.go
index 8eb3ce9eb3..9019dcfc93 100644
--- a/builtin/providers/template/resource.go
+++ b/builtin/providers/template/resource.go
@@ -5,6 +5,7 @@ import (
"encoding/hex"
"fmt"
"io/ioutil"
+ "log"
"os"
"path/filepath"
@@ -75,7 +76,13 @@ func Delete(d *schema.ResourceData, meta interface{}) error {
func Exists(d *schema.ResourceData, meta interface{}) (bool, error) {
rendered, err := render(d)
if err != nil {
- return false, err
+ if _, ok := err.(templateRenderError); ok {
+ log.Printf("[DEBUG] Got error while rendering in Exists: %s", err)
+ log.Printf("[DEBUG] Returning false so the template re-renders using latest variables from config.")
+ return false, nil
+ } else {
+ return false, err
+ }
}
return hash(rendered) == d.Id(), nil
}
@@ -87,6 +94,8 @@ func Read(d *schema.ResourceData, meta interface{}) error {
return nil
}
+type templateRenderError error
+
var readfile func(string) ([]byte, error) = ioutil.ReadFile // testing hook
func render(d *schema.ResourceData) (string, error) {
@@ -105,7 +114,9 @@ func render(d *schema.ResourceData) (string, error) {
rendered, err := execute(string(buf), vars)
if err != nil {
- return "", fmt.Errorf("failed to render %v: %v", filename, err)
+ return "", templateRenderError(
+ fmt.Errorf("failed to render %v: %v", filename, err),
+ )
}
return rendered, nil
diff --git a/builtin/providers/template/resource_test.go b/builtin/providers/template/resource_test.go
index 13b6cf0522..7f461325a2 100644
--- a/builtin/providers/template/resource_test.go
+++ b/builtin/providers/template/resource_test.go
@@ -34,15 +34,7 @@ func TestTemplateRendering(t *testing.T) {
Providers: testProviders,
Steps: []r.TestStep{
r.TestStep{
- Config: `
-resource "template_file" "t0" {
- filename = "mock"
- vars = ` + tt.vars + `
-}
-output "rendered" {
- value = "${template_file.t0.rendered}"
-}
-`,
+ Config: testTemplateConfig(tt.vars),
Check: func(s *terraform.State) error {
got := s.RootModule().Outputs["rendered"]
if tt.want != got {
@@ -55,3 +47,55 @@ output "rendered" {
})
}
}
+
+// https://github.com/hashicorp/terraform/issues/2344
+func TestTemplateVariableChange(t *testing.T) {
+ steps := []struct {
+ vars string
+ template string
+ want string
+ }{
+ {`{a="foo"}`, `${a}`, `foo`},
+ {`{b="bar"}`, `${b}`, `bar`},
+ }
+
+ var testSteps []r.TestStep
+ for i, step := range steps {
+ testSteps = append(testSteps, r.TestStep{
+ PreConfig: func(template string) func() {
+ return func() {
+ readfile = func(string) ([]byte, error) {
+ return []byte(template), nil
+ }
+ }
+ }(step.template),
+ Config: testTemplateConfig(step.vars),
+ Check: func(i int, want string) r.TestCheckFunc {
+ return func(s *terraform.State) error {
+ got := s.RootModule().Outputs["rendered"]
+ if want != got {
+ return fmt.Errorf("[%d] got:\n%q\nwant:\n%q\n", i, got, want)
+ }
+ return nil
+ }
+ }(i, step.want),
+ })
+ }
+
+ r.Test(t, r.TestCase{
+ Providers: testProviders,
+ Steps: testSteps,
+ })
+}
+
+func testTemplateConfig(vars string) string {
+ return `
+resource "template_file" "t0" {
+ filename = "mock"
+ vars = ` + vars + `
+}
+output "rendered" {
+ value = "${template_file.t0.rendered}"
+}
+ `
+}
diff --git a/deps/v0-5-3.json b/deps/v0-5-3.json
index 94020f6f57..09fe5c60ff 100644
--- a/deps/v0-5-3.json
+++ b/deps/v0-5-3.json
@@ -120,6 +120,182 @@
{
"ImportPath": "golang.org/x/crypto/ssh/terminal",
"Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b"
+ },
+ {
+ "ImportPath": "github.com/Sirupsen/logrus",
+ "Rev": "52919f182f9c314f8a38c5afe96506f73d02b4b2"
+ },
+ {
+ "ImportPath": "github.com/armon/circbuf",
+ "Rev": "f092b4f207b6e5cce0569056fba9e1a2735cb6cf"
+ },
+ {
+ "ImportPath": "github.com/cyberdelia/heroku-go",
+ "Rev": "594d483b9b6a8ddc7cd2f1e3e7d1de92fa2de665"
+ },
+ {
+ "ImportPath": "github.com/docker/docker",
+ "Rev": "42cfc95549728014811cc9aa2c5b07bdf5553a54"
+ },
+ {
+ "ImportPath": "github.com/dylanmei/iso8601",
+ "Rev": "2075bf119b58e5576c6ed9f867b8f3d17f2e54d4"
+ },
+ {
+ "ImportPath": "github.com/dylanmei/winrmtest",
+ "Rev": "3e9661c52c45dab9a8528966a23d421922fca9b9"
+ },
+ {
+ "ImportPath": "github.com/fsouza/go-dockerclient",
+ "Rev": "f90594a4da6a7cbdaedd29ee5495ddd6b39fe5d3"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/atlas-go",
+ "Rev": "6a87d5f443991e9916104392cd5fc77678843e1d"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/aws-sdk-go",
+ "Rev": "e6ea0192eee4640f32ec73c0cbb71f63e4f2b65a"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/consul",
+ "Rev": "9417fd37686241d65918208874a7faa4d0cd92d2"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/errwrap",
+ "Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/go-checkpoint",
+ "Rev": "88326f6851319068e7b34981032128c0b1a6524d"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/go-multierror",
+ "Rev": "fcdddc395df1ddf4247c69bd436e84cfa0733f7e"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/go-version",
+ "Rev": "999359b6b7a041ce16e695d51e92145b83f01087"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/hcl",
+ "Rev": "513e04c400ee2e81e97f5e011c08fb42c6f69b84"
+ },
+ {
+ "ImportPath": "github.com/hashicorp/yamux",
+ "Rev": "b2e55852ddaf823a85c67f798080eb7d08acd71d"
+ },
+ {
+ "ImportPath": "github.com/imdario/mergo",
+ "Rev": "2fcac9923693d66dc0e03988a31b21da05cdea84"
+ },
+ {
+ "ImportPath": "github.com/masterzen/simplexml",
+ "Rev": "95ba30457eb1121fa27753627c774c7cd4e90083"
+ },
+ {
+ "ImportPath": "github.com/masterzen/winrm",
+ "Rev": "132339029dfa67fd39ff8edeed2af78f2cca4fbb"
+ },
+ {
+ "ImportPath": "github.com/masterzen/xmlpath",
+ "Rev": "13f4951698adc0fa9c1dda3e275d489a24201161"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/cli",
+ "Rev": "6cc8bc522243675a2882b81662b0b0d2e04b99c9"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/colorstring",
+ "Rev": "61164e49940b423ba1f12ddbdf01632ac793e5e9"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/copystructure",
+ "Rev": "6fc66267e9da7d155a9d3bd489e00dad02666dc6"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/go-homedir",
+ "Rev": "1f6da4a72e57d4e7edd4a7295a585e0a3999a2d4"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/gox",
+ "Rev": "e8e6fd4fe12510cc46893dff18c5188a6a6dc549"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/iochan",
+ "Rev": "b584a329b193e206025682ae6c10cdbe03b0cd77"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/go-linereader",
+ "Rev": "07bab5fdd9580500aea6ada0e09df4aa28e68abd"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/mapstructure",
+ "Rev": "442e588f213303bec7936deba67901f8fc8f18b1"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/osext",
+ "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/packer",
+ "Rev": "350a5f8cad6a0e4c2b24c3049a84c4f294416e16"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/panicwrap",
+ "Rev": "45cbfd3bae250c7676c077fb275be1a2968e066a"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/prefixedio",
+ "Rev": "89d9b535996bf0a185f85b59578f2e245f9e1724"
+ },
+ {
+ "ImportPath": "github.com/mitchellh/reflectwalk",
+ "Rev": "242be0c275dedfba00a616563e6db75ab8f279ec"
+ },
+ {
+ "ImportPath": "github.com/nu7hatch/gouuid",
+ "Rev": "179d4d0c4d8d407a32af483c2354df1d2c91e6c3"
+ },
+ {
+ "ImportPath": "github.com/packer-community/winrmcp",
+ "Rev": "650a91d1da6dc3fefa8f052289ffce648924a304"
+ },
+ {
+ "ImportPath": "github.com/pearkes/cloudflare",
+ "Rev": "19e280b056f3742e535ea12ae92a37ea7767ea82"
+ },
+ {
+ "ImportPath": "github.com/pearkes/digitalocean",
+ "Rev": "e966f00c2d9de5743e87697ab77c7278f5998914"
+ },
+ {
+ "ImportPath": "github.com/pearkes/dnsimple",
+ "Rev": "1e0c2b0eb33ca7b5632a130d6d34376a1ea46c84"
+ },
+ {
+ "ImportPath": "github.com/pearkes/mailgun",
+ "Rev": "5b02e7e9ffee9869f81393e80db138f6ff726260"
+ },
+ {
+ "ImportPath": "github.com/rackspace/gophercloud",
+ "Rev": "9ad4137a6b3e786b9c1e161b8d354b44482ab6d7"
+ },
+ {
+ "ImportPath": "github.com/satori/go.uuid",
+ "Rev": "7c7f2020c4c9491594b85767967f4619c2fa75f9"
+ },
+ {
+ "ImportPath": "github.com/soniah/dnsmadeeasy",
+ "Rev": "5578a8c15e33958c61cf7db720b6181af65f4a9e"
+ },
+ {
+ "ImportPath": "github.com/vaughan0/go-ini",
+ "Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
+ },
+ {
+ "ImportPath": "github.com/xanzy/go-cloudstack",
+ "Rev": "f73f6ff1b843dbdac0a01da7b7f39883adfe2bdb"
}
]
}
diff --git a/helper/resource/testing.go b/helper/resource/testing.go
index 6832146b22..3835e8639c 100644
--- a/helper/resource/testing.go
+++ b/helper/resource/testing.go
@@ -60,6 +60,10 @@ type TestCase struct {
// potentially complex update logic. In general, simply create/destroy
// tests will only need one step.
type TestStep struct {
+ // PreConfig is called before the Config is applied to perform any per-step
+ // setup that needs to happen.
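+ //
+ // For example (a sketch; testConfig stands in for any configuration string):
+ //
+ //     TestStep{
+ //         PreConfig: func() { /* e.g. point a testing hook at a new fixture */ },
+ //         Config:    testConfig,
+ //     }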
+ PreConfig func()
+
// Config a string of the configuration to give to Terraform.
Config string
@@ -160,6 +164,10 @@ func testStep(
opts terraform.ContextOpts,
state *terraform.State,
step TestStep) (*terraform.State, error) {
+ if step.PreConfig != nil {
+ step.PreConfig()
+ }
+
cfgPath, err := ioutil.TempDir("", "tf-test")
if err != nil {
return state, fmt.Errorf(
diff --git a/website/source/docs/commands/apply.html.markdown b/website/source/docs/commands/apply.html.markdown
index 9bb5acdbff..dec4ea19d6 100644
--- a/website/source/docs/commands/apply.html.markdown
+++ b/website/source/docs/commands/apply.html.markdown
@@ -14,7 +14,7 @@ set of actions generated by a `terraform plan` execution plan.
## Usage
-Usage: `terraform apply [options] [dir]`
+Usage: `terraform apply [options] [dir-or-plan]`
By default, `apply` scans the current directory for the configuration
and applies the changes appropriately. However, a path to another configuration
diff --git a/website/source/docs/commands/destroy.html.markdown b/website/source/docs/commands/destroy.html.markdown
index 0a0f3a738b..98f1b79e6f 100644
--- a/website/source/docs/commands/destroy.html.markdown
+++ b/website/source/docs/commands/destroy.html.markdown
@@ -18,9 +18,11 @@ Usage: `terraform destroy [options] [dir]`
Infrastructure managed by Terraform will be destroyed. This will ask for
confirmation before destroying.
-This command accepts all the flags that the
-[apply command](/docs/commands/apply.html) accepts. If `-force` is
-set, then the destroy confirmation will not be shown.
+This command accepts all the arguments and flags that the [apply
+command](/docs/commands/apply.html) accepts, with the exception of a plan file
+argument.
+
+If `-force` is set, then the destroy confirmation will not be shown.
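+
+For example, to skip the confirmation prompt (a sketch):
+
+```
+$ terraform destroy -force
+```
+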
The `-target` flag, rather than only affecting "dependencies", will also
destroy any resources that _depend on_ the target(s) specified.
diff --git a/website/source/docs/providers/aws/r/autoscaling_policy.html.markdown b/website/source/docs/providers/aws/r/autoscaling_policy.html.markdown
new file mode 100644
index 0000000000..2543c0220d
--- /dev/null
+++ b/website/source/docs/providers/aws/r/autoscaling_policy.html.markdown
@@ -0,0 +1,53 @@
+---
+layout: "aws"
+page_title: "AWS: aws_autoscaling_policy"
+sidebar_current: "docs-aws-resource-autoscaling-policy"
+description: |-
+ Provides an AutoScaling Scaling Policy resource.
+---
+
+# aws\_autoscaling\_policy
+
+Provides an AutoScaling Scaling Policy resource.
+
+~> **NOTE:** You may want to omit the `desired_capacity` attribute from the attached `aws_autoscaling_group`
+when using autoscaling policies. It's good practice to pick either
+[manual](http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-manual-scaling.html)
+or [dynamic](http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html)
+(policy-based) scaling.
+
+## Example Usage
+```
+resource "aws_autoscaling_policy" "bat" {
+ name = "foobar3-terraform-test"
+ scaling_adjustment = 4
+ adjustment_type = "ChangeInCapacity"
+ cooldown = 300
+ autoscaling_group_name = "${aws_autoscaling_group.bar.name}"
+}
+
+resource "aws_autoscaling_group" "bar" {
+ availability_zones = ["us-east-1a"]
+ name = "foobar3-terraform-test"
+ max_size = 5
+ min_size = 2
+ health_check_grace_period = 300
+ health_check_type = "ELB"
+ force_delete = true
+ launch_configuration = "${aws_launch_configuration.foo.name}"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the policy.
+* `autoscaling_group_name` - (Required) The name or ARN of the group.
+* `adjustment_type` - (Required) Specifies whether the `scaling_adjustment` is an absolute number or a percentage of the current capacity. Valid values are `ChangeInCapacity`, `ExactCapacity`, and `PercentChangeInCapacity`.
+* `scaling_adjustment` - (Required) The number of instances by which to scale. `adjustment_type` determines the interpretation of this number (e.g., as an absolute number or as a percentage of the existing Auto Scaling group size). A positive increment adds to the current capacity and a negative value removes from the current capacity.
+* `cooldown` - (Optional) The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start.
+* `min_adjustment_step` - (Optional) Used when `adjustment_type` is `PercentChangeInCapacity`: the scaling policy changes the `desired_capacity` of the Auto Scaling group by at least this many instances (see the example below).
+
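+A sketch of a percentage-based policy using `min_adjustment_step` (the values
+here are illustrative only):
+
+```
+resource "aws_autoscaling_policy" "bap" {
+    name = "scale-out-by-percent"
+    adjustment_type = "PercentChangeInCapacity"
+    scaling_adjustment = 25
+    min_adjustment_step = 2
+    autoscaling_group_name = "${aws_autoscaling_group.bar.name}"
+}
+```
+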
+## Attributes Reference
+
+* `arn` - The ARN assigned by AWS to the scaling policy.
diff --git a/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown
new file mode 100644
index 0000000000..e6ede269fc
--- /dev/null
+++ b/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown
@@ -0,0 +1,75 @@
+---
+layout: "aws"
+page_title: "AWS: cloudwatch_metric_alarm"
+sidebar_current: "docs-aws-resource-cloudwatch-metric-alarm"
+description: |-
+ Provides a CloudWatch Metric Alarm resource.
+---
+
+# aws\_cloudwatch\_metric\_alarm
+
+Provides a CloudWatch Metric Alarm resource.
+
+## Example Usage
+```
+resource "aws_cloudwatch_metric_alarm" "foobar" {
+ alarm_name = "terraform-test-foobar5"
+ comparison_operator = "GreaterThanOrEqualToThreshold"
+ evaluation_periods = "2"
+ metric_name = "CPUUtilization"
+ namespace = "AWS/EC2"
+ period = "120"
+ statistic = "Average"
+ threshold = "80"
+ alarm_description = "This metric monitor ec2 cpu utilization"
+ insufficient_data_actions = []
+}
+```
+
+## Example in Conjunction with Scaling Policies
+
+```
+resource "aws_autoscaling_policy" "bat" {
+ name = "foobar3-terraform-test"
+ scaling_adjustment = 4
+ adjustment_type = "ChangeInCapacity"
+ cooldown = 300
+ autoscaling_group_name = "${aws_autoscaling_group.bar.name}"
+}
+
+resource "aws_cloudwatch_metric_alarm" "bat" {
+ alarm_name = "terraform-test-foobar5"
+ comparison_operator = "GreaterThanOrEqualToThreshold"
+ evaluation_periods = "2"
+ metric_name = "CPUUtilization"
+ namespace = "AWS/EC2"
+ period = "120"
+ statistic = "Average"
+ threshold = "80"
+ alarm_description = "This metric monitor ec2 cpu utilization"
+ alarm_actions = ["${aws_autoscaling_policy.bat.arn}"]
+}
+```
+## Argument Reference
+
+See [related part of AWS Docs](http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutMetricAlarm.html)
+for details about valid values.
+
+The following arguments are supported:
+
+* `alarm_name` - (Required) The descriptive name for the alarm. This name must be unique within the user's AWS account
+* `comparison_operator` - (Required) The arithmetic operation to use when comparing the specified Statistic and Threshold. The specified Statistic value is used as the first operand. Any of the following are supported: `GreaterThanOrEqualToThreshold`, `GreaterThanThreshold`, `LessThanThreshold`, `LessThanOrEqualToThreshold`.
+* `evaluation_periods` - (Required) The number of periods over which data is compared to the specified threshold.
+* `metric_name` - (Required) The name for the alarm's associated metric.
+ See docs for [supported metrics](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).
+* `namespace` - (Required) The namespace for the alarm's associated metric.
+* `period` - (Required) The period in seconds over which the specified `statistic` is applied.
+* `statistic` - (Required) The statistic to apply to the alarm's associated metric.
+ Any of the following are supported: `SampleCount`, `Average`, `Sum`, `Minimum`, `Maximum`.
+* `threshold` - (Required) The value against which the specified statistic is compared.
+* `actions_enabled` - (Optional) Indicates whether or not actions should be executed during any changes to the alarm's state. Defaults to `true`.
+* `alarm_actions` - (Optional) The list of actions to execute when this alarm transitions into an ALARM state from any other state. Each action is specified as an Amazon Resource Number (ARN).
+* `alarm_description` - (Optional) The description for the alarm.
+* `dimensions` - (Optional) The dimensions for the alarm's associated metric (see the example below).
+* `insufficient_data_actions` - (Optional) The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Number (ARN).
+* `ok_actions` - (Optional) The list of actions to execute when this alarm transitions into an OK state from any other state. Each action is specified as an Amazon Resource Number (ARN).
+* `unit` - (Optional) The unit for the alarm's associated metric.
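+
+As a sketch, `dimensions` can scope the alarm to a single Auto Scaling group
+(assuming the standard `AutoScalingGroupName` dimension that AWS publishes for
+the `AWS/EC2` namespace; values are illustrative):
+
+```
+resource "aws_cloudwatch_metric_alarm" "scoped" {
+    alarm_name = "terraform-test-scoped"
+    comparison_operator = "GreaterThanOrEqualToThreshold"
+    evaluation_periods = "2"
+    metric_name = "CPUUtilization"
+    namespace = "AWS/EC2"
+    period = "120"
+    statistic = "Average"
+    threshold = "80"
+    dimensions {
+        AutoScalingGroupName = "${aws_autoscaling_group.bar.name}"
+    }
+}
+```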
diff --git a/website/source/docs/providers/aws/r/dynamodb_table.html.markdown b/website/source/docs/providers/aws/r/dynamodb_table.html.markdown
new file mode 100644
index 0000000000..e176f39a0f
--- /dev/null
+++ b/website/source/docs/providers/aws/r/dynamodb_table.html.markdown
@@ -0,0 +1,109 @@
+---
+layout: "aws"
+page_title: "AWS: dynamodb_table"
+sidebar_current: "docs-aws-resource-dynamodb-table"
+description: |-
+ Provides a DynamoDB table resource
+---
+
+# aws\_dynamodb\_table
+
+Provides a DynamoDB table resource
+
+## Example Usage
+
+The following dynamodb table description models the table and GSI shown
+in the [AWS SDK example documentation](http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.html).
+
+```
+resource "aws_dynamodb_table" "basic-dynamodb-table" {
+ name = "GameScores"
+ read_capacity = 20
+ write_capacity = 20
+ hash_key = "UserId"
+ range_key = "GameTitle"
+ attribute {
+ name = "Username"
+ type = "S"
+ }
+ attribute {
+ name = "GameTitle"
+ type = "S"
+ }
+ attribute {
+ name = "TopScore"
+ type = "N"
+ }
+ attribute {
+ name = "TopScoreDateTime"
+ type = "S"
+ }
+ attribute {
+ name = "Wins"
+ type = "N"
+ }
+ attribute {
+ name = "Losses"
+ type = "N"
+ }
+ global_secondary_index {
+ name = "GameTitleIndex"
+ hash_key = "GameTitle"
+ range_key = "TopScore"
+ write_capacity = 10
+ read_capacity = 10
+ projection_type = "INCLUDE"
+ non_key_attributes = [ "UserId" ]
+ }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the table; this needs to be unique
+ within a region.
+* `read_capacity` - (Required) The number of read units for this table
+* `write_capacity` - (Required) The number of write units for this table
+* `hash_key` - (Required) The attribute to use as the hash key (the
+ attribute must also be defined as an attribute record)
+* `range_key` - (Optional) The attribute to use as the range key (must
+ also be defined)
+* `attribute` - Define an attribute, has two properties:
+ * `name` - The name of the attribute
+ * `type` - One of: S, N, or B for (S)tring, (N)umber or (B)inary data
+* `local_secondary_index` - (Optional) Describe an LSI on the table;
+ these can only be allocated *at creation* so you cannot change this
+definition after you have created the resource (see the sketch below the
+property list).
+* `global_secondary_index` - (Optional) Describe a GSI for the table;
+ subject to the normal limits on the number of GSIs, projected
+attributes, etc.
+
+For both `local_secondary_index` and `global_secondary_index` objects,
+the following properties are supported:
+
+* `name` - (Required) The name of the LSI or GSI
+* `hash_key` - (Required) The name of the hash key in the index; must be
+ defined as an attribute in the resource
+* `range_key` - (Required) The name of the range key; must be defined
+* `projection_type` - (Required) One of "ALL", "INCLUDE" or "KEYS_ONLY"
+ where *ALL* projects every attribute into the index, *KEYS_ONLY*
+ projects just the hash and range key into the index, and *INCLUDE*
+ projects the keys plus the attributes listed in the `non_key_attributes`
+parameter.
+* `non_key_attributes` - (Optional) Only required with *INCLUDE* as a
+ projection type; a list of attributes to project into the index. For
+each attribute listed, you need to make sure that it has been defined in
+the table object.
+
+For `global_secondary_index` objects only, you need to specify
+`write_capacity` and `read_capacity` in the same way you would for the
+table as they have separate I/O capacity.
+
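+As a sketch (index and attribute names are illustrative), a
+`local_secondary_index` block placed inside the `aws_dynamodb_table`
+resource above could look like:
+
+```
+  local_secondary_index {
+    name = "GameDateIndex"
+    hash_key = "UserId"
+    range_key = "TopScoreDateTime"
+    projection_type = "KEYS_ONLY"
+  }
+```
+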
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The name of the table
+
diff --git a/website/source/docs/providers/azure/r/sql_database_server.html.markdown b/website/source/docs/providers/azure/r/sql_database_server.html.markdown
new file mode 100644
index 0000000000..c038731813
--- /dev/null
+++ b/website/source/docs/providers/azure/r/sql_database_server.html.markdown
@@ -0,0 +1,50 @@
+---
+layout: "azure"
+page_title: "Azure: azure_sql_database_server"
+sidebar_current: "docs-azure-sql-database-server"
+description: |-
+ Allocates a new SQL Database Server on Azure.
+---
+
+# azure\_sql\_database\_server
+
+Allocates a new SQL Database Server on Azure.
+
+## Example Usage
+
+```
+resource "azure_sql_database_server" "sql-serv" {
+ name = "
+ Each release archive is a zip
file containing the Terraform binary
+ executables at the top level. These executables are meant to be extracted
+ to a location where they can be found by your shell.
+