mirror of https://github.com/opentofu/opentofu.git
synced 2025-02-25 18:45:20 -06:00

commit 02d3e1c12e
Merge branch 'master' into p-aws-randomize-test-names2
.gitattributes (vendored): 4 changes

@@ -1,4 +0,0 @@
-# Set the default behavior, in case people don't have core.autocrlf set.
-* text=auto
-
-*.go eol=lf
CHANGELOG.md: 42 changes

@@ -1,14 +1,52 @@
 ## 0.9.3 (unreleased)

+FEATURES:
+
+* **New Resource:** `aws_lightsail_static_ip` [GH-13175]
+* **New Resource:** `aws_lightsail_static_ip_attachment` [GH-13207]
+* **New Resource:** `aws_ses_domain_identity` [GH-13098]
+* **New Resource:** `kubernetes_secret` [GH-12960]
+* **New Data Source:** `aws_iam_role` [GH-13213]
+
 IMPROVEMENTS:

+* backend/remote-state: Add support for assume role extensions to s3 backend [GH-13236]
 * config: New interpolation functions `basename` and `dirname`, for file path manipulation [GH-13080]
 * helper/resource: Allow unknown "pending" states [GH-13099]
 * provider/aws: Add support to set iam_role_arn on cloudformation Stack [GH-12547]
 * provider/aws: Support priority and listener_arn update of alb_listener_rule [GH-13125]
 * provider/aws: Deprecate roles in favour of role in iam_instance_profile [GH-13130]
+* provider/aws: Make alb_target_group_attachment port optional [GH-13139]
+* provider/aws: `aws_api_gateway_domain_name` `certificate_private_key` field marked as sensitive [GH-13147]
+* provider/aws: `aws_directory_service_directory` `password` field marked as sensitive [GH-13147]
+* provider/aws: `aws_kinesis_firehose_delivery_stream` `password` field marked as sensitive [GH-13147]
+* provider/aws: `aws_opsworks_application` `app_source.0.password` & `ssl_configuration.0.private_key` fields marked as sensitive [GH-13147]
+* provider/aws: `aws_opsworks_stack` `custom_cookbooks_source.0.password` field marked as sensitive [GH-13147]
+* provider/aws: Support the ability to enable / disable ipv6 support in VPC [GH-12527]
+* provider/aws: Added API Gateway integration update [GH-13249]
+* provider/aws: Add `identifier` | `name_prefix` to RDS resources [GH-13232]
+* provider/aws: Validate `aws_ecs_task_definition.container_definitions` [GH-12161]
+* provider/github: Handle the case when issue labels already exist [GH-13182]
+* provider/google: Mark `google_container_cluster`'s `client_key` & `password` inside `master_auth` as sensitive [GH-13148]
+* provider/triton: Move to joyent/triton-go [GH-13225]
+
+BUG FIXES:
+
+* core: Escaped interpolation-like sequences (like `$${foo}`) now permitted in variable defaults [GH-13137]
+* provider/aws: Add Support for maintenance_window and back_window to rds_cluster_instance [GH-13134]
+* provider/aws: Increase timeout for AMI registration [GH-13159]
+* provider/aws: Increase timeouts for ELB [GH-13161]
+* provider/aws: `volume_type` of `aws_elasticsearch_domain.0.ebs_options` marked as `Computed` which prevents spurious diffs [GH-13160]
+* provider/aws: Don't set DBName on `aws_db_instance` from snapshot [GH-13140]
+* provider/aws: Add DiffSuppression to aws_ecs_service placement_strategies [GH-13220]
+* provider/aws: Refresh aws_alb_target_group stickiness on manual updates [GH-13199]
+* provider/aws: Preserve default retain_on_delete in cloudfront import [GH-13209]
+* provider/aws: Refresh aws_alb_target_group tags [GH-13200]
+* provider/aws: Set aws_vpn_connection to recreate when in deleted state [GH-13204]
+* provider/aws: Wait for aws_opsworks_instance to be running when it's specified [GH-13218]
+* provider/aws: Handle `aws_lambda_function` missing s3 key error [GH-10960]
+* provider/aws: Set stickiness to computed in alb_target_group [GH-13278]
+* provider/azurerm: Network Security Group - ignoring protocol casing at Import time [GH-13153]
+
 ## 0.9.2 (March 28, 2017)

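As a small illustration of the `$${foo}` escaping fix listed under BUG FIXES above (GH-13137), a minimal sketch; the variable name and value are placeholders, not taken from this change:

variable "literal_template" {
  # With the fix, the escaped sequence stays as the literal text "${foo}"
  # in the default value instead of being rejected or interpolated.
  default = "prefix-$${foo}-suffix"
}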
@@ -1,4 +1,4 @@
-// Code generated by "stringer -type=countHookAction hook_count_action.go"; DO NOT EDIT
+// Code generated by "stringer -type=countHookAction hook_count_action.go"; DO NOT EDIT.

 package local

@@ -1,4 +1,4 @@
-// Code generated by "stringer -type=OperationType operation_type.go"; DO NOT EDIT
+// Code generated by "stringer -type=OperationType operation_type.go"; DO NOT EDIT.

 package backend

@@ -21,101 +21,122 @@ import (
 func New() backend.Backend {
     s := &schema.Backend{
         Schema: map[string]*schema.Schema{
-            "bucket": &schema.Schema{
+            "bucket": {
                 Type:        schema.TypeString,
                 Required:    true,
                 Description: "The name of the S3 bucket",
             },

-            "key": &schema.Schema{
+            "key": {
                 Type:        schema.TypeString,
                 Required:    true,
                 Description: "The path to the state file inside the bucket",
             },

-            "region": &schema.Schema{
+            "region": {
                 Type:        schema.TypeString,
                 Required:    true,
                 Description: "The region of the S3 bucket.",
                 DefaultFunc: schema.EnvDefaultFunc("AWS_DEFAULT_REGION", nil),
             },

-            "endpoint": &schema.Schema{
+            "endpoint": {
                 Type:        schema.TypeString,
                 Optional:    true,
                 Description: "A custom endpoint for the S3 API",
                 DefaultFunc: schema.EnvDefaultFunc("AWS_S3_ENDPOINT", ""),
             },

-            "encrypt": &schema.Schema{
+            "encrypt": {
                 Type:        schema.TypeBool,
                 Optional:    true,
                 Description: "Whether to enable server side encryption of the state file",
                 Default:     false,
             },

-            "acl": &schema.Schema{
+            "acl": {
                 Type:        schema.TypeString,
                 Optional:    true,
                 Description: "Canned ACL to be applied to the state file",
                 Default:     "",
             },

-            "access_key": &schema.Schema{
+            "access_key": {
                 Type:        schema.TypeString,
                 Optional:    true,
                 Description: "AWS access key",
                 Default:     "",
             },

-            "secret_key": &schema.Schema{
+            "secret_key": {
                 Type:        schema.TypeString,
                 Optional:    true,
                 Description: "AWS secret key",
                 Default:     "",
             },

-            "kms_key_id": &schema.Schema{
+            "kms_key_id": {
                 Type:        schema.TypeString,
                 Optional:    true,
                 Description: "The ARN of a KMS Key to use for encrypting the state",
                 Default:     "",
             },

-            "lock_table": &schema.Schema{
+            "lock_table": {
                 Type:        schema.TypeString,
                 Optional:    true,
                 Description: "DynamoDB table for state locking",
                 Default:     "",
             },

-            "profile": &schema.Schema{
+            "profile": {
                 Type:        schema.TypeString,
                 Optional:    true,
                 Description: "AWS profile name",
                 Default:     "",
             },

-            "shared_credentials_file": &schema.Schema{
+            "shared_credentials_file": {
                 Type:        schema.TypeString,
                 Optional:    true,
                 Description: "Path to a shared credentials file",
                 Default:     "",
             },

-            "token": &schema.Schema{
+            "token": {
                 Type:        schema.TypeString,
                 Optional:    true,
                 Description: "MFA token",
                 Default:     "",
             },

-            "role_arn": &schema.Schema{
+            "role_arn": {
                 Type:        schema.TypeString,
                 Optional:    true,
                 Description: "The role to be assumed",
                 Default:     "",
             },

+            "session_name": {
+                Type:        schema.TypeString,
+                Optional:    true,
+                Description: "The session name to use when assuming the role.",
+                Default:     "",
+            },
+
+            "external_id": {
+                Type:        schema.TypeString,
+                Optional:    true,
+                Description: "The external ID to use when assuming the role",
+                Default:     "",
+            },
+
+            "assume_role_policy": {
+                Type:        schema.TypeString,
+                Optional:    true,
+                Description: "The permissions applied when assuming a role.",
+                Default:     "",
+            },
         },
     }

@@ -156,12 +177,15 @@ func (b *Backend) configure(ctx context.Context) error {

     var errs []error
     creds, err := terraformAWS.GetCredentials(&terraformAWS.Config{
         AccessKey:     data.Get("access_key").(string),
         SecretKey:     data.Get("secret_key").(string),
         Token:         data.Get("token").(string),
         Profile:       data.Get("profile").(string),
         CredsFilename: data.Get("shared_credentials_file").(string),
         AssumeRoleARN: data.Get("role_arn").(string),
+        AssumeRoleSessionName: data.Get("session_name").(string),
+        AssumeRoleExternalID:  data.Get("external_id").(string),
+        AssumeRolePolicy:      data.Get("assume_role_policy").(string),
     })
     if err != nil {
         return err

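A minimal sketch of how the new assume-role options on the s3 backend might be configured; the bucket, key, region, table, role ARN, and external ID values are placeholders, and only the option names come from the schema above:

terraform {
  backend "s3" {
    bucket       = "example-state-bucket"
    key          = "prod/terraform.tfstate"
    region       = "us-east-1"
    lock_table   = "example-terraform-locks"
    role_arn     = "arn:aws:iam::123456789012:role/example-terraform"
    session_name = "terraform"
    external_id  = "example-external-id"
  }
}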
builtin/providers/aws/data_source_aws_iam_role.go (new file): 67 additions

@@ -0,0 +1,67 @@
package aws

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/iam"
    "github.com/hashicorp/errwrap"
    "github.com/hashicorp/terraform/helper/schema"
)

func dataSourceAwsIAMRole() *schema.Resource {
    return &schema.Resource{
        Read: dataSourceAwsIAMRoleRead,

        Schema: map[string]*schema.Schema{
            "arn": {
                Type:     schema.TypeString,
                Computed: true,
            },
            "assume_role_policy_document": {
                Type:     schema.TypeString,
                Computed: true,
            },
            "path": {
                Type:     schema.TypeString,
                Computed: true,
            },
            "role_id": {
                Type:     schema.TypeString,
                Computed: true,
            },
            "role_name": {
                Type:     schema.TypeString,
                Required: true,
            },
        },
    }
}

func dataSourceAwsIAMRoleRead(d *schema.ResourceData, meta interface{}) error {
    iamconn := meta.(*AWSClient).iamconn

    roleName := d.Get("role_name").(string)

    req := &iam.GetRoleInput{
        RoleName: aws.String(roleName),
    }

    resp, err := iamconn.GetRole(req)
    if err != nil {
        return errwrap.Wrapf("Error getting roles: {{err}}", err)
    }
    if resp == nil {
        return fmt.Errorf("no IAM role found")
    }

    role := resp.Role

    d.SetId(*role.RoleId)
    d.Set("arn", role.Arn)
    d.Set("assume_role_policy_document", role.AssumeRolePolicyDocument)
    d.Set("path", role.Path)
    d.Set("role_id", role.RoleId)

    return nil
}
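A minimal sketch of using the new data source from configuration; the role and output names are placeholders, while `role_name`, `arn`, and `role_id` come from the schema above (the acceptance test below exercises the same pattern):

data "aws_iam_role" "example" {
  role_name = "example-deploy-role"
}

output "example_role_arn" {
  value = "${data.aws_iam_role.example.arn}"
}

output "example_role_id" {
  value = "${data.aws_iam_role.example.role_id}"
}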
builtin/providers/aws/data_source_aws_iam_role_test.go (new file): 59 additions

@@ -0,0 +1,59 @@
package aws

import (
    "regexp"
    "testing"

    "github.com/hashicorp/terraform/helper/resource"
)

func TestAccAWSDataSourceIAMRole_basic(t *testing.T) {
    resource.Test(t, resource.TestCase{
        PreCheck:  func() { testAccPreCheck(t) },
        Providers: testAccProviders,
        Steps: []resource.TestStep{
            {
                Config: testAccAwsIAMRoleConfig,
                Check: resource.ComposeTestCheckFunc(
                    resource.TestCheckResourceAttrSet("data.aws_iam_role.test", "role_id"),
                    resource.TestCheckResourceAttr("data.aws_iam_role.test", "assume_role_policy_document", "%7B%22Version%22%3A%222012-10-17%22%2C%22Statement%22%3A%5B%7B%22Sid%22%3A%22%22%2C%22Effect%22%3A%22Allow%22%2C%22Principal%22%3A%7B%22Service%22%3A%22ec2.amazonaws.com%22%7D%2C%22Action%22%3A%22sts%3AAssumeRole%22%7D%5D%7D"),
                    resource.TestCheckResourceAttr("data.aws_iam_role.test", "path", "/testpath/"),
                    resource.TestCheckResourceAttr("data.aws_iam_role.test", "role_name", "TestRole"),
                    resource.TestMatchResourceAttr("data.aws_iam_role.test", "arn", regexp.MustCompile("^arn:aws:iam::[0-9]{12}:role/testpath/TestRole$")),
                ),
            },
        },
    })
}

const testAccAwsIAMRoleConfig = `
provider "aws" {
  region = "us-east-1"
}

resource "aws_iam_role" "test_role" {
  name = "TestRole"

  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": "sts:AssumeRole",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Effect": "Allow",
      "Sid": ""
    }
  ]
}
EOF

  path = "/testpath/"
}

data "aws_iam_role" "test" {
  role_name = "${aws_iam_role.test_role.name}"
}
`
@@ -136,7 +136,7 @@ func dataSourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) erro

         if matchingTags && matchingVPC {
             if hostedZoneFound != nil {
-                return fmt.Errorf("multplie Route53Zone found please use vpc_id option to filter")
+                return fmt.Errorf("multiple Route53Zone found please use vpc_id option to filter")
             } else {
                 hostedZoneFound = hostedZone
             }

@@ -7,6 +7,10 @@ import (
 )

 func resourceAwsCloudFrontDistributionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+    // This is a non API attribute
+    // We are merely setting this to the same value as the Default setting in the schema
+    d.Set("retain_on_delete", false)
+
     conn := meta.(*AWSClient).cloudfrontconn
     id := d.Id()
     resp, err := conn.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{

@@ -19,16 +19,13 @@ func TestAccAWSCloudFrontDistribution_importBasic(t *testing.T) {
         Providers:    testAccProviders,
         CheckDestroy: testAccCheckCloudFrontDistributionDestroy,
         Steps: []resource.TestStep{
-            resource.TestStep{
+            {
                 Config: testConfig,
             },
-            resource.TestStep{
+            {
                 ResourceName:      resourceName,
                 ImportState:       true,
                 ImportStateVerify: true,
-                // Ignore retain_on_delete since it doesn't come from the AWS
-                // API.
-                ImportStateVerifyIgnore: []string{"retain_on_delete"},
             },
         },
     })

@@ -3,11 +3,13 @@ package aws
 import (
     "testing"

+    "github.com/hashicorp/terraform/helper/acctest"
     "github.com/hashicorp/terraform/helper/resource"
 )

 func TestAccAWSEFSFileSystem_importBasic(t *testing.T) {
     resourceName := "aws_efs_file_system.foo-with-tags"
+    rInt := acctest.RandInt()
+
     resource.Test(t, resource.TestCase{
         PreCheck: func() { testAccPreCheck(t) },
@@ -15,7 +17,7 @@ func TestAccAWSEFSFileSystem_importBasic(t *testing.T) {
         CheckDestroy: testAccCheckEfsFileSystemDestroy,
         Steps: []resource.TestStep{
             resource.TestStep{
-                Config: testAccAWSEFSFileSystemConfigWithTags,
+                Config: testAccAWSEFSFileSystemConfigWithTags(rInt),
             },

             resource.TestStep{

@@ -174,6 +174,7 @@ func Provider() terraform.ResourceProvider {
             "aws_elb_service_account":    dataSourceAwsElbServiceAccount(),
             "aws_iam_account_alias":      dataSourceAwsIamAccountAlias(),
             "aws_iam_policy_document":    dataSourceAwsIamPolicyDocument(),
+            "aws_iam_role":               dataSourceAwsIAMRole(),
             "aws_iam_server_certificate": dataSourceAwsIAMServerCertificate(),
             "aws_instance":               dataSourceAwsInstance(),
             "aws_ip_ranges":              dataSourceAwsIPRanges(),
@@ -337,6 +338,8 @@ func Provider() terraform.ResourceProvider {
             "aws_lightsail_domain":                     resourceAwsLightsailDomain(),
             "aws_lightsail_instance":                   resourceAwsLightsailInstance(),
             "aws_lightsail_key_pair":                   resourceAwsLightsailKeyPair(),
+            "aws_lightsail_static_ip":                  resourceAwsLightsailStaticIp(),
+            "aws_lightsail_static_ip_attachment":       resourceAwsLightsailStaticIpAttachment(),
             "aws_lb_cookie_stickiness_policy":          resourceAwsLBCookieStickinessPolicy(),
             "aws_load_balancer_policy":                 resourceAwsLoadBalancerPolicy(),
             "aws_load_balancer_backend_server_policy":  resourceAwsLoadBalancerBackendServerPolicies(),
@@ -383,6 +386,7 @@ func Provider() terraform.ResourceProvider {
             "aws_route_table":                 resourceAwsRouteTable(),
             "aws_route_table_association":     resourceAwsRouteTableAssociation(),
             "aws_ses_active_receipt_rule_set": resourceAwsSesActiveReceiptRuleSet(),
+            "aws_ses_domain_identity":         resourceAwsSesDomainIdentity(),
             "aws_ses_receipt_filter":          resourceAwsSesReceiptFilter(),
             "aws_ses_receipt_rule":            resourceAwsSesReceiptRule(),
             "aws_ses_receipt_rule_set":        resourceAwsSesReceiptRuleSet(),

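For the newly registered `aws_ses_domain_identity` resource, a minimal usage sketch; its implementation is not part of this excerpt, so the single `domain` argument shown here is an assumption, and the domain value is a placeholder:

resource "aws_ses_domain_identity" "example" {
  # Assumed argument name; verify against the resource's own schema.
  domain = "example.com"
}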
@@ -73,6 +73,7 @@ func resourceAwsAlbTargetGroup() *schema.Resource {
             "stickiness": {
                 Type:     schema.TypeList,
                 Optional: true,
+                Computed: true,
                 MaxItems: 1,
                 Elem: &schema.Resource{
                     Schema: map[string]*schema.Schema{
@@ -258,11 +259,19 @@ func resourceAwsAlbTargetGroupRead(d *schema.ResourceData, meta interface{}) err
     for _, attr := range attrResp.Attributes {
         switch *attr.Key {
         case "stickiness.enabled":
-            stickinessMap["enabled"] = *attr.Value
+            enabled, err := strconv.ParseBool(*attr.Value)
+            if err != nil {
+                return fmt.Errorf("Error converting stickiness.enabled to bool: %s", *attr.Value)
+            }
+            stickinessMap["enabled"] = enabled
         case "stickiness.type":
             stickinessMap["type"] = *attr.Value
         case "stickiness.lb_cookie.duration_seconds":
-            stickinessMap["cookie_duration"] = *attr.Value
+            duration, err := strconv.Atoi(*attr.Value)
+            if err != nil {
+                return fmt.Errorf("Error converting stickiness.lb_cookie.duration_seconds to int: %s", *attr.Value)
+            }
+            stickinessMap["cookie_duration"] = duration
         case "deregistration_delay.timeout_seconds":
             timeout, err := strconv.Atoi(*attr.Value)
             if err != nil {
@@ -271,7 +280,24 @@ func resourceAwsAlbTargetGroupRead(d *schema.ResourceData, meta interface{}) err
             d.Set("deregistration_delay", timeout)
         }
     }
-    d.Set("stickiness", []interface{}{stickinessMap})
+
+    if err := d.Set("stickiness", []interface{}{stickinessMap}); err != nil {
+        return err
+    }
+
+    tagsResp, err := elbconn.DescribeTags(&elbv2.DescribeTagsInput{
+        ResourceArns: []*string{aws.String(d.Id())},
+    })
+    if err != nil {
+        return errwrap.Wrapf("Error retrieving Target Group Tags: {{err}}", err)
+    }
+    for _, t := range tagsResp.TagDescriptions {
+        if *t.ResourceArn == d.Id() {
+            if err := d.Set("tags", tagsToMapELBv2(t.Tags)); err != nil {
+                return err
+            }
+        }
+    }

     return nil
 }

@@ -34,7 +34,7 @@ func resourceAwsAlbTargetGroupAttachment() *schema.Resource {
             "port": {
                 Type:     schema.TypeInt,
                 ForceNew: true,
-                Required: true,
+                Optional: true,
             },
         },
     }
@@ -43,18 +43,21 @@ func resourceAwsAlbTargetGroupAttachment() *schema.Resource {
 func resourceAwsAlbAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
     elbconn := meta.(*AWSClient).elbv2conn

-    params := &elbv2.RegisterTargetsInput{
-        TargetGroupArn: aws.String(d.Get("target_group_arn").(string)),
-        Targets: []*elbv2.TargetDescription{
-            {
-                Id:   aws.String(d.Get("target_id").(string)),
-                Port: aws.Int64(int64(d.Get("port").(int))),
-            },
-        },
+    target := &elbv2.TargetDescription{
+        Id: aws.String(d.Get("target_id").(string)),
     }

-    log.Printf("[INFO] Registering Target %s (%d) with Target Group %s", d.Get("target_id").(string),
-        d.Get("port").(int), d.Get("target_group_arn").(string))
+    if v, ok := d.GetOk("port"); ok {
+        target.Port = aws.Int64(int64(v.(int)))
+    }
+
+    params := &elbv2.RegisterTargetsInput{
+        TargetGroupArn: aws.String(d.Get("target_group_arn").(string)),
+        Targets:        []*elbv2.TargetDescription{target},
+    }
+
+    log.Printf("[INFO] Registering Target %s with Target Group %s", d.Get("target_id").(string),
+        d.Get("target_group_arn").(string))

     _, err := elbconn.RegisterTargets(params)
     if err != nil {
@@ -69,14 +72,17 @@ func resourceAwsAlbAttachmentCreate(d *schema.ResourceData, meta interface{}) er
 func resourceAwsAlbAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
     elbconn := meta.(*AWSClient).elbv2conn

+    target := &elbv2.TargetDescription{
+        Id: aws.String(d.Get("target_id").(string)),
+    }
+
+    if v, ok := d.GetOk("port"); ok {
+        target.Port = aws.Int64(int64(v.(int)))
+    }
+
     params := &elbv2.DeregisterTargetsInput{
         TargetGroupArn: aws.String(d.Get("target_group_arn").(string)),
-        Targets: []*elbv2.TargetDescription{
-            {
-                Id:   aws.String(d.Get("target_id").(string)),
-                Port: aws.Int64(int64(d.Get("port").(int))),
-            },
-        },
+        Targets:        []*elbv2.TargetDescription{target},
     }

     _, err := elbconn.DeregisterTargets(params)
@@ -93,14 +99,18 @@ func resourceAwsAlbAttachmentDelete(d *schema.ResourceData, meta interface{}) er
 // target, so there is no work to do beyond ensuring that the target and group still exist.
 func resourceAwsAlbAttachmentRead(d *schema.ResourceData, meta interface{}) error {
     elbconn := meta.(*AWSClient).elbv2conn

+    target := &elbv2.TargetDescription{
+        Id: aws.String(d.Get("target_id").(string)),
+    }
+
+    if v, ok := d.GetOk("port"); ok {
+        target.Port = aws.Int64(int64(v.(int)))
+    }
+
     resp, err := elbconn.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{
         TargetGroupArn: aws.String(d.Get("target_group_arn").(string)),
-        Targets: []*elbv2.TargetDescription{
-            {
-                Id:   aws.String(d.Get("target_id").(string)),
-                Port: aws.Int64(int64(d.Get("port").(int))),
-            },
-        },
+        Targets:        []*elbv2.TargetDescription{target},
     })
     if err != nil {
         if isTargetGroupNotFound(err) {

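A minimal sketch of an attachment that omits the now-optional `port`; the referenced target group and instance are placeholders, and the acceptance test configuration below uses the same shape:

resource "aws_alb_target_group_attachment" "example" {
  target_group_arn = "${aws_alb_target_group.example.arn}"
  target_id        = "${aws_instance.example.id}"
  # port is omitted now that the attribute is Optional.
}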
@@ -3,14 +3,15 @@ package aws
 import (
     "errors"
     "fmt"
+    "strconv"
+    "testing"

     "github.com/aws/aws-sdk-go/aws"
     "github.com/aws/aws-sdk-go/service/elbv2"
     "github.com/hashicorp/errwrap"
     "github.com/hashicorp/terraform/helper/acctest"
     "github.com/hashicorp/terraform/helper/resource"
     "github.com/hashicorp/terraform/terraform"
-    "strconv"
-    "testing"
 )

 func TestAccAWSALBTargetGroupAttachment_basic(t *testing.T) {
@@ -32,6 +33,25 @@ func TestAccAWSALBTargetGroupAttachment_basic(t *testing.T) {
     })
 }

+func TestAccAWSALBTargetGroupAttachment_withoutPort(t *testing.T) {
+    targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum))
+
+    resource.Test(t, resource.TestCase{
+        PreCheck:      func() { testAccPreCheck(t) },
+        IDRefreshName: "aws_alb_target_group.test",
+        Providers:     testAccProviders,
+        CheckDestroy:  testAccCheckAWSALBTargetGroupAttachmentDestroy,
+        Steps: []resource.TestStep{
+            {
+                Config: testAccAWSALBTargetGroupAttachmentConfigWithoutPort(targetGroupName),
+                Check: resource.ComposeAggregateTestCheckFunc(
+                    testAccCheckAWSALBTargetGroupAttachmentExists("aws_alb_target_group_attachment.test"),
+                ),
+            },
+        },
+    })
+}
+
 func testAccCheckAWSALBTargetGroupAttachmentExists(n string) resource.TestCheckFunc {
     return func(s *terraform.State) error {
         rs, ok := s.RootModule().Resources[n]
@@ -45,15 +65,20 @@ func testAccCheckAWSALBTargetGroupAttachmentExists(n string) resource.TestCheckF

         conn := testAccProvider.Meta().(*AWSClient).elbv2conn

-        port, _ := strconv.Atoi(rs.Primary.Attributes["port"])
+        _, hasPort := rs.Primary.Attributes["port"]
+        targetGroupArn, _ := rs.Primary.Attributes["target_group_arn"]
+
+        target := &elbv2.TargetDescription{
+            Id: aws.String(rs.Primary.Attributes["target_id"]),
+        }
+        if hasPort == true {
+            port, _ := strconv.Atoi(rs.Primary.Attributes["port"])
+            target.Port = aws.Int64(int64(port))
+        }

         describe, err := conn.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{
-            TargetGroupArn: aws.String(rs.Primary.Attributes["target_group_arn"]),
-            Targets: []*elbv2.TargetDescription{
-                {
-                    Id:   aws.String(rs.Primary.Attributes["target_id"]),
-                    Port: aws.Int64(int64(port)),
-                },
-            },
+            TargetGroupArn: aws.String(targetGroupArn),
+            Targets:        []*elbv2.TargetDescription{target},
         })

         if err != nil {
@@ -76,15 +101,20 @@ func testAccCheckAWSALBTargetGroupAttachmentDestroy(s *terraform.State) error {
             continue
         }

-        port, _ := strconv.Atoi(rs.Primary.Attributes["port"])
+        _, hasPort := rs.Primary.Attributes["port"]
+        targetGroupArn, _ := rs.Primary.Attributes["target_group_arn"]
+
+        target := &elbv2.TargetDescription{
+            Id: aws.String(rs.Primary.Attributes["target_id"]),
+        }
+        if hasPort == true {
+            port, _ := strconv.Atoi(rs.Primary.Attributes["port"])
+            target.Port = aws.Int64(int64(port))
+        }

         describe, err := conn.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{
-            TargetGroupArn: aws.String(rs.Primary.Attributes["target_group_arn"]),
-            Targets: []*elbv2.TargetDescription{
-                {
-                    Id:   aws.String(rs.Primary.Attributes["target_id"]),
-                    Port: aws.Int64(int64(port)),
-                },
-            },
+            TargetGroupArn: aws.String(targetGroupArn),
+            Targets:        []*elbv2.TargetDescription{target},
         })
         if err == nil {
             if len(describe.TargetHealthDescriptions) != 0 {
@@ -103,6 +133,55 @@ func testAccCheckAWSALBTargetGroupAttachmentDestroy(s *terraform.State) error {
     return nil
 }

+func testAccAWSALBTargetGroupAttachmentConfigWithoutPort(targetGroupName string) string {
+    return fmt.Sprintf(`
+resource "aws_alb_target_group_attachment" "test" {
+  target_group_arn = "${aws_alb_target_group.test.arn}"
+  target_id = "${aws_instance.test.id}"
+}
+
+resource "aws_instance" "test" {
+  ami = "ami-f701cb97"
+  instance_type = "t2.micro"
+  subnet_id = "${aws_subnet.subnet.id}"
+}
+
+resource "aws_alb_target_group" "test" {
+  name = "%s"
+  port = 443
+  protocol = "HTTPS"
+  vpc_id = "${aws_vpc.test.id}"
+
+  deregistration_delay = 200
+
+  stickiness {
+    type = "lb_cookie"
+    cookie_duration = 10000
+  }
+
+  health_check {
+    path = "/health"
+    interval = 60
+    port = 8081
+    protocol = "HTTP"
+    timeout = 3
+    healthy_threshold = 3
+    unhealthy_threshold = 3
+    matcher = "200-299"
+  }
+}
+
+resource "aws_subnet" "subnet" {
+  cidr_block = "10.0.1.0/24"
+  vpc_id = "${aws_vpc.test.id}"
+
+}
+
+resource "aws_vpc" "test" {
+  cidr_block = "10.0.0.0/16"
+}`, targetGroupName)
+}
+
 func testAccAWSALBTargetGroupAttachmentConfig_basic(targetGroupName string) string {
     return fmt.Sprintf(`
 resource "aws_alb_target_group_attachment" "test" {

@@ -77,6 +77,8 @@ func TestAccAWSALBTargetGroup_basic(t *testing.T) {
                     resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.healthy_threshold", "3"),
                     resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.unhealthy_threshold", "3"),
                     resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.matcher", "200-299"),
+                    resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.%", "1"),
+                    resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.TestName", "TestAccAWSALBTargetGroup_basic"),
                 ),
             },
         },

@@ -18,7 +18,7 @@ import (
 )

 const (
-    AWSAMIRetryTimeout       = 10 * time.Minute
+    AWSAMIRetryTimeout       = 20 * time.Minute
     AWSAMIDeleteRetryTimeout = 20 * time.Minute
     AWSAMIRetryDelay         = 5 * time.Second
     AWSAMIRetryMinTimeout    = 3 * time.Second

@@ -48,6 +48,7 @@ func resourceAwsApiGatewayDomainName() *schema.Resource {
                 Type:          schema.TypeString,
                 ForceNew:      true,
                 Optional:      true,
+                Sensitive:     true,
                 ConflictsWith: []string{"certificate_arn"},
             },

@@ -11,87 +11,94 @@ import (
     "github.com/aws/aws-sdk-go/service/apigateway"
     "github.com/hashicorp/terraform/helper/resource"
     "github.com/hashicorp/terraform/helper/schema"
+    "strings"
 )

 func resourceAwsApiGatewayIntegration() *schema.Resource {
     return &schema.Resource{
         Create: resourceAwsApiGatewayIntegrationCreate,
         Read:   resourceAwsApiGatewayIntegrationRead,
-        Update: resourceAwsApiGatewayIntegrationCreate,
+        Update: resourceAwsApiGatewayIntegrationUpdate,
         Delete: resourceAwsApiGatewayIntegrationDelete,

         Schema: map[string]*schema.Schema{
-            "rest_api_id": &schema.Schema{
+            "rest_api_id": {
                 Type:     schema.TypeString,
                 Required: true,
                 ForceNew: true,
             },

-            "resource_id": &schema.Schema{
+            "resource_id": {
                 Type:     schema.TypeString,
                 Required: true,
                 ForceNew: true,
             },

-            "http_method": &schema.Schema{
+            "http_method": {
                 Type:         schema.TypeString,
                 Required:     true,
                 ForceNew:     true,
                 ValidateFunc: validateHTTPMethod,
             },

-            "type": &schema.Schema{
+            "type": {
                 Type:         schema.TypeString,
                 Required:     true,
+                ForceNew:     true,
                 ValidateFunc: validateApiGatewayIntegrationType,
             },

-            "uri": &schema.Schema{
+            "uri": {
                 Type:     schema.TypeString,
                 Optional: true,
+                ForceNew: true,
             },

-            "credentials": &schema.Schema{
+            "credentials": {
                 Type:     schema.TypeString,
                 Optional: true,
+                ForceNew: true,
             },

-            "integration_http_method": &schema.Schema{
+            "integration_http_method": {
                 Type:         schema.TypeString,
                 Optional:     true,
+                ForceNew:     true,
                 ValidateFunc: validateHTTPMethod,
             },

-            "request_templates": &schema.Schema{
+            "request_templates": {
                 Type:     schema.TypeMap,
                 Optional: true,
                 Elem:     schema.TypeString,
             },

-            "request_parameters": &schema.Schema{
+            "request_parameters": {
                 Type:          schema.TypeMap,
                 Elem:          schema.TypeString,
                 Optional:      true,
                 ConflictsWith: []string{"request_parameters_in_json"},
             },

-            "request_parameters_in_json": &schema.Schema{
+            "request_parameters_in_json": {
                 Type:          schema.TypeString,
                 Optional:      true,
                 ConflictsWith: []string{"request_parameters"},
                 Deprecated:    "Use field request_parameters instead",
             },

-            "content_handling": &schema.Schema{
+            "content_handling": {
                 Type:         schema.TypeString,
                 Optional:     true,
+                ForceNew:     true,
                 ValidateFunc: validateApiGatewayIntegrationContentHandling,
             },

-            "passthrough_behavior": &schema.Schema{
+            "passthrough_behavior": {
                 Type:         schema.TypeString,
                 Optional:     true,
                 Computed:     true,
+                ForceNew:     true,
                 ValidateFunc: validateApiGatewayIntegrationPassthroughBehavior,
             },
         },

@@ -101,6 +108,7 @@ func resourceAwsApiGatewayIntegration() *schema.Resource {
 func resourceAwsApiGatewayIntegrationCreate(d *schema.ResourceData, meta interface{}) error {
     conn := meta.(*AWSClient).apigateway

+    log.Print("[DEBUG] Creating API Gateway Integration")
     var integrationHttpMethod *string
     if v, ok := d.GetOk("integration_http_method"); ok {
         integrationHttpMethod = aws.String(v.(string))
@@ -163,13 +171,13 @@ func resourceAwsApiGatewayIntegrationCreate(d *schema.ResourceData, meta interfa

     d.SetId(fmt.Sprintf("agi-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string)))

-    return nil
+    return resourceAwsApiGatewayIntegrationRead(d, meta)
 }

 func resourceAwsApiGatewayIntegrationRead(d *schema.ResourceData, meta interface{}) error {
     conn := meta.(*AWSClient).apigateway

-    log.Printf("[DEBUG] Reading API Gateway Integration %s", d.Id())
+    log.Printf("[DEBUG] Reading API Gateway Integration: %s", d.Id())
     integration, err := conn.GetIntegration(&apigateway.GetIntegrationInput{
         HttpMethod: aws.String(d.Get("http_method").(string)),
         ResourceId: aws.String(d.Get("resource_id").(string)),
@@ -191,17 +199,127 @@ func resourceAwsApiGatewayIntegrationRead(d *schema.ResourceData, meta interface
     }

     d.Set("request_templates", aws.StringValueMap(integration.RequestTemplates))
-    d.Set("credentials", integration.Credentials)
     d.Set("type", integration.Type)
-    d.Set("uri", integration.Uri)
     d.Set("request_parameters", aws.StringValueMap(integration.RequestParameters))
     d.Set("request_parameters_in_json", aws.StringValueMap(integration.RequestParameters))
     d.Set("passthrough_behavior", integration.PassthroughBehavior)
-    d.Set("content_handling", integration.ContentHandling)
+
+    if integration.Uri != nil {
+        d.Set("uri", integration.Uri)
+    }
+
+    if integration.Credentials != nil {
+        d.Set("credentials", integration.Credentials)
+    }
+
+    if integration.ContentHandling != nil {
+        d.Set("content_handling", integration.ContentHandling)
+    }

     return nil
 }

+func resourceAwsApiGatewayIntegrationUpdate(d *schema.ResourceData, meta interface{}) error {
+    conn := meta.(*AWSClient).apigateway
+
+    log.Printf("[DEBUG] Updating API Gateway Integration: %s", d.Id())
+    operations := make([]*apigateway.PatchOperation, 0)
+
+    // https://docs.aws.amazon.com/apigateway/api-reference/link-relation/integration-update/#remarks
+    // According to the above documentation, only a few parts are addable / removable.
+    if d.HasChange("request_templates") {
+        o, n := d.GetChange("request_templates")
+        prefix := "requestTemplates"
+
+        os := o.(map[string]interface{})
+        ns := n.(map[string]interface{})
+
+        // Handle Removal
+        for k := range os {
+            if _, ok := ns[k]; !ok {
+                operations = append(operations, &apigateway.PatchOperation{
+                    Op:   aws.String("remove"),
+                    Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))),
+                })
+            }
+        }
+
+        for k, v := range ns {
+            // Handle replaces
+            if _, ok := os[k]; ok {
+                operations = append(operations, &apigateway.PatchOperation{
+                    Op:    aws.String("replace"),
+                    Path:  aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))),
+                    Value: aws.String(v.(string)),
+                })
+            }
+
+            // Handle additions
+            if _, ok := os[k]; !ok {
+                operations = append(operations, &apigateway.PatchOperation{
+                    Op:    aws.String("add"),
+                    Path:  aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))),
+                    Value: aws.String(v.(string)),
+                })
+            }
+        }
+    }
+
+    if d.HasChange("request_parameters") {
+        o, n := d.GetChange("request_parameters")
+        prefix := "requestParameters"
+
+        os := o.(map[string]interface{})
+        ns := n.(map[string]interface{})
+
+        // Handle Removal
+        for k := range os {
+            if _, ok := ns[k]; !ok {
+                operations = append(operations, &apigateway.PatchOperation{
+                    Op:   aws.String("remove"),
+                    Path: aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))),
+                })
+            }
+        }
+
+        for k, v := range ns {
+            // Handle replaces
+            if _, ok := os[k]; ok {
+                operations = append(operations, &apigateway.PatchOperation{
+                    Op:    aws.String("replace"),
+                    Path:  aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))),
+                    Value: aws.String(v.(string)),
+                })
+            }
+
+            // Handle additions
+            if _, ok := os[k]; !ok {
+                operations = append(operations, &apigateway.PatchOperation{
+                    Op:    aws.String("add"),
+                    Path:  aws.String(fmt.Sprintf("/%s/%s", prefix, strings.Replace(k, "/", "~1", -1))),
+                    Value: aws.String(v.(string)),
+                })
+            }
+        }
+    }
+
+    params := &apigateway.UpdateIntegrationInput{
+        HttpMethod:      aws.String(d.Get("http_method").(string)),
+        ResourceId:      aws.String(d.Get("resource_id").(string)),
+        RestApiId:       aws.String(d.Get("rest_api_id").(string)),
+        PatchOperations: operations,
+    }
+
+    _, err := conn.UpdateIntegration(params)
+    if err != nil {
+        return fmt.Errorf("Error updating API Gateway Integration: %s", err)
+    }
+
+    d.SetId(fmt.Sprintf("agi-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string)))
+
+    return resourceAwsApiGatewayIntegrationRead(d, meta)
+}
+
 func resourceAwsApiGatewayIntegrationDelete(d *schema.ResourceData, meta interface{}) error {
     conn := meta.(*AWSClient).apigateway
     log.Printf("[DEBUG] Deleting API Gateway Integration: %s", d.Id())

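A minimal configuration sketch of an integration whose `request_templates` and `request_parameters` entries can now be changed in place through the update path above; the REST API, resource, and method references are placeholders, and only the argument names come from the schema in this file:

resource "aws_api_gateway_integration" "example" {
  rest_api_id             = "${aws_api_gateway_rest_api.example.id}"
  resource_id             = "${aws_api_gateway_resource.example.id}"
  http_method             = "${aws_api_gateway_method.example.http_method}"
  type                    = "HTTP"
  integration_http_method = "GET"
  uri                     = "https://example.com/"
  passthrough_behavior    = "WHEN_NO_MATCH"

  # Adding, replacing, or removing entries in these maps is applied as
  # PATCH operations instead of forcing a new integration.
  request_templates = {
    "application/json" = ""
  }

  request_parameters = {
    "integration.request.header.X-Authorization" = "'static'"
  }
}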
@@ -19,88 +19,80 @@ func TestAccAWSAPIGatewayIntegration_basic(t *testing.T) {
         Providers:    testAccProviders,
         CheckDestroy: testAccCheckAWSAPIGatewayIntegrationDestroy,
         Steps: []resource.TestStep{
-            resource.TestStep{
+            {
                 Config: testAccAWSAPIGatewayIntegrationConfig,
                 Check: resource.ComposeTestCheckFunc(
                     testAccCheckAWSAPIGatewayIntegrationExists("aws_api_gateway_integration.test", &conf),
-                    testAccCheckAWSAPIGatewayIntegrationAttributes(&conf),
-                    resource.TestCheckResourceAttr(
-                        "aws_api_gateway_integration.test", "type", "HTTP"),
-                    resource.TestCheckResourceAttr(
-                        "aws_api_gateway_integration.test", "integration_http_method", "GET"),
-                    resource.TestCheckResourceAttr(
-                        "aws_api_gateway_integration.test", "uri", "https://www.google.de"),
-                    resource.TestCheckResourceAttr(
-                        "aws_api_gateway_integration.test", "request_templates.application/json", ""),
-                    resource.TestCheckResourceAttr(
-                        "aws_api_gateway_integration.test", "request_templates.application/xml", "#set($inputRoot = $input.path('$'))\n{ }"),
-                    resource.TestCheckResourceAttr(
-                        "aws_api_gateway_integration.test", "passthrough_behavior", "WHEN_NO_MATCH"),
-                    resource.TestCheckNoResourceAttr(
-                        "aws_api_gateway_integration.test", "content_handling"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "type", "HTTP"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "integration_http_method", "GET"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "uri", "https://www.google.de"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "passthrough_behavior", "WHEN_NO_MATCH"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "content_handling", "CONVERT_TO_TEXT"),
+                    resource.TestCheckNoResourceAttr("aws_api_gateway_integration.test", "credentials"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_parameters.%", "2"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_parameters.integration.request.header.X-Authorization", "'static'"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_parameters.integration.request.header.X-Foo", "'Bar'"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.%", "2"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.application/json", ""),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.application/xml", "#set($inputRoot = $input.path('$'))\n{ }"),
                 ),
             },

-            resource.TestStep{
+            {
                 Config: testAccAWSAPIGatewayIntegrationConfigUpdate,
                 Check: resource.ComposeTestCheckFunc(
                     testAccCheckAWSAPIGatewayIntegrationExists("aws_api_gateway_integration.test", &conf),
-                    testAccCheckAWSAPIGatewayMockIntegrationAttributes(&conf),
-                    resource.TestCheckResourceAttr(
-                        "aws_api_gateway_integration.test", "type", "MOCK"),
-                    resource.TestCheckResourceAttr(
-                        "aws_api_gateway_integration.test", "integration_http_method", ""),
-                    resource.TestCheckResourceAttr(
-                        "aws_api_gateway_integration.test", "uri", ""),
-                    resource.TestCheckResourceAttr(
-                        "aws_api_gateway_integration.test", "passthrough_behavior", "NEVER"),
-                    resource.TestCheckResourceAttr(
-                        "aws_api_gateway_integration.test", "content_handling", "CONVERT_TO_BINARY"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "type", "HTTP"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "integration_http_method", "GET"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "uri", "https://www.google.de"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "passthrough_behavior", "WHEN_NO_MATCH"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "content_handling", "CONVERT_TO_TEXT"),
+                    resource.TestCheckNoResourceAttr("aws_api_gateway_integration.test", "credentials"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_parameters.%", "2"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_parameters.integration.request.header.X-Authorization", "'updated'"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_parameters.integration.request.header.X-FooBar", "'Baz'"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.%", "2"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.application/json", "{'foobar': 'bar}"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.text/html", "<html>Foo</html>"),
+                ),
+            },
+
+            {
+                Config: testAccAWSAPIGatewayIntegrationConfigUpdateNoTemplates,
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckAWSAPIGatewayIntegrationExists("aws_api_gateway_integration.test", &conf),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "type", "HTTP"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "integration_http_method", "GET"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "uri", "https://www.google.de"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "passthrough_behavior", "WHEN_NO_MATCH"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "content_handling", "CONVERT_TO_TEXT"),
+                    resource.TestCheckNoResourceAttr("aws_api_gateway_integration.test", "credentials"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_parameters.%", "0"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.%", "0"),
+                ),
+            },
+
+            {
+                Config: testAccAWSAPIGatewayIntegrationConfig,
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckAWSAPIGatewayIntegrationExists("aws_api_gateway_integration.test", &conf),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "type", "HTTP"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "integration_http_method", "GET"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "uri", "https://www.google.de"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "passthrough_behavior", "WHEN_NO_MATCH"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "content_handling", "CONVERT_TO_TEXT"),
+                    resource.TestCheckNoResourceAttr("aws_api_gateway_integration.test", "credentials"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_parameters.%", "2"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_parameters.integration.request.header.X-Authorization", "'static'"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.%", "2"),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.application/json", ""),
+                    resource.TestCheckResourceAttr("aws_api_gateway_integration.test", "request_templates.application/xml", "#set($inputRoot = $input.path('$'))\n{ }"),
                 ),
             },
         },
     })
 }

-func testAccCheckAWSAPIGatewayMockIntegrationAttributes(conf *apigateway.Integration) resource.TestCheckFunc {
     return func(s *terraform.State) error {
|
|
||||||
if *conf.Type != "MOCK" {
|
|
||||||
return fmt.Errorf("Wrong Type: %q", *conf.Type)
|
|
||||||
}
|
|
||||||
if *conf.RequestParameters["integration.request.header.X-Authorization"] != "'updated'" {
|
|
||||||
return fmt.Errorf("wrong updated RequestParameters for header.X-Authorization")
|
|
||||||
}
|
|
||||||
if *conf.ContentHandling != "CONVERT_TO_BINARY" {
|
|
||||||
return fmt.Errorf("wrong ContentHandling: %q", *conf.ContentHandling)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccCheckAWSAPIGatewayIntegrationAttributes(conf *apigateway.Integration) resource.TestCheckFunc {
|
|
||||||
return func(s *terraform.State) error {
|
|
||||||
if *conf.HttpMethod == "" {
|
|
||||||
return fmt.Errorf("empty HttpMethod")
|
|
||||||
}
|
|
||||||
if *conf.Uri != "https://www.google.de" {
|
|
||||||
return fmt.Errorf("wrong Uri")
|
|
||||||
}
|
|
||||||
if *conf.Type != "HTTP" {
|
|
||||||
return fmt.Errorf("wrong Type")
|
|
||||||
}
|
|
||||||
if conf.RequestTemplates["application/json"] != nil {
|
|
||||||
return fmt.Errorf("wrong RequestTemplate for application/json")
|
|
||||||
}
|
|
||||||
if *conf.RequestTemplates["application/xml"] != "#set($inputRoot = $input.path('$'))\n{ }" {
|
|
||||||
return fmt.Errorf("wrong RequestTemplate for application/xml")
|
|
||||||
}
|
|
||||||
if *conf.RequestParameters["integration.request.header.X-Authorization"] != "'static'" {
|
|
||||||
return fmt.Errorf("wrong RequestParameters for header.X-Authorization")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccCheckAWSAPIGatewayIntegrationExists(n string, res *apigateway.Integration) resource.TestCheckFunc {
|
func testAccCheckAWSAPIGatewayIntegrationExists(n string, res *apigateway.Integration) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
@@ -196,13 +188,15 @@ resource "aws_api_gateway_integration" "test" {
   }
 
   request_parameters = {
     "integration.request.header.X-Authorization" = "'static'"
+    "integration.request.header.X-Foo" = "'Bar'"
   }
 
   type = "HTTP"
   uri = "https://www.google.de"
   integration_http_method = "GET"
   passthrough_behavior = "WHEN_NO_MATCH"
+  content_handling = "CONVERT_TO_TEXT"
 }
 `
 
@@ -233,13 +227,55 @@ resource "aws_api_gateway_integration" "test" {
   resource_id = "${aws_api_gateway_resource.test.id}"
   http_method = "${aws_api_gateway_method.test.http_method}"
 
-  request_parameters = {
-    "integration.request.header.X-Authorization" = "'updated'"
+  request_templates = {
+    "application/json" = "{'foobar': 'bar}"
+    "text/html" = "<html>Foo</html>"
   }
 
-  type = "MOCK"
-  passthrough_behavior = "NEVER"
-  content_handling = "CONVERT_TO_BINARY"
+  request_parameters = {
+    "integration.request.header.X-Authorization" = "'updated'"
+    "integration.request.header.X-FooBar" = "'Baz'"
+  }
 
+  type = "HTTP"
+  uri = "https://www.google.de"
+  integration_http_method = "GET"
+  passthrough_behavior = "WHEN_NO_MATCH"
+  content_handling = "CONVERT_TO_TEXT"
+}
+`
+
+const testAccAWSAPIGatewayIntegrationConfigUpdateNoTemplates = `
+resource "aws_api_gateway_rest_api" "test" {
+  name = "test"
+}
+
+resource "aws_api_gateway_resource" "test" {
+  rest_api_id = "${aws_api_gateway_rest_api.test.id}"
+  parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}"
+  path_part = "test"
+}
+
+resource "aws_api_gateway_method" "test" {
+  rest_api_id = "${aws_api_gateway_rest_api.test.id}"
+  resource_id = "${aws_api_gateway_resource.test.id}"
+  http_method = "GET"
+  authorization = "NONE"
+
+  request_models = {
+    "application/json" = "Error"
+  }
+}
+
+resource "aws_api_gateway_integration" "test" {
+  rest_api_id = "${aws_api_gateway_rest_api.test.id}"
+  resource_id = "${aws_api_gateway_resource.test.id}"
+  http_method = "${aws_api_gateway_method.test.http_method}"
+
+  type = "HTTP"
+  uri = "https://www.google.de"
+  integration_http_method = "GET"
+  passthrough_behavior = "WHEN_NO_MATCH"
+  content_handling = "CONVERT_TO_TEXT"
 }
 `
 
@@ -75,6 +75,7 @@ func TestAccAWSCustomerGateway_disappears(t *testing.T) {
   rInt := acctest.RandInt()
   rBgpAsn := acctest.RandIntRange(64512, 65534)
   var gateway ec2.CustomerGateway
+  randInt := acctest.RandInt()
   resource.Test(t, resource.TestCase{
     PreCheck:  func() { testAccPreCheck(t) },
     Providers: testAccProviders,
@@ -101,11 +101,19 @@ func resourceAwsDbInstance() *schema.Resource {
       },
 
       "identifier": {
+        Type:          schema.TypeString,
+        Optional:      true,
+        Computed:      true,
+        ForceNew:      true,
+        ConflictsWith: []string{"identifier_prefix"},
+        ValidateFunc:  validateRdsIdentifier,
+      },
+      "identifier_prefix": {
         Type:         schema.TypeString,
         Optional:     true,
         Computed:     true,
         ForceNew:     true,
-        ValidateFunc: validateRdsId,
+        ValidateFunc: validateRdsIdentifierPrefix,
       },
 
       "instance_class": {
@@ -336,10 +344,16 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
   conn := meta.(*AWSClient).rdsconn
   tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
 
-  identifier := d.Get("identifier").(string)
-  // Generate a unique ID for the user
-  if identifier == "" {
-    identifier = resource.PrefixedUniqueId("tf-")
+  var identifier string
+  if v, ok := d.GetOk("identifier"); ok {
+    identifier = v.(string)
+  } else {
+    if v, ok := d.GetOk("identifier_prefix"); ok {
+      identifier = resource.PrefixedUniqueId(v.(string))
+    } else {
+      identifier = resource.UniqueId()
+    }
 
     // SQL Server identifier size is max 15 chars, so truncate
     if engine := d.Get("engine").(string); engine != "" {
       if strings.Contains(strings.ToLower(engine), "sqlserver") {
@@ -407,7 +421,14 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
   }
 
   if attr, ok := d.GetOk("name"); ok {
-    opts.DBName = aws.String(attr.(string))
+    // "Note: This parameter [DBName] doesn't apply to the MySQL, PostgreSQL, or MariaDB engines."
+    // https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_RestoreDBInstanceFromDBSnapshot.html
+    switch strings.ToLower(d.Get("engine").(string)) {
+    case "mysql", "postgres", "mariadb":
+      // skip
+    default:
+      opts.DBName = aws.String(attr.(string))
+    }
   }
 
   if attr, ok := d.GetOk("availability_zone"); ok {
@@ -53,6 +53,46 @@ func TestAccAWSDBInstance_basic(t *testing.T) {
   })
 }
 
+func TestAccAWSDBInstance_namePrefix(t *testing.T) {
+  var v rds.DBInstance
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSDBInstanceDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAWSDBInstanceConfig_namePrefix,
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSDBInstanceExists("aws_db_instance.test", &v),
+          testAccCheckAWSDBInstanceAttributes(&v),
+          resource.TestMatchResourceAttr(
+            "aws_db_instance.test", "identifier", regexp.MustCompile("^tf-test-")),
+        ),
+      },
+    },
+  })
+}
+
+func TestAccAWSDBInstance_generatedName(t *testing.T) {
+  var v rds.DBInstance
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSDBInstanceDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAWSDBInstanceConfig_generatedName,
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSDBInstanceExists("aws_db_instance.test", &v),
+          testAccCheckAWSDBInstanceAttributes(&v),
+        ),
+      },
+    },
+  })
+}
+
 func TestAccAWSDBInstance_kmsKey(t *testing.T) {
   var v rds.DBInstance
   keyRegex := regexp.MustCompile("^arn:aws:kms:")
@@ -613,8 +653,8 @@ resource "aws_db_instance" "bar" {
   username = "foo"
 
 
   # Maintenance Window is stored in lower case in the API, though not strictly
   # documented. Terraform will downcase this to match (as opposed to throw a
   # validation error).
   maintenance_window = "Fri:09:00-Fri:09:30"
   skip_final_snapshot = true
@@ -628,6 +668,37 @@ resource "aws_db_instance" "bar" {
   }
 }`
 
+const testAccAWSDBInstanceConfig_namePrefix = `
+resource "aws_db_instance" "test" {
+  allocated_storage = 10
+  engine = "MySQL"
+  identifier_prefix = "tf-test-"
+  instance_class = "db.t1.micro"
+  password = "password"
+  username = "root"
+  publicly_accessible = true
+  skip_final_snapshot = true
+
+  timeouts {
+    create = "30m"
+  }
+}`
+
+const testAccAWSDBInstanceConfig_generatedName = `
+resource "aws_db_instance" "test" {
+  allocated_storage = 10
+  engine = "MySQL"
+  instance_class = "db.t1.micro"
+  password = "password"
+  username = "root"
+  publicly_accessible = true
+  skip_final_snapshot = true
+
+  timeouts {
+    create = "30m"
+  }
+}`
+
 var testAccAWSDBInstanceConfigKmsKeyId = `
 resource "aws_kms_key" "foo" {
   description = "Terraform acc test %s"
@@ -720,7 +791,7 @@ func testAccReplicaInstanceConfig(val int) string {
 
   parameter_group_name = "default.mysql5.6"
 }
 
 resource "aws_db_instance" "replica" {
   identifier = "tf-replica-db-%d"
   backup_retention_period = 0
|
|||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"regexp"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
@ -31,10 +30,19 @@ func resourceAwsDbOptionGroup() *schema.Resource {
|
|||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
"name": &schema.Schema{
|
"name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true,
|
||||||
|
ConflictsWith: []string{"name_prefix"},
|
||||||
|
ValidateFunc: validateDbOptionGroupName,
|
||||||
|
},
|
||||||
|
"name_prefix": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Required: true,
|
ValidateFunc: validateDbOptionGroupNamePrefix,
|
||||||
ValidateFunc: validateDbOptionGroupName,
|
|
||||||
},
|
},
|
||||||
"engine_name": &schema.Schema{
|
"engine_name": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
@ -48,8 +56,9 @@ func resourceAwsDbOptionGroup() *schema.Resource {
|
|||||||
},
|
},
|
||||||
"option_group_description": &schema.Schema{
|
"option_group_description": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
|
Default: "Managed by Terraform",
|
||||||
},
|
},
|
||||||
|
|
||||||
"option": &schema.Schema{
|
"option": &schema.Schema{
|
||||||
@ -107,11 +116,20 @@ func resourceAwsDbOptionGroupCreate(d *schema.ResourceData, meta interface{}) er
|
|||||||
rdsconn := meta.(*AWSClient).rdsconn
|
rdsconn := meta.(*AWSClient).rdsconn
|
||||||
tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
|
tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
|
||||||
|
|
||||||
|
var groupName string
|
||||||
|
if v, ok := d.GetOk("name"); ok {
|
||||||
|
groupName = v.(string)
|
||||||
|
} else if v, ok := d.GetOk("name_prefix"); ok {
|
||||||
|
groupName = resource.PrefixedUniqueId(v.(string))
|
||||||
|
} else {
|
||||||
|
groupName = resource.UniqueId()
|
||||||
|
}
|
||||||
|
|
||||||
createOpts := &rds.CreateOptionGroupInput{
|
createOpts := &rds.CreateOptionGroupInput{
|
||||||
EngineName: aws.String(d.Get("engine_name").(string)),
|
EngineName: aws.String(d.Get("engine_name").(string)),
|
||||||
MajorEngineVersion: aws.String(d.Get("major_engine_version").(string)),
|
MajorEngineVersion: aws.String(d.Get("major_engine_version").(string)),
|
||||||
OptionGroupDescription: aws.String(d.Get("option_group_description").(string)),
|
OptionGroupDescription: aws.String(d.Get("option_group_description").(string)),
|
||||||
OptionGroupName: aws.String(d.Get("name").(string)),
|
OptionGroupName: aws.String(groupName),
|
||||||
Tags: tags,
|
Tags: tags,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -121,7 +139,7 @@ func resourceAwsDbOptionGroupCreate(d *schema.ResourceData, meta interface{}) er
|
|||||||
return fmt.Errorf("Error creating DB Option Group: %s", err)
|
return fmt.Errorf("Error creating DB Option Group: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
d.SetId(d.Get("name").(string))
|
d.SetId(groupName)
|
||||||
log.Printf("[INFO] DB Option Group ID: %s", d.Id())
|
log.Printf("[INFO] DB Option Group ID: %s", d.Id())
|
||||||
|
|
||||||
return resourceAwsDbOptionGroupUpdate(d, meta)
|
return resourceAwsDbOptionGroupUpdate(d, meta)
|
||||||
@ -343,28 +361,3 @@ func buildRDSOptionGroupARN(identifier, partition, accountid, region string) (st
|
|||||||
arn := fmt.Sprintf("arn:%s:rds:%s:%s:og:%s", partition, region, accountid, identifier)
|
arn := fmt.Sprintf("arn:%s:rds:%s:%s:og:%s", partition, region, accountid, identifier)
|
||||||
return arn, nil
|
return arn, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateDbOptionGroupName(v interface{}, k string) (ws []string, errors []error) {
|
|
||||||
value := v.(string)
|
|
||||||
if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
|
|
||||||
errors = append(errors, fmt.Errorf(
|
|
||||||
"first character of %q must be a letter", k))
|
|
||||||
}
|
|
||||||
if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) {
|
|
||||||
errors = append(errors, fmt.Errorf(
|
|
||||||
"only alphanumeric characters and hyphens allowed in %q", k))
|
|
||||||
}
|
|
||||||
if regexp.MustCompile(`--`).MatchString(value) {
|
|
||||||
errors = append(errors, fmt.Errorf(
|
|
||||||
"%q cannot contain two consecutive hyphens", k))
|
|
||||||
}
|
|
||||||
if regexp.MustCompile(`-$`).MatchString(value) {
|
|
||||||
errors = append(errors, fmt.Errorf(
|
|
||||||
"%q cannot end with a hyphen", k))
|
|
||||||
}
|
|
||||||
if len(value) > 255 {
|
|
||||||
errors = append(errors, fmt.Errorf(
|
|
||||||
"%q cannot be greater than 255 characters", k))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
@@ -2,6 +2,7 @@ package aws
 
 import (
   "fmt"
+  "regexp"
   "testing"
 
   "github.com/aws/aws-sdk-go/aws"
@@ -34,6 +35,66 @@ func TestAccAWSDBOptionGroup_basic(t *testing.T) {
   })
 }
 
+func TestAccAWSDBOptionGroup_namePrefix(t *testing.T) {
+  var v rds.OptionGroup
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSDBOptionGroupDestroy,
+    Steps: []resource.TestStep{
+      resource.TestStep{
+        Config: testAccAWSDBOptionGroup_namePrefix,
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSDBOptionGroupExists("aws_db_option_group.test", &v),
+          testAccCheckAWSDBOptionGroupAttributes(&v),
+          resource.TestMatchResourceAttr(
+            "aws_db_option_group.test", "name", regexp.MustCompile("^tf-test-")),
+        ),
+      },
+    },
+  })
+}
+
+func TestAccAWSDBOptionGroup_generatedName(t *testing.T) {
+  var v rds.OptionGroup
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSDBOptionGroupDestroy,
+    Steps: []resource.TestStep{
+      resource.TestStep{
+        Config: testAccAWSDBOptionGroup_generatedName,
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSDBOptionGroupExists("aws_db_option_group.test", &v),
+          testAccCheckAWSDBOptionGroupAttributes(&v),
+        ),
+      },
+    },
+  })
+}
+
+func TestAccAWSDBOptionGroup_defaultDescription(t *testing.T) {
+  var v rds.OptionGroup
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSDBOptionGroupDestroy,
+    Steps: []resource.TestStep{
+      resource.TestStep{
+        Config: testAccAWSDBOptionGroup_defaultDescription(acctest.RandInt()),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSDBOptionGroupExists("aws_db_option_group.test", &v),
+          resource.TestCheckResourceAttr(
+            "aws_db_option_group.test", "option_group_description", "Managed by Terraform"),
+        ),
+      },
+    },
+  })
+}
+
 func TestAccAWSDBOptionGroup_basicDestroyWithInstance(t *testing.T) {
   rName := fmt.Sprintf("option-group-test-terraform-%s", acctest.RandString(5))
 
@@ -160,42 +221,6 @@ func testAccCheckAWSDBOptionGroupAttributes(v *rds.OptionGroup) resource.TestChe
   }
 }
-
-func TestResourceAWSDBOptionGroupName_validation(t *testing.T) {
-  cases := []struct {
-    Value    string
-    ErrCount int
-  }{
-    {
-      Value:    "testing123!",
-      ErrCount: 1,
-    },
-    {
-      Value:    "1testing123",
-      ErrCount: 1,
-    },
-    {
-      Value:    "testing--123",
-      ErrCount: 1,
-    },
-    {
-      Value:    "testing123-",
-      ErrCount: 1,
-    },
-    {
-      Value:    randomString(256),
-      ErrCount: 1,
-    },
-  }
-
-  for _, tc := range cases {
-    _, errors := validateDbOptionGroupName(tc.Value, "aws_db_option_group_name")
-
-    if len(errors) != tc.ErrCount {
-      t.Fatalf("Expected the DB Option Group Name to trigger a validation error")
-    }
-  }
-}
 
 func testAccCheckAWSDBOptionGroupExists(n string, v *rds.OptionGroup) resource.TestCheckFunc {
   return func(s *terraform.State) error {
     rs, ok := s.RootModule().Resources[n]
@@ -387,3 +412,30 @@ resource "aws_db_option_group" "bar" {
 }
 `, r)
 }
+
+const testAccAWSDBOptionGroup_namePrefix = `
+resource "aws_db_option_group" "test" {
+  name_prefix = "tf-test-"
+  option_group_description = "Test option group for terraform"
+  engine_name = "mysql"
+  major_engine_version = "5.6"
+}
+`
+
+const testAccAWSDBOptionGroup_generatedName = `
+resource "aws_db_option_group" "test" {
+  option_group_description = "Test option group for terraform"
+  engine_name = "mysql"
+  major_engine_version = "5.6"
+}
+`
+
+func testAccAWSDBOptionGroup_defaultDescription(n int) string {
+  return fmt.Sprintf(`
+resource "aws_db_option_group" "test" {
+  name = "tf-test-%d"
+  engine_name = "mysql"
+  major_engine_version = "5.6"
+}
+`, n)
+}
@@ -32,10 +32,19 @@ func resourceAwsDbParameterGroup() *schema.Resource {
         Computed: true,
       },
       "name": &schema.Schema{
+        Type:          schema.TypeString,
+        Optional:      true,
+        Computed:      true,
+        ForceNew:      true,
+        ConflictsWith: []string{"name_prefix"},
+        ValidateFunc:  validateDbParamGroupName,
+      },
+      "name_prefix": &schema.Schema{
         Type:         schema.TypeString,
+        Optional:     true,
+        Computed:     true,
         ForceNew:     true,
-        Required:     true,
-        ValidateFunc: validateDbParamGroupName,
+        ValidateFunc: validateDbParamGroupNamePrefix,
       },
       "family": &schema.Schema{
         Type: schema.TypeString,
@@ -81,8 +90,17 @@ func resourceAwsDbParameterGroupCreate(d *schema.ResourceData, meta interface{})
   rdsconn := meta.(*AWSClient).rdsconn
   tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
 
+  var groupName string
+  if v, ok := d.GetOk("name"); ok {
+    groupName = v.(string)
+  } else if v, ok := d.GetOk("name_prefix"); ok {
+    groupName = resource.PrefixedUniqueId(v.(string))
+  } else {
+    groupName = resource.UniqueId()
+  }
+
   createOpts := rds.CreateDBParameterGroupInput{
-    DBParameterGroupName:   aws.String(d.Get("name").(string)),
+    DBParameterGroupName:   aws.String(groupName),
     DBParameterGroupFamily: aws.String(d.Get("family").(string)),
     Description:            aws.String(d.Get("description").(string)),
     Tags: tags,
@@ -3,6 +3,7 @@ package aws
 import (
   "fmt"
   "math/rand"
+  "regexp"
   "testing"
   "time"
 
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccAWSDBParameterGroup_namePrefix(t *testing.T) {
|
||||||
|
var v rds.DBParameterGroup
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckAWSDBParameterGroupDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccDBParameterGroupConfig_namePrefix,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.test", &v),
|
||||||
|
resource.TestMatchResourceAttr(
|
||||||
|
"aws_db_parameter_group.test", "name", regexp.MustCompile("^tf-test-")),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccAWSDBParameterGroup_generatedName(t *testing.T) {
|
||||||
|
var v rds.DBParameterGroup
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckAWSDBParameterGroupDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccDBParameterGroupConfig_generatedName,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.test", &v),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestAccAWSDBParameterGroup_withApplyMethod(t *testing.T) {
|
func TestAccAWSDBParameterGroup_withApplyMethod(t *testing.T) {
|
||||||
var v rds.DBParameterGroup
|
var v rds.DBParameterGroup
|
||||||
|
|
||||||
@ -671,3 +710,16 @@ resource "aws_db_parameter_group" "large" {
|
|||||||
parameter { name = "tx_isolation" value = "REPEATABLE-READ" }
|
parameter { name = "tx_isolation" value = "REPEATABLE-READ" }
|
||||||
}`, n)
|
}`, n)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const testAccDBParameterGroupConfig_namePrefix = `
|
||||||
|
resource "aws_db_parameter_group" "test" {
|
||||||
|
name_prefix = "tf-test-"
|
||||||
|
family = "mysql5.6"
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
const testAccDBParameterGroupConfig_generatedName = `
|
||||||
|
resource "aws_db_parameter_group" "test" {
|
||||||
|
family = "mysql5.6"
|
||||||
|
}
|
||||||
|
`
|
||||||
|
@ -3,7 +3,6 @@ package aws
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"regexp"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@ -31,10 +30,19 @@ func resourceAwsDbSubnetGroup() *schema.Resource {
|
|||||||
},
|
},
|
||||||
|
|
||||||
"name": &schema.Schema{
|
"name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true,
|
||||||
|
ConflictsWith: []string{"name_prefix"},
|
||||||
|
ValidateFunc: validateDbSubnetGroupName,
|
||||||
|
},
|
||||||
|
"name_prefix": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Required: true,
|
ValidateFunc: validateDbSubnetGroupNamePrefix,
|
||||||
ValidateFunc: validateSubnetGroupName,
|
|
||||||
},
|
},
|
||||||
|
|
||||||
"description": &schema.Schema{
|
"description": &schema.Schema{
|
||||||
@ -65,8 +73,17 @@ func resourceAwsDbSubnetGroupCreate(d *schema.ResourceData, meta interface{}) er
|
|||||||
subnetIds[i] = aws.String(subnetId.(string))
|
subnetIds[i] = aws.String(subnetId.(string))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var groupName string
|
||||||
|
if v, ok := d.GetOk("name"); ok {
|
||||||
|
groupName = v.(string)
|
||||||
|
} else if v, ok := d.GetOk("name_prefix"); ok {
|
||||||
|
groupName = resource.PrefixedUniqueId(v.(string))
|
||||||
|
} else {
|
||||||
|
groupName = resource.UniqueId()
|
||||||
|
}
|
||||||
|
|
||||||
createOpts := rds.CreateDBSubnetGroupInput{
|
createOpts := rds.CreateDBSubnetGroupInput{
|
||||||
DBSubnetGroupName: aws.String(d.Get("name").(string)),
|
DBSubnetGroupName: aws.String(groupName),
|
||||||
DBSubnetGroupDescription: aws.String(d.Get("description").(string)),
|
DBSubnetGroupDescription: aws.String(d.Get("description").(string)),
|
||||||
SubnetIds: subnetIds,
|
SubnetIds: subnetIds,
|
||||||
Tags: tags,
|
Tags: tags,
|
||||||
@ -238,20 +255,3 @@ func buildRDSsubgrpARN(identifier, partition, accountid, region string) (string,
|
|||||||
return arn, nil
|
return arn, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateSubnetGroupName(v interface{}, k string) (ws []string, errors []error) {
|
|
||||||
value := v.(string)
|
|
||||||
if !regexp.MustCompile(`^[ .0-9a-z-_]+$`).MatchString(value) {
|
|
||||||
errors = append(errors, fmt.Errorf(
|
|
||||||
"only lowercase alphanumeric characters, hyphens, underscores, periods, and spaces allowed in %q", k))
|
|
||||||
}
|
|
||||||
if len(value) > 255 {
|
|
||||||
errors = append(errors, fmt.Errorf(
|
|
||||||
"%q cannot be longer than 255 characters", k))
|
|
||||||
}
|
|
||||||
if regexp.MustCompile(`(?i)^default$`).MatchString(value) {
|
|
||||||
errors = append(errors, fmt.Errorf(
|
|
||||||
"%q is not allowed as %q", "Default", k))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
@ -2,6 +2,7 @@ package aws
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"regexp"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
@ -43,6 +44,46 @@ func TestAccAWSDBSubnetGroup_basic(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccAWSDBSubnetGroup_namePrefix(t *testing.T) {
|
||||||
|
var v rds.DBSubnetGroup
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckDBSubnetGroupDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccDBSubnetGroupConfig_namePrefix,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckDBSubnetGroupExists(
|
||||||
|
"aws_db_subnet_group.test", &v),
|
||||||
|
resource.TestMatchResourceAttr(
|
||||||
|
"aws_db_subnet_group.test", "name", regexp.MustCompile("^tf_test-")),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccAWSDBSubnetGroup_generatedName(t *testing.T) {
|
||||||
|
var v rds.DBSubnetGroup
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckDBSubnetGroupDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccDBSubnetGroupConfig_generatedName,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckDBSubnetGroupExists(
|
||||||
|
"aws_db_subnet_group.test", &v),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Regression test for https://github.com/hashicorp/terraform/issues/2603 and
|
// Regression test for https://github.com/hashicorp/terraform/issues/2603 and
|
||||||
// https://github.com/hashicorp/terraform/issues/2664
|
// https://github.com/hashicorp/terraform/issues/2664
|
||||||
func TestAccAWSDBSubnetGroup_withUndocumentedCharacters(t *testing.T) {
|
func TestAccAWSDBSubnetGroup_withUndocumentedCharacters(t *testing.T) {
|
||||||
@ -105,38 +146,6 @@ func TestAccAWSDBSubnetGroup_updateDescription(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestResourceAWSDBSubnetGroupNameValidation(t *testing.T) {
|
|
||||||
cases := []struct {
|
|
||||||
Value string
|
|
||||||
ErrCount int
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
Value: "tEsting",
|
|
||||||
ErrCount: 1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Value: "testing?",
|
|
||||||
ErrCount: 1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Value: "default",
|
|
||||||
ErrCount: 1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Value: randomString(300),
|
|
||||||
ErrCount: 1,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range cases {
|
|
||||||
_, errors := validateSubnetGroupName(tc.Value, "aws_db_subnet_group")
|
|
||||||
|
|
||||||
if len(errors) != tc.ErrCount {
|
|
||||||
t.Fatalf("Expected the DB Subnet Group name to trigger a validation error")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccCheckDBSubnetGroupDestroy(s *terraform.State) error {
|
func testAccCheckDBSubnetGroupDestroy(s *terraform.State) error {
|
||||||
conn := testAccProvider.Meta().(*AWSClient).rdsconn
|
conn := testAccProvider.Meta().(*AWSClient).rdsconn
|
||||||
|
|
||||||
@ -263,6 +272,49 @@ resource "aws_db_subnet_group" "foo" {
|
|||||||
}`, rName)
|
}`, rName)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const testAccDBSubnetGroupConfig_namePrefix = `
|
||||||
|
resource "aws_vpc" "test" {
|
||||||
|
cidr_block = "10.1.0.0/16"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_subnet" "a" {
|
||||||
|
vpc_id = "${aws_vpc.test.id}"
|
||||||
|
cidr_block = "10.1.1.0/24"
|
||||||
|
availability_zone = "us-west-2a"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_subnet" "b" {
|
||||||
|
vpc_id = "${aws_vpc.test.id}"
|
||||||
|
cidr_block = "10.1.2.0/24"
|
||||||
|
availability_zone = "us-west-2b"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_db_subnet_group" "test" {
|
||||||
|
name_prefix = "tf_test-"
|
||||||
|
subnet_ids = ["${aws_subnet.a.id}", "${aws_subnet.b.id}"]
|
||||||
|
}`
|
||||||
|
|
||||||
|
const testAccDBSubnetGroupConfig_generatedName = `
|
||||||
|
resource "aws_vpc" "test" {
|
||||||
|
cidr_block = "10.1.0.0/16"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_subnet" "a" {
|
||||||
|
vpc_id = "${aws_vpc.test.id}"
|
||||||
|
cidr_block = "10.1.1.0/24"
|
||||||
|
availability_zone = "us-west-2a"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_subnet" "b" {
|
||||||
|
vpc_id = "${aws_vpc.test.id}"
|
||||||
|
cidr_block = "10.1.2.0/24"
|
||||||
|
availability_zone = "us-west-2b"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "aws_db_subnet_group" "test" {
|
||||||
|
subnet_ids = ["${aws_subnet.a.id}", "${aws_subnet.b.id}"]
|
||||||
|
}`
|
||||||
|
|
||||||
const testAccDBSubnetGroupConfig_withUnderscoresAndPeriodsAndSpaces = `
|
const testAccDBSubnetGroupConfig_withUnderscoresAndPeriodsAndSpaces = `
|
||||||
resource "aws_vpc" "main" {
|
resource "aws_vpc" "main" {
|
||||||
cidr_block = "192.168.0.0/16"
|
cidr_block = "192.168.0.0/16"
|
||||||
|
@ -33,9 +33,10 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource {
|
|||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
"password": &schema.Schema{
|
"password": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
|
Sensitive: true,
|
||||||
},
|
},
|
||||||
"size": &schema.Schema{
|
"size": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
|
@ -169,7 +169,7 @@ resource "aws_dms_replication_instance" "dms_replication_instance" {
|
|||||||
func dmsReplicationInstanceConfigUpdate(randId string) string {
|
func dmsReplicationInstanceConfigUpdate(randId string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "aws_iam_role" "dms_iam_role" {
|
resource "aws_iam_role" "dms_iam_role" {
|
||||||
name = "dms-vpc-role"
|
name = "dms-vpc-role-%[1]s"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"dms.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"dms.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -102,7 +102,7 @@ func dmsReplicationSubnetGroupDestroy(s *terraform.State) error {
|
|||||||
func dmsReplicationSubnetGroupConfig(randId string) string {
|
func dmsReplicationSubnetGroupConfig(randId string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "aws_iam_role" "dms_iam_role" {
|
resource "aws_iam_role" "dms_iam_role" {
|
||||||
name = "dms-vpc-role"
|
name = "dms-vpc-role-%[1]s"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"dms.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"dms.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -102,7 +102,7 @@ func dmsReplicationTaskDestroy(s *terraform.State) error {
|
|||||||
func dmsReplicationTaskConfig(randId string) string {
|
func dmsReplicationTaskConfig(randId string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "aws_iam_role" "dms_iam_role" {
|
resource "aws_iam_role" "dms_iam_role" {
|
||||||
name = "dms-vpc-role"
|
name = "dms-vpc-role-%[1]s"
|
||||||
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"dms.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"dms.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -118,6 +118,12 @@ func resourceAwsEcsService() *schema.Resource {
|
|||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
|
DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
|
||||||
|
if strings.ToLower(old) == strings.ToLower(new) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -45,6 +45,7 @@ func resourceAwsEcsTaskDefinition() *schema.Resource {
|
|||||||
hash := sha1.Sum([]byte(v.(string)))
|
hash := sha1.Sum([]byte(v.(string)))
|
||||||
return hex.EncodeToString(hash[:])
|
return hex.EncodeToString(hash[:])
|
||||||
},
|
},
|
||||||
|
ValidateFunc: validateAwsEcsTaskDefinitionContainerDefinitions,
|
||||||
},
|
},
|
||||||
|
|
||||||
"task_role_arn": {
|
"task_role_arn": {
|
||||||
@ -121,6 +122,15 @@ func validateAwsEcsTaskDefinitionNetworkMode(v interface{}, k string) (ws []stri
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func validateAwsEcsTaskDefinitionContainerDefinitions(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
value := v.(string)
|
||||||
|
_, err := expandEcsContainerDefinitions(value)
|
||||||
|
if err != nil {
|
||||||
|
errors = append(errors, fmt.Errorf("ECS Task Definition container_definitions is invalid: %s", err))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
func resourceAwsEcsTaskDefinitionCreate(d *schema.ResourceData, meta interface{}) error {
|
func resourceAwsEcsTaskDefinitionCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
conn := meta.(*AWSClient).ecsconn
|
conn := meta.(*AWSClient).ecsconn
|
||||||
|
|
||||||
|
@ -203,6 +203,28 @@ func TestValidateAwsEcsTaskDefinitionNetworkMode(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestValidateAwsEcsTaskDefinitionContainerDefinitions(t *testing.T) {
|
||||||
|
validDefinitions := []string{
|
||||||
|
testValidateAwsEcsTaskDefinitionValidContainerDefinitions,
|
||||||
|
}
|
||||||
|
for _, v := range validDefinitions {
|
||||||
|
_, errors := validateAwsEcsTaskDefinitionContainerDefinitions(v, "container_definitions")
|
||||||
|
if len(errors) != 0 {
|
||||||
|
t.Fatalf("%q should be a valid AWS ECS Task Definition Container Definitions: %q", v, errors)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
invalidDefinitions := []string{
|
||||||
|
testValidateAwsEcsTaskDefinitionInvalidCommandContainerDefinitions,
|
||||||
|
}
|
||||||
|
for _, v := range invalidDefinitions {
|
||||||
|
_, errors := validateAwsEcsTaskDefinitionContainerDefinitions(v, "container_definitions")
|
||||||
|
if len(errors) == 0 {
|
||||||
|
t.Fatalf("%q should be an invalid AWS ECS Task Definition Container Definitions", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckAWSEcsTaskDefinitionDestroy(s *terraform.State) error {
|
func testAccCheckAWSEcsTaskDefinitionDestroy(s *terraform.State) error {
|
||||||
conn := testAccProvider.Meta().(*AWSClient).ecsconn
|
conn := testAccProvider.Meta().(*AWSClient).ecsconn
|
||||||
|
|
||||||
@ -666,3 +688,29 @@ TASK_DEFINITION
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
|
|
||||||
|
var testValidateAwsEcsTaskDefinitionValidContainerDefinitions = `
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"name": "sleep",
|
||||||
|
"image": "busybox",
|
||||||
|
"cpu": 10,
|
||||||
|
"command": ["sleep","360"],
|
||||||
|
"memory": 10,
|
||||||
|
"essential": true
|
||||||
|
}
|
||||||
|
]
|
||||||
|
`
|
||||||
|
|
||||||
|
var testValidateAwsEcsTaskDefinitionInvalidCommandContainerDefinitions = `
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"name": "sleep",
|
||||||
|
"image": "busybox",
|
||||||
|
"cpu": 10,
|
||||||
|
"command": "sleep 360",
|
||||||
|
"memory": 10,
|
||||||
|
"essential": true
|
||||||
|
}
|
||||||
|
]
|
||||||
|
`
|
||||||
|
@ -82,6 +82,7 @@ func TestResourceAWSEFSFileSystem_hasEmptyFileSystems(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestAccAWSEFSFileSystem_basic(t *testing.T) {
|
func TestAccAWSEFSFileSystem_basic(t *testing.T) {
|
||||||
|
rInt := acctest.RandInt()
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
@ -104,7 +105,7 @@ func TestAccAWSEFSFileSystem_basic(t *testing.T) {
|
|||||||
),
|
),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Config: testAccAWSEFSFileSystemConfigWithTags,
|
Config: testAccAWSEFSFileSystemConfigWithTags(rInt),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckEfsFileSystem(
|
testAccCheckEfsFileSystem(
|
||||||
"aws_efs_file_system.foo-with-tags",
|
"aws_efs_file_system.foo-with-tags",
|
||||||
@ -116,7 +117,7 @@ func TestAccAWSEFSFileSystem_basic(t *testing.T) {
|
|||||||
testAccCheckEfsFileSystemTags(
|
testAccCheckEfsFileSystemTags(
|
||||||
"aws_efs_file_system.foo-with-tags",
|
"aws_efs_file_system.foo-with-tags",
|
||||||
map[string]string{
|
map[string]string{
|
||||||
"Name": "foo-efs",
|
"Name": fmt.Sprintf("foo-efs-%d", rInt),
|
||||||
"Another": "tag",
|
"Another": "tag",
|
||||||
},
|
},
|
||||||
),
|
),
|
||||||
@ -143,13 +144,14 @@ func TestAccAWSEFSFileSystem_basic(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestAccAWSEFSFileSystem_pagedTags(t *testing.T) {
|
func TestAccAWSEFSFileSystem_pagedTags(t *testing.T) {
|
||||||
|
rInt := acctest.RandInt()
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testAccCheckEfsFileSystemDestroy,
|
CheckDestroy: testAccCheckEfsFileSystemDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
{
|
{
|
||||||
Config: testAccAWSEFSFileSystemConfigPagedTags,
|
Config: testAccAWSEFSFileSystemConfigPagedTags(rInt),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
resource.TestCheckResourceAttr(
|
resource.TestCheckResourceAttr(
|
||||||
"aws_efs_file_system.foo",
|
"aws_efs_file_system.foo",
|
||||||
@ -312,34 +314,38 @@ resource "aws_efs_file_system" "foo" {
|
|||||||
}
|
}
|
||||||
`
|
`
|
||||||
|
|
||||||
const testAccAWSEFSFileSystemConfigPagedTags = `
|
func testAccAWSEFSFileSystemConfigPagedTags(rInt int) string {
|
||||||
resource "aws_efs_file_system" "foo" {
|
return fmt.Sprintf(`
|
||||||
creation_token = "radeksimko"
|
resource "aws_efs_file_system" "foo" {
|
||||||
tags {
|
creation_token = "radeksimko"
|
||||||
Name = "foo-efs"
|
tags {
|
||||||
Another = "tag"
|
Name = "foo-efs-%d"
|
||||||
Test = "yes"
|
Another = "tag"
|
||||||
User = "root"
|
Test = "yes"
|
||||||
Page = "1"
|
User = "root"
|
||||||
Environment = "prod"
|
Page = "1"
|
||||||
CostCenter = "terraform"
|
Environment = "prod"
|
||||||
AcceptanceTest = "PagedTags"
|
CostCenter = "terraform"
|
||||||
CreationToken = "radek"
|
AcceptanceTest = "PagedTags"
|
||||||
PerfMode = "max"
|
CreationToken = "radek"
|
||||||
Region = "us-west-2"
|
PerfMode = "max"
|
||||||
|
Region = "us-west-2"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
`, rInt)
|
||||||
}
|
}
|
||||||
`
|
|
||||||
|
|
||||||
const testAccAWSEFSFileSystemConfigWithTags = `
|
func testAccAWSEFSFileSystemConfigWithTags(rInt int) string {
|
||||||
resource "aws_efs_file_system" "foo-with-tags" {
|
return fmt.Sprintf(`
|
||||||
creation_token = "yada_yada"
|
resource "aws_efs_file_system" "foo-with-tags" {
|
||||||
tags {
|
creation_token = "yada_yada"
|
||||||
Name = "foo-efs"
|
tags {
|
||||||
Another = "tag"
|
Name = "foo-efs-%d"
|
||||||
|
Another = "tag"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
`, rInt)
|
||||||
}
|
}
|
||||||
`
|
|
||||||
|
|
||||||
const testAccAWSEFSFileSystemConfigWithPerformanceMode = `
|
const testAccAWSEFSFileSystemConfigWithPerformanceMode = `
|
||||||
resource "aws_efs_file_system" "foo-with-performance-mode" {
|
resource "aws_efs_file_system" "foo-with-performance-mode" {
|
||||||
|
@@ -661,7 +661,7 @@ resource "aws_elastic_beanstalk_environment" "tfenvtest" {
 func testAccBeanstalkWorkerEnvConfig(rInt int) string {
   return fmt.Sprintf(`
 resource "aws_iam_instance_profile" "tftest" {
-  name = "tftest_profile"
+  name = "tftest_profile-%d"
   roles = ["${aws_iam_role.tftest.name}"]
 }
 
@@ -693,7 +693,7 @@ func testAccBeanstalkWorkerEnvConfig(rInt int) string {
     name = "IamInstanceProfile"
     value = "${aws_iam_instance_profile.tftest.name}"
   }
-}`, rInt, rInt)
+}`, rInt, rInt, rInt)
 }
 
 func testAccBeanstalkEnvCnamePrefixConfig(randString string, rInt int) string {
@@ -937,24 +937,24 @@ resource "aws_s3_bucket_object" "default" {
 }
 
 resource "aws_elastic_beanstalk_application" "default" {
-  name = "tf-test-name"
+  name = "tf-test-name-%d"
   description = "tf-test-desc"
 }
 
 resource "aws_elastic_beanstalk_application_version" "default" {
-  application = "tf-test-name"
+  application = "tf-test-name-%d"
   name = "tf-test-version-label"
   bucket = "${aws_s3_bucket.default.id}"
   key = "${aws_s3_bucket_object.default.id}"
 }
 
 resource "aws_elastic_beanstalk_environment" "default" {
-  name = "tf-test-name"
+  name = "tf-test-name-%d"
   application = "${aws_elastic_beanstalk_application.default.name}"
   version_label = "${aws_elastic_beanstalk_application_version.default.name}"
   solution_stack_name = "64bit Amazon Linux running Python"
 }
-`, randInt)
+`, randInt, randInt, randInt, randInt)
 }
 
 func testAccBeanstalkEnvApplicationVersionConfigUpdate(randInt int) string {
@@ -970,22 +970,22 @@ resource "aws_s3_bucket_object" "default" {
 }
 
 resource "aws_elastic_beanstalk_application" "default" {
-  name = "tf-test-name"
+  name = "tf-test-name-%d"
   description = "tf-test-desc"
 }
 
 resource "aws_elastic_beanstalk_application_version" "default" {
-  application = "tf-test-name"
+  application = "tf-test-name-%d"
   name = "tf-test-version-label-v2"
   bucket = "${aws_s3_bucket.default.id}"
   key = "${aws_s3_bucket_object.default.id}"
 }
 
 resource "aws_elastic_beanstalk_environment" "default" {
-  name = "tf-test-name"
+  name = "tf-test-name-%d"
   application = "${aws_elastic_beanstalk_application.default.name}"
   version_label = "${aws_elastic_beanstalk_application_version.default.name}"
   solution_stack_name = "64bit Amazon Linux running Python"
 }
-`, randInt)
+`, randInt, randInt, randInt, randInt)
 }
@ -83,6 +83,7 @@ func resourceAwsElasticSearchDomain() *schema.Resource {
|
|||||||
"volume_type": {
|
"volume_type": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -85,6 +85,13 @@ func testAccESDomainPolicyConfig(randInt int, policy string) string {
|
|||||||
resource "aws_elasticsearch_domain" "example" {
|
resource "aws_elasticsearch_domain" "example" {
|
||||||
domain_name = "tf-test-%d"
|
domain_name = "tf-test-%d"
|
||||||
elasticsearch_version = "2.3"
|
elasticsearch_version = "2.3"
|
||||||
|
cluster_config {
|
||||||
|
instance_type = "t2.micro.elasticsearch"
|
||||||
|
}
|
||||||
|
ebs_options {
|
||||||
|
ebs_enabled = true
|
||||||
|
volume_size = 10
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_elasticsearch_domain_policy" "main" {
|
resource "aws_elasticsearch_domain_policy" "main" {
|
||||||
|
@@ -96,7 +96,7 @@ func TestAccAWSElasticSearchDomain_complex(t *testing.T) {
   })
 }

-func TestAccAWSElasticSearch_tags(t *testing.T) {
+func TestAccAWSElasticSearchDomain_tags(t *testing.T) {
   var domain elasticsearch.ElasticsearchDomainStatus
   var td elasticsearch.ListTagsOutput
   ri := acctest.RandInt()
@@ -198,6 +198,10 @@ func testAccESDomainConfig(randInt int) string {
   return fmt.Sprintf(`
 resource "aws_elasticsearch_domain" "example" {
   domain_name = "tf-test-%d"
+  ebs_options {
+    ebs_enabled = true
+    volume_size = 10
+  }
 }
 `, randInt)
 }
@@ -206,6 +210,10 @@ func testAccESDomainConfig_TagUpdate(randInt int) string {
   return fmt.Sprintf(`
 resource "aws_elasticsearch_domain" "example" {
   domain_name = "tf-test-%d"
+  ebs_options {
+    ebs_enabled = true
+    volume_size = 10
+  }

   tags {
     foo = "bar"
@@ -220,6 +228,10 @@ func testAccESDomainConfig_complex(randInt int) string {
 resource "aws_elasticsearch_domain" "example" {
   domain_name = "tf-test-%d"

+  cluster_config {
+    instance_type = "r3.large.elasticsearch"
+  }
+
   advanced_options {
     "indices.fielddata.cache.size" = 80
   }
@@ -248,6 +260,10 @@ func testAccESDomainConfigV23(randInt int) string {
   return fmt.Sprintf(`
 resource "aws_elasticsearch_domain" "example" {
   domain_name = "tf-test-%d"
+  ebs_options {
+    ebs_enabled = true
+    volume_size = 10
+  }
   elasticsearch_version = "2.3"
 }
 `, randInt)
@@ -287,7 +287,7 @@ func resourceAwsElbCreate(d *schema.ResourceData, meta interface{}) error {
   }

   log.Printf("[DEBUG] ELB create configuration: %#v", elbOpts)
-  err = resource.Retry(1*time.Minute, func() *resource.RetryError {
+  err = resource.Retry(5*time.Minute, func() *resource.RetryError {
     _, err := elbconn.CreateLoadBalancer(elbOpts)

     if err != nil {
@@ -488,7 +488,7 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {

     // Occasionally AWS will error with a 'duplicate listener', without any
     // other listeners on the ELB. Retry here to eliminate that.
-    err := resource.Retry(1*time.Minute, func() *resource.RetryError {
+    err := resource.Retry(5*time.Minute, func() *resource.RetryError {
       log.Printf("[DEBUG] ELB Create Listeners opts: %s", createListenersOpts)
       if _, err := elbconn.CreateLoadBalancerListeners(createListenersOpts); err != nil {
         if awsErr, ok := err.(awserr.Error); ok {
@@ -746,7 +746,7 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {
     }

     log.Printf("[DEBUG] ELB attach subnets opts: %s", attachOpts)
-    err := resource.Retry(1*time.Minute, func() *resource.RetryError {
+    err := resource.Retry(5*time.Minute, func() *resource.RetryError {
       _, err := elbconn.AttachLoadBalancerToSubnets(attachOpts)
       if err != nil {
         if awsErr, ok := err.(awserr.Error); ok {
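The three ELB hunks above only widen the retry window from one minute to five. As a point of reference, here is a minimal sketch of the helper/resource retry pattern those call sites rely on; the AWS call and the retried error code are illustrative assumptions, not part of this commit:

package aws

import (
  "time"

  "github.com/aws/aws-sdk-go/aws/awserr"
  "github.com/aws/aws-sdk-go/service/elb"
  "github.com/hashicorp/terraform/helper/resource"
)

// createElbWithRetry shows the shape of the retry loops above: transient AWS
// errors are wrapped in RetryableError so resource.Retry keeps polling until
// the timeout elapses; anything else aborts immediately via NonRetryableError.
func createElbWithRetry(elbconn *elb.ELB, opts *elb.CreateLoadBalancerInput) error {
  return resource.Retry(5*time.Minute, func() *resource.RetryError {
    _, err := elbconn.CreateLoadBalancer(opts)
    if err != nil {
      // "CertificateNotFound" here is only an example of a retryable condition.
      if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "CertificateNotFound" {
        return resource.RetryableError(awsErr)
      }
      return resource.NonRetryableError(err)
    }
    return nil
  })
}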
@@ -150,8 +150,9 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource {
             },

             "password": {
               Type:     schema.TypeString,
               Required: true,
+              Sensitive: true,
             },

             "role_arn": {
@@ -297,14 +297,13 @@ func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) e
   err := resource.Retry(10*time.Minute, func() *resource.RetryError {
     _, err := conn.CreateFunction(params)
     if err != nil {
-      log.Printf("[ERROR] Received %q, retrying CreateFunction", err)
-      if awserr, ok := err.(awserr.Error); ok {
-        if awserr.Code() == "InvalidParameterValueException" {
-          log.Printf("[DEBUG] InvalidParameterValueException creating Lambda Function: %s", awserr)
-          return resource.RetryableError(awserr)
-        }
-      }
       log.Printf("[DEBUG] Error creating Lambda Function: %s", err)
+
+      if isAWSErr(err, "InvalidParameterValueException", "The role defined for the function cannot be assumed by Lambda") {
+        log.Printf("[DEBUG] Received %s, retrying CreateFunction", err)
+        return resource.RetryableError(err)
+      }
+
       return resource.NonRetryableError(err)
     }
     return nil
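The Lambda hunk above swaps the hand-rolled awserr type switch for a call to isAWSErr. The helper itself is defined elsewhere in the provider and is not shown in this excerpt; the sketch below captures the behaviour the new retry branch assumes — an exact error-code match plus a message substring:

package aws

import (
  "strings"

  "github.com/aws/aws-sdk-go/aws/awserr"
)

// Sketch of an isAWSErr-style check: true when err is an awserr.Error whose
// code matches exactly and whose message contains the given substring. The
// provider's real helper may differ in detail.
func isAWSErrSketch(err error, code string, message string) bool {
  if awsErr, ok := err.(awserr.Error); ok {
    return awsErr.Code() == code && strings.Contains(awsErr.Message(), message)
  }
  return false
}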
builtin/providers/aws/resource_aws_lightsail_static_ip.go (new file, 98 lines)
@@ -0,0 +1,98 @@
+package aws
+
+import (
+  "log"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/aws/awserr"
+  "github.com/aws/aws-sdk-go/service/lightsail"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsLightsailStaticIp() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceAwsLightsailStaticIpCreate,
+    Read:   resourceAwsLightsailStaticIpRead,
+    Delete: resourceAwsLightsailStaticIpDelete,
+
+    Schema: map[string]*schema.Schema{
+      "name": {
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+      },
+      "ip_address": {
+        Type:     schema.TypeString,
+        Computed: true,
+      },
+      "arn": {
+        Type:     schema.TypeString,
+        Computed: true,
+      },
+      "support_code": {
+        Type:     schema.TypeString,
+        Computed: true,
+      },
+    },
+  }
+}
+
+func resourceAwsLightsailStaticIpCreate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).lightsailconn
+
+  name := d.Get("name").(string)
+  log.Printf("[INFO] Allocating Lightsail Static IP: %q", name)
+  out, err := conn.AllocateStaticIp(&lightsail.AllocateStaticIpInput{
+    StaticIpName: aws.String(name),
+  })
+  if err != nil {
+    return err
+  }
+  log.Printf("[INFO] Lightsail Static IP allocated: %s", *out)
+
+  d.SetId(name)
+
+  return resourceAwsLightsailStaticIpRead(d, meta)
+}
+
+func resourceAwsLightsailStaticIpRead(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).lightsailconn
+
+  name := d.Get("name").(string)
+  log.Printf("[INFO] Reading Lightsail Static IP: %q", name)
+  out, err := conn.GetStaticIp(&lightsail.GetStaticIpInput{
+    StaticIpName: aws.String(name),
+  })
+  if err != nil {
+    if awsErr, ok := err.(awserr.Error); ok {
+      if awsErr.Code() == "NotFoundException" {
+        log.Printf("[WARN] Lightsail Static IP (%s) not found, removing from state", d.Id())
+        d.SetId("")
+        return nil
+      }
+    }
+    return err
+  }
+  log.Printf("[INFO] Received Lightsail Static IP: %s", *out)
+
+  d.Set("arn", out.StaticIp.Arn)
+  d.Set("ip_address", out.StaticIp.IpAddress)
+  d.Set("support_code", out.StaticIp.SupportCode)
+
+  return nil
+}
+
+func resourceAwsLightsailStaticIpDelete(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).lightsailconn
+
+  name := d.Get("name").(string)
+  log.Printf("[INFO] Deleting Lightsail Static IP: %q", name)
+  out, err := conn.ReleaseStaticIp(&lightsail.ReleaseStaticIpInput{
+    StaticIpName: aws.String(name),
+  })
+  if err != nil {
+    return err
+  }
+  log.Printf("[INFO] Deleted Lightsail Static IP: %s", *out)
+  return nil
+}
@@ -0,0 +1,96 @@
+package aws
+
+import (
+  "log"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/aws/awserr"
+  "github.com/aws/aws-sdk-go/service/lightsail"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsLightsailStaticIpAttachment() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceAwsLightsailStaticIpAttachmentCreate,
+    Read:   resourceAwsLightsailStaticIpAttachmentRead,
+    Delete: resourceAwsLightsailStaticIpAttachmentDelete,
+
+    Schema: map[string]*schema.Schema{
+      "static_ip_name": {
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+      },
+      "instance_name": {
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+      },
+    },
+  }
+}
+
+func resourceAwsLightsailStaticIpAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).lightsailconn
+
+  staticIpName := d.Get("static_ip_name").(string)
+  log.Printf("[INFO] Attaching Lightsail Static IP: %q", staticIpName)
+  out, err := conn.AttachStaticIp(&lightsail.AttachStaticIpInput{
+    StaticIpName: aws.String(staticIpName),
+    InstanceName: aws.String(d.Get("instance_name").(string)),
+  })
+  if err != nil {
+    return err
+  }
+  log.Printf("[INFO] Lightsail Static IP attached: %s", *out)
+
+  d.SetId(staticIpName)
+
+  return resourceAwsLightsailStaticIpAttachmentRead(d, meta)
+}
+
+func resourceAwsLightsailStaticIpAttachmentRead(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).lightsailconn
+
+  staticIpName := d.Get("static_ip_name").(string)
+  log.Printf("[INFO] Reading Lightsail Static IP: %q", staticIpName)
+  out, err := conn.GetStaticIp(&lightsail.GetStaticIpInput{
+    StaticIpName: aws.String(staticIpName),
+  })
+  if err != nil {
+    if awsErr, ok := err.(awserr.Error); ok {
+      if awsErr.Code() == "NotFoundException" {
+        log.Printf("[WARN] Lightsail Static IP (%s) not found, removing from state", d.Id())
+        d.SetId("")
+        return nil
+      }
+    }
+    return err
+  }
+  if !*out.StaticIp.IsAttached {
+    log.Printf("[WARN] Lightsail Static IP (%s) is not attached, removing from state", d.Id())
+    d.SetId("")
+    return nil
+  }
+
+  log.Printf("[INFO] Received Lightsail Static IP: %s", *out)
+
+  d.Set("instance_name", out.StaticIp.AttachedTo)
+
+  return nil
+}
+
+func resourceAwsLightsailStaticIpAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).lightsailconn
+
+  name := d.Get("static_ip_name").(string)
+  log.Printf("[INFO] Detaching Lightsail Static IP: %q", name)
+  out, err := conn.DetachStaticIp(&lightsail.DetachStaticIpInput{
+    StaticIpName: aws.String(name),
+  })
+  if err != nil {
+    return err
+  }
+  log.Printf("[INFO] Detached Lightsail Static IP: %s", *out)
+  return nil
+}
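Not shown in this excerpt, but the two new Lightsail resource files above only take effect once they are registered in the provider's resource map, which the commit presumably does elsewhere. A sketch of what such a registration typically looks like; the wrapper function is hypothetical, and only the constructors and type names come from the files above:

package aws

import "github.com/hashicorp/terraform/helper/schema"

// lightsailStaticIpResources returns the two new resource types keyed by the
// names Terraform configurations use. In the real provider these entries live
// in the ResourcesMap of the Provider() definition.
func lightsailStaticIpResources() map[string]*schema.Resource {
  return map[string]*schema.Resource{
    "aws_lightsail_static_ip":            resourceAwsLightsailStaticIp(),
    "aws_lightsail_static_ip_attachment": resourceAwsLightsailStaticIpAttachment(),
  }
}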
@@ -0,0 +1,163 @@
+package aws
+
+import (
+  "errors"
+  "fmt"
+  "testing"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/aws/awserr"
+  "github.com/aws/aws-sdk-go/service/lightsail"
+  "github.com/hashicorp/terraform/helper/acctest"
+  "github.com/hashicorp/terraform/helper/resource"
+  "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSLightsailStaticIpAttachment_basic(t *testing.T) {
+  var staticIp lightsail.StaticIp
+  staticIpName := fmt.Sprintf("tf-test-lightsail-%s", acctest.RandString(5))
+  instanceName := fmt.Sprintf("tf-test-lightsail-%s", acctest.RandString(5))
+  keypairName := fmt.Sprintf("tf-test-lightsail-%s", acctest.RandString(5))
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSLightsailStaticIpAttachmentDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAWSLightsailStaticIpAttachmentConfig_basic(staticIpName, instanceName, keypairName),
+        Check: resource.ComposeAggregateTestCheckFunc(
+          testAccCheckAWSLightsailStaticIpAttachmentExists("aws_lightsail_static_ip_attachment.test", &staticIp),
+        ),
+      },
+    },
+  })
+}
+
+func TestAccAWSLightsailStaticIpAttachment_disappears(t *testing.T) {
+  var staticIp lightsail.StaticIp
+  staticIpName := fmt.Sprintf("tf-test-lightsail-%s", acctest.RandString(5))
+  instanceName := fmt.Sprintf("tf-test-lightsail-%s", acctest.RandString(5))
+  keypairName := fmt.Sprintf("tf-test-lightsail-%s", acctest.RandString(5))
+
+  staticIpDestroy := func(*terraform.State) error {
+    conn := testAccProvider.Meta().(*AWSClient).lightsailconn
+    _, err := conn.DetachStaticIp(&lightsail.DetachStaticIpInput{
+      StaticIpName: aws.String(staticIpName),
+    })
+
+    if err != nil {
+      return fmt.Errorf("Error deleting Lightsail Static IP in disappear test")
+    }
+
+    return nil
+  }
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSLightsailStaticIpAttachmentDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAWSLightsailStaticIpAttachmentConfig_basic(staticIpName, instanceName, keypairName),
+        Check: resource.ComposeAggregateTestCheckFunc(
+          testAccCheckAWSLightsailStaticIpAttachmentExists("aws_lightsail_static_ip_attachment.test", &staticIp),
+          staticIpDestroy,
+        ),
+        ExpectNonEmptyPlan: true,
+      },
+    },
+  })
+}
+
+func testAccCheckAWSLightsailStaticIpAttachmentExists(n string, staticIp *lightsail.StaticIp) resource.TestCheckFunc {
+  return func(s *terraform.State) error {
+    rs, ok := s.RootModule().Resources[n]
+    if !ok {
+      return fmt.Errorf("Not found: %s", n)
+    }
+
+    if rs.Primary.ID == "" {
+      return errors.New("No Lightsail Static IP Attachment ID is set")
+    }
+
+    conn := testAccProvider.Meta().(*AWSClient).lightsailconn
+
+    resp, err := conn.GetStaticIp(&lightsail.GetStaticIpInput{
+      StaticIpName: aws.String(rs.Primary.ID),
+    })
+    if err != nil {
+      return err
+    }
+
+    if resp == nil || resp.StaticIp == nil {
+      return fmt.Errorf("Static IP (%s) not found", rs.Primary.ID)
+    }
+
+    if !*resp.StaticIp.IsAttached {
+      return fmt.Errorf("Static IP (%s) not attached", rs.Primary.ID)
+    }
+
+    *staticIp = *resp.StaticIp
+    return nil
+  }
+}
+
+func testAccCheckAWSLightsailStaticIpAttachmentDestroy(s *terraform.State) error {
+  for _, rs := range s.RootModule().Resources {
+    if rs.Type != "aws_lightsail_static_ip_attachment" {
+      continue
+    }
+
+    conn := testAccProvider.Meta().(*AWSClient).lightsailconn
+
+    resp, err := conn.GetStaticIp(&lightsail.GetStaticIpInput{
+      StaticIpName: aws.String(rs.Primary.ID),
+    })
+
+    if err == nil {
+      if *resp.StaticIp.IsAttached {
+        return fmt.Errorf("Lightsail Static IP %q is still attached (to %q)", rs.Primary.ID, *resp.StaticIp.AttachedTo)
+      }
+    }
+
+    // Verify the error
+    if awsErr, ok := err.(awserr.Error); ok {
+      if awsErr.Code() == "NotFoundException" {
+        return nil
+      }
+    }
+    return err
+  }
+
+  return nil
+}
+
+func testAccAWSLightsailStaticIpAttachmentConfig_basic(staticIpName, instanceName, keypairName string) string {
+  return fmt.Sprintf(`
+provider "aws" {
+  region = "us-east-1"
+}
+
+resource "aws_lightsail_static_ip_attachment" "test" {
+  static_ip_name = "${aws_lightsail_static_ip.test.name}"
+  instance_name  = "${aws_lightsail_instance.test.name}"
+}
+
+resource "aws_lightsail_static_ip" "test" {
+  name = "%s"
+}
+
+resource "aws_lightsail_instance" "test" {
+  name              = "%s"
+  availability_zone = "us-east-1b"
+  blueprint_id      = "wordpress_4_6_1"
+  bundle_id         = "micro_1_0"
+  key_pair_name     = "${aws_lightsail_key_pair.test.name}"
+}
+
+resource "aws_lightsail_key_pair" "test" {
+  name = "%s"
+}
+`, staticIpName, instanceName, keypairName)
+}
builtin/providers/aws/resource_aws_lightsail_static_ip_test.go (new file, 138 lines)
@@ -0,0 +1,138 @@
+package aws
+
+import (
+  "errors"
+  "fmt"
+  "testing"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/aws/awserr"
+  "github.com/aws/aws-sdk-go/service/lightsail"
+  "github.com/hashicorp/terraform/helper/acctest"
+  "github.com/hashicorp/terraform/helper/resource"
+  "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSLightsailStaticIp_basic(t *testing.T) {
+  var staticIp lightsail.StaticIp
+  staticIpName := fmt.Sprintf("tf-test-lightsail-%s", acctest.RandString(5))
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSLightsailStaticIpDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAWSLightsailStaticIpConfig_basic(staticIpName),
+        Check: resource.ComposeAggregateTestCheckFunc(
+          testAccCheckAWSLightsailStaticIpExists("aws_lightsail_static_ip.test", &staticIp),
+        ),
+      },
+    },
+  })
+}
+
+func TestAccAWSLightsailStaticIp_disappears(t *testing.T) {
+  var staticIp lightsail.StaticIp
+  staticIpName := fmt.Sprintf("tf-test-lightsail-%s", acctest.RandString(5))
+
+  staticIpDestroy := func(*terraform.State) error {
+    conn := testAccProvider.Meta().(*AWSClient).lightsailconn
+    _, err := conn.ReleaseStaticIp(&lightsail.ReleaseStaticIpInput{
+      StaticIpName: aws.String(staticIpName),
+    })
+
+    if err != nil {
+      return fmt.Errorf("Error deleting Lightsail Static IP in disapear test")
+    }
+
+    return nil
+  }
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSLightsailStaticIpDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAWSLightsailStaticIpConfig_basic(staticIpName),
+        Check: resource.ComposeAggregateTestCheckFunc(
+          testAccCheckAWSLightsailStaticIpExists("aws_lightsail_static_ip.test", &staticIp),
+          staticIpDestroy,
+        ),
+        ExpectNonEmptyPlan: true,
+      },
+    },
+  })
+}
+
+func testAccCheckAWSLightsailStaticIpExists(n string, staticIp *lightsail.StaticIp) resource.TestCheckFunc {
+  return func(s *terraform.State) error {
+    rs, ok := s.RootModule().Resources[n]
+    if !ok {
+      return fmt.Errorf("Not found: %s", n)
+    }
+
+    if rs.Primary.ID == "" {
+      return errors.New("No Lightsail Static IP ID is set")
+    }
+
+    conn := testAccProvider.Meta().(*AWSClient).lightsailconn
+
+    resp, err := conn.GetStaticIp(&lightsail.GetStaticIpInput{
+      StaticIpName: aws.String(rs.Primary.ID),
+    })
+
+    if err != nil {
+      return err
+    }
+
+    if resp == nil || resp.StaticIp == nil {
+      return fmt.Errorf("Static IP (%s) not found", rs.Primary.ID)
+    }
+    *staticIp = *resp.StaticIp
+    return nil
+  }
+}
+
+func testAccCheckAWSLightsailStaticIpDestroy(s *terraform.State) error {
+
+  for _, rs := range s.RootModule().Resources {
+    if rs.Type != "aws_lightsail_static_ip" {
+      continue
+    }
+
+    conn := testAccProvider.Meta().(*AWSClient).lightsailconn
+
+    resp, err := conn.GetStaticIp(&lightsail.GetStaticIpInput{
+      StaticIpName: aws.String(rs.Primary.ID),
+    })
+
+    if err == nil {
+      if resp.StaticIp != nil {
+        return fmt.Errorf("Lightsail Static IP %q still exists", rs.Primary.ID)
+      }
+    }
+
+    // Verify the error
+    if awsErr, ok := err.(awserr.Error); ok {
+      if awsErr.Code() == "NotFoundException" {
+        return nil
+      }
+    }
+    return err
+  }
+
+  return nil
+}
+
+func testAccAWSLightsailStaticIpConfig_basic(staticIpName string) string {
+  return fmt.Sprintf(`
+provider "aws" {
+  region = "us-east-1"
+}
+resource "aws_lightsail_static_ip" "test" {
+  name = "%s"
+}
+`, staticIpName)
+}
@@ -102,8 +102,9 @@ func resourceAwsOpsworksApplication() *schema.Resource {
       },

       "password": {
         Type:     schema.TypeString,
         Optional: true,
+        Sensitive: true,
       },

       "revision": {
@@ -187,8 +188,9 @@ func resourceAwsOpsworksApplication() *schema.Resource {
           },
         },
         "private_key": {
           Type:     schema.TypeString,
           Required: true,
+          Sensitive: true,
           StateFunc: func(v interface{}) string {
             switch v.(type) {
             case string:
@@ -781,7 +781,7 @@ func resourceAwsOpsworksInstanceCreate(d *schema.ResourceData, meta interface{})
   d.Set("id", instanceId)

   if v, ok := d.GetOk("state"); ok && v.(string) == "running" {
-    err := startOpsworksInstance(d, meta, false)
+    err := startOpsworksInstance(d, meta, true)
     if err != nil {
       return err
     }
@@ -860,7 +860,7 @@ func resourceAwsOpsworksInstanceUpdate(d *schema.ResourceData, meta interface{})
     }
   } else {
     if status != "stopped" && status != "stopping" && status != "shutting_down" {
-      err := stopOpsworksInstance(d, meta, false)
+      err := stopOpsworksInstance(d, meta, true)
       if err != nil {
         return err
       }
@@ -111,8 +111,9 @@ func resourceAwsOpsworksStack() *schema.Resource {
       },

       "password": {
         Type:     schema.TypeString,
         Optional: true,
+        Sensitive: true,
       },

       "revision": {
@@ -36,10 +36,19 @@ func resourceAwsRDSCluster() *schema.Resource {
       },

       "cluster_identifier": {
+        Type:          schema.TypeString,
+        Optional:      true,
+        Computed:      true,
+        ForceNew:      true,
+        ConflictsWith: []string{"cluster_identifier_prefix"},
+        ValidateFunc:  validateRdsIdentifier,
+      },
+      "cluster_identifier_prefix": {
         Type:         schema.TypeString,
-        Required:     true,
+        Optional:     true,
+        Computed:     true,
         ForceNew:     true,
-        ValidateFunc: validateRdsId,
+        ValidateFunc: validateRdsIdentifierPrefix,
       },

       "cluster_members": {
@@ -225,6 +234,19 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error
   conn := meta.(*AWSClient).rdsconn
   tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))

+  var identifier string
+  if v, ok := d.GetOk("cluster_identifier"); ok {
+    identifier = v.(string)
+  } else {
+    if v, ok := d.GetOk("cluster_identifier_prefix"); ok {
+      identifier = resource.PrefixedUniqueId(v.(string))
+    } else {
+      identifier = resource.PrefixedUniqueId("tf-")
+    }
+
+    d.Set("cluster_identifier", identifier)
+  }
+
   if _, ok := d.GetOk("snapshot_identifier"); ok {
     opts := rds.RestoreDBClusterFromSnapshotInput{
       DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)),
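The new create path above resolves the cluster name with a three-way precedence: an explicit cluster_identifier wins, otherwise a user-supplied cluster_identifier_prefix is made unique, otherwise a generated "tf-" name is used. A condensed sketch of that precedence factored into a standalone helper; the helper itself is not part of the commit:

package aws

import (
  "github.com/hashicorp/terraform/helper/resource"
  "github.com/hashicorp/terraform/helper/schema"
)

// pickClusterIdentifier mirrors the precedence used in resourceAwsRDSClusterCreate:
// explicit identifier > unique name derived from the user prefix > generated "tf-" name.
func pickClusterIdentifier(d *schema.ResourceData) string {
  if v, ok := d.GetOk("cluster_identifier"); ok {
    return v.(string)
  }
  if v, ok := d.GetOk("cluster_identifier_prefix"); ok {
    return resource.PrefixedUniqueId(v.(string))
  }
  return resource.PrefixedUniqueId("tf-")
}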
@@ -3,6 +3,7 @@ package aws
 import (
   "fmt"
   "log"
+  "strings"
   "time"

   "github.com/aws/aws-sdk-go/aws"
@@ -23,10 +24,19 @@ func resourceAwsRDSClusterInstance() *schema.Resource {

     Schema: map[string]*schema.Schema{
       "identifier": {
+        Type:          schema.TypeString,
+        Optional:      true,
+        Computed:      true,
+        ForceNew:      true,
+        ConflictsWith: []string{"identifier_prefix"},
+        ValidateFunc:  validateRdsIdentifier,
+      },
+      "identifier_prefix": {
         Type:         schema.TypeString,
         Optional:     true,
+        Computed:     true,
         ForceNew:     true,
-        ValidateFunc: validateRdsId,
+        ValidateFunc: validateRdsIdentifierPrefix,
       },

       "db_subnet_group_name": {
@@ -105,6 +115,27 @@ func resourceAwsRDSClusterInstance() *schema.Resource {
         Computed: true,
       },

+      "preferred_maintenance_window": {
+        Type:     schema.TypeString,
+        Optional: true,
+        Computed: true,
+        StateFunc: func(v interface{}) string {
+          if v != nil {
+            value := v.(string)
+            return strings.ToLower(value)
+          }
+          return ""
+        },
+        ValidateFunc: validateOnceAWeekWindowFormat,
+      },
+
+      "preferred_backup_window": {
+        Type:         schema.TypeString,
+        Optional:     true,
+        Computed:     true,
+        ValidateFunc: validateOnceADayWindowFormat,
+      },
+
       "monitoring_interval": {
         Type:     schema.TypeInt,
         Optional: true,
@@ -140,10 +171,14 @@ func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{
     createOpts.DBParameterGroupName = aws.String(attr.(string))
   }

-  if v := d.Get("identifier").(string); v != "" {
+  if v, ok := d.GetOk("identifier"); ok {
-    createOpts.DBInstanceIdentifier = aws.String(v)
+    createOpts.DBInstanceIdentifier = aws.String(v.(string))
   } else {
-    createOpts.DBInstanceIdentifier = aws.String(resource.UniqueId())
+    if v, ok := d.GetOk("identifier_prefix"); ok {
+      createOpts.DBInstanceIdentifier = aws.String(resource.PrefixedUniqueId(v.(string)))
+    } else {
+      createOpts.DBInstanceIdentifier = aws.String(resource.PrefixedUniqueId("tf-"))
+    }
   }

   if attr, ok := d.GetOk("db_subnet_group_name"); ok {
@@ -154,6 +189,14 @@ func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{
     createOpts.MonitoringRoleArn = aws.String(attr.(string))
   }

+  if attr, ok := d.GetOk("preferred_backup_window"); ok {
+    createOpts.PreferredBackupWindow = aws.String(attr.(string))
+  }
+
+  if attr, ok := d.GetOk("preferred_maintenance_window"); ok {
+    createOpts.PreferredMaintenanceWindow = aws.String(attr.(string))
+  }
+
   if attr, ok := d.GetOk("monitoring_interval"); ok {
     createOpts.MonitoringInterval = aws.Int64(int64(attr.(int)))
   }
@@ -239,6 +282,8 @@ func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{})
   d.Set("kms_key_id", db.KmsKeyId)
   d.Set("auto_minor_version_upgrade", db.AutoMinorVersionUpgrade)
   d.Set("promotion_tier", db.PromotionTier)
+  d.Set("preferred_backup_window", db.PreferredBackupWindow)
+  d.Set("preferred_maintenance_window", db.PreferredMaintenanceWindow)

   if db.MonitoringInterval != nil {
     d.Set("monitoring_interval", db.MonitoringInterval)
@@ -290,6 +335,18 @@ func resourceAwsRDSClusterInstanceUpdate(d *schema.ResourceData, meta interface{
     requestUpdate = true
   }

+  if d.HasChange("preferred_backup_window") {
+    d.SetPartial("preferred_backup_window")
+    req.PreferredBackupWindow = aws.String(d.Get("preferred_backup_window").(string))
+    requestUpdate = true
+  }
+
+  if d.HasChange("preferred_maintenance_window") {
+    d.SetPartial("preferred_maintenance_window")
+    req.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string))
+    requestUpdate = true
+  }
+
   if d.HasChange("monitoring_interval") {
     d.SetPartial("monitoring_interval")
     req.MonitoringInterval = aws.Int64(int64(d.Get("monitoring_interval").(int)))
@@ -30,6 +30,8 @@ func TestAccAWSRDSClusterInstance_basic(t *testing.T) {
           testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.cluster_instances", &v),
           testAccCheckAWSDBClusterInstanceAttributes(&v),
           resource.TestCheckResourceAttr("aws_rds_cluster_instance.cluster_instances", "auto_minor_version_upgrade", "true"),
+          resource.TestCheckResourceAttrSet("aws_rds_cluster_instance.cluster_instances", "preferred_maintenance_window"),
+          resource.TestCheckResourceAttrSet("aws_rds_cluster_instance.cluster_instances", "preferred_backup_window"),
         ),
       },
       {
@@ -44,6 +46,48 @@ func TestAccAWSRDSClusterInstance_basic(t *testing.T) {
   })
 }

+func TestAccAWSRDSClusterInstance_namePrefix(t *testing.T) {
+  var v rds.DBInstance
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSClusterDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAWSClusterInstanceConfig_namePrefix(acctest.RandInt()),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.test", &v),
+          testAccCheckAWSDBClusterInstanceAttributes(&v),
+          resource.TestMatchResourceAttr(
+            "aws_rds_cluster_instance.test", "identifier", regexp.MustCompile("^tf-cluster-instance-")),
+        ),
+      },
+    },
+  })
+}
+
+func TestAccAWSRDSClusterInstance_generatedName(t *testing.T) {
+  var v rds.DBInstance
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSClusterDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAWSClusterInstanceConfig_generatedName(acctest.RandInt()),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.test", &v),
+          testAccCheckAWSDBClusterInstanceAttributes(&v),
+          resource.TestMatchResourceAttr(
+            "aws_rds_cluster_instance.test", "identifier", regexp.MustCompile("^tf-")),
+        ),
+      },
+    },
+  })
+}
+
 func TestAccAWSRDSClusterInstance_kmsKey(t *testing.T) {
   var v rds.DBInstance
   keyRegex := regexp.MustCompile("^arn:aws:kms:")
@@ -254,6 +298,83 @@ resource "aws_db_parameter_group" "bar" {
 `, n, n, n)
 }

+func testAccAWSClusterInstanceConfig_namePrefix(n int) string {
+  return fmt.Sprintf(`
+resource "aws_rds_cluster_instance" "test" {
+  identifier_prefix = "tf-cluster-instance-"
+  cluster_identifier = "${aws_rds_cluster.test.id}"
+  instance_class = "db.r3.large"
+}
+
+resource "aws_rds_cluster" "test" {
+  cluster_identifier = "tf-aurora-cluster-%d"
+  master_username = "root"
+  master_password = "password"
+  db_subnet_group_name = "${aws_db_subnet_group.test.name}"
+  skip_final_snapshot = true
+}
+
+resource "aws_vpc" "test" {
+  cidr_block = "10.0.0.0/16"
+}
+
+resource "aws_subnet" "a" {
+  vpc_id = "${aws_vpc.test.id}"
+  cidr_block = "10.0.0.0/24"
+  availability_zone = "us-west-2a"
+}
+
+resource "aws_subnet" "b" {
+  vpc_id = "${aws_vpc.test.id}"
+  cidr_block = "10.0.1.0/24"
+  availability_zone = "us-west-2b"
+}
+
+resource "aws_db_subnet_group" "test" {
+  name = "tf-test-%d"
+  subnet_ids = ["${aws_subnet.a.id}", "${aws_subnet.b.id}"]
+}
+`, n, n)
+}
+
+func testAccAWSClusterInstanceConfig_generatedName(n int) string {
+  return fmt.Sprintf(`
+resource "aws_rds_cluster_instance" "test" {
+  cluster_identifier = "${aws_rds_cluster.test.id}"
+  instance_class = "db.r3.large"
+}
+
+resource "aws_rds_cluster" "test" {
+  cluster_identifier = "tf-aurora-cluster-%d"
+  master_username = "root"
+  master_password = "password"
+  db_subnet_group_name = "${aws_db_subnet_group.test.name}"
+  skip_final_snapshot = true
+}
+
+resource "aws_vpc" "test" {
+  cidr_block = "10.0.0.0/16"
+}
+
+resource "aws_subnet" "a" {
+  vpc_id = "${aws_vpc.test.id}"
+  cidr_block = "10.0.0.0/24"
+  availability_zone = "us-west-2a"
+}
+
+resource "aws_subnet" "b" {
+  vpc_id = "${aws_vpc.test.id}"
+  cidr_block = "10.0.1.0/24"
+  availability_zone = "us-west-2b"
+}
+
+resource "aws_db_subnet_group" "test" {
+  name = "tf-test-%d"
+  subnet_ids = ["${aws_subnet.a.id}", "${aws_subnet.b.id}"]
+}
+`, n, n)
+}
+
 func testAccAWSClusterInstanceConfigKmsKey(n int) string {
   return fmt.Sprintf(`
@@ -29,10 +29,19 @@ func resourceAwsRDSClusterParameterGroup() *schema.Resource {
       Computed: true,
     },
     "name": &schema.Schema{
+      Type:          schema.TypeString,
+      Optional:      true,
+      Computed:      true,
+      ForceNew:      true,
+      ConflictsWith: []string{"name_prefix"},
+      ValidateFunc:  validateDbParamGroupName,
+    },
+    "name_prefix": &schema.Schema{
       Type:         schema.TypeString,
+      Optional:     true,
+      Computed:     true,
       ForceNew:     true,
-      Required:     true,
+      ValidateFunc: validateDbParamGroupNamePrefix,
-      ValidateFunc: validateDbParamGroupName,
     },
     "family": &schema.Schema{
       Type: schema.TypeString,
@@ -86,8 +95,17 @@ func resourceAwsRDSClusterParameterGroupCreate(d *schema.ResourceData, meta inte
   rdsconn := meta.(*AWSClient).rdsconn
   tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))

+  var groupName string
+  if v, ok := d.GetOk("name"); ok {
+    groupName = v.(string)
+  } else if v, ok := d.GetOk("name_prefix"); ok {
+    groupName = resource.PrefixedUniqueId(v.(string))
+  } else {
+    groupName = resource.UniqueId()
+  }
+
   createOpts := rds.CreateDBClusterParameterGroupInput{
-    DBClusterParameterGroupName: aws.String(d.Get("name").(string)),
+    DBClusterParameterGroupName: aws.String(groupName),
     DBParameterGroupFamily:      aws.String(d.Get("family").(string)),
     Description:                 aws.String(d.Get("description").(string)),
     Tags:                        tags,
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"regexp"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@ -90,6 +91,44 @@ func TestAccAWSDBClusterParameterGroup_basic(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccAWSDBClusterParameterGroup_namePrefix(t *testing.T) {
|
||||||
|
var v rds.DBClusterParameterGroup
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckAWSDBClusterParameterGroupDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccAWSDBClusterParameterGroupConfig_namePrefix,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckAWSDBClusterParameterGroupExists("aws_rds_cluster_parameter_group.test", &v),
|
||||||
|
resource.TestMatchResourceAttr(
|
||||||
|
"aws_rds_cluster_parameter_group.test", "name", regexp.MustCompile("^tf-test-")),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccAWSDBClusterParameterGroup_generatedName(t *testing.T) {
|
||||||
|
var v rds.DBClusterParameterGroup
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckAWSDBClusterParameterGroupDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccAWSDBClusterParameterGroupConfig_generatedName,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckAWSDBClusterParameterGroupExists("aws_rds_cluster_parameter_group.test", &v),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestAccAWSDBClusterParameterGroup_disappears(t *testing.T) {
|
func TestAccAWSDBClusterParameterGroup_disappears(t *testing.T) {
|
||||||
var v rds.DBClusterParameterGroup
|
var v rds.DBClusterParameterGroup
|
||||||
|
|
||||||
@ -365,3 +404,15 @@ func testAccAWSDBClusterParameterGroupOnlyConfig(name string) string {
|
|||||||
family = "aurora5.6"
|
family = "aurora5.6"
|
||||||
}`, name)
|
}`, name)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const testAccAWSDBClusterParameterGroupConfig_namePrefix = `
|
||||||
|
resource "aws_rds_cluster_parameter_group" "test" {
|
||||||
|
name_prefix = "tf-test-"
|
||||||
|
family = "aurora5.6"
|
||||||
|
}
|
||||||
|
`
|
||||||
|
const testAccAWSDBClusterParameterGroupConfig_generatedName = `
|
||||||
|
resource "aws_rds_cluster_parameter_group" "test" {
|
||||||
|
family = "aurora5.6"
|
||||||
|
}
|
||||||
|
`
|
||||||
|
@@ -40,6 +40,46 @@ func TestAccAWSRDSCluster_basic(t *testing.T) {
   })
 }

+func TestAccAWSRDSCluster_namePrefix(t *testing.T) {
+  var v rds.DBCluster
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSClusterDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAWSClusterConfig_namePrefix(acctest.RandInt()),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSClusterExists("aws_rds_cluster.test", &v),
+          resource.TestMatchResourceAttr(
+            "aws_rds_cluster.test", "cluster_identifier", regexp.MustCompile("^tf-test-")),
+        ),
+      },
+    },
+  })
+}
+
+func TestAccAWSRDSCluster_generatedName(t *testing.T) {
+  var v rds.DBCluster
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSClusterDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAWSClusterConfig_generatedName(acctest.RandInt()),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSClusterExists("aws_rds_cluster.test", &v),
+          resource.TestMatchResourceAttr(
+            "aws_rds_cluster.test", "cluster_identifier", regexp.MustCompile("^tf-")),
+        ),
+      },
+    },
+  })
+}
+
 func TestAccAWSRDSCluster_takeFinalSnapshot(t *testing.T) {
   var v rds.DBCluster
   rInt := acctest.RandInt()
@@ -322,6 +362,71 @@ resource "aws_rds_cluster" "default" {
 }`, n)
 }

+func testAccAWSClusterConfig_namePrefix(n int) string {
+  return fmt.Sprintf(`
+resource "aws_rds_cluster" "test" {
+  cluster_identifier_prefix = "tf-test-"
+  master_username = "root"
+  master_password = "password"
+  db_subnet_group_name = "${aws_db_subnet_group.test.name}"
+  skip_final_snapshot = true
+}
+
+resource "aws_vpc" "test" {
+  cidr_block = "10.0.0.0/16"
+}
+
+resource "aws_subnet" "a" {
+  vpc_id = "${aws_vpc.test.id}"
+  cidr_block = "10.0.0.0/24"
+  availability_zone = "us-west-2a"
+}
+
+resource "aws_subnet" "b" {
+  vpc_id = "${aws_vpc.test.id}"
+  cidr_block = "10.0.1.0/24"
+  availability_zone = "us-west-2b"
+}
+
+resource "aws_db_subnet_group" "test" {
+  name = "tf-test-%d"
+  subnet_ids = ["${aws_subnet.a.id}", "${aws_subnet.b.id}"]
+}
+`, n)
+}
+
+func testAccAWSClusterConfig_generatedName(n int) string {
+  return fmt.Sprintf(`
+resource "aws_rds_cluster" "test" {
+  master_username = "root"
+  master_password = "password"
+  db_subnet_group_name = "${aws_db_subnet_group.test.name}"
+  skip_final_snapshot = true
+}
+
+resource "aws_vpc" "test" {
+  cidr_block = "10.0.0.0/16"
+}
+
+resource "aws_subnet" "a" {
+  vpc_id = "${aws_vpc.test.id}"
+  cidr_block = "10.0.0.0/24"
+  availability_zone = "us-west-2a"
+}
+
+resource "aws_subnet" "b" {
+  vpc_id = "${aws_vpc.test.id}"
+  cidr_block = "10.0.1.0/24"
+  availability_zone = "us-west-2b"
+}
+
+resource "aws_db_subnet_group" "test" {
+  name = "tf-test-%d"
+  subnet_ids = ["${aws_subnet.a.id}", "${aws_subnet.b.id}"]
+}
+`, n)
+}
+
 func testAccAWSClusterConfigWithFinalSnapshot(n int) string {
   return fmt.Sprintf(`
 resource "aws_rds_cluster" "default" {
@@ -5,6 +5,7 @@ import (
   "testing"

   "github.com/aws/aws-sdk-go/service/ses"
+  "github.com/hashicorp/terraform/helper/acctest"
   "github.com/hashicorp/terraform/helper/resource"
   "github.com/hashicorp/terraform/terraform"
 )
@@ -42,7 +43,7 @@ func testAccCheckSESConfigurationSetDestroy(s *terraform.State) error {

   found := false
   for _, element := range response.ConfigurationSets {
-    if *element.Name == "some-configuration-set" {
+    if *element.Name == fmt.Sprintf("some-configuration-set-%d", escRandomInteger) {
       found = true
     }
   }
@@ -77,7 +78,7 @@ func testAccCheckAwsSESConfigurationSetExists(n string) resource.TestCheckFunc {

   found := false
   for _, element := range response.ConfigurationSets {
-    if *element.Name == "some-configuration-set" {
+    if *element.Name == fmt.Sprintf("some-configuration-set-%d", escRandomInteger) {
       found = true
     }
   }
@@ -90,8 +91,9 @@ func testAccCheckAwsSESConfigurationSetExists(n string) resource.TestCheckFunc {
   }
 }

-const testAccAWSSESConfigurationSetConfig = `
+var escRandomInteger = acctest.RandInt()
+var testAccAWSSESConfigurationSetConfig = fmt.Sprintf(`
 resource "aws_ses_configuration_set" "test" {
-  name = "some-configuration-set"
+  name = "some-configuration-set-%d"
 }
-`
+`, escRandomInteger)
builtin/providers/aws/resource_aws_ses_domain_identity.go (new file, 99 lines)
@@ -0,0 +1,99 @@
+package aws
+
+import (
+  "fmt"
+  "log"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/service/ses"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsSesDomainIdentity() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceAwsSesDomainIdentityCreate,
+    Read:   resourceAwsSesDomainIdentityRead,
+    Delete: resourceAwsSesDomainIdentityDelete,
+    Importer: &schema.ResourceImporter{
+      State: schema.ImportStatePassthrough,
+    },
+
+    Schema: map[string]*schema.Schema{
+      "domain": &schema.Schema{
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: true,
+      },
+
+      "verification_token": &schema.Schema{
+        Type:     schema.TypeString,
+        Computed: true,
+      },
+    },
+  }
+}
+
+func resourceAwsSesDomainIdentityCreate(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).sesConn
+
+  domainName := d.Get("domain").(string)
+
+  createOpts := &ses.VerifyDomainIdentityInput{
+    Domain: aws.String(domainName),
+  }
+
+  _, err := conn.VerifyDomainIdentity(createOpts)
+  if err != nil {
+    return fmt.Errorf("Error requesting SES domain identity verification: %s", err)
+  }
+
+  d.SetId(domainName)
+
+  return resourceAwsSesDomainIdentityRead(d, meta)
+}
+
+func resourceAwsSesDomainIdentityRead(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).sesConn
+
+  domainName := d.Id()
+  d.Set("domain", domainName)
+
+  readOpts := &ses.GetIdentityVerificationAttributesInput{
+    Identities: []*string{
+      aws.String(domainName),
+    },
+  }
+
+  response, err := conn.GetIdentityVerificationAttributes(readOpts)
+  if err != nil {
+    log.Printf("[WARN] Error fetching identity verification attributes for %s: %s", d.Id(), err)
+    return err
+  }
+
+  verificationAttrs, ok := response.VerificationAttributes[domainName]
+  if !ok {
+    log.Printf("[WARN] Domain not listed in response when fetching verification attributes for %s", d.Id())
+    d.SetId("")
+    return nil
+  }
+
+  d.Set("verification_token", verificationAttrs.VerificationToken)
+  return nil
+}
+
+func resourceAwsSesDomainIdentityDelete(d *schema.ResourceData, meta interface{}) error {
+  conn := meta.(*AWSClient).sesConn
+
+  domainName := d.Get("domain").(string)
+
+  deleteOpts := &ses.DeleteIdentityInput{
+    Identity: aws.String(domainName),
+  }
+
+  _, err := conn.DeleteIdentity(deleteOpts)
+  if err != nil {
+    return fmt.Errorf("Error deleting SES domain identity: %s", err)
+  }
+
+  return nil
+}
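The new resource above only requests verification and exposes the token; completing verification still requires publishing that token in DNS. A hypothetical pairing with a Route 53 TXT record, written in the same Go-string style as the provider's test configs — the resource names and zone ID are assumptions, not part of this commit:

// Hypothetical usage sketch; none of these names come from the commit itself.
const sesDomainIdentityUsageSketch = `
resource "aws_ses_domain_identity" "example" {
  domain = "example.com"
}

resource "aws_route53_record" "example_amazonses_verification_record" {
  zone_id = "ABCDEFGH12345"
  name    = "_amazonses.example.com"
  type    = "TXT"
  ttl     = "600"
  records = ["${aws_ses_domain_identity.example.verification_token}"]
}
`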
100
builtin/providers/aws/resource_aws_ses_domain_identity_test.go
Normal file
100
builtin/providers/aws/resource_aws_ses_domain_identity_test.go
Normal file
@ -0,0 +1,100 @@
package aws

import (
	"fmt"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
	"github.com/hashicorp/terraform/helper/acctest"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

func TestAccAwsSESDomainIdentity_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAwsSESDomainIdentityDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: fmt.Sprintf(
					testAccAwsSESDomainIdentityConfig,
					acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum),
				),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAwsSESDomainIdentityExists("aws_ses_domain_identity.test"),
				),
			},
		},
	})
}

func testAccCheckAwsSESDomainIdentityDestroy(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).sesConn

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_ses_domain_identity" {
			continue
		}

		domain := rs.Primary.ID
		params := &ses.GetIdentityVerificationAttributesInput{
			Identities: []*string{
				aws.String(domain),
			},
		}

		response, err := conn.GetIdentityVerificationAttributes(params)
		if err != nil {
			return err
		}

		if response.VerificationAttributes[domain] != nil {
			return fmt.Errorf("SES Domain Identity %s still exists. Failing!", domain)
		}
	}

	return nil
}

func testAccCheckAwsSESDomainIdentityExists(n string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("SES Domain Identity not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("SES Domain Identity name not set")
		}

		domain := rs.Primary.ID
		conn := testAccProvider.Meta().(*AWSClient).sesConn

		params := &ses.GetIdentityVerificationAttributesInput{
			Identities: []*string{
				aws.String(domain),
			},
		}

		response, err := conn.GetIdentityVerificationAttributes(params)
		if err != nil {
			return err
		}

		if response.VerificationAttributes[domain] == nil {
			return fmt.Errorf("SES Domain Identity %s not found in AWS", domain)
		}

		return nil
	}
}

const testAccAwsSESDomainIdentityConfig = `
resource "aws_ses_domain_identity" "test" {
	domain = "%s.terraformtesting.com"
}
`
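The new resource exposes `verification_token`, so SES domain verification can be finished in the same configuration. A minimal usage sketch, pairing it with a Route 53 TXT record; the hosted zone ID and domain below are placeholders, not taken from this change:

resource "aws_ses_domain_identity" "example" {
  domain = "example.com"
}

resource "aws_route53_record" "example_ses_verification" {
  zone_id = "Z123456ABCDEF"   # placeholder hosted zone ID
  name    = "_amazonses.example.com"
  type    = "TXT"
  ttl     = "600"
  records = ["${aws_ses_domain_identity.example.verification_token}"]
}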
@@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/aws/aws-sdk-go/service/ses"
+	"github.com/hashicorp/terraform/helper/acctest"
 	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/terraform"
 )
@@ -46,7 +47,7 @@ func testAccCheckSESEventDestinationDestroy(s *terraform.State) error {
 
 	found := false
 	for _, element := range response.ConfigurationSets {
-		if *element.Name == "some-configuration-set" {
+		if *element.Name == fmt.Sprintf("some-configuration-set-%d", edRandomInteger) {
 			found = true
 		}
 	}
@@ -81,7 +82,7 @@ func testAccCheckAwsSESEventDestinationExists(n string) resource.TestCheckFunc {
 
 	found := false
 	for _, element := range response.ConfigurationSets {
-		if *element.Name == "some-configuration-set" {
+		if *element.Name == fmt.Sprintf("some-configuration-set-%d", edRandomInteger) {
 			found = true
 		}
 	}
@@ -94,7 +95,8 @@ func testAccCheckAwsSESEventDestinationExists(n string) resource.TestCheckFunc {
 	}
 }
 
-const testAccAWSSESEventDestinationConfig = `
+var edRandomInteger = acctest.RandInt()
+var testAccAWSSESEventDestinationConfig = fmt.Sprintf(`
 resource "aws_s3_bucket" "bucket" {
   bucket = "tf-test-bucket-format"
   acl = "private"
@@ -155,7 +157,7 @@ data "aws_iam_policy_document" "fh_felivery_document" {
 }
 
 resource "aws_ses_configuration_set" "test" {
-    name = "some-configuration-set"
+    name = "some-configuration-set-%d"
 }
 
 resource "aws_ses_event_destination" "kinesis" {
@@ -182,4 +184,4 @@ resource "aws_ses_event_destination" "cloudwatch" {
     value_source = "emailHeader"
   }
 }
-`
+`, edRandomInteger)
@@ -8,6 +8,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/service/ses"
+	"github.com/hashicorp/terraform/helper/acctest"
 	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/terraform"
 )
@@ -111,7 +112,7 @@ func testAccCheckAwsSESReceiptRuleExists(n string) resource.TestCheckFunc {
 
 		params := &ses.DescribeReceiptRuleInput{
 			RuleName:    aws.String("basic"),
-			RuleSetName: aws.String("test-me"),
+			RuleSetName: aws.String(fmt.Sprintf("test-me-%d", srrsRandomInt)),
 		}
 
 		response, err := conn.DescribeReceiptRule(params)
@@ -153,7 +154,7 @@ func testAccCheckAwsSESReceiptRuleOrder(n string) resource.TestCheckFunc {
 		conn := testAccProvider.Meta().(*AWSClient).sesConn
 
 		params := &ses.DescribeReceiptRuleSetInput{
-			RuleSetName: aws.String("test-me"),
+			RuleSetName: aws.String(fmt.Sprintf("test-me-%d", srrsRandomInt)),
 		}
 
 		response, err := conn.DescribeReceiptRuleSet(params)
@@ -185,8 +186,8 @@ func testAccCheckAwsSESReceiptRuleActions(n string) resource.TestCheckFunc {
 		conn := testAccProvider.Meta().(*AWSClient).sesConn
 
 		params := &ses.DescribeReceiptRuleInput{
-			RuleName:    aws.String("actions"),
-			RuleSetName: aws.String("test-me"),
+			RuleName:    aws.String("actions4"),
+			RuleSetName: aws.String(fmt.Sprintf("test-me-%d", srrsRandomInt)),
 		}
 
 		response, err := conn.DescribeReceiptRule(params)
@@ -227,9 +228,10 @@ func testAccCheckAwsSESReceiptRuleActions(n string) resource.TestCheckFunc {
 	}
 }
 
-const testAccAWSSESReceiptRuleBasicConfig = `
+var srrsRandomInt = acctest.RandInt()
+var testAccAWSSESReceiptRuleBasicConfig = fmt.Sprintf(`
 resource "aws_ses_receipt_rule_set" "test" {
-    rule_set_name = "test-me"
+    rule_set_name = "test-me-%d"
 }
 
 resource "aws_ses_receipt_rule" "basic" {
@@ -240,11 +242,11 @@ resource "aws_ses_receipt_rule" "basic" {
     scan_enabled = true
     tls_policy = "Require"
 }
-`
+`, srrsRandomInt)
 
-const testAccAWSSESReceiptRuleOrderConfig = `
+var testAccAWSSESReceiptRuleOrderConfig = fmt.Sprintf(`
 resource "aws_ses_receipt_rule_set" "test" {
-    rule_set_name = "test-me"
+    rule_set_name = "test-me-%d"
 }
 
 resource "aws_ses_receipt_rule" "second" {
@@ -257,36 +259,36 @@ resource "aws_ses_receipt_rule" "first" {
     name = "first"
     rule_set_name = "${aws_ses_receipt_rule_set.test.rule_set_name}"
 }
-`
+`, srrsRandomInt)
 
-const testAccAWSSESReceiptRuleActionsConfig = `
+var testAccAWSSESReceiptRuleActionsConfig = fmt.Sprintf(`
 resource "aws_s3_bucket" "emails" {
     bucket = "ses-terraform-emails"
 }
 
 resource "aws_ses_receipt_rule_set" "test" {
-    rule_set_name = "test-me"
+    rule_set_name = "test-me-%d"
 }
 
 resource "aws_ses_receipt_rule" "actions" {
-    name = "actions"
+    name = "actions4"
     rule_set_name = "${aws_ses_receipt_rule_set.test.rule_set_name}"
 
     add_header_action {
       header_name = "Added-By"
      header_value = "Terraform"
      position = 1
    }
 
    add_header_action {
      header_name = "Another-Header"
      header_value = "First"
      position = 0
    }
 
    stop_action {
      scope = "RuleSet"
      position = 2
    }
 }
-`
+`, srrsRandomInt)
@@ -60,7 +60,6 @@ func resourceAwsVpc() *schema.Resource {
 
 			"assign_generated_ipv6_cidr_block": {
 				Type:     schema.TypeBool,
-				ForceNew: true,
 				Optional: true,
 				Default:  false,
 			},
@@ -178,7 +177,7 @@ func resourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error {
 	d.Set("tags", tagsToMap(vpc.Tags))
 
 	for _, a := range vpc.Ipv6CidrBlockAssociationSet {
-		if *a.Ipv6CidrBlockState.State == "associated" {
+		if *a.Ipv6CidrBlockState.State == "associated" { //we can only ever have 1 IPv6 block associated at once
 			d.Set("assign_generated_ipv6_cidr_block", true)
 			d.Set("ipv6_association_id", a.AssociationId)
 			d.Set("ipv6_cidr_block", a.Ipv6CidrBlock)
@@ -344,6 +343,68 @@ func resourceAwsVpcUpdate(d *schema.ResourceData, meta interface{}) error {
 		d.SetPartial("enable_classiclink")
 	}
 
+	if d.HasChange("assign_generated_ipv6_cidr_block") && !d.IsNewResource() {
+		toAssign := d.Get("assign_generated_ipv6_cidr_block").(bool)
+
+		log.Printf("[INFO] Modifying assign_generated_ipv6_cidr_block to %#v", toAssign)
+
+		if toAssign {
+			modifyOpts := &ec2.AssociateVpcCidrBlockInput{
+				VpcId: &vpcid,
+				AmazonProvidedIpv6CidrBlock: aws.Bool(toAssign),
+			}
+			log.Printf("[INFO] Enabling assign_generated_ipv6_cidr_block vpc attribute for %s: %#v",
+				d.Id(), modifyOpts)
+			resp, err := conn.AssociateVpcCidrBlock(modifyOpts)
+			if err != nil {
+				return err
+			}
+
+			// Wait for the CIDR to become available
+			log.Printf(
+				"[DEBUG] Waiting for IPv6 CIDR (%s) to become associated",
+				d.Id())
+			stateConf := &resource.StateChangeConf{
+				Pending: []string{"associating", "disassociated"},
+				Target:  []string{"associated"},
+				Refresh: Ipv6CidrStateRefreshFunc(conn, d.Id(), *resp.Ipv6CidrBlockAssociation.AssociationId),
+				Timeout: 1 * time.Minute,
+			}
+			if _, err := stateConf.WaitForState(); err != nil {
+				return fmt.Errorf(
+					"Error waiting for IPv6 CIDR (%s) to become associated: %s",
+					d.Id(), err)
+			}
+		} else {
+			modifyOpts := &ec2.DisassociateVpcCidrBlockInput{
+				AssociationId: aws.String(d.Get("ipv6_association_id").(string)),
+			}
+			log.Printf("[INFO] Disabling assign_generated_ipv6_cidr_block vpc attribute for %s: %#v",
+				d.Id(), modifyOpts)
+			if _, err := conn.DisassociateVpcCidrBlock(modifyOpts); err != nil {
+				return err
+			}
+
+			// Wait for the CIDR to become available
+			log.Printf(
+				"[DEBUG] Waiting for IPv6 CIDR (%s) to become disassociated",
+				d.Id())
+			stateConf := &resource.StateChangeConf{
+				Pending: []string{"disassociating", "associated"},
+				Target:  []string{"disassociated"},
+				Refresh: Ipv6CidrStateRefreshFunc(conn, d.Id(), d.Get("ipv6_association_id").(string)),
+				Timeout: 1 * time.Minute,
+			}
+			if _, err := stateConf.WaitForState(); err != nil {
+				return fmt.Errorf(
+					"Error waiting for IPv6 CIDR (%s) to become disassociated: %s",
+					d.Id(), err)
+			}
+		}
+
+		d.SetPartial("assign_generated_ipv6_cidr_block")
+	}
+
 	if err := setTags(conn, d); err != nil {
 		return err
 	} else {
@@ -412,6 +473,41 @@ func VPCStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {
 	}
 }
 
+func Ipv6CidrStateRefreshFunc(conn *ec2.EC2, id string, associationId string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		describeVpcOpts := &ec2.DescribeVpcsInput{
+			VpcIds: []*string{aws.String(id)},
+		}
+		resp, err := conn.DescribeVpcs(describeVpcOpts)
+		if err != nil {
+			if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpcID.NotFound" {
+				resp = nil
+			} else {
+				log.Printf("Error on VPCStateRefresh: %s", err)
+				return nil, "", err
+			}
+		}
+
+		if resp == nil {
+			// Sometimes AWS just has consistency issues and doesn't see
+			// our instance yet. Return an empty state.
+			return nil, "", nil
+		}
+
+		if resp.Vpcs[0].Ipv6CidrBlockAssociationSet == nil {
+			return nil, "", nil
+		}
+
+		for _, association := range resp.Vpcs[0].Ipv6CidrBlockAssociationSet {
+			if *association.AssociationId == associationId {
+				return association, *association.Ipv6CidrBlockState.State, nil
+			}
+		}
+
+		return nil, "", nil
+	}
+}
+
 func resourceAwsVpcSetDefaultNetworkAcl(conn *ec2.EC2, d *schema.ResourceData) error {
 	filter1 := &ec2.Filter{
 		Name:   aws.String("default"),
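Because `ForceNew` is dropped and an update path is added, `assign_generated_ipv6_cidr_block` can now be toggled on an existing VPC in place. A minimal configuration sketch of what the acceptance test below exercises (the CIDR value is arbitrary):

resource "aws_vpc" "example" {
  cidr_block                       = "10.1.0.0/16"
  assign_generated_ipv6_cidr_block = true
}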
@@ -46,7 +46,7 @@ func TestAccAWSVpc_enableIpv6(t *testing.T) {
 		Steps: []resource.TestStep{
 			{
 				Config: testAccVpcConfigIpv6Enabled,
-				Check: resource.ComposeTestCheckFunc(
+				Check: resource.ComposeAggregateTestCheckFunc(
 					testAccCheckVpcExists("aws_vpc.foo", &vpc),
 					testAccCheckVpcCidr(&vpc, "10.1.0.0/16"),
 					resource.TestCheckResourceAttr(
@@ -55,6 +55,34 @@ func TestAccAWSVpc_enableIpv6(t *testing.T) {
 						"aws_vpc.foo", "ipv6_association_id"),
 					resource.TestCheckResourceAttrSet(
 						"aws_vpc.foo", "ipv6_cidr_block"),
+					resource.TestCheckResourceAttr(
+						"aws_vpc.foo", "assign_generated_ipv6_cidr_block", "true"),
+				),
+			},
+			{
+				Config: testAccVpcConfigIpv6Disabled,
+				Check: resource.ComposeAggregateTestCheckFunc(
+					testAccCheckVpcExists("aws_vpc.foo", &vpc),
+					testAccCheckVpcCidr(&vpc, "10.1.0.0/16"),
+					resource.TestCheckResourceAttr(
+						"aws_vpc.foo", "cidr_block", "10.1.0.0/16"),
+					resource.TestCheckResourceAttr(
+						"aws_vpc.foo", "assign_generated_ipv6_cidr_block", "false"),
+				),
+			},
+			{
+				Config: testAccVpcConfigIpv6Enabled,
+				Check: resource.ComposeAggregateTestCheckFunc(
+					testAccCheckVpcExists("aws_vpc.foo", &vpc),
+					testAccCheckVpcCidr(&vpc, "10.1.0.0/16"),
+					resource.TestCheckResourceAttr(
+						"aws_vpc.foo", "cidr_block", "10.1.0.0/16"),
+					resource.TestCheckResourceAttrSet(
+						"aws_vpc.foo", "ipv6_association_id"),
+					resource.TestCheckResourceAttrSet(
+						"aws_vpc.foo", "ipv6_cidr_block"),
+					resource.TestCheckResourceAttr(
+						"aws_vpc.foo", "assign_generated_ipv6_cidr_block", "true"),
 				),
 			},
 		},
@@ -283,6 +311,12 @@ resource "aws_vpc" "foo" {
 }
 `
 
+const testAccVpcConfigIpv6Disabled = `
+resource "aws_vpc" "foo" {
+	cidr_block = "10.1.0.0/16"
+}
+`
+
 const testAccVpcConfigUpdate = `
 resource "aws_vpc" "foo" {
 	cidr_block = "10.1.0.0/16"
@@ -294,6 +294,11 @@ func resourceAwsVpnConnectionRead(d *schema.ResourceData, meta interface{}) error {
 	}
 
 	vpnConnection := resp.VpnConnections[0]
+	if vpnConnection == nil || *vpnConnection.State == "deleted" {
+		// Seems we have lost our VPN Connection
+		d.SetId("")
+		return nil
+	}
 
 	// Set attributes under the user's control.
 	d.Set("vpn_gateway_id", vpnConnection.VpnGatewayId)
@@ -3,6 +3,7 @@ package aws
 import (
 	"fmt"
 	"testing"
+	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
@@ -16,6 +17,8 @@ import (
 func TestAccAWSVpnConnection_basic(t *testing.T) {
 	rInt := acctest.RandInt()
 	rBgpAsn := acctest.RandIntRange(64512, 65534)
+	var vpn ec2.VpnConnection
+
 	resource.Test(t, resource.TestCase{
 		PreCheck:      func() { testAccPreCheck(t) },
 		IDRefreshName: "aws_vpn_connection.foo",
@@ -30,6 +33,7 @@ func TestAccAWSVpnConnection_basic(t *testing.T) {
 						"aws_vpn_gateway.vpn_gateway",
 						"aws_customer_gateway.customer_gateway",
 						"aws_vpn_connection.foo",
+						&vpn,
 					),
 				),
 			},
@@ -41,6 +45,7 @@ func TestAccAWSVpnConnection_basic(t *testing.T) {
 						"aws_vpn_gateway.vpn_gateway",
 						"aws_customer_gateway.customer_gateway",
 						"aws_vpn_connection.foo",
+						&vpn,
 					),
 				),
 			},
@@ -51,6 +56,7 @@ func TestAccAWSVpnConnection_basic(t *testing.T) {
 func TestAccAWSVpnConnection_withoutStaticRoutes(t *testing.T) {
 	rInt := acctest.RandInt()
 	rBgpAsn := acctest.RandIntRange(64512, 65534)
+	var vpn ec2.VpnConnection
 	resource.Test(t, resource.TestCase{
 		PreCheck:      func() { testAccPreCheck(t) },
 		IDRefreshName: "aws_vpn_connection.foo",
@@ -65,6 +71,7 @@ func TestAccAWSVpnConnection_withoutStaticRoutes(t *testing.T) {
 						"aws_vpn_gateway.vpn_gateway",
 						"aws_customer_gateway.customer_gateway",
 						"aws_vpn_connection.foo",
+						&vpn,
 					),
 					resource.TestCheckResourceAttr("aws_vpn_connection.foo", "static_routes_only", "false"),
 				),
@@ -73,6 +80,74 @@ func TestAccAWSVpnConnection_withoutStaticRoutes(t *testing.T) {
 	})
 }
 
+func TestAccAWSVpnConnection_disappears(t *testing.T) {
+	var vpn ec2.VpnConnection
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccAwsVpnConnectionDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAwsVpnConnectionConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccAwsVpnConnection(
+						"aws_vpc.vpc",
+						"aws_vpn_gateway.vpn_gateway",
+						"aws_customer_gateway.customer_gateway",
+						"aws_vpn_connection.foo",
+						&vpn,
+					),
+					testAccAWSVpnConnectionDisappears(&vpn),
+				),
+				ExpectNonEmptyPlan: true,
+			},
+		},
+	})
+}
+
+func testAccAWSVpnConnectionDisappears(connection *ec2.VpnConnection) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		conn := testAccProvider.Meta().(*AWSClient).ec2conn
+
+		_, err := conn.DeleteVpnConnection(&ec2.DeleteVpnConnectionInput{
+			VpnConnectionId: connection.VpnConnectionId,
+		})
+		if err != nil {
+			if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVpnConnectionID.NotFound" {
+				return nil
+			}
+			if err != nil {
+				return err
+			}
+		}
+
+		return resource.Retry(40*time.Minute, func() *resource.RetryError {
+			opts := &ec2.DescribeVpnConnectionsInput{
+				VpnConnectionIds: []*string{connection.VpnConnectionId},
+			}
+			resp, err := conn.DescribeVpnConnections(opts)
+			if err != nil {
+				cgw, ok := err.(awserr.Error)
+				if ok && cgw.Code() == "InvalidVpnConnectionID.NotFound" {
+					return nil
+				}
+				if ok && cgw.Code() == "IncorrectState" {
+					return resource.RetryableError(fmt.Errorf(
+						"Waiting for VPN Connection to be in the correct state: %v", connection.VpnConnectionId))
+				}
+				return resource.NonRetryableError(
+					fmt.Errorf("Error retrieving VPN Connection: %s", err))
+			}
+			if *resp.VpnConnections[0].State == "deleted" {
+				return nil
+			}
+			return resource.RetryableError(fmt.Errorf(
+				"Waiting for VPN Connection: %v", connection.VpnConnectionId))
+		})
+	}
+}
+
 func testAccAwsVpnConnectionDestroy(s *terraform.State) error {
 	conn := testAccProvider.Meta().(*AWSClient).ec2conn
 	for _, rs := range s.RootModule().Resources {
@@ -117,7 +192,8 @@ func testAccAwsVpnConnection(
 	vpcResource string,
 	vpnGatewayResource string,
 	customerGatewayResource string,
-	vpnConnectionResource string) resource.TestCheckFunc {
+	vpnConnectionResource string,
+	vpnConnection *ec2.VpnConnection) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs, ok := s.RootModule().Resources[vpnConnectionResource]
 		if !ok {
@@ -134,7 +210,7 @@ func testAccAwsVpnConnection(
 
 		ec2conn := testAccProvider.Meta().(*AWSClient).ec2conn
 
-		_, err := ec2conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{
+		resp, err := ec2conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{
 			VpnConnectionIds: []*string{aws.String(connection.Primary.ID)},
 		})
 
@@ -142,6 +218,8 @@ func testAccAwsVpnConnection(
 			return err
 		}
 
+		*vpnConnection = *resp.VpnConnections[0]
+
 		return nil
 	}
 }

@@ -12,7 +12,7 @@ import (
 	"github.com/hashicorp/terraform/helper/schema"
 )
 
-func validateRdsId(v interface{}, k string) (ws []string, errors []error) {
+func validateRdsIdentifier(v interface{}, k string) (ws []string, errors []error) {
 	value := v.(string)
 	if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
 		errors = append(errors, fmt.Errorf(
@@ -33,6 +33,23 @@ func validateRdsIdentifier(v interface{}, k string) (ws []string, errors []error) {
 	return
 }
 
+func validateRdsIdentifierPrefix(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"only lowercase alphanumeric characters and hyphens allowed in %q", k))
+	}
+	if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"first character of %q must be a letter", k))
+	}
+	if regexp.MustCompile(`--`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot contain two consecutive hyphens", k))
+	}
+	return
+}
+
 func validateElastiCacheClusterId(v interface{}, k string) (ws []string, errors []error) {
 	value := v.(string)
 	if (len(value) < 1) || (len(value) > 20) {
@@ -103,7 +120,27 @@ func validateDbParamGroupName(v interface{}, k string) (ws []string, errors []error) {
 			"%q cannot be greater than 255 characters", k))
 	}
 	return
+}
+
+func validateDbParamGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"only lowercase alphanumeric characters and hyphens allowed in %q", k))
+	}
+	if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"first character of %q must be a letter", k))
+	}
+	if regexp.MustCompile(`--`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot contain two consecutive hyphens", k))
+	}
+	if len(value) > 255 {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot be greater than 226 characters", k))
+	}
+	return
 }
 
 func validateStreamViewType(v interface{}, k string) (ws []string, errors []error) {
@@ -1041,3 +1078,79 @@ func validateApiGatewayUsagePlanQuotaSettings(v map[string]interface{}) (errors []error) {
 
 	return
 }
+
+func validateDbSubnetGroupName(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if !regexp.MustCompile(`^[ .0-9a-z-_]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"only lowercase alphanumeric characters, hyphens, underscores, periods, and spaces allowed in %q", k))
+	}
+	if len(value) > 255 {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot be longer than 255 characters", k))
+	}
+	if regexp.MustCompile(`(?i)^default$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q is not allowed as %q", "Default", k))
+	}
+	return
+}
+
+func validateDbSubnetGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if !regexp.MustCompile(`^[ .0-9a-z-_]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"only lowercase alphanumeric characters, hyphens, underscores, periods, and spaces allowed in %q", k))
+	}
+	if len(value) > 229 {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot be longer than 229 characters", k))
+	}
+	return
+}
+
+func validateDbOptionGroupName(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"first character of %q must be a letter", k))
+	}
+	if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"only alphanumeric characters and hyphens allowed in %q", k))
+	}
+	if regexp.MustCompile(`--`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot contain two consecutive hyphens", k))
+	}
+	if regexp.MustCompile(`-$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot end with a hyphen", k))
+	}
+	if len(value) > 255 {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot be greater than 255 characters", k))
+	}
+	return
+}
+
+func validateDbOptionGroupNamePrefix(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if !regexp.MustCompile(`^[a-z]`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"first character of %q must be a letter", k))
+	}
+	if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"only alphanumeric characters and hyphens allowed in %q", k))
+	}
+	if regexp.MustCompile(`--`).MatchString(value) {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot contain two consecutive hyphens", k))
+	}
+	if len(value) > 229 {
+		errors = append(errors, fmt.Errorf(
+			"%q cannot be greater than 229 characters", k))
+	}
+	return
+}
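These *Prefix validators back the new `name_prefix`-style arguments on the RDS resources. As a hedged illustration only (the exact argument names on each resource are an assumption here, not shown in this diff), a prefix-based option group might be written as:

# Hypothetical sketch: Terraform appends a unique suffix to the prefix.
resource "aws_db_option_group" "example" {
  name_prefix          = "tf-option-group-"
  engine_name          = "mysql"
  major_engine_version = "5.6"
}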
@@ -1785,3 +1785,131 @@ func TestValidateElbNamePrefix(t *testing.T) {
 		}
 	}
 }
+
+func TestValidateDbSubnetGroupName(t *testing.T) {
+	cases := []struct {
+		Value    string
+		ErrCount int
+	}{
+		{
+			Value:    "tEsting",
+			ErrCount: 1,
+		},
+		{
+			Value:    "testing?",
+			ErrCount: 1,
+		},
+		{
+			Value:    "default",
+			ErrCount: 1,
+		},
+		{
+			Value:    randomString(300),
+			ErrCount: 1,
+		},
+	}
+
+	for _, tc := range cases {
+		_, errors := validateDbSubnetGroupName(tc.Value, "aws_db_subnet_group")
+
+		if len(errors) != tc.ErrCount {
+			t.Fatalf("Expected the DB Subnet Group name to trigger a validation error")
+		}
+	}
+}
+
+func TestValidateDbSubnetGroupNamePrefix(t *testing.T) {
+	cases := []struct {
+		Value    string
+		ErrCount int
+	}{
+		{
+			Value:    "tEsting",
+			ErrCount: 1,
+		},
+		{
+			Value:    "testing?",
+			ErrCount: 1,
+		},
+		{
+			Value:    randomString(230),
+			ErrCount: 1,
+		},
+	}
+
+	for _, tc := range cases {
+		_, errors := validateDbSubnetGroupNamePrefix(tc.Value, "aws_db_subnet_group")
+
+		if len(errors) != tc.ErrCount {
+			t.Fatalf("Expected the DB Subnet Group name prefix to trigger a validation error")
+		}
+	}
+}
+
+func TestValidateDbOptionGroupName(t *testing.T) {
+	cases := []struct {
+		Value    string
+		ErrCount int
+	}{
+		{
+			Value:    "testing123!",
+			ErrCount: 1,
+		},
+		{
+			Value:    "1testing123",
+			ErrCount: 1,
+		},
+		{
+			Value:    "testing--123",
+			ErrCount: 1,
+		},
+		{
+			Value:    "testing123-",
+			ErrCount: 1,
+		},
+		{
+			Value:    randomString(256),
+			ErrCount: 1,
+		},
+	}
+
+	for _, tc := range cases {
+		_, errors := validateDbOptionGroupName(tc.Value, "aws_db_option_group_name")
+
+		if len(errors) != tc.ErrCount {
+			t.Fatalf("Expected the DB Option Group Name to trigger a validation error")
+		}
+	}
+}
+
+func TestValidateDbOptionGroupNamePrefix(t *testing.T) {
+	cases := []struct {
+		Value    string
+		ErrCount int
+	}{
+		{
+			Value:    "testing123!",
+			ErrCount: 1,
+		},
+		{
+			Value:    "1testing123",
+			ErrCount: 1,
+		},
+		{
+			Value:    "testing--123",
+			ErrCount: 1,
+		},
+		{
+			Value:    randomString(230),
+			ErrCount: 1,
+		},
+	}
+
+	for _, tc := range cases {
+		_, errors := validateDbOptionGroupNamePrefix(tc.Value, "aws_db_option_group_name")
+
+		if len(errors) != tc.ErrCount {
+			t.Fatalf("Expected the DB Option Group name prefix to trigger a validation error")
+		}
+	}
+}
@@ -66,6 +66,7 @@ func resourceArmNetworkSecurityGroup() *schema.Resource {
 							Type:         schema.TypeString,
 							Required:     true,
 							ValidateFunc: validateNetworkSecurityRuleProtocol,
+							StateFunc:    ignoreCaseStateFunc,
 						},
 
 						"source_port_range": {
@@ -204,7 +204,7 @@ resource "azurerm_network_security_group" "test" {
     priority = 100
     direction = "Inbound"
     access = "Allow"
-    protocol = "Tcp"
+    protocol = "TCP"
     source_port_range = "*"
     destination_port_range = "*"
     source_address_prefix = "*"
@@ -5,6 +5,7 @@ import (
 	"os"
 	"os/exec"
 	"path"
+	"path/filepath"
 	"regexp"
 	"testing"
 
@@ -117,8 +118,13 @@ func buildDataSourceTestProgram() (string, error) {
 		return "", fmt.Errorf("failed to build test stub program: %s", err)
 	}
 
+	gopath := os.Getenv("GOPATH")
+	if gopath == "" {
+		gopath = filepath.Join(os.Getenv("HOME") + "/go")
+	}
+
 	programPath := path.Join(
-		os.Getenv("GOPATH"), "bin", "tf-acc-external-data-source",
+		filepath.SplitList(gopath)[0], "bin", "tf-acc-external-data-source",
 	)
 	return programPath, nil
 }
@@ -10,9 +10,9 @@ import (
 
 func resourceGithubIssueLabel() *schema.Resource {
 	return &schema.Resource{
-		Create: resourceGithubIssueLabelCreate,
+		Create: resourceGithubIssueLabelCreateOrUpdate,
 		Read:   resourceGithubIssueLabelRead,
-		Update: resourceGithubIssueLabelUpdate,
+		Update: resourceGithubIssueLabelCreateOrUpdate,
 		Delete: resourceGithubIssueLabelDelete,
 		Importer: &schema.ResourceImporter{
 			State: schema.ImportStatePassthrough,
@@ -40,21 +40,54 @@ func resourceGithubIssueLabel() *schema.Resource {
 	}
 }
 
-func resourceGithubIssueLabelCreate(d *schema.ResourceData, meta interface{}) error {
+// resourceGithubIssueLabelCreateOrUpdate idempotently creates or updates an
+// issue label. Issue labels are keyed off of their "name", so pre-existing
+// issue labels result in a 422 HTTP error if they exist outside of Terraform.
+// Normally this would not be an issue, except new repositories are created with
+// a "default" set of labels, and those labels easily conflict with custom ones.
+//
+// This function will first check if the label exists, and then issue an update,
+// otherwise it will create. This is also advantageous in that we get to use the
+// same function for two schema funcs.
+
+func resourceGithubIssueLabelCreateOrUpdate(d *schema.ResourceData, meta interface{}) error {
 	client := meta.(*Organization).client
+	o := meta.(*Organization).name
 	r := d.Get("repository").(string)
 	n := d.Get("name").(string)
 	c := d.Get("color").(string)
-	label := github.Label{
+
+	label := &github.Label{
 		Name:  &n,
 		Color: &c,
 	}
 
-	log.Printf("[DEBUG] Creating label: %#v", label)
-	_, resp, err := client.Issues.CreateLabel(context.TODO(), meta.(*Organization).name, r, &label)
-	log.Printf("[DEBUG] Response from creating label: %s", *resp)
-	if err != nil {
-		return err
+	log.Printf("[DEBUG] Querying label existence %s/%s (%s)", o, r, n)
+	existing, _, _ := client.Issues.GetLabel(context.TODO(), o, r, n)
+
+	if existing != nil {
+		log.Printf("[DEBUG] Updating label: %s/%s (%s: %s)", o, r, n, c)
+
+		// Pull out the original name. If we already have a resource, this is the
+		// parsed ID. If not, it's the value given to the resource.
+		var oname string
+		if d.Id() == "" {
+			oname = n
+		} else {
+			_, oname = parseTwoPartID(d.Id())
+		}
+
+		_, _, err := client.Issues.EditLabel(context.TODO(), o, r, oname, label)
+		if err != nil {
+			return err
+		}
+	} else {
+		log.Printf("[DEBUG] Creating label: %s/%s (%s: %s)", o, r, n, c)
+		_, resp, err := client.Issues.CreateLabel(context.TODO(), o, r, label)
+		log.Printf("[DEBUG] Response from creating label: %s", *resp)
+		if err != nil {
+			return err
+		}
 	}
 
 	d.SetId(buildTwoPartID(&r, &n))
@@ -66,6 +99,7 @@ func resourceGithubIssueLabelRead(d *schema.ResourceData, meta interface{}) error {
 	client := meta.(*Organization).client
 	r, n := parseTwoPartID(d.Id())
 
+	log.Printf("[DEBUG] Reading label: %s/%s", r, n)
 	githubLabel, _, err := client.Issues.GetLabel(context.TODO(), meta.(*Organization).name, r, n)
 	if err != nil {
 		d.SetId("")
@@ -80,31 +114,12 @@ func resourceGithubIssueLabelRead(d *schema.ResourceData, meta interface{}) error {
 	return nil
 }
 
-func resourceGithubIssueLabelUpdate(d *schema.ResourceData, meta interface{}) error {
-	client := meta.(*Organization).client
-	r := d.Get("repository").(string)
-	n := d.Get("name").(string)
-	c := d.Get("color").(string)
-
-	_, originalName := parseTwoPartID(d.Id())
-	_, _, err := client.Issues.EditLabel(context.TODO(), meta.(*Organization).name, r, originalName, &github.Label{
-		Name:  &n,
-		Color: &c,
-	})
-	if err != nil {
-		return err
-	}
-
-	d.SetId(buildTwoPartID(&r, &n))
-
-	return resourceGithubIssueLabelRead(d, meta)
-}
-
 func resourceGithubIssueLabelDelete(d *schema.ResourceData, meta interface{}) error {
 	client := meta.(*Organization).client
 	r := d.Get("repository").(string)
 	n := d.Get("name").(string)
 
+	log.Printf("[DEBUG] Deleting label: %s/%s", r, n)
 	_, err := client.Issues.DeleteLabel(context.TODO(), meta.(*Organization).name, r, n)
 	return err
 }
@@ -32,6 +32,13 @@ func TestAccGithubIssueLabel_basic(t *testing.T) {
 					testAccCheckGithubIssueLabelAttributes(&label, "bar", "FFFFFF"),
 				),
 			},
+			{
+				Config: testAccGitHubIssueLabelExistsConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckGithubIssueLabelExists("github_issue_label.test", &label),
+					testAccCheckGithubIssueLabelAttributes(&label, "enhancement", "FF00FF"),
+				),
+			},
 		},
 	})
 }
@@ -134,3 +141,16 @@ resource "github_issue_label" "test" {
   color = "FFFFFF"
 }
 `, testRepo)
+
+var testAccGitHubIssueLabelExistsConfig string = fmt.Sprintf(`
+// Create a repository which has the default labels
+resource "github_repository" "test" {
+  name = "tf-acc-repo-label-abc1234"
+}
+
+resource "github_issue_label" "test" {
+  repository = "${github_repository.test.name}"
+  name       = "enhancement" // Important! This is a pre-created label
+  color      = "FF00FF"
+}
+`)
@@ -40,17 +40,19 @@ func resourceContainerCluster() *schema.Resource {
 							Computed: true,
 						},
 						"client_key": &schema.Schema{
 							Type:     schema.TypeString,
 							Computed: true,
+							Sensitive: true,
 						},
 						"cluster_ca_certificate": &schema.Schema{
 							Type:     schema.TypeString,
 							Computed: true,
 						},
 						"password": &schema.Schema{
 							Type:     schema.TypeString,
 							Required: true,
 							ForceNew: true,
+							Sensitive: true,
 						},
 						"username": &schema.Schema{
 							Type: schema.TypeString,
@@ -83,6 +83,7 @@ func Provider() terraform.ResourceProvider {
 		ResourcesMap: map[string]*schema.Resource{
 			"kubernetes_config_map": resourceKubernetesConfigMap(),
 			"kubernetes_namespace":  resourceKubernetesNamespace(),
+			"kubernetes_secret":     resourceKubernetesSecret(),
 		},
 		ConfigureFunc: providerConfigure,
 	}

builtin/providers/kubernetes/resource_kubernetes_secret.go (new file, 159 lines)
@@ -0,0 +1,159 @@
package kubernetes

import (
	"log"

	"fmt"
	"github.com/hashicorp/terraform/helper/schema"
	pkgApi "k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/errors"
	api "k8s.io/kubernetes/pkg/api/v1"
	kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)

func resourceKubernetesSecret() *schema.Resource {
	return &schema.Resource{
		Create: resourceKubernetesSecretCreate,
		Read:   resourceKubernetesSecretRead,
		Exists: resourceKubernetesSecretExists,
		Update: resourceKubernetesSecretUpdate,
		Delete: resourceKubernetesSecretDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: map[string]*schema.Schema{
			"metadata": namespacedMetadataSchema("secret", true),
			"data": {
				Type:        schema.TypeMap,
				Description: "A map of the secret data.",
				Optional:    true,
				Sensitive:   true,
			},
			"type": {
				Type:        schema.TypeString,
				Description: "Type of secret",
				Default:     "Opaque",
				Optional:    true,
				ForceNew:    true,
			},
		},
	}
}

func resourceKubernetesSecretCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*kubernetes.Clientset)

	metadata := expandMetadata(d.Get("metadata").([]interface{}))
	secret := api.Secret{
		ObjectMeta: metadata,
		StringData: expandStringMap(d.Get("data").(map[string]interface{})),
	}

	if v, ok := d.GetOk("type"); ok {
		secret.Type = api.SecretType(v.(string))
	}

	log.Printf("[INFO] Creating new secret: %#v", secret)
	out, err := conn.CoreV1().Secrets(metadata.Namespace).Create(&secret)
	if err != nil {
		return err
	}

	log.Printf("[INFO] Submitting new secret: %#v", out)
	d.SetId(buildId(out.ObjectMeta))

	return resourceKubernetesSecretRead(d, meta)
}

func resourceKubernetesSecretRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*kubernetes.Clientset)

	namespace, name := idParts(d.Id())

	log.Printf("[INFO] Reading secret %s", name)
	secret, err := conn.CoreV1().Secrets(namespace).Get(name)
	if err != nil {
		return err
	}

	log.Printf("[INFO] Received secret: %#v", secret)
	err = d.Set("metadata", flattenMetadata(secret.ObjectMeta))
	if err != nil {
		return err
	}

	d.Set("data", byteMapToStringMap(secret.Data))
	d.Set("type", secret.Type)

	return nil
}

func resourceKubernetesSecretUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*kubernetes.Clientset)

	namespace, name := idParts(d.Id())

	ops := patchMetadata("metadata.0.", "/metadata/", d)
	if d.HasChange("data") {
		oldV, newV := d.GetChange("data")

		oldV = base64EncodeStringMap(oldV.(map[string]interface{}))
		newV = base64EncodeStringMap(newV.(map[string]interface{}))

		diffOps := diffStringMap("/data/", oldV.(map[string]interface{}), newV.(map[string]interface{}))

		ops = append(ops, diffOps...)
	}

	data, err := ops.MarshalJSON()
	if err != nil {
		return fmt.Errorf("Failed to marshal update operations: %s", err)
	}

	log.Printf("[INFO] Updating secret %q: %v", name, data)
	out, err := conn.CoreV1().Secrets(namespace).Patch(name, pkgApi.JSONPatchType, data)
	if err != nil {
		return fmt.Errorf("Failed to update secret: %s", err)
	}

	log.Printf("[INFO] Submitting updated secret: %#v", out)
	d.SetId(buildId(out.ObjectMeta))

	return resourceKubernetesSecretRead(d, meta)
}

func resourceKubernetesSecretDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*kubernetes.Clientset)

	namespace, name := idParts(d.Id())

	log.Printf("[INFO] Deleting secret: %q", name)
	err := conn.CoreV1().Secrets(namespace).Delete(name, &api.DeleteOptions{})
	if err != nil {
		return err
	}

	log.Printf("[INFO] Secret %s deleted", name)

	d.SetId("")

	return nil
}

func resourceKubernetesSecretExists(d *schema.ResourceData, meta interface{}) (bool, error) {
	conn := meta.(*kubernetes.Clientset)

	namespace, name := idParts(d.Id())

	log.Printf("[INFO] Checking secret %s", name)
	_, err := conn.CoreV1().Secrets(namespace).Get(name)
	if err != nil {
		if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 {
			return false, nil
		}
		log.Printf("[DEBUG] Received error: %#v", err)
	}

	return true, err
}

builtin/providers/kubernetes/resource_kubernetes_secret_test.go (new file, 320 lines; shown here only as far as the original excerpt runs)
@@ -0,0 +1,320 @@
package kubernetes

import (
	"fmt"
	"reflect"
	"regexp"
	"testing"

	"github.com/hashicorp/terraform/helper/acctest"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
	api "k8s.io/kubernetes/pkg/api/v1"
	kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)

func TestAccKubernetesSecret_basic(t *testing.T) {
	var conf api.Secret
	name := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum))

	resource.Test(t, resource.TestCase{
		PreCheck:      func() { testAccPreCheck(t) },
		IDRefreshName: "kubernetes_secret.test",
		Providers:     testAccProviders,
		CheckDestroy:  testAccCheckKubernetesSecretDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccKubernetesSecretConfig_basic(name),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckKubernetesSecretExists("kubernetes_secret.test", &conf),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.%", "2"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.TestAnnotationOne", "one"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.TestAnnotationTwo", "two"),
					testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", "TestAnnotationTwo": "two"}),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.%", "3"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.TestLabelOne", "one"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.TestLabelTwo", "two"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.TestLabelThree", "three"),
					testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelTwo": "two", "TestLabelThree": "three"}),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.name", name),
					resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.generation"),
					resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.resource_version"),
					resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.self_link"),
					resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.uid"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "data.%", "2"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "data.one", "first"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "data.two", "second"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "type", "Opaque"),
					testAccCheckSecretData(&conf, map[string]string{"one": "first", "two": "second"}),
				),
			},
			{
				Config: testAccKubernetesSecretConfig_modified(name),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckKubernetesSecretExists("kubernetes_secret.test", &conf),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.%", "2"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.TestAnnotationOne", "one"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.Different", "1234"),
					testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{"TestAnnotationOne": "one", "Different": "1234"}),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.%", "2"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.TestLabelOne", "one"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.TestLabelThree", "three"),
					testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{"TestLabelOne": "one", "TestLabelThree": "three"}),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.name", name),
					resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.generation"),
					resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.resource_version"),
					resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.self_link"),
					resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.uid"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "data.%", "3"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "data.one", "first"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "data.two", "second"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "data.nine", "ninth"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "type", "Opaque"),
					testAccCheckSecretData(&conf, map[string]string{"one": "first", "two": "second", "nine": "ninth"}),
				),
			},
			{
				Config: testAccKubernetesSecretConfig_noData(name),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckKubernetesSecretExists("kubernetes_secret.test", &conf),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.%", "0"),
					testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.%", "0"),
					testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.name", name),
					resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.generation"),
					resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.resource_version"),
					resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.self_link"),
					resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.uid"),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "data.%", "0"),
					testAccCheckSecretData(&conf, map[string]string{}),
				),
			},
			{
				Config: testAccKubernetesSecretConfig_typeSpecified(name),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckKubernetesSecretExists("kubernetes_secret.test", &conf),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.%", "0"),
					testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.%", "0"),
					testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}),
					resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.name", name),
					resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.generation"),
					resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.resource_version"),
					resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.self_link"),
resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.self_link"),
|
||||||
|
resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.uid"),
|
||||||
|
resource.TestCheckResourceAttr("kubernetes_secret.test", "data.%", "2"),
|
||||||
|
resource.TestCheckResourceAttr("kubernetes_secret.test", "data.username", "admin"),
|
||||||
|
resource.TestCheckResourceAttr("kubernetes_secret.test", "data.password", "password"),
|
||||||
|
resource.TestCheckResourceAttr("kubernetes_secret.test", "type", "kubernetes.io/basic-auth"),
|
||||||
|
testAccCheckSecretData(&conf, map[string]string{"username": "admin", "password": "password"}),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccKubernetesSecret_importBasic(t *testing.T) {
|
||||||
|
resourceName := "kubernetes_secret.test"
|
||||||
|
name := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckKubernetesSecretDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccKubernetesSecretConfig_basic(name),
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccKubernetesSecret_generatedName(t *testing.T) {
|
||||||
|
var conf api.Secret
|
||||||
|
prefix := "tf-acc-test-gen-"
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
IDRefreshName: "kubernetes_secret.test",
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckKubernetesSecretDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccKubernetesSecretConfig_generatedName(prefix),
|
||||||
|
Check: resource.ComposeAggregateTestCheckFunc(
|
||||||
|
testAccCheckKubernetesSecretExists("kubernetes_secret.test", &conf),
|
||||||
|
resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.annotations.%", "0"),
|
||||||
|
testAccCheckMetaAnnotations(&conf.ObjectMeta, map[string]string{}),
|
||||||
|
resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.labels.%", "0"),
|
||||||
|
testAccCheckMetaLabels(&conf.ObjectMeta, map[string]string{}),
|
||||||
|
resource.TestCheckResourceAttr("kubernetes_secret.test", "metadata.0.generate_name", prefix),
|
||||||
|
resource.TestMatchResourceAttr("kubernetes_secret.test", "metadata.0.name", regexp.MustCompile("^"+prefix)),
|
||||||
|
resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.generation"),
|
||||||
|
resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.resource_version"),
|
||||||
|
resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.self_link"),
|
||||||
|
resource.TestCheckResourceAttrSet("kubernetes_secret.test", "metadata.0.uid"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccKubernetesSecret_importGeneratedName(t *testing.T) {
|
||||||
|
resourceName := "kubernetes_secret.test"
|
||||||
|
prefix := "tf-acc-test-gen-import-"
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckKubernetesSecretDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccKubernetesSecretConfig_generatedName(prefix),
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckSecretData(m *api.Secret, expected map[string]string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
if len(expected) == 0 && len(m.Data) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(byteMapToStringMap(m.Data), expected) {
|
||||||
|
return fmt.Errorf("%s data don't match.\nExpected: %q\nGiven: %q",
|
||||||
|
m.Name, expected, m.Data)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckKubernetesSecretDestroy(s *terraform.State) error {
|
||||||
|
conn := testAccProvider.Meta().(*kubernetes.Clientset)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "kubernetes_secret" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
namespace, name := idParts(rs.Primary.ID)
|
||||||
|
resp, err := conn.CoreV1().Secrets(namespace).Get(name)
|
||||||
|
if err == nil {
|
||||||
|
if resp.Name == rs.Primary.ID {
|
||||||
|
return fmt.Errorf("Secret still exists: %s", rs.Primary.ID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckKubernetesSecretExists(n string, obj *api.Secret) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
conn := testAccProvider.Meta().(*kubernetes.Clientset)
|
||||||
|
namespace, name := idParts(rs.Primary.ID)
|
||||||
|
out, err := conn.CoreV1().Secrets(namespace).Get(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*obj = *out
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccKubernetesSecretConfig_basic(name string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "kubernetes_secret" "test" {
|
||||||
|
metadata {
|
||||||
|
annotations {
|
||||||
|
TestAnnotationOne = "one"
|
||||||
|
TestAnnotationTwo = "two"
|
||||||
|
}
|
||||||
|
labels {
|
||||||
|
TestLabelOne = "one"
|
||||||
|
TestLabelTwo = "two"
|
||||||
|
TestLabelThree = "three"
|
||||||
|
}
|
||||||
|
name = "%s"
|
||||||
|
}
|
||||||
|
data {
|
||||||
|
one = "first"
|
||||||
|
two = "second"
|
||||||
|
}
|
||||||
|
}`, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccKubernetesSecretConfig_modified(name string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "kubernetes_secret" "test" {
|
||||||
|
metadata {
|
||||||
|
annotations {
|
||||||
|
TestAnnotationOne = "one"
|
||||||
|
Different = "1234"
|
||||||
|
}
|
||||||
|
labels {
|
||||||
|
TestLabelOne = "one"
|
||||||
|
TestLabelThree = "three"
|
||||||
|
}
|
||||||
|
name = "%s"
|
||||||
|
}
|
||||||
|
data {
|
||||||
|
one = "first"
|
||||||
|
two = "second"
|
||||||
|
nine = "ninth"
|
||||||
|
}
|
||||||
|
}`, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccKubernetesSecretConfig_noData(name string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "kubernetes_secret" "test" {
|
||||||
|
metadata {
|
||||||
|
name = "%s"
|
||||||
|
}
|
||||||
|
}`, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccKubernetesSecretConfig_typeSpecified(name string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "kubernetes_secret" "test" {
|
||||||
|
metadata {
|
||||||
|
name = "%s"
|
||||||
|
}
|
||||||
|
data {
|
||||||
|
username = "admin"
|
||||||
|
password = "password"
|
||||||
|
}
|
||||||
|
type = "kubernetes.io/basic-auth"
|
||||||
|
}`, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccKubernetesSecretConfig_generatedName(prefix string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "kubernetes_secret" "test" {
|
||||||
|
metadata {
|
||||||
|
generate_name = "%s"
|
||||||
|
}
|
||||||
|
data {
|
||||||
|
one = "first"
|
||||||
|
two = "second"
|
||||||
|
}
|
||||||
|
}`, prefix)
|
||||||
|
}
|
@ -5,6 +5,7 @@ import (
|
|||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"encoding/base64"
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
api "k8s.io/kubernetes/pkg/api/v1"
|
api "k8s.io/kubernetes/pkg/api/v1"
|
||||||
)
|
)
|
||||||
@ -99,3 +100,20 @@ func isInternalAnnotationKey(annotationKey string) bool {
|
|||||||
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func byteMapToStringMap(m map[string][]byte) map[string]string {
|
||||||
|
result := make(map[string]string)
|
||||||
|
for k, v := range m {
|
||||||
|
result[k] = string(v)
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func base64EncodeStringMap(m map[string]interface{}) map[string]interface{} {
|
||||||
|
result := make(map[string]interface{})
|
||||||
|
for k, v := range m {
|
||||||
|
value := v.(string)
|
||||||
|
result[k] = (base64.StdEncoding.EncodeToString([]byte(value)))
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
@ -1,41 +1,43 @@
|
|||||||
package triton
|
package triton
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"crypto/md5"
|
||||||
"log"
|
"encoding/base64"
|
||||||
"os"
|
"errors"
|
||||||
|
"sort"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/errwrap"
|
||||||
"github.com/hashicorp/go-multierror"
|
"github.com/hashicorp/go-multierror"
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
"github.com/hashicorp/terraform/terraform"
|
||||||
"github.com/joyent/gocommon/client"
|
"github.com/joyent/triton-go"
|
||||||
"github.com/joyent/gosdc/cloudapi"
|
"github.com/joyent/triton-go/authentication"
|
||||||
"github.com/joyent/gosign/auth"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Provider returns a terraform.ResourceProvider.
|
// Provider returns a terraform.ResourceProvider.
|
||||||
func Provider() terraform.ResourceProvider {
|
func Provider() terraform.ResourceProvider {
|
||||||
return &schema.Provider{
|
return &schema.Provider{
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"account": &schema.Schema{
|
"account": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
DefaultFunc: schema.EnvDefaultFunc("SDC_ACCOUNT", ""),
|
DefaultFunc: schema.EnvDefaultFunc("SDC_ACCOUNT", ""),
|
||||||
},
|
},
|
||||||
|
|
||||||
"url": &schema.Schema{
|
"url": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
DefaultFunc: schema.EnvDefaultFunc("SDC_URL", "https://us-west-1.api.joyentcloud.com"),
|
DefaultFunc: schema.EnvDefaultFunc("SDC_URL", "https://us-west-1.api.joyentcloud.com"),
|
||||||
},
|
},
|
||||||
|
|
||||||
"key_material": &schema.Schema{
|
"key_material": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Optional: true,
|
||||||
DefaultFunc: schema.EnvDefaultFunc("SDC_KEY_MATERIAL", ""),
|
DefaultFunc: schema.EnvDefaultFunc("SDC_KEY_MATERIAL", ""),
|
||||||
},
|
},
|
||||||
|
|
||||||
"key_id": &schema.Schema{
|
"key_id": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
DefaultFunc: schema.EnvDefaultFunc("SDC_KEY_ID", ""),
|
DefaultFunc: schema.EnvDefaultFunc("SDC_KEY_ID", ""),
|
||||||
@ -53,70 +55,113 @@ func Provider() terraform.ResourceProvider {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type SDCConfig struct {
|
type Config struct {
|
||||||
Account string
|
Account string
|
||||||
KeyMaterial string
|
KeyMaterial string
|
||||||
KeyID string
|
KeyID string
|
||||||
URL string
|
URL string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c SDCConfig) validate() error {
|
func (c Config) validate() error {
|
||||||
var err *multierror.Error
|
var err *multierror.Error
|
||||||
|
|
||||||
if c.URL == "" {
|
if c.URL == "" {
|
||||||
err = multierror.Append(err, fmt.Errorf("URL must be configured for the Triton provider"))
|
err = multierror.Append(err, errors.New("URL must be configured for the Triton provider"))
|
||||||
}
|
|
||||||
if c.KeyMaterial == "" {
|
|
||||||
err = multierror.Append(err, fmt.Errorf("Key Material must be configured for the Triton provider"))
|
|
||||||
}
|
}
|
||||||
if c.KeyID == "" {
|
if c.KeyID == "" {
|
||||||
err = multierror.Append(err, fmt.Errorf("Key ID must be configured for the Triton provider"))
|
err = multierror.Append(err, errors.New("Key ID must be configured for the Triton provider"))
|
||||||
}
|
}
|
||||||
if c.Account == "" {
|
if c.Account == "" {
|
||||||
err = multierror.Append(err, fmt.Errorf("Account must be configured for the Triton provider"))
|
err = multierror.Append(err, errors.New("Account must be configured for the Triton provider"))
|
||||||
}
|
}
|
||||||
|
|
||||||
return err.ErrorOrNil()
|
return err.ErrorOrNil()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c SDCConfig) getSDCClient() (*cloudapi.Client, error) {
|
func (c Config) getTritonClient() (*triton.Client, error) {
|
||||||
userauth, err := auth.NewAuth(c.Account, c.KeyMaterial, "rsa-sha256")
|
var signer authentication.Signer
|
||||||
|
var err error
|
||||||
|
if c.KeyMaterial == "" {
|
||||||
|
signer, err = authentication.NewSSHAgentSigner(c.KeyID, c.Account)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errwrap.Wrapf("Error Creating SSH Agent Signer: {{err}}", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
signer, err = authentication.NewPrivateKeySigner(c.KeyID, []byte(c.KeyMaterial), c.Account)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errwrap.Wrapf("Error Creating SSH Private Key Signer: {{err}}", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := triton.NewClient(c.URL, c.Account, signer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, errwrap.Wrapf("Error Creating Triton Client: {{err}}", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
creds := &auth.Credentials{
|
|
||||||
UserAuthentication: userauth,
|
|
||||||
SdcKeyId: c.KeyID,
|
|
||||||
SdcEndpoint: auth.Endpoint{URL: c.URL},
|
|
||||||
}
|
|
||||||
|
|
||||||
client := cloudapi.New(client.NewClient(
|
|
||||||
c.URL,
|
|
||||||
cloudapi.DefaultAPIVersion,
|
|
||||||
creds,
|
|
||||||
log.New(os.Stderr, "", log.LstdFlags),
|
|
||||||
))
|
|
||||||
|
|
||||||
return client, nil
|
return client, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
|
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
|
||||||
config := SDCConfig{
|
config := Config{
|
||||||
Account: d.Get("account").(string),
|
Account: d.Get("account").(string),
|
||||||
URL: d.Get("url").(string),
|
URL: d.Get("url").(string),
|
||||||
KeyMaterial: d.Get("key_material").(string),
|
KeyID: d.Get("key_id").(string),
|
||||||
KeyID: d.Get("key_id").(string),
|
}
|
||||||
|
|
||||||
|
if keyMaterial, ok := d.GetOk("key_material"); ok {
|
||||||
|
config.KeyMaterial = keyMaterial.(string)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := config.validate(); err != nil {
|
if err := config.validate(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
client, err := config.getSDCClient()
|
client, err := config.getTritonClient()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return client, nil
|
return client, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func resourceExists(resource interface{}, err error) (bool, error) {
|
||||||
|
if err != nil {
|
||||||
|
if triton.IsResourceNotFound(err) {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return resource != nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func stableMapHash(input map[string]string) string {
|
||||||
|
keys := make([]string, 0, len(input))
|
||||||
|
for k := range input {
|
||||||
|
keys = append(keys, k)
|
||||||
|
}
|
||||||
|
sort.Strings(keys)
|
||||||
|
|
||||||
|
hash := md5.New()
|
||||||
|
for _, key := range keys {
|
||||||
|
hash.Write([]byte(key))
|
||||||
|
hash.Write([]byte(input[key]))
|
||||||
|
}
|
||||||
|
|
||||||
|
return base64.StdEncoding.EncodeToString(hash.Sum([]byte{}))
|
||||||
|
}
|
||||||
|
|
||||||
|
var fastResourceTimeout = &schema.ResourceTimeout{
|
||||||
|
Create: schema.DefaultTimeout(1 * time.Minute),
|
||||||
|
Read: schema.DefaultTimeout(30 * time.Second),
|
||||||
|
Update: schema.DefaultTimeout(1 * time.Minute),
|
||||||
|
Delete: schema.DefaultTimeout(1 * time.Minute),
|
||||||
|
}
|
||||||
|
|
||||||
|
var slowResourceTimeout = &schema.ResourceTimeout{
|
||||||
|
Create: schema.DefaultTimeout(10 * time.Minute),
|
||||||
|
Read: schema.DefaultTimeout(30 * time.Second),
|
||||||
|
Update: schema.DefaultTimeout(10 * time.Minute),
|
||||||
|
Delete: schema.DefaultTimeout(10 * time.Minute),
|
||||||
|
}
|
||||||
|
@ -32,13 +32,13 @@ func testAccPreCheck(t *testing.T) {
|
|||||||
sdcURL := os.Getenv("SDC_URL")
|
sdcURL := os.Getenv("SDC_URL")
|
||||||
account := os.Getenv("SDC_ACCOUNT")
|
account := os.Getenv("SDC_ACCOUNT")
|
||||||
keyID := os.Getenv("SDC_KEY_ID")
|
keyID := os.Getenv("SDC_KEY_ID")
|
||||||
keyMaterial := os.Getenv("SDC_KEY_MATERIAL")
|
|
||||||
|
|
||||||
if sdcURL == "" {
|
if sdcURL == "" {
|
||||||
sdcURL = "https://us-west-1.api.joyentcloud.com"
|
sdcURL = "https://us-west-1.api.joyentcloud.com"
|
||||||
}
|
}
|
||||||
|
|
||||||
if sdcURL == "" || account == "" || keyID == "" || keyMaterial == "" {
|
if sdcURL == "" || account == "" || keyID == "" {
|
||||||
t.Fatal("SDC_ACCOUNT, SDC_KEY_ID and SDC_KEY_MATERIAL must be set for acceptance tests")
|
t.Fatal("SDC_ACCOUNT and SDC_KEY_ID must be set for acceptance tests. To test with the SSH" +
|
||||||
|
" private key signer, SDC_KEY_MATERIAL must also be set.")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -4,7 +4,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"github.com/joyent/gosdc/cloudapi"
|
"github.com/joyent/triton-go"
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceFabric() *schema.Resource {
|
func resourceFabric() *schema.Resource {
|
||||||
@ -16,74 +16,74 @@ func resourceFabric() *schema.Resource {
|
|||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"name": {
|
"name": {
|
||||||
Description: "network name",
|
Description: "Network name",
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
},
|
},
|
||||||
"public": {
|
"public": {
|
||||||
Description: "whether or not this is an RFC1918 network",
|
Description: "Whether or not this is an RFC1918 network",
|
||||||
Computed: true,
|
Computed: true,
|
||||||
Type: schema.TypeBool,
|
Type: schema.TypeBool,
|
||||||
},
|
},
|
||||||
"fabric": {
|
"fabric": {
|
||||||
Description: "whether or not this network is on a fabric",
|
Description: "Whether or not this network is on a fabric",
|
||||||
Computed: true,
|
Computed: true,
|
||||||
Type: schema.TypeBool,
|
Type: schema.TypeBool,
|
||||||
},
|
},
|
||||||
"description": {
|
"description": {
|
||||||
Description: "optional description of network",
|
Description: "Description of network",
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
},
|
},
|
||||||
"subnet": {
|
"subnet": {
|
||||||
Description: "CIDR formatted string describing network",
|
Description: "CIDR formatted string describing network address space",
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
},
|
},
|
||||||
"provision_start_ip": {
|
"provision_start_ip": {
|
||||||
Description: "first IP on the network that can be assigned",
|
Description: "First IP on the network that can be assigned",
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
},
|
},
|
||||||
"provision_end_ip": {
|
"provision_end_ip": {
|
||||||
Description: "last assignable IP on the network",
|
Description: "Last assignable IP on the network",
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
},
|
},
|
||||||
"gateway": {
|
"gateway": {
|
||||||
Description: "optional gateway IP",
|
Description: "Gateway IP",
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
},
|
},
|
||||||
"resolvers": {
|
"resolvers": {
|
||||||
Description: "array of IP addresses for resolvers",
|
Description: "List of IP addresses for DNS resolvers",
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Elem: &schema.Schema{Type: schema.TypeString},
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
},
|
},
|
||||||
"routes": {
|
"routes": {
|
||||||
Description: "map of CIDR block to Gateway IP address",
|
Description: "Map of CIDR block to Gateway IP address",
|
||||||
Computed: true,
|
Computed: true,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Type: schema.TypeMap,
|
Type: schema.TypeMap,
|
||||||
},
|
},
|
||||||
"internet_nat": {
|
"internet_nat": {
|
||||||
Description: "if a NAT zone is provisioned at Gateway IP address",
|
Description: "Whether or not a NAT zone is provisioned at the Gateway IP address",
|
||||||
Computed: true,
|
Computed: true,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Type: schema.TypeBool,
|
Type: schema.TypeBool,
|
||||||
},
|
},
|
||||||
"vlan_id": {
|
"vlan_id": {
|
||||||
Description: "VLAN network is on",
|
Description: "VLAN on which the network exists",
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
@ -93,7 +93,7 @@ func resourceFabric() *schema.Resource {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func resourceFabricCreate(d *schema.ResourceData, meta interface{}) error {
|
func resourceFabricCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
var resolvers []string
|
var resolvers []string
|
||||||
for _, resolver := range d.Get("resolvers").([]interface{}) {
|
for _, resolver := range d.Get("resolvers").([]interface{}) {
|
||||||
@ -104,24 +104,23 @@ func resourceFabricCreate(d *schema.ResourceData, meta interface{}) error {
|
|||||||
for cidr, v := range d.Get("routes").(map[string]interface{}) {
|
for cidr, v := range d.Get("routes").(map[string]interface{}) {
|
||||||
ip, ok := v.(string)
|
ip, ok := v.(string)
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf(`cannot use "%v" as an IP address`, v)
|
return fmt.Errorf(`Cannot use "%v" as an IP address`, v)
|
||||||
}
|
}
|
||||||
routes[cidr] = ip
|
routes[cidr] = ip
|
||||||
}
|
}
|
||||||
|
|
||||||
fabric, err := client.CreateFabricNetwork(
|
fabric, err := client.Fabrics().CreateFabricNetwork(&triton.CreateFabricNetworkInput{
|
||||||
int16(d.Get("vlan_id").(int)),
|
FabricVLANID: d.Get("vlan_id").(int),
|
||||||
cloudapi.CreateFabricNetworkOpts{
|
Name: d.Get("name").(string),
|
||||||
Name: d.Get("name").(string),
|
Description: d.Get("description").(string),
|
||||||
Description: d.Get("description").(string),
|
Subnet: d.Get("subnet").(string),
|
||||||
Subnet: d.Get("subnet").(string),
|
ProvisionStartIP: d.Get("provision_start_ip").(string),
|
||||||
ProvisionStartIp: d.Get("provision_start_ip").(string),
|
ProvisionEndIP: d.Get("provision_end_ip").(string),
|
||||||
ProvisionEndIp: d.Get("provision_end_ip").(string),
|
Gateway: d.Get("gateway").(string),
|
||||||
Gateway: d.Get("gateway").(string),
|
Resolvers: resolvers,
|
||||||
Resolvers: resolvers,
|
Routes: routes,
|
||||||
Routes: routes,
|
InternetNAT: d.Get("internet_nat").(bool),
|
||||||
InternetNAT: d.Get("internet_nat").(bool),
|
},
|
||||||
},
|
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -129,26 +128,25 @@ func resourceFabricCreate(d *schema.ResourceData, meta interface{}) error {
|
|||||||
|
|
||||||
d.SetId(fabric.Id)
|
d.SetId(fabric.Id)
|
||||||
|
|
||||||
err = resourceFabricRead(d, meta)
|
return resourceFabricRead(d, meta)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceFabricExists(d *schema.ResourceData, meta interface{}) (bool, error) {
|
func resourceFabricExists(d *schema.ResourceData, meta interface{}) (bool, error) {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
fabric, err := client.GetFabricNetwork(int16(d.Get("vlan_id").(int)), d.Id())
|
return resourceExists(client.Fabrics().GetFabricNetwork(&triton.GetFabricNetworkInput{
|
||||||
|
FabricVLANID: d.Get("vlan_id").(int),
|
||||||
return fabric != nil && err == nil, err
|
NetworkID: d.Id(),
|
||||||
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceFabricRead(d *schema.ResourceData, meta interface{}) error {
|
func resourceFabricRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
fabric, err := client.GetFabricNetwork(int16(d.Get("vlan_id").(int)), d.Id())
|
fabric, err := client.Fabrics().GetFabricNetwork(&triton.GetFabricNetworkInput{
|
||||||
|
FabricVLANID: d.Get("vlan_id").(int),
|
||||||
|
NetworkID: d.Id(),
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -156,23 +154,25 @@ func resourceFabricRead(d *schema.ResourceData, meta interface{}) error {
|
|||||||
d.SetId(fabric.Id)
|
d.SetId(fabric.Id)
|
||||||
d.Set("name", fabric.Name)
|
d.Set("name", fabric.Name)
|
||||||
d.Set("public", fabric.Public)
|
d.Set("public", fabric.Public)
|
||||||
d.Set("public", fabric.Public)
|
|
||||||
d.Set("fabric", fabric.Fabric)
|
d.Set("fabric", fabric.Fabric)
|
||||||
d.Set("description", fabric.Description)
|
d.Set("description", fabric.Description)
|
||||||
d.Set("subnet", fabric.Subnet)
|
d.Set("subnet", fabric.Subnet)
|
||||||
d.Set("provision_start_ip", fabric.ProvisionStartIp)
|
d.Set("provision_start_ip", fabric.ProvisioningStartIP)
|
||||||
d.Set("provision_end_ip", fabric.ProvisionEndIp)
|
d.Set("provision_end_ip", fabric.ProvisioningEndIP)
|
||||||
d.Set("gateway", fabric.Gateway)
|
d.Set("gateway", fabric.Gateway)
|
||||||
d.Set("resolvers", fabric.Resolvers)
|
d.Set("resolvers", fabric.Resolvers)
|
||||||
d.Set("routes", fabric.Routes)
|
d.Set("routes", fabric.Routes)
|
||||||
d.Set("internet_nat", fabric.InternetNAT)
|
d.Set("internet_nat", fabric.InternetNAT)
|
||||||
d.Set("vlan_id", fabric.VLANId)
|
d.Set("vlan_id", d.Get("vlan_id").(int))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceFabricDelete(d *schema.ResourceData, meta interface{}) error {
|
func resourceFabricDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
return client.DeleteFabricNetwork(int16(d.Get("vlan_id").(int)), d.Id())
|
return client.Fabrics().DeleteFabricNetwork(&triton.DeleteFabricNetworkInput{
|
||||||
|
FabricVLANID: d.Get("vlan_id").(int),
|
||||||
|
NetworkID: d.Id(),
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
@ -9,19 +9,19 @@ import (
|
|||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
"github.com/hashicorp/terraform/terraform"
|
||||||
"github.com/joyent/gosdc/cloudapi"
|
"github.com/joyent/triton-go"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccTritonFabric_basic(t *testing.T) {
|
func TestAccTritonFabric_basic(t *testing.T) {
|
||||||
fabricName := fmt.Sprintf("acctest-%d", acctest.RandInt())
|
fabricName := fmt.Sprintf("acctest-%d", acctest.RandInt())
|
||||||
config := fmt.Sprintf(testAccTritonFabric_basic, fabricName)
|
config := fmt.Sprintf(testAccTritonFabric_basic, acctest.RandIntRange(3, 2049), fabricName, fabricName)
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testCheckTritonFabricDestroy,
|
CheckDestroy: testCheckTritonFabricDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: config,
|
Config: config,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonFabricExists("triton_fabric.test"),
|
testCheckTritonFabricExists("triton_fabric.test"),
|
||||||
@ -37,62 +37,75 @@ func TestAccTritonFabric_basic(t *testing.T) {
|
|||||||
|
|
||||||
func testCheckTritonFabricExists(name string) resource.TestCheckFunc {
|
func testCheckTritonFabricExists(name string) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
// Ensure we have enough information in state to look up in API
|
|
||||||
rs, ok := s.RootModule().Resources[name]
|
rs, ok := s.RootModule().Resources[name]
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("Not found: %s", name)
|
return fmt.Errorf("Not found: %s", name)
|
||||||
}
|
}
|
||||||
conn := testAccProvider.Meta().(*cloudapi.Client)
|
conn := testAccProvider.Meta().(*triton.Client)
|
||||||
|
|
||||||
id, err := strconv.ParseInt(rs.Primary.Attributes["vlan_id"], 10, 16)
|
vlanID, err := strconv.Atoi(rs.Primary.Attributes["vlan_id"])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
fabric, err := conn.GetFabricNetwork(int16(id), rs.Primary.ID)
|
exists, err := resourceExists(conn.Fabrics().GetFabricNetwork(&triton.GetFabricNetworkInput{
|
||||||
|
FabricVLANID: vlanID,
|
||||||
|
NetworkID: rs.Primary.ID,
|
||||||
|
}))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Bad: Check Fabric Exists: %s", err)
|
return fmt.Errorf("Error: Check Fabric Exists: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if fabric == nil {
|
if exists {
|
||||||
return fmt.Errorf("Bad: Fabric %q does not exist", rs.Primary.ID)
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return fmt.Errorf("Error: Fabric %q (VLAN %d) Does Not Exist", rs.Primary.ID, vlanID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func testCheckTritonFabricDestroy(s *terraform.State) error {
|
func testCheckTritonFabricDestroy(s *terraform.State) error {
|
||||||
conn := testAccProvider.Meta().(*cloudapi.Client)
|
conn := testAccProvider.Meta().(*triton.Client)
|
||||||
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
for _, rs := range s.RootModule().Resources {
|
||||||
if rs.Type != "triton_fabric" {
|
if rs.Type != "triton_fabric" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
id, err := strconv.ParseInt(rs.Primary.Attributes["vlan_id"], 10, 16)
|
vlanID, err := strconv.Atoi(rs.Primary.Attributes["vlan_id"])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
fabric, err := conn.GetFabricNetwork(int16(id), rs.Primary.ID)
|
exists, err := resourceExists(conn.Fabrics().GetFabricNetwork(&triton.GetFabricNetworkInput{
|
||||||
|
FabricVLANID: vlanID,
|
||||||
|
NetworkID: rs.Primary.ID,
|
||||||
|
}))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if fabric != nil {
|
if exists {
|
||||||
return fmt.Errorf("Bad: Fabric %q still exists", rs.Primary.ID)
|
return fmt.Errorf("Error: Fabric %q (VLAN %d) Still Exists", rs.Primary.ID, vlanID)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var testAccTritonFabric_basic = `
|
var testAccTritonFabric_basic = `
|
||||||
|
resource "triton_vlan" "test" {
|
||||||
|
vlan_id = "%d"
|
||||||
|
name = "%s"
|
||||||
|
description = "testAccTritonFabric_basic"
|
||||||
|
}
|
||||||
|
|
||||||
resource "triton_fabric" "test" {
|
resource "triton_fabric" "test" {
|
||||||
name = "%s"
|
name = "%s"
|
||||||
description = "test network"
|
description = "test network"
|
||||||
vlan_id = 2 # every DC seems to have a vlan 2 available
|
vlan_id = "${triton_vlan.test.id}"
|
||||||
|
|
||||||
subnet = "10.0.0.0/22"
|
subnet = "10.0.0.0/22"
|
||||||
gateway = "10.0.0.1"
|
gateway = "10.0.0.1"
|
||||||
|
@ -2,8 +2,7 @@ package triton
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"github.com/joyent/gocommon/errors"
|
"github.com/joyent/triton-go"
|
||||||
"github.com/joyent/gosdc/cloudapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceFirewallRule() *schema.Resource {
|
func resourceFirewallRule() *schema.Resource {
|
||||||
@ -14,7 +13,7 @@ func resourceFirewallRule() *schema.Resource {
|
|||||||
Update: resourceFirewallRuleUpdate,
|
Update: resourceFirewallRuleUpdate,
|
||||||
Delete: resourceFirewallRuleDelete,
|
Delete: resourceFirewallRuleDelete,
|
||||||
Importer: &schema.ResourceImporter{
|
Importer: &schema.ResourceImporter{
|
||||||
State: resourceFirewallRuleImporter,
|
State: schema.ImportStatePassthrough,
|
||||||
},
|
},
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
@ -29,67 +28,73 @@ func resourceFirewallRule() *schema.Resource {
|
|||||||
Optional: true,
|
Optional: true,
|
||||||
Default: false,
|
Default: false,
|
||||||
},
|
},
|
||||||
|
"description": {
|
||||||
|
Description: "Human-readable description of the rule",
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
"global": {
|
||||||
|
Description: "Indicates whether or not the rule is global",
|
||||||
|
Type: schema.TypeBool,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceFirewallRuleCreate(d *schema.ResourceData, meta interface{}) error {
|
func resourceFirewallRuleCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
rule, err := client.CreateFirewallRule(cloudapi.CreateFwRuleOpts{
|
rule, err := client.Firewall().CreateFirewallRule(&triton.CreateFirewallRuleInput{
|
||||||
Rule: d.Get("rule").(string),
|
Rule: d.Get("rule").(string),
|
||||||
Enabled: d.Get("enabled").(bool),
|
Enabled: d.Get("enabled").(bool),
|
||||||
|
Description: d.Get("description").(string),
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
d.SetId(rule.Id)
|
d.SetId(rule.ID)
|
||||||
|
|
||||||
err = resourceFirewallRuleRead(d, meta)
|
return resourceFirewallRuleRead(d, meta)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceFirewallRuleExists(d *schema.ResourceData, meta interface{}) (bool, error) {
|
func resourceFirewallRuleExists(d *schema.ResourceData, meta interface{}) (bool, error) {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
rule, err := client.GetFirewallRule(d.Id())
|
return resourceExists(client.Firewall().GetFirewallRule(&triton.GetFirewallRuleInput{
|
||||||
if errors.IsResourceNotFound(err) {
|
ID: d.Id(),
|
||||||
return false, nil
|
}))
|
||||||
}
|
|
||||||
|
|
||||||
return rule != nil && err == nil, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceFirewallRuleRead(d *schema.ResourceData, meta interface{}) error {
|
func resourceFirewallRuleRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
rule, err := client.GetFirewallRule(d.Id())
|
rule, err := client.Firewall().GetFirewallRule(&triton.GetFirewallRuleInput{
|
||||||
|
ID: d.Id(),
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
d.SetId(rule.Id)
|
d.SetId(rule.ID)
|
||||||
d.Set("rule", rule.Rule)
|
d.Set("rule", rule.Rule)
|
||||||
d.Set("enabled", rule.Enabled)
|
d.Set("enabled", rule.Enabled)
|
||||||
|
d.Set("global", rule.Global)
|
||||||
|
d.Set("description", rule.Description)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceFirewallRuleUpdate(d *schema.ResourceData, meta interface{}) error {
|
func resourceFirewallRuleUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
_, err := client.UpdateFirewallRule(
|
_, err := client.Firewall().UpdateFirewallRule(&triton.UpdateFirewallRuleInput{
|
||||||
d.Id(),
|
ID: d.Id(),
|
||||||
cloudapi.CreateFwRuleOpts{
|
Rule: d.Get("rule").(string),
|
||||||
Rule: d.Get("rule").(string),
|
Enabled: d.Get("enabled").(bool),
|
||||||
Enabled: d.Get("enabled").(bool),
|
Description: d.Get("description").(string),
|
||||||
},
|
})
|
||||||
)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -98,15 +103,9 @@ func resourceFirewallRuleUpdate(d *schema.ResourceData, meta interface{}) error
|
|||||||
}
|
}
|
||||||
|
|
||||||
func resourceFirewallRuleDelete(d *schema.ResourceData, meta interface{}) error {
|
func resourceFirewallRuleDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
if err := client.DeleteFirewallRule(d.Id()); err != nil {
|
return client.Firewall().DeleteFirewallRule(&triton.DeleteFirewallRuleInput{
|
||||||
return err
|
ID: d.Id(),
|
||||||
}
|
})
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resourceFirewallRuleImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
|
|
||||||
return []*schema.ResourceData{d}, nil
|
|
||||||
}
|
}
|
||||||
|
@ -6,7 +6,7 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
"github.com/hashicorp/terraform/terraform"
|
||||||
"github.com/joyent/gosdc/cloudapi"
|
"github.com/joyent/triton-go"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccTritonFirewallRule_basic(t *testing.T) {
|
func TestAccTritonFirewallRule_basic(t *testing.T) {
|
||||||
@ -17,7 +17,7 @@ func TestAccTritonFirewallRule_basic(t *testing.T) {
|
|||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testCheckTritonFirewallRuleDestroy,
|
CheckDestroy: testCheckTritonFirewallRuleDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: config,
|
Config: config,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonFirewallRuleExists("triton_firewall_rule.test"),
|
testCheckTritonFirewallRuleExists("triton_firewall_rule.test"),
|
||||||
@ -36,20 +36,20 @@ func TestAccTritonFirewallRule_update(t *testing.T) {
|
|||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testCheckTritonFirewallRuleDestroy,
|
CheckDestroy: testCheckTritonFirewallRuleDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: preConfig,
|
Config: preConfig,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonFirewallRuleExists("triton_firewall_rule.test"),
|
testCheckTritonFirewallRuleExists("triton_firewall_rule.test"),
|
||||||
resource.TestCheckResourceAttr("triton_firewall_rule.test", "rule", "FROM any TO tag www ALLOW tcp PORT 80"),
|
resource.TestCheckResourceAttr("triton_firewall_rule.test", "rule", "FROM any TO tag \"www\" ALLOW tcp PORT 80"),
|
||||||
resource.TestCheckResourceAttr("triton_firewall_rule.test", "enabled", "false"),
|
resource.TestCheckResourceAttr("triton_firewall_rule.test", "enabled", "false"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
|
|
||||||
resource.TestStep{
|
{
|
||||||
Config: postConfig,
|
Config: postConfig,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonFirewallRuleExists("triton_firewall_rule.test"),
|
testCheckTritonFirewallRuleExists("triton_firewall_rule.test"),
|
||||||
resource.TestCheckResourceAttr("triton_firewall_rule.test", "rule", "FROM any TO tag www BLOCK tcp PORT 80"),
|
resource.TestCheckResourceAttr("triton_firewall_rule.test", "rule", "FROM any TO tag \"www\" BLOCK tcp PORT 80"),
|
||||||
resource.TestCheckResourceAttr("triton_firewall_rule.test", "enabled", "true"),
|
resource.TestCheckResourceAttr("triton_firewall_rule.test", "enabled", "true"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
@ -66,20 +66,20 @@ func TestAccTritonFirewallRule_enable(t *testing.T) {
|
|||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testCheckTritonFirewallRuleDestroy,
|
CheckDestroy: testCheckTritonFirewallRuleDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: preConfig,
|
Config: preConfig,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonFirewallRuleExists("triton_firewall_rule.test"),
|
testCheckTritonFirewallRuleExists("triton_firewall_rule.test"),
|
||||||
resource.TestCheckResourceAttr("triton_firewall_rule.test", "rule", "FROM any TO tag www ALLOW tcp PORT 80"),
|
resource.TestCheckResourceAttr("triton_firewall_rule.test", "rule", "FROM any TO tag \"www\" ALLOW tcp PORT 80"),
|
||||||
resource.TestCheckResourceAttr("triton_firewall_rule.test", "enabled", "false"),
|
resource.TestCheckResourceAttr("triton_firewall_rule.test", "enabled", "false"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
|
|
||||||
resource.TestStep{
|
{
|
||||||
Config: postConfig,
|
Config: postConfig,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonFirewallRuleExists("triton_firewall_rule.test"),
|
testCheckTritonFirewallRuleExists("triton_firewall_rule.test"),
|
||||||
resource.TestCheckResourceAttr("triton_firewall_rule.test", "rule", "FROM any TO tag www ALLOW tcp PORT 80"),
|
resource.TestCheckResourceAttr("triton_firewall_rule.test", "rule", "FROM any TO tag \"www\" ALLOW tcp PORT 80"),
|
||||||
resource.TestCheckResourceAttr("triton_firewall_rule.test", "enabled", "true"),
|
resource.TestCheckResourceAttr("triton_firewall_rule.test", "enabled", "true"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
@ -94,15 +94,19 @@ func testCheckTritonFirewallRuleExists(name string) resource.TestCheckFunc {
|
|||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("Not found: %s", name)
|
return fmt.Errorf("Not found: %s", name)
|
||||||
}
|
}
|
||||||
conn := testAccProvider.Meta().(*cloudapi.Client)
|
conn := testAccProvider.Meta().(*triton.Client)
|
||||||
|
|
||||||
rule, err := conn.GetFirewallRule(rs.Primary.ID)
|
resp, err := conn.Firewall().GetFirewallRule(&triton.GetFirewallRuleInput{
|
||||||
if err != nil {
|
ID: rs.Primary.ID,
|
||||||
|
})
|
||||||
|
if err != nil && triton.IsResourceNotFound(err) {
|
||||||
return fmt.Errorf("Bad: Check Firewall Rule Exists: %s", err)
|
return fmt.Errorf("Bad: Check Firewall Rule Exists: %s", err)
|
||||||
|
} else if err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if rule == nil {
|
if resp == nil {
|
||||||
return fmt.Errorf("Bad: Firewall rule %q does not exist", rs.Primary.ID)
|
return fmt.Errorf("Bad: Firewall Rule %q does not exist", rs.Primary.ID)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@ -110,20 +114,24 @@ func testCheckTritonFirewallRuleExists(name string) resource.TestCheckFunc {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func testCheckTritonFirewallRuleDestroy(s *terraform.State) error {
|
func testCheckTritonFirewallRuleDestroy(s *terraform.State) error {
|
||||||
conn := testAccProvider.Meta().(*cloudapi.Client)
|
conn := testAccProvider.Meta().(*triton.Client)
|
||||||
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
for _, rs := range s.RootModule().Resources {
|
||||||
if rs.Type != "triton_firewall_rule" {
|
if rs.Type != "triton_firewall_rule" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := conn.GetFirewallRule(rs.Primary.ID)
|
resp, err := conn.Firewall().GetFirewallRule(&triton.GetFirewallRuleInput{
|
||||||
if err != nil {
|
ID: rs.Primary.ID,
|
||||||
|
})
|
||||||
|
if triton.IsResourceNotFound(err) {
|
||||||
return nil
|
return nil
|
||||||
|
} else if err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
return fmt.Errorf("Bad: Firewall rule %q still exists", rs.Primary.ID)
|
return fmt.Errorf("Bad: Firewall Rule %q still exists", rs.Primary.ID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -132,21 +140,21 @@ func testCheckTritonFirewallRuleDestroy(s *terraform.State) error {
|
|||||||
|
|
||||||
var testAccTritonFirewallRule_basic = `
|
var testAccTritonFirewallRule_basic = `
|
||||||
resource "triton_firewall_rule" "test" {
|
resource "triton_firewall_rule" "test" {
|
||||||
rule = "FROM any TO tag www ALLOW tcp PORT 80"
|
rule = "FROM any TO tag \"www\" ALLOW tcp PORT 80"
|
||||||
enabled = false
|
enabled = false
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
|
|
||||||
var testAccTritonFirewallRule_update = `
|
var testAccTritonFirewallRule_update = `
|
||||||
resource "triton_firewall_rule" "test" {
|
resource "triton_firewall_rule" "test" {
|
||||||
rule = "FROM any TO tag www BLOCK tcp PORT 80"
|
rule = "FROM any TO tag \"www\" BLOCK tcp PORT 80"
|
||||||
enabled = true
|
enabled = true
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
|
|
||||||
var testAccTritonFirewallRule_enable = `
|
var testAccTritonFirewallRule_enable = `
|
||||||
resource "triton_firewall_rule" "test" {
|
resource "triton_firewall_rule" "test" {
|
||||||
rule = "FROM any TO tag www ALLOW tcp PORT 80"
|
rule = "FROM any TO tag \"www\" ALLOW tcp PORT 80"
|
||||||
enabled = true
|
enabled = true
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
|
@ -5,35 +5,30 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"github.com/joyent/gosdc/cloudapi"
|
"github.com/joyent/triton-go"
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrNoKeyComment will be returned when the key name cannot be generated from
|
|
||||||
// the key comment and is not otherwise specified.
|
|
||||||
ErrNoKeyComment = errors.New("no key comment found to use as a name (and none specified)")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceKey() *schema.Resource {
|
func resourceKey() *schema.Resource {
|
||||||
return &schema.Resource{
|
return &schema.Resource{
|
||||||
Create: resourceKeyCreate,
|
Create: resourceKeyCreate,
|
||||||
Exists: resourceKeyExists,
|
Exists: resourceKeyExists,
|
||||||
Read: resourceKeyRead,
|
Read: resourceKeyRead,
|
||||||
Delete: resourceKeyDelete,
|
Delete: resourceKeyDelete,
|
||||||
|
Timeouts: fastResourceTimeout,
|
||||||
Importer: &schema.ResourceImporter{
|
Importer: &schema.ResourceImporter{
|
||||||
State: resourceKeyImporter,
|
State: schema.ImportStatePassthrough,
|
||||||
},
|
},
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"name": &schema.Schema{
|
"name": {
|
||||||
Description: "name of this key (will be generated from the key comment, if not set and comment present)",
|
Description: "Name of the key (generated from the key comment if not set)",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
"key": &schema.Schema{
|
"key": {
|
||||||
Description: "content of public key from disk",
|
Description: "Content of public key from disk in OpenSSH format",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
@ -43,18 +38,18 @@ func resourceKey() *schema.Resource {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func resourceKeyCreate(d *schema.ResourceData, meta interface{}) error {
|
func resourceKeyCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
if d.Get("name").(string) == "" {
|
if keyName := d.Get("name").(string); keyName == "" {
|
||||||
parts := strings.SplitN(d.Get("key").(string), " ", 3)
|
parts := strings.SplitN(d.Get("key").(string), " ", 3)
|
||||||
if len(parts) == 3 {
|
if len(parts) == 3 {
|
||||||
d.Set("name", parts[2])
|
d.Set("name", parts[2])
|
||||||
} else {
|
} else {
|
||||||
return ErrNoKeyComment
|
return errors.New("No key name specified, and key material has no comment")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := client.CreateKey(cloudapi.CreateKeyOpts{
|
_, err := client.Keys().CreateKey(&triton.CreateKeyInput{
|
||||||
Name: d.Get("name").(string),
|
Name: d.Get("name").(string),
|
||||||
Key: d.Get("key").(string),
|
Key: d.Get("key").(string),
|
||||||
})
|
})
|
||||||
@ -64,35 +59,28 @@ func resourceKeyCreate(d *schema.ResourceData, meta interface{}) error {
|
|||||||
|
|
||||||
d.SetId(d.Get("name").(string))
|
d.SetId(d.Get("name").(string))
|
||||||
|
|
||||||
err = resourceKeyRead(d, meta)
|
return resourceKeyRead(d, meta)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) {
|
func resourceKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
keys, err := client.ListKeys()
|
_, err := client.Keys().GetKey(&triton.GetKeyInput{
|
||||||
|
KeyName: d.Id(),
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, key := range keys {
|
return true, nil
|
||||||
if key.Name == d.Id() {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceKeyRead(d *schema.ResourceData, meta interface{}) error {
|
func resourceKeyRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
key, err := client.GetKey(d.Id())
|
key, err := client.Keys().GetKey(&triton.GetKeyInput{
|
||||||
|
KeyName: d.Id(),
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -104,15 +92,9 @@ func resourceKeyRead(d *schema.ResourceData, meta interface{}) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func resourceKeyDelete(d *schema.ResourceData, meta interface{}) error {
|
func resourceKeyDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
if err := client.DeleteKey(d.Get("name").(string)); err != nil {
|
return client.Keys().DeleteKey(&triton.DeleteKeyInput{
|
||||||
return err
|
KeyName: d.Id(),
|
||||||
}
|
})
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resourceKeyImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
|
|
||||||
return []*schema.ResourceData{d}, nil
|
|
||||||
}
|
}
|
||||||
|
@@ -8,22 +8,57 @@ import (
 	"github.com/hashicorp/terraform/helper/acctest"
 	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/terraform"
-	"github.com/joyent/gosdc/cloudapi"
+	"github.com/joyent/triton-go"
 )
 
 func TestAccTritonKey_basic(t *testing.T) {
 	keyName := fmt.Sprintf("acctest-%d", acctest.RandInt())
-	config := fmt.Sprintf(testAccTritonKey_basic, keyName, testAccTritonKey_basicMaterial)
+	publicKeyMaterial, _, err := acctest.RandSSHKeyPair("TestAccTritonKey_basic@terraform")
+	if err != nil {
+		t.Fatalf("Cannot generate test SSH key pair: %s", err)
+	}
+	config := testAccTritonKey_basic(keyName, publicKeyMaterial)
 
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheck(t) },
 		Providers:    testAccProviders,
 		CheckDestroy: testCheckTritonKeyDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: config,
 				Check: resource.ComposeTestCheckFunc(
 					testCheckTritonKeyExists("triton_key.test"),
+					resource.TestCheckResourceAttr("triton_key.test", "name", keyName),
+					resource.TestCheckResourceAttr("triton_key.test", "key", publicKeyMaterial),
+					func(*terraform.State) error {
+						time.Sleep(10 * time.Second)
+						return nil
+					},
+				),
+			},
+		},
+	})
+}
+
+func TestAccTritonKey_noKeyName(t *testing.T) {
+	keyComment := fmt.Sprintf("acctest_%d@terraform", acctest.RandInt())
+	keyMaterial, _, err := acctest.RandSSHKeyPair(keyComment)
+	if err != nil {
+		t.Fatalf("Cannot generate test SSH key pair: %s", err)
+	}
+	config := testAccTritonKey_noKeyName(keyMaterial)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testCheckTritonKeyDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: config,
+				Check: resource.ComposeTestCheckFunc(
+					testCheckTritonKeyExists("triton_key.test"),
+					resource.TestCheckResourceAttr("triton_key.test", "name", keyComment),
+					resource.TestCheckResourceAttr("triton_key.test", "key", keyMaterial),
 					func(*terraform.State) error {
 						time.Sleep(10 * time.Second)
 						return nil
@@ -41,14 +76,16 @@ func testCheckTritonKeyExists(name string) resource.TestCheckFunc {
 		if !ok {
 			return fmt.Errorf("Not found: %s", name)
 		}
-		conn := testAccProvider.Meta().(*cloudapi.Client)
+		conn := testAccProvider.Meta().(*triton.Client)
 
-		rule, err := conn.GetKey(rs.Primary.ID)
+		key, err := conn.Keys().GetKey(&triton.GetKeyInput{
+			KeyName: rs.Primary.ID,
+		})
 		if err != nil {
 			return fmt.Errorf("Bad: Check Key Exists: %s", err)
 		}
 
-		if rule == nil {
+		if key == nil {
 			return fmt.Errorf("Bad: Key %q does not exist", rs.Primary.ID)
 		}
 
@@ -57,7 +94,7 @@ func testCheckTritonKeyExists(name string) resource.TestCheckFunc {
 }
 
 func testCheckTritonKeyDestroy(s *terraform.State) error {
-	conn := testAccProvider.Meta().(*cloudapi.Client)
+	conn := testAccProvider.Meta().(*triton.Client)
 
 	return resource.Retry(1*time.Minute, func() *resource.RetryError {
 		for _, rs := range s.RootModule().Resources {
@@ -65,12 +102,14 @@ func testCheckTritonKeyDestroy(s *terraform.State) error {
 				continue
 			}
 
-			resp, err := conn.GetKey(rs.Primary.ID)
+			key, err := conn.Keys().GetKey(&triton.GetKeyInput{
+				KeyName: rs.Primary.ID,
+			})
 			if err != nil {
 				return nil
 			}
 
-			if resp != nil {
+			if key != nil {
 				return resource.RetryableError(fmt.Errorf("Bad: Key %q still exists", rs.Primary.ID))
 			}
 		}
@@ -79,11 +118,17 @@ func testCheckTritonKeyDestroy(s *terraform.State) error {
 	})
 }
 
-var testAccTritonKey_basic = `
-resource "triton_key" "test" {
+var testAccTritonKey_basic = func(keyName string, keyMaterial string) string {
+	return fmt.Sprintf(`resource "triton_key" "test" {
 	name = "%s"
 	key = "%s"
+}
+`, keyName, keyMaterial)
 }
-`
 
-const testAccTritonKey_basicMaterial = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDL18KJIe8N7FxcgOMtabo10qZEDyYUSlOpsh/EYrugQCQHMKuNytog1lhFNZNk4LGNAz5L8/btG9+/axY/PfundbjR3SXt0hupAGQIVHuygWTr7foj5iGhckrEM+r3eMCXqoCnIFLhDZLDcq/zN2MxNbqDKcWSYmc8ul9dZWuiQpKOL+0nNXjhYA8Ewu+07kVAtsZD0WfvnAUjxmYb3rB15eBWk7gLxHrOPfZpeDSvOOX2bmzikpLn+L5NKrJsLrzO6hU/rpxD4OTHLULcsnIts3lYH8hShU8uY5ry94PBzdix++se3pUGvNSe967fKlHw3Ymh9nE/LJDQnzTNyFMj James@jn-mpb13`
+var testAccTritonKey_noKeyName = func(keyMaterial string) string {
+	return fmt.Sprintf(`resource "triton_key" "test" {
+	key = "%s"
+}
+`, keyMaterial)
+}
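The test changes above replace the hard-coded testAccTritonKey_basicMaterial constant with key material generated per run, so concurrent acceptance runs cannot collide on the same key name or material. A hedged sketch of that pattern, using only acctest.RandSSHKeyPair and fmt.Sprintf as seen in the diff; the test name and fixture below are placeholders, not one of the real tests.

package sketch

import (
	"fmt"
	"testing"

	"github.com/hashicorp/terraform/helper/acctest"
)

// TestExampleKeyConfig shows how a config fixture becomes a function once the
// key material is random: generate a throwaway SSH public key, then interpolate
// it into the HCL that the acceptance test steps would use.
func TestExampleKeyConfig(t *testing.T) {
	keyName := fmt.Sprintf("acctest-%d", acctest.RandInt())
	publicKeyMaterial, _, err := acctest.RandSSHKeyPair(keyName + "@terraform")
	if err != nil {
		t.Fatalf("Cannot generate test SSH key pair: %s", err)
	}

	config := fmt.Sprintf(`resource "triton_key" "test" {
  name = "%s"
  key  = "%s"
}`, keyName, publicKeyMaterial)
	_ = config // would normally be passed to a resource.TestCase step
}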
||||||
|
@ -2,22 +2,20 @@ package triton
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"reflect"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/hashcode"
|
"github.com/hashicorp/terraform/helper/hashcode"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"github.com/joyent/gosdc/cloudapi"
|
"github.com/joyent/triton-go"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
machineStateRunning = "running"
|
machineStateRunning = "running"
|
||||||
machineStateStopped = "stopped"
|
|
||||||
machineStateDeleted = "deleted"
|
machineStateDeleted = "deleted"
|
||||||
|
|
||||||
machineStateChangeTimeout = 10 * time.Minute
|
machineStateChangeTimeout = 10 * time.Minute
|
||||||
machineStateChangeCheckInterval = 10 * time.Second
|
|
||||||
|
|
||||||
resourceMachineMetadataKeys = map[string]string{
|
resourceMachineMetadataKeys = map[string]string{
|
||||||
// semantics: "schema_name": "metadata_name"
|
// semantics: "schema_name": "metadata_name"
|
||||||
@ -30,50 +28,46 @@ var (
|
|||||||
|
|
||||||
func resourceMachine() *schema.Resource {
|
func resourceMachine() *schema.Resource {
|
||||||
return &schema.Resource{
|
return &schema.Resource{
|
||||||
Create: resourceMachineCreate,
|
Create: resourceMachineCreate,
|
||||||
Exists: resourceMachineExists,
|
Exists: resourceMachineExists,
|
||||||
Read: resourceMachineRead,
|
Read: resourceMachineRead,
|
||||||
Update: resourceMachineUpdate,
|
Update: resourceMachineUpdate,
|
||||||
Delete: resourceMachineDelete,
|
Delete: resourceMachineDelete,
|
||||||
|
Timeouts: slowResourceTimeout,
|
||||||
Importer: &schema.ResourceImporter{
|
Importer: &schema.ResourceImporter{
|
||||||
State: resourceMachineImporter,
|
State: schema.ImportStatePassthrough,
|
||||||
},
|
},
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"name": {
|
"name": {
|
||||||
Description: "friendly name",
|
Description: "Friendly name for machine",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
ValidateFunc: resourceMachineValidateName,
|
ValidateFunc: resourceMachineValidateName,
|
||||||
},
|
},
|
||||||
"type": {
|
"type": {
|
||||||
Description: "machine type (smartmachine or virtualmachine)",
|
Description: "Machine type (smartmachine or virtualmachine)",
|
||||||
Type: schema.TypeString,
|
|
||||||
Computed: true,
|
|
||||||
},
|
|
||||||
"state": {
|
|
||||||
Description: "current state of the machine",
|
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
"dataset": {
|
"dataset": {
|
||||||
Description: "dataset URN the machine was provisioned with",
|
Description: "Dataset URN with which the machine was provisioned",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
"memory": {
|
"memory": {
|
||||||
Description: "amount of memory the machine has (in Mb)",
|
Description: "Amount of memory allocated to the machine (in Mb)",
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
"disk": {
|
"disk": {
|
||||||
Description: "amount of disk the machine has (in Gb)",
|
Description: "Amount of disk allocated to the machine (in Gb)",
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
"ips": {
|
"ips": {
|
||||||
Description: "IP addresses the machine has",
|
Description: "IP addresses assigned to the machine",
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
Elem: &schema.Schema{
|
Elem: &schema.Schema{
|
||||||
@ -81,39 +75,38 @@ func resourceMachine() *schema.Resource {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
"tags": {
|
"tags": {
|
||||||
Description: "machine tags",
|
Description: "Machine tags",
|
||||||
Type: schema.TypeMap,
|
Type: schema.TypeMap,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
},
|
},
|
||||||
"created": {
|
"created": {
|
||||||
Description: "when the machine was created",
|
Description: "When the machine was created",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
"updated": {
|
"updated": {
|
||||||
Description: "when the machine was update",
|
Description: "When the machine was updated",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
"package": {
|
"package": {
|
||||||
Description: "name of the package to use on provisioning",
|
Description: "The package for use for provisioning",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
},
|
},
|
||||||
"image": {
|
"image": {
|
||||||
Description: "image UUID",
|
Description: "UUID of the image",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
// TODO: validate that the UUID is valid
|
|
||||||
},
|
},
|
||||||
"primaryip": {
|
"primaryip": {
|
||||||
Description: "the primary (public) IP address for the machine",
|
Description: "Primary (public) IP address for the machine",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
"nic": {
|
"nic": {
|
||||||
Description: "network interface",
|
Description: "Network interface",
|
||||||
Type: schema.TypeSet,
|
Type: schema.TypeSet,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
@ -148,27 +141,27 @@ func resourceMachine() *schema.Resource {
|
|||||||
Computed: true,
|
Computed: true,
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
},
|
},
|
||||||
"state": {
|
"network": {
|
||||||
Description: "describes the state of the NIC (e.g. provisioning, running, or stopped)",
|
Description: "ID of the network to which the NIC is attached",
|
||||||
Computed: true,
|
Required: true,
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
},
|
},
|
||||||
"network": {
|
"state": {
|
||||||
Description: "Network ID this NIC is attached to",
|
Description: "Provisioning state of the NIC",
|
||||||
Required: true,
|
Computed: true,
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"firewall_enabled": {
|
"firewall_enabled": {
|
||||||
Description: "enable firewall for this machine",
|
Description: "Whether to enable the firewall for this machine",
|
||||||
Type: schema.TypeBool,
|
Type: schema.TypeBool,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Default: false,
|
Default: false,
|
||||||
},
|
},
|
||||||
"domain_names": {
|
"domain_names": {
|
||||||
Description: "list of domain names from Triton's CNS",
|
Description: "List of domain names from Triton CNS",
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
Elem: &schema.Schema{
|
Elem: &schema.Schema{
|
||||||
@ -178,25 +171,25 @@ func resourceMachine() *schema.Resource {
|
|||||||
|
|
||||||
// computed resources from metadata
|
// computed resources from metadata
|
||||||
"root_authorized_keys": {
|
"root_authorized_keys": {
|
||||||
Description: "authorized keys for the root user on this machine",
|
Description: "Authorized keys for the root user on this machine",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
"user_script": {
|
"user_script": {
|
||||||
Description: "user script to run on boot (every boot on SmartMachines)",
|
Description: "User script to run on boot (every boot on SmartMachines)",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
"user_data": {
|
"user_data": {
|
||||||
Description: "copied to machine on boot",
|
Description: "Data copied to machine on boot",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
"administrator_pw": {
|
"administrator_pw": {
|
||||||
Description: "administrator's initial password (Windows only)",
|
Description: "Administrator's initial password (Windows only)",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
@ -204,7 +197,7 @@ func resourceMachine() *schema.Resource {
|
|||||||
|
|
||||||
// deprecated fields
|
// deprecated fields
|
||||||
"networks": {
|
"networks": {
|
||||||
Description: "desired network IDs",
|
Description: "Desired network IDs",
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
@ -218,7 +211,7 @@ func resourceMachine() *schema.Resource {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func resourceMachineCreate(d *schema.ResourceData, meta interface{}) error {
|
func resourceMachineCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
var networks []string
|
var networks []string
|
||||||
for _, network := range d.Get("networks").([]interface{}) {
|
for _, network := range d.Get("networks").([]interface{}) {
|
||||||
@ -242,7 +235,7 @@ func resourceMachineCreate(d *schema.ResourceData, meta interface{}) error {
|
|||||||
tags[k] = v.(string)
|
tags[k] = v.(string)
|
||||||
}
|
}
|
||||||
|
|
||||||
machine, err := client.CreateMachine(cloudapi.CreateMachineOpts{
|
machine, err := client.Machines().CreateMachine(&triton.CreateMachineInput{
|
||||||
Name: d.Get("name").(string),
|
Name: d.Get("name").(string),
|
||||||
Package: d.Get("package").(string),
|
Package: d.Get("package").(string),
|
||||||
Image: d.Get("image").(string),
|
Image: d.Get("image").(string),
|
||||||
@@ -255,47 +248,64 @@ func resourceMachineCreate(d *schema.ResourceData, meta interface{}) error {
 		return err
 	}
 
-	err = waitForMachineState(client, machine.Id, machineStateRunning, machineStateChangeTimeout)
+	d.SetId(machine.ID)
+	stateConf := &resource.StateChangeConf{
+		Target: []string{fmt.Sprintf(machineStateRunning)},
+		Refresh: func() (interface{}, string, error) {
+			getResp, err := client.Machines().GetMachine(&triton.GetMachineInput{
+				ID: d.Id(),
+			})
+			if err != nil {
+				return nil, "", err
+			}
+
+			return getResp, getResp.State, nil
+		},
+		Timeout:    machineStateChangeTimeout,
+		MinTimeout: 3 * time.Second,
+	}
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return err
+	}
 	if err != nil {
 		return err
 	}
 
 	// refresh state after it provisions
-	d.SetId(machine.Id)
-	err = resourceMachineRead(d, meta)
-	if err != nil {
-		return err
-	}
-
-	return nil
+	return resourceMachineRead(d, meta)
 }
 
 func resourceMachineExists(d *schema.ResourceData, meta interface{}) (bool, error) {
-	client := meta.(*cloudapi.Client)
+	client := meta.(*triton.Client)
 
-	machine, err := client.GetMachine(d.Id())
-	return machine != nil && err == nil, err
+	return resourceExists(client.Machines().GetMachine(&triton.GetMachineInput{
+		ID: d.Id(),
+	}))
 }
 
 func resourceMachineRead(d *schema.ResourceData, meta interface{}) error {
-	client := meta.(*cloudapi.Client)
+	client := meta.(*triton.Client)
 
-	machine, err := client.GetMachine(d.Id())
+	machine, err := client.Machines().GetMachine(&triton.GetMachineInput{
+		ID: d.Id(),
+	})
 	if err != nil {
 		return err
 	}
 
-	nics, err := client.ListNICs(d.Id())
+	nics, err := client.Machines().ListNICs(&triton.ListNICsInput{
+		MachineID: d.Id(),
+	})
 	if err != nil {
 		return err
 	}
 
-	d.SetId(machine.Id)
 	d.Set("name", machine.Name)
 	d.Set("type", machine.Type)
 	d.Set("state", machine.State)
-	d.Set("dataset", machine.Dataset)
+	d.Set("dataset", machine.Image)
+	d.Set("image", machine.Image)
 	d.Set("memory", machine.Memory)
 	d.Set("disk", machine.Disk)
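Throughout this file the old waitFor/waitForMachineState polling helpers are replaced with resource.StateChangeConf from the Terraform helper/resource package. A compressed sketch of that pattern, generalised over which machine field is being waited on; the waitForMachineField name, the projection callback, and the *triton.Machine type name are illustrative assumptions, not provider code.

package sketch

import (
	"time"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/joyent/triton-go"
)

// waitForMachineField polls GetMachine and projects one field of the response
// into the "state" string that StateChangeConf compares against Target. This is
// the shape used above for machine state, name, package, firewall and metadata.
func waitForMachineField(client *triton.Client, id, target string,
	project func(*triton.Machine) string) error {

	stateConf := &resource.StateChangeConf{
		Target: []string{target},
		Refresh: func() (interface{}, string, error) {
			getResp, err := client.Machines().GetMachine(&triton.GetMachineInput{
				ID: id,
			})
			if err != nil {
				return nil, "", err
			}
			return getResp, project(getResp), nil
		},
		Timeout:    10 * time.Minute, // machineStateChangeTimeout in the provider
		MinTimeout: 3 * time.Second,
	}
	_, err := stateConf.WaitForState()
	return err
}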
d.Set("ips", machine.IPs)
|
d.Set("ips", machine.IPs)
|
||||||
@ -340,23 +350,40 @@ func resourceMachineRead(d *schema.ResourceData, meta interface{}) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func resourceMachineUpdate(d *schema.ResourceData, meta interface{}) error {
|
func resourceMachineUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
d.Partial(true)
|
d.Partial(true)
|
||||||
|
|
||||||
if d.HasChange("name") {
|
if d.HasChange("name") {
|
||||||
if err := client.RenameMachine(d.Id(), d.Get("name").(string)); err != nil {
|
oldNameInterface, newNameInterface := d.GetChange("name")
|
||||||
|
oldName := oldNameInterface.(string)
|
||||||
|
newName := newNameInterface.(string)
|
||||||
|
|
||||||
|
err := client.Machines().RenameMachine(&triton.RenameMachineInput{
|
||||||
|
ID: d.Id(),
|
||||||
|
Name: newName,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err := waitFor(
|
stateConf := &resource.StateChangeConf{
|
||||||
func() (bool, error) {
|
Pending: []string{oldName},
|
||||||
machine, err := client.GetMachine(d.Id())
|
Target: []string{newName},
|
||||||
return machine.Name == d.Get("name").(string), err
|
Refresh: func() (interface{}, string, error) {
|
||||||
|
getResp, err := client.Machines().GetMachine(&triton.GetMachineInput{
|
||||||
|
ID: d.Id(),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return getResp, getResp.Name, nil
|
||||||
},
|
},
|
||||||
machineStateChangeCheckInterval,
|
Timeout: machineStateChangeTimeout,
|
||||||
1*time.Minute,
|
MinTimeout: 3 * time.Second,
|
||||||
)
|
}
|
||||||
|
_, err = stateConf.WaitForState()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -372,22 +399,36 @@ func resourceMachineUpdate(d *schema.ResourceData, meta interface{}) error {
|
|||||||
|
|
||||||
var err error
|
var err error
|
||||||
if len(tags) == 0 {
|
if len(tags) == 0 {
|
||||||
err = client.DeleteMachineTags(d.Id())
|
err = client.Machines().DeleteMachineTags(&triton.DeleteMachineTagsInput{
|
||||||
|
ID: d.Id(),
|
||||||
|
})
|
||||||
} else {
|
} else {
|
||||||
_, err = client.ReplaceMachineTags(d.Id(), tags)
|
err = client.Machines().ReplaceMachineTags(&triton.ReplaceMachineTagsInput{
|
||||||
|
ID: d.Id(),
|
||||||
|
Tags: tags,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = waitFor(
|
expectedTagsMD5 := stableMapHash(tags)
|
||||||
func() (bool, error) {
|
stateConf := &resource.StateChangeConf{
|
||||||
machine, err := client.GetMachine(d.Id())
|
Target: []string{expectedTagsMD5},
|
||||||
return reflect.DeepEqual(machine.Tags, tags), err
|
Refresh: func() (interface{}, string, error) {
|
||||||
|
getResp, err := client.Machines().GetMachine(&triton.GetMachineInput{
|
||||||
|
ID: d.Id(),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return getResp, stableMapHash(getResp.Tags), nil
|
||||||
},
|
},
|
||||||
machineStateChangeCheckInterval,
|
Timeout: machineStateChangeTimeout,
|
||||||
1*time.Minute,
|
MinTimeout: 3 * time.Second,
|
||||||
)
|
}
|
||||||
|
_, err = stateConf.WaitForState()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -396,18 +437,32 @@ func resourceMachineUpdate(d *schema.ResourceData, meta interface{}) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if d.HasChange("package") {
|
if d.HasChange("package") {
|
||||||
if err := client.ResizeMachine(d.Id(), d.Get("package").(string)); err != nil {
|
newPackage := d.Get("package").(string)
|
||||||
|
|
||||||
|
err := client.Machines().ResizeMachine(&triton.ResizeMachineInput{
|
||||||
|
ID: d.Id(),
|
||||||
|
Package: newPackage,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err := waitFor(
|
stateConf := &resource.StateChangeConf{
|
||||||
func() (bool, error) {
|
Target: []string{fmt.Sprintf("%s@%s", newPackage, "running")},
|
||||||
machine, err := client.GetMachine(d.Id())
|
Refresh: func() (interface{}, string, error) {
|
||||||
return machine.Package == d.Get("package").(string) && machine.State == machineStateRunning, err
|
getResp, err := client.Machines().GetMachine(&triton.GetMachineInput{
|
||||||
|
ID: d.Id(),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return getResp, fmt.Sprintf("%s@%s", getResp.Package, getResp.State), nil
|
||||||
},
|
},
|
||||||
machineStateChangeCheckInterval,
|
Timeout: machineStateChangeTimeout,
|
||||||
machineStateChangeTimeout,
|
MinTimeout: 3 * time.Second,
|
||||||
)
|
}
|
||||||
|
_, err = stateConf.WaitForState()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -416,25 +471,38 @@ func resourceMachineUpdate(d *schema.ResourceData, meta interface{}) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if d.HasChange("firewall_enabled") {
|
if d.HasChange("firewall_enabled") {
|
||||||
|
enable := d.Get("firewall_enabled").(bool)
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
if d.Get("firewall_enabled").(bool) {
|
if enable {
|
||||||
err = client.EnableFirewallMachine(d.Id())
|
err = client.Machines().EnableMachineFirewall(&triton.EnableMachineFirewallInput{
|
||||||
|
ID: d.Id(),
|
||||||
|
})
|
||||||
} else {
|
} else {
|
||||||
err = client.DisableFirewallMachine(d.Id())
|
err = client.Machines().DisableMachineFirewall(&triton.DisableMachineFirewallInput{
|
||||||
|
ID: d.Id(),
|
||||||
|
})
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = waitFor(
|
stateConf := &resource.StateChangeConf{
|
||||||
func() (bool, error) {
|
Target: []string{fmt.Sprintf("%t", enable)},
|
||||||
machine, err := client.GetMachine(d.Id())
|
Refresh: func() (interface{}, string, error) {
|
||||||
return machine.FirewallEnabled == d.Get("firewall_enabled").(bool), err
|
getResp, err := client.Machines().GetMachine(&triton.GetMachineInput{
|
||||||
},
|
ID: d.Id(),
|
||||||
machineStateChangeCheckInterval,
|
})
|
||||||
machineStateChangeTimeout,
|
if err != nil {
|
||||||
)
|
return nil, "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return getResp, fmt.Sprintf("%t", getResp.FirewallEnabled), nil
|
||||||
|
},
|
||||||
|
Timeout: machineStateChangeTimeout,
|
||||||
|
MinTimeout: 3 * time.Second,
|
||||||
|
}
|
||||||
|
_, err = stateConf.WaitForState()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -452,24 +520,24 @@ func resourceMachineUpdate(d *schema.ResourceData, meta interface{}) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
oldNICs := o.(*schema.Set)
|
oldNICs := o.(*schema.Set)
|
||||||
newNICs := o.(*schema.Set)
|
newNICs := n.(*schema.Set)
|
||||||
|
|
||||||
// add new NICs that are not in old NICs
|
|
||||||
for _, nicI := range newNICs.Difference(oldNICs).List() {
|
for _, nicI := range newNICs.Difference(oldNICs).List() {
|
||||||
nic := nicI.(map[string]interface{})
|
nic := nicI.(map[string]interface{})
|
||||||
fmt.Printf("adding %+v\n", nic)
|
if _, err := client.Machines().AddNIC(&triton.AddNICInput{
|
||||||
_, err := client.AddNIC(d.Id(), nic["network"].(string))
|
MachineID: d.Id(),
|
||||||
if err != nil {
|
Network: nic["network"].(string),
|
||||||
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// remove old NICs that are not in new NICs
|
|
||||||
for _, nicI := range oldNICs.Difference(newNICs).List() {
|
for _, nicI := range oldNICs.Difference(newNICs).List() {
|
||||||
nic := nicI.(map[string]interface{})
|
nic := nicI.(map[string]interface{})
|
||||||
fmt.Printf("removing %+v\n", nic)
|
if err := client.Machines().RemoveNIC(&triton.RemoveNICInput{
|
||||||
err := client.RemoveNIC(d.Id(), nic["mac"].(string))
|
MachineID: d.Id(),
|
||||||
if err != nil {
|
MAC: nic["mac"].(string),
|
||||||
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -477,7 +545,6 @@ func resourceMachineUpdate(d *schema.ResourceData, meta interface{}) error {
|
|||||||
d.SetPartial("nic")
|
d.SetPartial("nic")
|
||||||
}
|
}
|
||||||
|
|
||||||
// metadata stuff
|
|
||||||
metadata := map[string]string{}
|
metadata := map[string]string{}
|
||||||
for schemaName, metadataKey := range resourceMachineMetadataKeys {
|
for schemaName, metadataKey := range resourceMachineMetadataKeys {
|
||||||
if d.HasChange(schemaName) {
|
if d.HasChange(schemaName) {
|
||||||
@ -485,24 +552,35 @@ func resourceMachineUpdate(d *schema.ResourceData, meta interface{}) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(metadata) > 0 {
|
if len(metadata) > 0 {
|
||||||
_, err := client.UpdateMachineMetadata(d.Id(), metadata)
|
if _, err := client.Machines().UpdateMachineMetadata(&triton.UpdateMachineMetadataInput{
|
||||||
if err != nil {
|
ID: d.Id(),
|
||||||
|
Metadata: metadata,
|
||||||
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = waitFor(
|
stateConf := &resource.StateChangeConf{
|
||||||
func() (bool, error) {
|
Target: []string{"converged"},
|
||||||
machine, err := client.GetMachine(d.Id())
|
Refresh: func() (interface{}, string, error) {
|
||||||
|
getResp, err := client.Machines().GetMachine(&triton.GetMachineInput{
|
||||||
|
ID: d.Id(),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
|
||||||
for k, v := range metadata {
|
for k, v := range metadata {
|
||||||
if provider_v, ok := machine.Metadata[k]; !ok || v != provider_v {
|
if upstream, ok := getResp.Metadata[k]; !ok || v != upstream {
|
||||||
return false, err
|
return getResp, "converging", nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return true, err
|
|
||||||
|
return getResp, "converged", nil
|
||||||
},
|
},
|
||||||
machineStateChangeCheckInterval,
|
Timeout: machineStateChangeTimeout,
|
||||||
1*time.Minute,
|
MinTimeout: 3 * time.Second,
|
||||||
)
|
}
|
||||||
|
_, err := stateConf.WaitForState()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -516,57 +594,43 @@ func resourceMachineUpdate(d *schema.ResourceData, meta interface{}) error {
|
|||||||
|
|
||||||
d.Partial(false)
|
d.Partial(false)
|
||||||
|
|
||||||
err := resourceMachineRead(d, meta)
|
return resourceMachineRead(d, meta)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceMachineDelete(d *schema.ResourceData, meta interface{}) error {
|
func resourceMachineDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
client := meta.(*cloudapi.Client)
|
client := meta.(*triton.Client)
|
||||||
|
|
||||||
state, err := readMachineState(client, d.Id())
|
err := client.Machines().DeleteMachine(&triton.DeleteMachineInput{
|
||||||
if state != machineStateStopped {
|
ID: d.Id(),
|
||||||
err = client.StopMachine(d.Id())
|
})
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
waitForMachineState(client, d.Id(), machineStateStopped, machineStateChangeTimeout)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = client.DeleteMachine(d.Id())
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
waitForMachineState(client, d.Id(), machineStateDeleted, machineStateChangeTimeout)
|
stateConf := &resource.StateChangeConf{
|
||||||
return nil
|
Target: []string{machineStateDeleted},
|
||||||
}
|
Refresh: func() (interface{}, string, error) {
|
||||||
|
getResp, err := client.Machines().GetMachine(&triton.GetMachineInput{
|
||||||
|
ID: d.Id(),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
if triton.IsResourceNotFound(err) {
|
||||||
|
return nil, "deleted", nil
|
||||||
|
}
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
|
||||||
func readMachineState(api *cloudapi.Client, id string) (string, error) {
|
return getResp, getResp.State, nil
|
||||||
machine, err := api.GetMachine(id)
|
},
|
||||||
|
Timeout: machineStateChangeTimeout,
|
||||||
|
MinTimeout: 3 * time.Second,
|
||||||
|
}
|
||||||
|
_, err = stateConf.WaitForState()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return machine.State, nil
|
return nil
|
||||||
}
|
|
||||||
|
|
||||||
// waitForMachineState waits for a machine to be in the desired state (waiting
|
|
||||||
// some seconds between each poll). If it doesn't reach the state within the
|
|
||||||
// duration specified in `timeout`, it returns ErrMachineStateTimeout.
|
|
||||||
func waitForMachineState(api *cloudapi.Client, id, state string, timeout time.Duration) error {
|
|
||||||
return waitFor(
|
|
||||||
func() (bool, error) {
|
|
||||||
currentState, err := readMachineState(api, id)
|
|
||||||
return currentState == state, err
|
|
||||||
},
|
|
||||||
machineStateChangeCheckInterval,
|
|
||||||
machineStateChangeTimeout,
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceMachineValidateName(value interface{}, name string) (warnings []string, errors []error) {
|
func resourceMachineValidateName(value interface{}, name string) (warnings []string, errors []error) {
|
||||||
@ -580,7 +644,3 @@ func resourceMachineValidateName(value interface{}, name string) (warnings []str
|
|||||||
|
|
||||||
return warnings, errors
|
return warnings, errors
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceMachineImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
|
|
||||||
return []*schema.ResourceData{d}, nil
|
|
||||||
}
|
|
||||||
|
@ -2,14 +2,16 @@ package triton
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log"
|
||||||
"regexp"
|
"regexp"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/davecgh/go-spew/spew"
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
"github.com/hashicorp/terraform/terraform"
|
||||||
"github.com/joyent/gosdc/cloudapi"
|
"github.com/joyent/triton-go"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccTritonMachine_basic(t *testing.T) {
|
func TestAccTritonMachine_basic(t *testing.T) {
|
||||||
@ -21,7 +23,7 @@ func TestAccTritonMachine_basic(t *testing.T) {
|
|||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testCheckTritonMachineDestroy,
|
CheckDestroy: testCheckTritonMachineDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: config,
|
Config: config,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonMachineExists("triton_machine.test"),
|
testCheckTritonMachineExists("triton_machine.test"),
|
||||||
@ -44,20 +46,16 @@ func TestAccTritonMachine_dns(t *testing.T) {
|
|||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testCheckTritonMachineDestroy,
|
CheckDestroy: testCheckTritonMachineDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: dns_output,
|
Config: dns_output,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonMachineExists("triton_machine.test"),
|
testCheckTritonMachineExists("triton_machine.test"),
|
||||||
func(*terraform.State) error {
|
func(state *terraform.State) error {
|
||||||
time.Sleep(10 * time.Second)
|
time.Sleep(10 * time.Second)
|
||||||
|
log.Printf("[DEBUG] %s", spew.Sdump(state))
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
),
|
resource.TestMatchOutput("domain_names", regexp.MustCompile(".*acctest-.*")),
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
Config: dns_output,
|
|
||||||
Check: resource.TestMatchOutput(
|
|
||||||
"domain_names", regexp.MustCompile(".*acctest-.*"),
|
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -66,14 +64,14 @@ func TestAccTritonMachine_dns(t *testing.T) {
|
|||||||
|
|
||||||
func TestAccTritonMachine_nic(t *testing.T) {
|
func TestAccTritonMachine_nic(t *testing.T) {
|
||||||
machineName := fmt.Sprintf("acctest-%d", acctest.RandInt())
|
machineName := fmt.Sprintf("acctest-%d", acctest.RandInt())
|
||||||
config := fmt.Sprintf(testAccTritonMachine_withnic, machineName, machineName)
|
config := testAccTritonMachine_singleNIC(machineName, acctest.RandIntRange(1024, 2048))
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testCheckTritonMachineDestroy,
|
CheckDestroy: testCheckTritonMachineDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: config,
|
Config: config,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonMachineExists("triton_machine.test"),
|
testCheckTritonMachineExists("triton_machine.test"),
|
||||||
@ -88,32 +86,33 @@ func TestAccTritonMachine_nic(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccTritonMachine_addnic(t *testing.T) {
|
func TestAccTritonMachine_addNIC(t *testing.T) {
|
||||||
machineName := fmt.Sprintf("acctest-%d", acctest.RandInt())
|
machineName := fmt.Sprintf("acctest-%d", acctest.RandInt())
|
||||||
without := fmt.Sprintf(testAccTritonMachine_withoutnic, machineName, machineName)
|
vlanNumber := acctest.RandIntRange(1024, 2048)
|
||||||
with := fmt.Sprintf(testAccTritonMachine_withnic, machineName, machineName)
|
|
||||||
|
singleNICConfig := testAccTritonMachine_singleNIC(machineName, vlanNumber)
|
||||||
|
dualNICConfig := testAccTritonMachine_dualNIC(machineName, vlanNumber)
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testCheckTritonMachineDestroy,
|
CheckDestroy: testCheckTritonMachineDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: without,
|
Config: singleNICConfig,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonMachineExists("triton_machine.test"),
|
testCheckTritonMachineExists("triton_machine.test"),
|
||||||
func(*terraform.State) error {
|
func(*terraform.State) error {
|
||||||
time.Sleep(10 * time.Second)
|
time.Sleep(10 * time.Second)
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
testCheckTritonMachineHasNoFabric("triton_machine.test", "triton_fabric.test"),
|
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
resource.TestStep{
|
{
|
||||||
Config: with,
|
Config: dualNICConfig,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonMachineExists("triton_machine.test"),
|
testCheckTritonMachineExists("triton_machine.test"),
|
||||||
testCheckTritonMachineHasFabric("triton_machine.test", "triton_fabric.test"),
|
testCheckTritonMachineHasFabric("triton_machine.test", "triton_fabric.test_add"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -127,14 +126,16 @@ func testCheckTritonMachineExists(name string) resource.TestCheckFunc {
|
|||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("Not found: %s", name)
|
return fmt.Errorf("Not found: %s", name)
|
||||||
}
|
}
|
||||||
conn := testAccProvider.Meta().(*cloudapi.Client)
|
conn := testAccProvider.Meta().(*triton.Client)
|
||||||
|
|
||||||
rule, err := conn.GetMachine(rs.Primary.ID)
|
machine, err := conn.Machines().GetMachine(&triton.GetMachineInput{
|
||||||
|
ID: rs.Primary.ID,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Bad: Check Machine Exists: %s", err)
|
return fmt.Errorf("Bad: Check Machine Exists: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if rule == nil {
|
if machine == nil {
|
||||||
return fmt.Errorf("Bad: Machine %q does not exist", rs.Primary.ID)
|
return fmt.Errorf("Bad: Machine %q does not exist", rs.Primary.ID)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -154,9 +155,11 @@ func testCheckTritonMachineHasFabric(name, fabricName string) resource.TestCheck
|
|||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("Not found: %s", fabricName)
|
return fmt.Errorf("Not found: %s", fabricName)
|
||||||
}
|
}
|
||||||
conn := testAccProvider.Meta().(*cloudapi.Client)
|
conn := testAccProvider.Meta().(*triton.Client)
|
||||||
|
|
||||||
nics, err := conn.ListNICs(machine.Primary.ID)
|
nics, err := conn.Machines().ListNICs(&triton.ListNICsInput{
|
||||||
|
MachineID: machine.Primary.ID,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Bad: Check NICs Exist: %s", err)
|
return fmt.Errorf("Bad: Check NICs Exist: %s", err)
|
||||||
}
|
}
|
||||||
@ -171,49 +174,25 @@ func testCheckTritonMachineHasFabric(name, fabricName string) resource.TestCheck
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func testCheckTritonMachineHasNoFabric(name, fabricName string) resource.TestCheckFunc {
|
|
||||||
return func(s *terraform.State) error {
|
|
||||||
// Ensure we have enough information in state to look up in API
|
|
||||||
machine, ok := s.RootModule().Resources[name]
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("Not found: %s", name)
|
|
||||||
}
|
|
||||||
|
|
||||||
network, ok := s.RootModule().Resources[fabricName]
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("Not found: %s", fabricName)
|
|
||||||
}
|
|
||||||
conn := testAccProvider.Meta().(*cloudapi.Client)
|
|
||||||
|
|
||||||
nics, err := conn.ListNICs(machine.Primary.ID)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Bad: Check NICs Exist: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, nic := range nics {
|
|
||||||
if nic.Network == network.Primary.ID {
|
|
||||||
return fmt.Errorf("Bad: Machine %q has Fabric %q", machine.Primary.ID, network.Primary.ID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func testCheckTritonMachineDestroy(s *terraform.State) error {
|
func testCheckTritonMachineDestroy(s *terraform.State) error {
|
||||||
conn := testAccProvider.Meta().(*cloudapi.Client)
|
conn := testAccProvider.Meta().(*triton.Client)
|
||||||
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
for _, rs := range s.RootModule().Resources {
|
||||||
if rs.Type != "triton_machine" {
|
if rs.Type != "triton_machine" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := conn.GetMachine(rs.Primary.ID)
|
resp, err := conn.Machines().GetMachine(&triton.GetMachineInput{
|
||||||
|
ID: rs.Primary.ID,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
if triton.IsResourceNotFound(err) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if resp != nil {
|
if resp != nil && resp.State != machineStateDeleted {
|
||||||
return fmt.Errorf("Bad: Machine %q still exists", rs.Primary.ID)
|
return fmt.Errorf("Bad: Machine %q still exists", rs.Primary.ID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -231,7 +210,7 @@ func TestAccTritonMachine_firewall(t *testing.T) {
|
|||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testCheckTritonMachineDestroy,
|
CheckDestroy: testCheckTritonMachineDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: enabled_config,
|
Config: enabled_config,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonMachineExists("triton_machine.test"),
|
testCheckTritonMachineExists("triton_machine.test"),
|
||||||
@ -239,7 +218,7 @@ func TestAccTritonMachine_firewall(t *testing.T) {
|
|||||||
"triton_machine.test", "firewall_enabled", "true"),
|
"triton_machine.test", "firewall_enabled", "true"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
resource.TestStep{
|
{
|
||||||
Config: disabled_config,
|
Config: disabled_config,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonMachineExists("triton_machine.test"),
|
testCheckTritonMachineExists("triton_machine.test"),
|
||||||
@ -247,7 +226,7 @@ func TestAccTritonMachine_firewall(t *testing.T) {
|
|||||||
"triton_machine.test", "firewall_enabled", "false"),
|
"triton_machine.test", "firewall_enabled", "false"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
resource.TestStep{
|
{
|
||||||
Config: enabled_config,
|
Config: enabled_config,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonMachineExists("triton_machine.test"),
|
testCheckTritonMachineExists("triton_machine.test"),
|
||||||
@ -271,13 +250,13 @@ func TestAccTritonMachine_metadata(t *testing.T) {
|
|||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testCheckTritonMachineDestroy,
|
CheckDestroy: testCheckTritonMachineDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: basic,
|
Config: basic,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonMachineExists("triton_machine.test"),
|
testCheckTritonMachineExists("triton_machine.test"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
resource.TestStep{
|
{
|
||||||
Config: add_metadata,
|
Config: add_metadata,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonMachineExists("triton_machine.test"),
|
testCheckTritonMachineExists("triton_machine.test"),
|
||||||
@ -285,7 +264,7 @@ func TestAccTritonMachine_metadata(t *testing.T) {
|
|||||||
"triton_machine.test", "user_data", "hello"),
|
"triton_machine.test", "user_data", "hello"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
resource.TestStep{
|
{
|
||||||
Config: add_metadata_2,
|
Config: add_metadata_2,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonMachineExists("triton_machine.test"),
|
testCheckTritonMachineExists("triton_machine.test"),
|
||||||
@ -294,7 +273,7 @@ func TestAccTritonMachine_metadata(t *testing.T) {
|
|||||||
"tags.triton.cns.services", "test-cns-service"),
|
"tags.triton.cns.services", "test-cns-service"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
resource.TestStep{
|
{
|
||||||
Config: add_metadata_3,
|
Config: add_metadata_3,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testCheckTritonMachineExists("triton_machine.test"),
|
testCheckTritonMachineExists("triton_machine.test"),
|
||||||
@ -311,7 +290,7 @@ var testAccTritonMachine_basic = `
|
|||||||
resource "triton_machine" "test" {
|
resource "triton_machine" "test" {
|
||||||
name = "%s"
|
name = "%s"
|
||||||
package = "g4-general-4G"
|
package = "g4-general-4G"
|
||||||
image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e"
|
image = "fb5fe970-e6e4-11e6-9820-4b51be190db9"
|
||||||
|
|
||||||
tags = {
|
tags = {
|
||||||
test = "hello!"
|
test = "hello!"
|
||||||
@ -332,7 +311,7 @@ var testAccTritonMachine_firewall_1 = `
|
|||||||
resource "triton_machine" "test" {
|
resource "triton_machine" "test" {
|
||||||
name = "%s"
|
name = "%s"
|
||||||
package = "g4-general-4G"
|
package = "g4-general-4G"
|
||||||
image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e"
|
image = "fb5fe970-e6e4-11e6-9820-4b51be190db9"
|
||||||
|
|
||||||
firewall_enabled = 1
|
firewall_enabled = 1
|
||||||
}
|
}
|
||||||
@ -361,7 +340,7 @@ variable "tags" {
|
|||||||
resource "triton_machine" "test" {
|
resource "triton_machine" "test" {
|
||||||
name = "%s"
|
name = "%s"
|
||||||
package = "g4-highcpu-128M"
|
package = "g4-highcpu-128M"
|
||||||
image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e"
|
image = "fb5fe970-e6e4-11e6-9820-4b51be190db9"
|
||||||
|
|
||||||
user_data = "hello"
|
user_data = "hello"
|
||||||
|
|
||||||
@ -372,7 +351,7 @@ var testAccTritonMachine_metadata_3 = `
|
|||||||
resource "triton_machine" "test" {
|
resource "triton_machine" "test" {
|
||||||
name = "%s"
|
name = "%s"
|
||||||
package = "g4-highcpu-128M"
|
package = "g4-highcpu-128M"
|
||||||
image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e"
|
image = "fb5fe970-e6e4-11e6-9820-4b51be190db9"
|
||||||
|
|
||||||
user_data = "hello"
|
user_data = "hello"
|
||||||
|
|
||||||
@ -382,57 +361,91 @@ resource "triton_machine" "test" {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
var testAccTritonMachine_withnic = `
|
var testAccTritonMachine_singleNIC = func(name string, vlanNumber int) string {
|
||||||
|
return fmt.Sprintf(`resource "triton_vlan" "test" {
|
||||||
|
vlan_id = %d
|
||||||
|
name = "%s-vlan"
|
||||||
|
description = "test vlan"
|
||||||
|
}
|
||||||
|
|
||||||
resource "triton_fabric" "test" {
|
resource "triton_fabric" "test" {
|
||||||
name = "%s-network"
|
name = "%s-network"
|
||||||
description = "test network"
|
description = "test network"
|
||||||
vlan_id = 2 # every DC seems to have a vlan 2 available
|
vlan_id = "${triton_vlan.test.vlan_id}"
|
||||||
|
|
||||||
subnet = "10.0.0.0/22"
|
subnet = "10.10.0.0/24"
|
||||||
gateway = "10.0.0.1"
|
gateway = "10.10.0.1"
|
||||||
provision_start_ip = "10.0.0.5"
|
provision_start_ip = "10.10.0.10"
|
||||||
provision_end_ip = "10.0.3.250"
|
provision_end_ip = "10.10.0.250"
|
||||||
|
|
||||||
resolvers = ["8.8.8.8", "8.8.4.4"]
|
resolvers = ["8.8.8.8", "8.8.4.4"]
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "triton_machine" "test" {
|
resource "triton_machine" "test" {
|
||||||
name = "%s"
|
name = "%s-instance"
|
||||||
package = "g4-general-4G"
|
package = "g4-highcpu-128M"
|
||||||
image = "842e6fa6-6e9b-11e5-8402-1b490459e334"
|
image = "fb5fe970-e6e4-11e6-9820-4b51be190db9"
|
||||||
|
|
||||||
tags = {
|
tags = {
|
||||||
test = "hello!"
|
test = "Test"
|
||||||
}
|
}
|
||||||
|
|
||||||
nic { network = "${triton_fabric.test.id}" }
|
nic {
|
||||||
|
network = "${triton_fabric.test.id}"
|
||||||
|
}
|
||||||
|
}`, vlanNumber, name, name, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
var testAccTritonMachine_dualNIC = func(name string, vlanNumber int) string {
|
||||||
|
return fmt.Sprintf(`resource "triton_vlan" "test" {
|
||||||
|
vlan_id = %d
|
||||||
|
name = "%s-vlan"
|
||||||
|
description = "test vlan"
|
||||||
}
|
}
|
||||||
`
|
|
||||||
|
|
||||||
var testAccTritonMachine_withoutnic = `
|
|
||||||
resource "triton_fabric" "test" {
|
resource "triton_fabric" "test" {
|
||||||
name = "%s-network"
|
name = "%s-network"
|
||||||
description = "test network"
|
description = "test network"
|
||||||
vlan_id = 2 # every DC seems to have a vlan 2 available
|
vlan_id = "${triton_vlan.test.vlan_id}"
|
||||||
|
|
||||||
subnet = "10.0.0.0/22"
|
subnet = "10.10.0.0/24"
|
||||||
gateway = "10.0.0.1"
|
gateway = "10.10.0.1"
|
||||||
provision_start_ip = "10.0.0.5"
|
provision_start_ip = "10.10.0.10"
|
||||||
provision_end_ip = "10.0.3.250"
|
provision_end_ip = "10.10.0.250"
|
||||||
|
|
||||||
resolvers = ["8.8.8.8", "8.8.4.4"]
|
resolvers = ["8.8.8.8", "8.8.4.4"]
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "triton_fabric" "test_add" {
|
||||||
|
name = "%s-network-2"
|
||||||
|
description = "test network 2"
|
||||||
|
vlan_id = "${triton_vlan.test.vlan_id}"
|
||||||
|
|
||||||
|
subnet = "172.23.0.0/24"
|
||||||
|
gateway = "172.23.0.1"
|
||||||
|
provision_start_ip = "172.23.0.10"
|
||||||
|
provision_end_ip = "172.23.0.250"
|
||||||
|
|
||||||
|
resolvers = ["8.8.8.8", "8.8.4.4"]
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "triton_machine" "test" {
|
resource "triton_machine" "test" {
|
||||||
name = "%s"
|
name = "%s-instance"
|
||||||
package = "g4-general-4G"
|
package = "g4-highcpu-128M"
|
||||||
image = "842e6fa6-6e9b-11e5-8402-1b490459e334"
|
image = "fb5fe970-e6e4-11e6-9820-4b51be190db9"
|
||||||
|
|
||||||
tags = {
|
tags = {
|
||||||
test = "hello!"
|
test = "Test"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
nic {
|
||||||
|
network = "${triton_fabric.test.id}"
|
||||||
|
}
|
||||||
|
nic {
|
||||||
|
network = "${triton_fabric.test_add.id}"
|
||||||
|
}
|
||||||
|
}`, vlanNumber, name, name, name, name)
|
||||||
}
|
}
|
||||||
`
|
|
||||||
|
|
||||||
var testAccTritonMachine_dns = `
|
var testAccTritonMachine_dns = `
|
||||||
provider "triton" {
|
provider "triton" {
|
||||||
@ -441,8 +454,9 @@ provider "triton" {
|
|||||||
resource "triton_machine" "test" {
|
resource "triton_machine" "test" {
|
||||||
name = "%s"
|
name = "%s"
|
||||||
package = "g4-highcpu-128M"
|
package = "g4-highcpu-128M"
|
||||||
image = "e1faace4-e19b-11e5-928b-83849e2fd94a"
|
image = "fb5fe970-e6e4-11e6-9820-4b51be190db9"
|
||||||
}
|
}
|
||||||
|
|
||||||
output "domain_names" {
|
output "domain_names" {
|
||||||
value = "${join(", ", triton_machine.test.domain_names)}"
|
value = "${join(", ", triton_machine.test.domain_names)}"
|
||||||
}
|
}
|
||||||
|
@@ -5,23 +5,24 @@ import (
 	"strconv"
 
 	"github.com/hashicorp/terraform/helper/schema"
-	"github.com/joyent/gosdc/cloudapi"
+	"github.com/joyent/triton-go"
 )
 
 func resourceVLAN() *schema.Resource {
 	return &schema.Resource{
 		Create: resourceVLANCreate,
 		Exists: resourceVLANExists,
 		Read:   resourceVLANRead,
 		Update: resourceVLANUpdate,
 		Delete: resourceVLANDelete,
+		Timeouts: fastResourceTimeout,
 		Importer: &schema.ResourceImporter{
-			State: resourceVLANImporter,
+			State: schema.ImportStatePassthrough,
 		},
 
 		Schema: map[string]*schema.Schema{
 			"vlan_id": {
-				Description: "number between 0-4095 indicating VLAN ID",
+				Description: "Number between 0-4095 indicating VLAN ID",
 				Required:    true,
 				ForceNew:    true,
 				Type:        schema.TypeInt,
@@ -39,7 +40,7 @@ func resourceVLAN() *schema.Resource {
 				Type: schema.TypeString,
 			},
 			"description": {
-				Description: "Optional description of the VLAN",
+				Description: "Description of the VLAN",
 				Optional:    true,
 				Type:        schema.TypeString,
 			},
@@ -48,10 +49,10 @@ func resourceVLAN() *schema.Resource {
 }
 
 func resourceVLANCreate(d *schema.ResourceData, meta interface{}) error {
-	client := meta.(*cloudapi.Client)
+	client := meta.(*triton.Client)
 
-	vlan, err := client.CreateFabricVLAN(cloudapi.FabricVLAN{
-		Id:          int16(d.Get("vlan_id").(int)),
+	vlan, err := client.Fabrics().CreateFabricVLAN(&triton.CreateFabricVLANInput{
+		ID:          d.Get("vlan_id").(int),
 		Name:        d.Get("name").(string),
 		Description: d.Get("description").(string),
 	})
@@ -59,33 +60,39 @@ func resourceVLANCreate(d *schema.ResourceData, meta interface{}) error {
 		return err
 	}
 
-	d.SetId(resourceVLANIDString(vlan.Id))
+	d.SetId(strconv.Itoa(vlan.ID))
 	return resourceVLANRead(d, meta)
 }
 
 func resourceVLANExists(d *schema.ResourceData, meta interface{}) (bool, error) {
-	client := meta.(*cloudapi.Client)
+	client := meta.(*triton.Client)
 
-	id, err := resourceVLANIDInt16(d.Id())
+	id, err := resourceVLANIDInt(d.Id())
 	if err != nil {
 		return false, err
 	}
 
-	vlan, err := client.GetFabricVLAN(id)
-
-	return vlan != nil && err == nil, err
+	return resourceExists(client.Fabrics().GetFabricVLAN(&triton.GetFabricVLANInput{
+		ID: id,
+	}))
 }
 
 func resourceVLANRead(d *schema.ResourceData, meta interface{}) error {
-	client := meta.(*cloudapi.Client)
+	client := meta.(*triton.Client)
 
-	vlan, err := client.GetFabricVLAN(int16(d.Get("vlan_id").(int)))
+	id, err := resourceVLANIDInt(d.Id())
 	if err != nil {
 		return err
 	}
 
-	d.SetId(resourceVLANIDString(vlan.Id))
-	d.Set("vlan_id", vlan.Id)
+	vlan, err := client.Fabrics().GetFabricVLAN(&triton.GetFabricVLANInput{
+		ID: id,
+	})
+	if err != nil {
+		return err
+	}
+
+	d.Set("vlan_id", vlan.ID)
 	d.Set("name", vlan.Name)
 	d.Set("description", vlan.Description)
 
@@ -93,10 +100,10 @@ func resourceVLANRead(d *schema.ResourceData, meta interface{}) error {
 }
 
 func resourceVLANUpdate(d *schema.ResourceData, meta interface{}) error {
-	client := meta.(*cloudapi.Client)
+	client := meta.(*triton.Client)
 
-	vlan, err := client.UpdateFabricVLAN(cloudapi.FabricVLAN{
-		Id:          int16(d.Get("vlan_id").(int)),
+	vlan, err := client.Fabrics().UpdateFabricVLAN(&triton.UpdateFabricVLANInput{
+		ID:          d.Get("vlan_id").(int),
 		Name:        d.Get("name").(string),
 		Description: d.Get("description").(string),
 	})
@@ -104,36 +111,28 @@ func resourceVLANUpdate(d *schema.ResourceData, meta interface{}) error {
 		return err
 	}
 
-	d.SetId(resourceVLANIDString(vlan.Id))
+	d.SetId(strconv.Itoa(vlan.ID))
 	return resourceVLANRead(d, meta)
 }
 
 func resourceVLANDelete(d *schema.ResourceData, meta interface{}) error {
-	client := meta.(*cloudapi.Client)
+	client := meta.(*triton.Client)
 
-	id, err := resourceVLANIDInt16(d.Id())
+	id, err := resourceVLANIDInt(d.Id())
 	if err != nil {
 		return err
 	}
 
-	return client.DeleteFabricVLAN(id)
+	return client.Fabrics().DeleteFabricVLAN(&triton.DeleteFabricVLANInput{
+		ID: id,
+	})
 }
 
-// convenience conversion functions
-
-func resourceVLANIDString(id int16) string {
-	return strconv.Itoa(int(id))
-}
-
-func resourceVLANIDInt16(id string) (int16, error) {
-	result, err := strconv.ParseInt(id, 10, 16)
+func resourceVLANIDInt(id string) (int, error) {
+	result, err := strconv.ParseInt(id, 10, 32)
 	if err != nil {
-		return 0, err
+		return -1, err
 	}
 
-	return int16(result), nil
+	return int(result), nil
 }
-
-func resourceVLANImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
-	return []*schema.ResourceData{d}, nil
-}
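As context for the hunks above: the provider now talks to Triton through joyent/triton-go, where every Fabrics operation takes a typed input struct and VLAN IDs are plain `int` values. The following is a minimal, illustrative sketch of the same calls used outside the provider; the package and function names are invented for the example, and it assumes an already-configured `*triton.Client` (client construction is elided).

```go
package example

import (
	"fmt"

	triton "github.com/joyent/triton-go"
)

// createReadDeleteVLAN mirrors the calls used by the updated resource:
// create a fabric VLAN, read it back by ID, then delete it again.
// The caller supplies an already-configured *triton.Client.
func createReadDeleteVLAN(client *triton.Client) error {
	vlan, err := client.Fabrics().CreateFabricVLAN(&triton.CreateFabricVLANInput{
		ID:          1024,
		Name:        "example-vlan",
		Description: "example fabric VLAN",
	})
	if err != nil {
		return err
	}

	// Read it back, as resourceVLANRead does.
	read, err := client.Fabrics().GetFabricVLAN(&triton.GetFabricVLANInput{ID: vlan.ID})
	if err != nil {
		return err
	}
	fmt.Printf("VLAN %d (%s)\n", read.ID, read.Name)

	// Clean up, as resourceVLANDelete does.
	return client.Fabrics().DeleteFabricVLAN(&triton.DeleteFabricVLANInput{ID: vlan.ID})
}
```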
@@ -2,22 +2,24 @@ package triton
 
 import (
 	"fmt"
+	"strconv"
 	"testing"
 
+	"github.com/hashicorp/terraform/helper/acctest"
 	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/terraform"
-	"github.com/joyent/gosdc/cloudapi"
+	"github.com/joyent/triton-go"
 )
 
 func TestAccTritonVLAN_basic(t *testing.T) {
-	config := testAccTritonVLAN_basic
+	config := testAccTritonVLAN_basic(acctest.RandIntRange(3, 2048))
 
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheck(t) },
 		Providers:    testAccProviders,
 		CheckDestroy: testCheckTritonVLANDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: config,
 				Check: resource.ComposeTestCheckFunc(
 					testCheckTritonVLANExists("triton_vlan.test"),
@@ -28,27 +30,30 @@ func TestAccTritonVLAN_basic(t *testing.T) {
 }
 
 func TestAccTritonVLAN_update(t *testing.T) {
-	preConfig := testAccTritonVLAN_basic
-	postConfig := testAccTritonVLAN_update
+	vlanNumber := acctest.RandIntRange(3, 2048)
+	preConfig := testAccTritonVLAN_basic(vlanNumber)
+	postConfig := testAccTritonVLAN_update(vlanNumber)
 
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheck(t) },
 		Providers:    testAccProviders,
 		CheckDestroy: testCheckTritonVLANDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
+			{
 				Config: preConfig,
 				Check: resource.ComposeTestCheckFunc(
 					testCheckTritonVLANExists("triton_vlan.test"),
+					resource.TestCheckResourceAttr("triton_vlan.test", "vlan_id", strconv.Itoa(vlanNumber)),
 					resource.TestCheckResourceAttr("triton_vlan.test", "name", "test-vlan"),
 					resource.TestCheckResourceAttr("triton_vlan.test", "description", "test vlan"),
 				),
 			},
 
-			resource.TestStep{
+			{
 				Config: postConfig,
 				Check: resource.ComposeTestCheckFunc(
 					testCheckTritonVLANExists("triton_vlan.test"),
+					resource.TestCheckResourceAttr("triton_vlan.test", "vlan_id", strconv.Itoa(vlanNumber)),
 					resource.TestCheckResourceAttr("triton_vlan.test", "name", "test-vlan-2"),
 					resource.TestCheckResourceAttr("triton_vlan.test", "description", "test vlan 2"),
 				),
@@ -64,19 +69,23 @@ func testCheckTritonVLANExists(name string) resource.TestCheckFunc {
 		if !ok {
 			return fmt.Errorf("Not found: %s", name)
 		}
-		conn := testAccProvider.Meta().(*cloudapi.Client)
+		conn := testAccProvider.Meta().(*triton.Client)
 
-		id, err := resourceVLANIDInt16(rs.Primary.ID)
+		id, err := resourceVLANIDInt(rs.Primary.ID)
 		if err != nil {
 			return err
 		}
 
-		rule, err := conn.GetFabricVLAN(id)
-		if err != nil {
+		resp, err := conn.Fabrics().GetFabricVLAN(&triton.GetFabricVLANInput{
+			ID: id,
+		})
+		if err != nil && triton.IsResourceNotFound(err) {
 			return fmt.Errorf("Bad: Check VLAN Exists: %s", err)
+		} else if err != nil {
+			return err
 		}
 
-		if rule == nil {
+		if resp == nil {
 			return fmt.Errorf("Bad: VLAN %q does not exist", rs.Primary.ID)
 		}
 
@@ -85,21 +94,25 @@ func testCheckTritonVLANExists(name string) resource.TestCheckFunc {
 }
 
 func testCheckTritonVLANDestroy(s *terraform.State) error {
-	conn := testAccProvider.Meta().(*cloudapi.Client)
+	conn := testAccProvider.Meta().(*triton.Client)
 
 	for _, rs := range s.RootModule().Resources {
 		if rs.Type != "triton_vlan" {
 			continue
 		}
 
-		id, err := resourceVLANIDInt16(rs.Primary.ID)
+		id, err := resourceVLANIDInt(rs.Primary.ID)
 		if err != nil {
 			return err
 		}
 
-		resp, err := conn.GetFabricVLAN(id)
-		if err != nil {
+		resp, err := conn.Fabrics().GetFabricVLAN(&triton.GetFabricVLANInput{
+			ID: id,
+		})
+		if triton.IsResourceNotFound(err) {
 			return nil
+		} else if err != nil {
+			return err
 		}
 
 		if resp != nil {
@@ -110,18 +123,18 @@ func testCheckTritonVLANDestroy(s *terraform.State) error {
 		return nil
 	}
 
-var testAccTritonVLAN_basic = `
-resource "triton_vlan" "test" {
-  vlan_id = 1024
+var testAccTritonVLAN_basic = func(vlanID int) string {
+	return fmt.Sprintf(`resource "triton_vlan" "test" {
+  vlan_id = %d
   name = "test-vlan"
   description = "test vlan"
-}
-`
+}`, vlanID)
+}
 
-var testAccTritonVLAN_update = `
-resource "triton_vlan" "test" {
-  vlan_id = 1024
+var testAccTritonVLAN_update = func(vlanID int) string {
+	return fmt.Sprintf(`resource "triton_vlan" "test" {
+  vlan_id = %d
   name = "test-vlan-2"
   description = "test vlan 2"
-}
-`
+}`, vlanID)
+}
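The test changes above replace fixed HCL strings with config builders so that each acceptance run gets its own `vlan_id` and parallel runs do not collide. A small sketch of that pattern, with the helper names chosen here only for illustration:

```go
package example

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/acctest"
)

// testAccVLANConfig renders an acceptance-test configuration with a
// caller-supplied VLAN ID instead of a hard-coded value like 1024.
func testAccVLANConfig(vlanID int) string {
	return fmt.Sprintf(`resource "triton_vlan" "test" {
  vlan_id = %d
  name = "test-vlan"
  description = "test vlan"
}`, vlanID)
}

// randomVLANID picks an ID in the same range the updated tests use,
// so repeated or concurrent runs are unlikely to reuse the same VLAN.
func randomVLANID() int {
	return acctest.RandIntRange(3, 2048)
}
```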
@@ -1,30 +0,0 @@
-package triton
-
-import (
-	"errors"
-	"time"
-)
-
-var (
-	// ErrTimeout is returned when waiting for state change
-	ErrTimeout = errors.New("timed out while waiting for resource change")
-)
-
-func waitFor(f func() (bool, error), every, timeout time.Duration) error {
-	start := time.Now()
-
-	for time.Since(start) <= timeout {
-		stop, err := f()
-		if err != nil {
-			return err
-		}
-
-		if stop {
-			return nil
-		}
-
-		time.Sleep(every)
-	}
-
-	return ErrTimeout
-}
@@ -1,4 +1,4 @@
-// Code generated by "stringer -type=countHookAction hook_count_action.go"; DO NOT EDIT
+// Code generated by "stringer -type=countHookAction hook_count_action.go"; DO NOT EDIT.
 
 package command
 
@@ -406,6 +406,134 @@ func TestInit_copyBackendDst(t *testing.T) {
 	}
 }
 
+func TestInit_backendReinitWithExtra(t *testing.T) {
+	td := tempDir(t)
+	copy.CopyDir(testFixturePath("init-backend-empty"), td)
+	defer os.RemoveAll(td)
+	defer testChdir(t, td)()
+
+	m := testMetaBackend(t, nil)
+	opts := &BackendOpts{
+		ConfigExtra: map[string]interface{}{"path": "hello"},
+		Init:        true,
+	}
+
+	b, err := m.backendConfig(opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ui := new(cli.MockUi)
+	c := &InitCommand{
+		Meta: Meta{
+			ContextOpts: testCtxConfig(testProvider()),
+			Ui:          ui,
+		},
+	}
+
+	args := []string{"-backend-config", "path=hello"}
+	if code := c.Run(args); code != 0 {
+		t.Fatalf("bad: \n%s", ui.ErrorWriter.String())
+	}
+
+	// Read our saved backend config and verify we have our settings
+	state := testStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename))
+	if v := state.Backend.Config["path"]; v != "hello" {
+		t.Fatalf("bad: %#v", v)
+	}
+
+	if state.Backend.Hash != b.Hash {
+		t.Fatal("mismatched state and config backend hashes")
+	}
+
+	if state.Backend.Rehash() != b.Rehash() {
+		t.Fatal("mismatched state and config re-hashes")
+	}
+
+	// init again and make sure nothing changes
+	if code := c.Run(args); code != 0 {
+		t.Fatalf("bad: \n%s", ui.ErrorWriter.String())
+	}
+	state = testStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename))
+	if v := state.Backend.Config["path"]; v != "hello" {
+		t.Fatalf("bad: %#v", v)
+	}
+
+	if state.Backend.Hash != b.Hash {
+		t.Fatal("mismatched state and config backend hashes")
+	}
+}
+
+// move option from config to -backend-config args
+func TestInit_backendReinitConfigToExtra(t *testing.T) {
+	td := tempDir(t)
+	copy.CopyDir(testFixturePath("init-backend"), td)
+	defer os.RemoveAll(td)
+	defer testChdir(t, td)()
+
+	ui := new(cli.MockUi)
+	c := &InitCommand{
+		Meta: Meta{
+			ContextOpts: testCtxConfig(testProvider()),
+			Ui:          ui,
+		},
+	}
+
+	if code := c.Run([]string{"-input=false"}); code != 0 {
+		t.Fatalf("bad: \n%s", ui.ErrorWriter.String())
+	}
+
+	// Read our saved backend config and verify we have our settings
+	state := testStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename))
+	if v := state.Backend.Config["path"]; v != "foo" {
+		t.Fatalf("bad: %#v", v)
+	}
+
+	backendHash := state.Backend.Hash
+
+	// init again but remove the path option from the config
+	cfg := "terraform {\n backend \"local\" {}\n}\n"
+	if err := ioutil.WriteFile("main.tf", []byte(cfg), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	args := []string{"-input=false", "-backend-config=path=foo"}
+	if code := c.Run(args); code != 0 {
+		t.Fatalf("bad: \n%s", ui.ErrorWriter.String())
+	}
+	state = testStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename))
+
+	if state.Backend.Hash == backendHash {
+		t.Fatal("state.Backend.Hash was not updated")
+	}
+}
+
+// make sure inputFalse stops execution on migrate
+func TestInit_inputFalse(t *testing.T) {
+	td := tempDir(t)
+	copy.CopyDir(testFixturePath("init-backend"), td)
+	defer os.RemoveAll(td)
+	defer testChdir(t, td)()
+
+	ui := new(cli.MockUi)
+	c := &InitCommand{
+		Meta: Meta{
+			ContextOpts: testCtxConfig(testProvider()),
+			Ui:          ui,
+		},
+	}
+
+	args := []string{"-input=false", "-backend-config=path=foo"}
+	if code := c.Run([]string{"-input=false"}); code != 0 {
+		t.Fatalf("bad: \n%s", ui.ErrorWriter)
+	}
+
+	args = []string{"-input=false", "-backend-config=path=bar"}
+	if code := c.Run(args); code == 0 {
+		t.Fatal("init should have failed", ui.OutputWriter)
+	}
+}
+
 /*
 func TestInit_remoteState(t *testing.T) {
 	tmp, cwd := testCwd(t)
@@ -3,6 +3,7 @@ package command
 import (
 	"bufio"
 	"bytes"
+	"errors"
 	"flag"
 	"fmt"
 	"io"
@@ -341,6 +342,9 @@ func (m *Meta) uiHook() *UiHook {
 
 // confirm asks a yes/no confirmation.
 func (m *Meta) confirm(opts *terraform.InputOpts) (bool, error) {
+	if !m.input {
+		return false, errors.New("input disabled")
+	}
 	for {
 		v, err := m.UIInput().Input(opts)
 		if err != nil {
@@ -415,8 +415,16 @@ func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, error) {
 	case c != nil && s.Remote.Empty() && !s.Backend.Empty():
 		// If our configuration is the same, then we're just initializing
 		// a previously configured remote backend.
-		if !s.Backend.Empty() && s.Backend.Hash == cHash {
-			return m.backend_C_r_S_unchanged(c, sMgr)
+		if !s.Backend.Empty() {
+			hash := s.Backend.Hash
+			// On init we need an updated hash containing any extra options
+			// that were added after merging.
+			if opts.Init {
+				hash = s.Backend.Rehash()
+			}
+			if hash == cHash {
+				return m.backend_C_r_S_unchanged(c, sMgr)
+			}
 		}
 
 		if !opts.Init {
@@ -451,7 +459,11 @@ func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, error) {
 	case c != nil && !s.Remote.Empty() && !s.Backend.Empty():
 		// If the hashes are the same, we have a legacy remote state with
 		// an unchanged stored backend state.
-		if s.Backend.Hash == cHash {
+		hash := s.Backend.Hash
+		if opts.Init {
+			hash = s.Backend.Rehash()
+		}
+		if hash == cHash {
 			if !opts.Init {
 				initReason := fmt.Sprintf(
 					"Legacy remote state found with configured backend %q",
@@ -1146,6 +1158,16 @@ func (m *Meta) backend_C_r_S_unchanged(
 	c *config.Backend, sMgr state.State) (backend.Backend, error) {
 	s := sMgr.State()
 
+	// It's possible for a backend to be unchanged, and the config itself to
+	// have changed by moving a parameter from the config to `-backend-config`.
+	// In this case we only need to update the Hash.
+	if c != nil && s.Backend.Hash != c.Hash {
+		s.Backend.Hash = c.Hash
+		if err := sMgr.WriteState(s); err != nil {
+			return nil, fmt.Errorf(errBackendWriteSaved, err)
+		}
+	}
+
 	// Create the config. We do this from the backend state since this
 	// has the complete configuration data whereas the config itself
 	// may require input.
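The intent behind these hunks, per the in-line comments, is that the hash recorded in the saved backend state may have been computed before extra `-backend-config` values were merged in, while `Rehash()` recomputes it over the complete stored configuration, so during init the comparison is made against the recomputed value. As a rough, self-contained illustration of why a stored hash and a recomputed hash diverge once extra key/value pairs are merged (this is not Terraform's actual hashing code, just a sketch using FNV over sorted keys):

```go
package main

import (
	"fmt"
	"hash/fnv"
	"sort"
)

// hashConfig is a stand-in for a content hash over a backend configuration:
// it hashes the sorted key/value pairs so the result is order-independent.
func hashConfig(cfg map[string]string) uint64 {
	keys := make([]string, 0, len(cfg))
	for k := range cfg {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	h := fnv.New64a()
	for _, k := range keys {
		fmt.Fprintf(h, "%s=%s;", k, cfg[k])
	}
	return h.Sum64()
}

func main() {
	// The backend block as written in configuration.
	written := map[string]string{}

	// The same block after merging a -backend-config extra at init time.
	merged := map[string]string{"path": "hello"}

	savedHash := hashConfig(written)  // analogous to the hash stored in state
	currentHash := hashConfig(merged) // analogous to recomputing over the merged config

	fmt.Println("hashes differ:", savedHash != currentHash)
}
```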
@@ -3217,6 +3217,110 @@ func TestMetaBackend_planLegacy(t *testing.T) {
 	}
 }
 
+// init a backend using -backend-config options multiple times
+func TestMetaBackend_configureWithExtra(t *testing.T) {
+	// Create a temporary working directory that is empty
+	td := tempDir(t)
+	copy.CopyDir(testFixturePath("init-backend-empty"), td)
+	defer os.RemoveAll(td)
+	defer testChdir(t, td)()
+
+	extras := map[string]interface{}{"path": "hello"}
+	m := testMetaBackend(t, nil)
+	opts := &BackendOpts{
+		ConfigExtra: extras,
+		Init:        true,
+	}
+
+	backendCfg, err := m.backendConfig(opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// init the backend
+	_, err = m.Backend(&BackendOpts{
+		ConfigExtra: extras,
+		Init:        true,
+	})
+	if err != nil {
+		t.Fatalf("bad: %s", err)
+	}
+
+	// Check the state
+	s := testStateRead(t, filepath.Join(DefaultDataDir, backendlocal.DefaultStateFilename))
+	if s.Backend.Hash != backendCfg.Hash {
+		t.Fatal("mismatched state and config backend hashes")
+	}
+	if s.Backend.Rehash() == s.Backend.Hash {
+		t.Fatal("saved hash should not match actual hash")
+	}
+	if s.Backend.Rehash() != backendCfg.Rehash() {
+		t.Fatal("mismatched state and config re-hashes")
+	}
+
+	// init the backend again with the same options
+	m = testMetaBackend(t, nil)
+	_, err = m.Backend(&BackendOpts{
+		ConfigExtra: extras,
+		Init:        true,
+	})
+	if err != nil {
+		t.Fatalf("bad: %s", err)
+	}
+
+	// Check the state
+	s = testStateRead(t, filepath.Join(DefaultDataDir, backendlocal.DefaultStateFilename))
+	if s.Backend.Hash != backendCfg.Hash {
+		t.Fatal("mismatched state and config backend hashes")
+	}
+}
+
+// move options from config to -backend-config
+func TestMetaBackend_configToExtra(t *testing.T) {
+	// Create a temporary working directory that is empty
+	td := tempDir(t)
+	copy.CopyDir(testFixturePath("init-backend"), td)
+	defer os.RemoveAll(td)
+	defer testChdir(t, td)()
+
+	// init the backend
+	m := testMetaBackend(t, nil)
+	_, err := m.Backend(&BackendOpts{
+		Init: true,
+	})
+	if err != nil {
+		t.Fatalf("bad: %s", err)
+	}
+
+	// Check the state
+	s := testStateRead(t, filepath.Join(DefaultDataDir, backendlocal.DefaultStateFilename))
+	backendHash := s.Backend.Hash
+
+	// init again but remove the path option from the config
+	cfg := "terraform {\n backend \"local\" {}\n}\n"
+	if err := ioutil.WriteFile("main.tf", []byte(cfg), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	// init the backend again with the options
+	extras := map[string]interface{}{"path": "hello"}
+	m = testMetaBackend(t, nil)
+	m.forceInitCopy = true
+	_, err = m.Backend(&BackendOpts{
+		ConfigExtra: extras,
+		Init:        true,
+	})
+	if err != nil {
+		t.Fatalf("bad: %s", err)
+	}
+
+	s = testStateRead(t, filepath.Join(DefaultDataDir, backendlocal.DefaultStateFilename))
+
+	if s.Backend.Hash == backendHash {
+		t.Fatal("state.Backend.Hash was not updated")
+	}
+}
+
 func testMetaBackend(t *testing.T, args []string) *Meta {
 	var m Meta
 	m.Ui = new(cli.MockUi)
command/test-fixtures/init-backend-empty/main.tf (new file, 4 lines)
@@ -0,0 +1,4 @@
+terraform {
+  backend "local" {
+  }
+}
@@ -1,3 +1,8 @@
+variable "var_with_escaped_interp" {
+  # This is here because in the past it failed. See GitHub #13001
+  default = "foo-$${bar.baz}"
+}
+
 resource "test_instance" "foo" {
   ami = "bar"
 
@@ -285,8 +285,15 @@ func (c *Config) Validate() error {
 	}
 
 	interp := false
-	fn := func(ast.Node) (interface{}, error) {
-		interp = true
+	fn := func(n ast.Node) (interface{}, error) {
+		// LiteralNode is a literal string (outside of a ${ ... } sequence).
+		// interpolationWalker skips most of these, but in particular it
+		// visits those that have escaped sequences (like $${foo}) as a
+		// signal that *some* processing is required on this string. For
+		// our purposes here though, this is fine and not an interpolation.
+		if _, ok := n.(*ast.LiteralNode); !ok {
+			interp = true
+		}
 		return "", nil
 	}
 
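The underlying distinction in this fix: `$${...}` is an escaped, literal sequence, while only `${...}` is a real interpolation, and the validation above now ignores plain literal nodes instead of flagging every string the walker visits. Below is a deliberately simplified, string-scanning stand-in for that check, shown only to illustrate the escaped-versus-real distinction; the actual code walks the parsed HIL AST as in the hunk above.

```go
package example

// containsInterpolation reports whether s contains a real ${...} interpolation,
// treating "$${" as an escaped, literal sequence. This is a simplified sketch;
// the real validation inspects AST node types rather than scanning bytes.
func containsInterpolation(s string) bool {
	for i := 0; i+1 < len(s); i++ {
		if s[i] == '$' && s[i+1] == '$' {
			// "$$" escapes what follows, so skip the pair.
			i++
			continue
		}
		if s[i] == '$' && s[i+1] == '{' {
			return true
		}
	}
	return false
}
```

Under this check, the fixture's `foo-$${bar.baz}` default reads as non-interpolated and stays valid, while a genuine `${bar.baz}` would still be rejected.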
@@ -72,7 +72,7 @@ type Backend struct {
 	Hash uint64
 }
 
-// Hash returns a unique content hash for this backend's configuration
+// Rehash returns a unique content hash for this backend's configuration
 // as a uint64 value.
 func (b *Backend) Rehash() uint64 {
 	// If we have no backend, the value is zero
Some files were not shown because too many files have changed in this diff.