commit cc54785b1c
Merge branch 'terraform' into hmrc
@@ -9,9 +9,7 @@ go:
 
 install: make updatedeps
 
 script:
-  - go test ./...
-  - make vet
-  #- go test -race ./...
+  - make test
 
 branches:
   only:
CHANGELOG.md (38 changes)
@@ -2,51 +2,83 @@
 
 FEATURES:
 
+  * **New provider: `tls`** - A utility provider for generating TLS keys/self-signed certificates for development and testing [GH-2778]
+  * **New provider: `dyn`** - Manage DNS records on Dyn
   * **New resource: `aws_cloudformation_stack`** [GH-2636]
   * **New resource: `aws_cloudtrail`** [GH-3094]
   * **New resource: `aws_route`** [GH-3548]
   * **New resource: `aws_codecommit_repository`** [GH-3274]
-  * **New provider: `tls`** - A utility provider for generating TLS keys/self-signed certificates for development and testing [GH-2778]
+  * **New resource: `aws_kinesis_firehose_delivery_stream`** [GH-3833]
   * **New resource: `google_sql_database` and `google_sql_database_instance`** [GH-3617]
   * **New resource: `google_compute_global_address`** [GH-3701]
+  * **New resource: `google_compute_https_health_check`** [GH-3883]
   * **New resource: `google_compute_ssl_certificate`** [GH-3723]
   * **New resource: `google_compute_url_map`** [GH-3722]
   * **New resource: `google_compute_target_http_proxy`** [GH-3727]
   * **New resource: `google_compute_target_https_proxy`** [GH-3728]
   * **New resource: `google_compute_global_forwarding_rule`** [GH-3702]
   * **New resource: `openstack_networking_port_v2`** [GH-3731]
+  * New interpolation function: `coalesce` [GH-3814]
 
 IMPROVEMENTS:
 
+  * core: Improve message to list only resources which will be destroyed when using `--target` [GH-3859]
+  * connection/ssh: accept private_key contents instead of paths [GH-3846]
   * provider/google: preemptible option for instance_template [GH-3667]
   * provider/google: Accurate Terraform Version [GH-3554]
   * provider/google: Simplified auth (DefaultClient support) [GH-3553]
   * provider/google: automatic_restart, preemptible, on_host_maintenance options [GH-3643]
+  * provider/google: read credentials as contents instead of path [GH-3901]
   * null_resource: enhance and document [GH-3244, GH-3659]
   * provider/aws: Add CORS settings to S3 bucket [GH-3387]
   * provider/aws: Add notification topic ARN for ElastiCache clusters [GH-3674]
   * provider/aws: Add `kinesis_endpoint` for configuring Kinesis [GH-3255]
   * provider/aws: Add a computed ARN for S3 Buckets [GH-3685]
+  * provider/aws: Add S3 support for Lambda Function resource [GH-3794]
+  * provider/aws: Add `name_prefix` option to launch configurations [GH-3802]
+  * provider/aws: Provide `source_security_group_id` for ELBs inside a VPC [GH-3780]
+  * provider/aws: Add snapshot window and retention limits for ElastiCache (Redis) [GH-3707]
+  * provider/aws: Add username updates for `aws_iam_user` [GH-3227]
+  * provider/aws: Add AutoMinorVersionUpgrade to RDS Instances [GH-3677]
+  * provider/aws: Add `access_logs` to ELB resource [GH-3756]
+  * provider/aws: Add a retry function to rescue an error in creating Autoscaling Lifecycle Hooks [GH-3694]
+  * provider/aws: `engine_version` is now optional for DB Instance [GH-3744]
   * provider/aws: Add configuration to enable copying RDS tags to final snapshot [GH-3529]
   * provider/aws: RDS Cluster additions (`backup_retention_period`, `preferred_backup_window`, `preferred_maintenance_window`) [GH-3757]
+  * provider/aws: Document and validate ELB ssl_cert and protocol requirements [GH-3887]
+  * provider/azure: Read publish_settings as contents instead of path [GH-3899]
   * provider/openstack: Use IPv4 as the default IP version for subnets [GH-3091]
   * provider/aws: Apply security group after restoring db_instance from snapshot [GH-3513]
   * provider/aws: Make the AutoScalingGroup name optional [GH-3710]
   * provider/openstack: Add "delete on termination" boot-from-volume option [GH-3232]
   * provider/digitalocean: Make user_data force a new droplet [GH-3740]
   * provider/vsphere: Do not add network interfaces by default [GH-3652]
+  * provider/openstack: Configure Fixed IPs through ports [GH-3772]
+  * provider/openstack: Specify a port ID on a Router Interface [GH-3903]
 
 BUG FIXES:
 
   * `terraform remote config`: update `--help` output [GH-3632]
   * core: modules on Git branches now update properly [GH-1568]
+  * core: Fix issue preventing input prompts for unset variables during plan [GH-3843]
+  * core: Orphan resources can now be targets [GH-3912]
   * provider/google: Timeout when deleting large instance_group_manager [GH-3591]
   * provider/aws: Fix issue with order of Termination Policies in AutoScaling Groups.
       This will introduce plans on upgrade to this version, in order to correct the ordering [GH-2890]
   * provider/aws: Allow cluster name, not only ARN, for `aws_ecs_service` [GH-3668]
+  * provider/aws: Only set `weight` on an `aws_route53_record` if it has been set in configuration [GH-3900]
   * provider/aws: Ignore a non-existent association on route table destroy [GH-3615]
   * provider/aws: Fix policy encoding issue with SNS Topics [GH-3700]
+  * provider/aws: Correctly export ARN in `aws_iam_saml_provider` [GH-3827]
   * provider/aws: Tolerate ElastiCache clusters being deleted outside Terraform [GH-3767]
+  * provider/aws: Downcase Route 53 record names in statefile to match API output [GH-3574]
+  * provider/aws: Fix issue that could occur if no ECS Cluster was found for a given name [GH-3829]
+  * provider/aws: Fix issue with SNS topic policy if omitted [GH-3777]
+  * provider/aws: Support scratch volumes in `aws_ecs_task_definition` [GH-3810]
+  * provider/aws: Treat `aws_ecs_service` w/ Status==INACTIVE as deleted [GH-3828]
+  * provider/aws: Expand ~ to homedir in `aws_s3_bucket_object.source` [GH-3910]
+  * provider/aws: Fix issue with updating the `aws_ecs_task_definition` where `aws_ecs_service` didn't wait for a new computed ARN [GH-3924]
+  * provider/aws: Prevent crashing when deleting `aws_ecs_service` that is already gone [GH-3914]
   * provider/azure: various bugfixes [GH-3695]
   * provider/digitalocean: fix issue preventing SSH fingerprints from working [GH-3633]
   * provider/digitalocean: Fix a potential 404 on a DigitalOcean Droplet during refresh of state [GH-3768]
@@ -57,6 +89,10 @@ BUG FIXES:
   * provider/openstack: Fix boot from volume [GH-3206]
   * provider/openstack: Fix crashing when image is no longer accessible [GH-2189]
   * provider/openstack: Better handling of network resource state changes [GH-3712]
+  * provider/openstack: Fix crashing when no security group is specified [GH-3801]
+  * provider/packet: Fix issue that could cause errors when provisioning many devices at once [GH-3847]
+  * provider/openstack: Fix issue preventing security group rules from being removed [GH-3796]
+  * provider/template: template_file: source contents instead of path [GH-3909]
 
 ## 0.6.6 (October 23, 2015)
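One of the bug fixes above, expanding `~` in `aws_s3_bucket_object.source` [GH-3910], is easy to illustrate in isolation. A minimal runnable sketch, assuming the fix relies on the mitchellh/go-homedir package that Terraform already vendors (the exact helper used is not shown in this commit):

package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/go-homedir"
)

func main() {
	// Expand a leading "~" to the current user's home directory, as the
	// aws_s3_bucket_object.source fix does before reading the file.
	path, err := homedir.Expand("~/lambda.zip")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(path)
}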
builtin/bins/provider-dyn/main.go (new file, 12 lines)
@@ -0,0 +1,12 @@
+package main
+
+import (
+	"github.com/hashicorp/terraform/builtin/providers/dyn"
+	"github.com/hashicorp/terraform/plugin"
+)
+
+func main() {
+	plugin.Serve(&plugin.ServeOpts{
+		ProviderFunc: dyn.Provider,
+	})
+}
builtin/bins/provider-dyn/main_test.go (new file, 1 line)
@@ -0,0 +1 @@
+package main
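The two new files above only wire the provider into Terraform's plugin mechanism; the provider implementation itself lives under builtin/providers/dyn and is not part of this excerpt. For orientation, a hypothetical sketch of what `dyn.Provider` returns, following the same helper/schema pattern the AWS provider uses later in this diff (the `customer_name` field and environment variable are illustrative assumptions, not the real dyn schema):

package dyn

import (
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/terraform"
)

// Provider returns a terraform.ResourceProvider (sketch only; the real
// schema for the dyn provider is outside this excerpt).
func Provider() terraform.ResourceProvider {
	return &schema.Provider{
		Schema: map[string]*schema.Schema{
			"customer_name": &schema.Schema{
				Type:        schema.TypeString,
				Required:    true,
				DefaultFunc: schema.EnvDefaultFunc("DYN_CUSTOMER_NAME", nil),
			},
		},
		ResourcesMap: map[string]*schema.Resource{
			// e.g. "dyn_record": resourceDynRecord(),
		},
		ConfigureFunc: providerConfigure,
	}
}

func providerConfigure(d *schema.ResourceData) (interface{}, error) {
	// A real implementation would build an authenticated Dyn API client here.
	return nil, nil
}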
@@ -27,6 +27,7 @@ import (
 	"github.com/aws/aws-sdk-go/service/elasticache"
 	elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice"
 	"github.com/aws/aws-sdk-go/service/elb"
+	"github.com/aws/aws-sdk-go/service/firehose"
 	"github.com/aws/aws-sdk-go/service/glacier"
 	"github.com/aws/aws-sdk-go/service/iam"
 	"github.com/aws/aws-sdk-go/service/kinesis"
@@ -74,6 +75,7 @@ type AWSClient struct {
 	rdsconn         *rds.RDS
 	iamconn         *iam.IAM
 	kinesisconn     *kinesis.Kinesis
+	firehoseconn    *firehose.Firehose
 	elasticacheconn *elasticache.ElastiCache
 	lambdaconn      *lambda.Lambda
 	opsworksconn    *opsworks.OpsWorks
@@ -168,6 +170,9 @@ func (c *Config) Client() (interface{}, error) {
 			errs = append(errs, authErr)
 		}
 
+		log.Println("[INFO] Initializing Kinesis Firehose Connection")
+		client.firehoseconn = firehose.New(sess)
+
 		log.Println("[INFO] Initializing AutoScaling connection")
 		client.autoscalingconn = autoscaling.New(sess)
@@ -1,8 +1,9 @@
 package aws
 
 import (
-	"github.com/awslabs/aws-sdk-go/aws"
 	"github.com/hashicorp/terraform/helper/schema"
+
+	"github.com/aws/aws-sdk-go/aws"
 )
 
 func makeAwsStringList(in []interface{}) []*string {
@@ -5,11 +5,12 @@ import (
 	"sync"
 	"time"
 
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/awslabs/aws-sdk-go/aws/credentials/ec2rolecreds"
 	"github.com/hashicorp/terraform/helper/hashcode"
 	"github.com/hashicorp/terraform/helper/schema"
 	"github.com/hashicorp/terraform/terraform"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
 )
 
 // Provider returns a terraform.ResourceProvider.
@@ -163,107 +164,108 @@ func Provider() terraform.ResourceProvider {
 		},
 
 		ResourcesMap: map[string]*schema.Resource{
 			"aws_ami":                              resourceAwsAmi(),
 			"aws_ami_copy":                         resourceAwsAmiCopy(),
 			"aws_ami_from_instance":                resourceAwsAmiFromInstance(),
 			"aws_app_cookie_stickiness_policy":     resourceAwsAppCookieStickinessPolicy(),
 			"aws_autoscaling_group":                resourceAwsAutoscalingGroup(),
 			"aws_autoscaling_notification":         resourceAwsAutoscalingNotification(),
 			"aws_autoscaling_policy":               resourceAwsAutoscalingPolicy(),
 			"aws_cloudformation_stack":             resourceAwsCloudFormationStack(),
 			"aws_cloudtrail":                       resourceAwsCloudTrail(),
 			"aws_cloudwatch_log_group":             resourceAwsCloudWatchLogGroup(),
 			"aws_autoscaling_lifecycle_hook":       resourceAwsAutoscalingLifecycleHook(),
 			"aws_cloudwatch_metric_alarm":          resourceAwsCloudWatchMetricAlarm(),
 			"aws_codedeploy_app":                   resourceAwsCodeDeployApp(),
 			"aws_codedeploy_deployment_group":      resourceAwsCodeDeployDeploymentGroup(),
 			"aws_codecommit_repository":            resourceAwsCodeCommitRepository(),
 			"aws_customer_gateway":                 resourceAwsCustomerGateway(),
 			"aws_db_instance":                      resourceAwsDbInstance(),
 			"aws_db_parameter_group":               resourceAwsDbParameterGroup(),
 			"aws_db_security_group":                resourceAwsDbSecurityGroup(),
 			"aws_db_subnet_group":                  resourceAwsDbSubnetGroup(),
 			"aws_directory_service_directory":      resourceAwsDirectoryServiceDirectory(),
 			"aws_dynamodb_table":                   resourceAwsDynamoDbTable(),
 			"aws_ebs_volume":                       resourceAwsEbsVolume(),
 			"aws_ecs_cluster":                      resourceAwsEcsCluster(),
 			"aws_ecs_service":                      resourceAwsEcsService(),
 			"aws_ecs_task_definition":              resourceAwsEcsTaskDefinition(),
 			"aws_efs_file_system":                  resourceAwsEfsFileSystem(),
 			"aws_efs_mount_target":                 resourceAwsEfsMountTarget(),
 			"aws_eip":                              resourceAwsEip(),
 			"aws_elasticache_cluster":              resourceAwsElasticacheCluster(),
 			"aws_elasticache_parameter_group":      resourceAwsElasticacheParameterGroup(),
 			"aws_elasticache_security_group":       resourceAwsElasticacheSecurityGroup(),
 			"aws_elasticache_subnet_group":         resourceAwsElasticacheSubnetGroup(),
 			"aws_elasticsearch_domain":             resourceAwsElasticSearchDomain(),
 			"aws_elb":                              resourceAwsElb(),
 			"aws_flow_log":                         resourceAwsFlowLog(),
 			"aws_glacier_vault":                    resourceAwsGlacierVault(),
 			"aws_iam_access_key":                   resourceAwsIamAccessKey(),
 			"aws_iam_group_policy":                 resourceAwsIamGroupPolicy(),
 			"aws_iam_group":                        resourceAwsIamGroup(),
 			"aws_iam_group_membership":             resourceAwsIamGroupMembership(),
 			"aws_iam_instance_profile":             resourceAwsIamInstanceProfile(),
 			"aws_iam_policy":                       resourceAwsIamPolicy(),
 			"aws_iam_policy_attachment":            resourceAwsIamPolicyAttachment(),
 			"aws_iam_role_policy":                  resourceAwsIamRolePolicy(),
 			"aws_iam_role":                         resourceAwsIamRole(),
 			"aws_iam_saml_provider":                resourceAwsIamSamlProvider(),
 			"aws_iam_server_certificate":           resourceAwsIAMServerCertificate(),
 			"aws_iam_user_policy":                  resourceAwsIamUserPolicy(),
 			"aws_iam_user":                         resourceAwsIamUser(),
 			"aws_instance":                         resourceAwsInstance(),
 			"aws_internet_gateway":                 resourceAwsInternetGateway(),
 			"aws_key_pair":                         resourceAwsKeyPair(),
+			"aws_kinesis_firehose_delivery_stream": resourceAwsKinesisFirehoseDeliveryStream(),
 			"aws_kinesis_stream":                   resourceAwsKinesisStream(),
 			"aws_lambda_function":                  resourceAwsLambdaFunction(),
 			"aws_launch_configuration":             resourceAwsLaunchConfiguration(),
 			"aws_lb_cookie_stickiness_policy":      resourceAwsLBCookieStickinessPolicy(),
 			"aws_main_route_table_association":     resourceAwsMainRouteTableAssociation(),
 			"aws_network_acl":                      resourceAwsNetworkAcl(),
 			"aws_network_interface":                resourceAwsNetworkInterface(),
 			"aws_opsworks_stack":                   resourceAwsOpsworksStack(),
 			"aws_opsworks_java_app_layer":          resourceAwsOpsworksJavaAppLayer(),
 			"aws_opsworks_haproxy_layer":           resourceAwsOpsworksHaproxyLayer(),
 			"aws_opsworks_static_web_layer":        resourceAwsOpsworksStaticWebLayer(),
 			"aws_opsworks_php_app_layer":           resourceAwsOpsworksPhpAppLayer(),
 			"aws_opsworks_rails_app_layer":         resourceAwsOpsworksRailsAppLayer(),
 			"aws_opsworks_nodejs_app_layer":        resourceAwsOpsworksNodejsAppLayer(),
 			"aws_opsworks_memcached_layer":         resourceAwsOpsworksMemcachedLayer(),
 			"aws_opsworks_mysql_layer":             resourceAwsOpsworksMysqlLayer(),
 			"aws_opsworks_ganglia_layer":           resourceAwsOpsworksGangliaLayer(),
 			"aws_opsworks_custom_layer":            resourceAwsOpsworksCustomLayer(),
 			"aws_placement_group":                  resourceAwsPlacementGroup(),
 			"aws_proxy_protocol_policy":            resourceAwsProxyProtocolPolicy(),
 			"aws_rds_cluster":                      resourceAwsRDSCluster(),
 			"aws_rds_cluster_instance":             resourceAwsRDSClusterInstance(),
 			"aws_route53_delegation_set":           resourceAwsRoute53DelegationSet(),
 			"aws_route53_record":                   resourceAwsRoute53Record(),
 			"aws_route53_zone_association":         resourceAwsRoute53ZoneAssociation(),
 			"aws_route53_zone":                     resourceAwsRoute53Zone(),
 			"aws_route53_health_check":             resourceAwsRoute53HealthCheck(),
 			"aws_route":                            resourceAwsRoute(),
 			"aws_route_table":                      resourceAwsRouteTable(),
 			"aws_route_table_association":          resourceAwsRouteTableAssociation(),
 			"aws_s3_bucket":                        resourceAwsS3Bucket(),
 			"aws_s3_bucket_object":                 resourceAwsS3BucketObject(),
 			"aws_security_group":                   resourceAwsSecurityGroup(),
 			"aws_security_group_rule":              resourceAwsSecurityGroupRule(),
 			"aws_spot_instance_request":            resourceAwsSpotInstanceRequest(),
 			"aws_sqs_queue":                        resourceAwsSqsQueue(),
 			"aws_sns_topic":                        resourceAwsSnsTopic(),
 			"aws_sns_topic_subscription":           resourceAwsSnsTopicSubscription(),
 			"aws_subnet":                           resourceAwsSubnet(),
 			"aws_volume_attachment":                resourceAwsVolumeAttachment(),
 			"aws_vpc_dhcp_options_association":     resourceAwsVpcDhcpOptionsAssociation(),
 			"aws_vpc_dhcp_options":                 resourceAwsVpcDhcpOptions(),
 			"aws_vpc_peering_connection":           resourceAwsVpcPeeringConnection(),
 			"aws_vpc":                              resourceAwsVpc(),
 			"aws_vpc_endpoint":                     resourceAwsVpcEndpoint(),
 			"aws_vpn_connection":                   resourceAwsVpnConnection(),
 			"aws_vpn_connection_route":             resourceAwsVpnConnectionRoute(),
 			"aws_vpn_gateway":                      resourceAwsVpnGateway(),
 		},
 
 		ConfigureFunc: providerConfigure,
@@ -3,9 +3,13 @@ package aws
 import (
 	"fmt"
 	"log"
+	"strings"
+	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/service/autoscaling"
+	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/helper/schema"
 )
 
@@ -55,14 +59,26 @@ func resourceAwsAutoscalingLifecycleHook() *schema.Resource {
 }
 
 func resourceAwsAutoscalingLifecycleHookPut(d *schema.ResourceData, meta interface{}) error {
-	autoscalingconn := meta.(*AWSClient).autoscalingconn
+	conn := meta.(*AWSClient).autoscalingconn
 
 	params := getAwsAutoscalingPutLifecycleHookInput(d)
 
-	log.Printf("[DEBUG] AutoScaling PutLifecycleHook: %#v", params)
-	_, err := autoscalingconn.PutLifecycleHook(&params)
+	log.Printf("[DEBUG] AutoScaling PutLifecycleHook: %s", params)
+	err := resource.Retry(5*time.Minute, func() error {
+		_, err := conn.PutLifecycleHook(&params)
+
+		if err != nil {
+			if awsErr, ok := err.(awserr.Error); ok {
+				if strings.Contains(awsErr.Message(), "Unable to publish test message to notification target") {
+					return fmt.Errorf("[DEBUG] Retrying AWS AutoScaling Lifecycle Hook: %s", params)
+				}
+			}
+			return resource.RetryError{Err: fmt.Errorf("Error putting lifecycle hook: %s", err)}
+		}
+		return nil
+	})
+
 	if err != nil {
-		return fmt.Errorf("Error putting lifecycle hook: %s", err)
+		return err
 	}
 
 	d.SetId(d.Get("name").(string))
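The retry wrapper introduced above is worth spelling out: with the helper/resource API as of this commit, a plain error returned from the closure is treated as transient and retried until the timeout, while wrapping it in `resource.RetryError` aborts immediately. A self-contained sketch of that control flow (the failing call is simulated, and this assumes the retry semantics of this era of the library):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

func main() {
	attempts := 0
	err := resource.Retry(1*time.Minute, func() error {
		attempts++
		if attempts < 3 {
			// A plain error is treated as transient and retried.
			return fmt.Errorf("simulated transient failure")
		}
		// Returning resource.RetryError{Err: err} here instead would stop
		// retrying immediately, as the hook code does for real failures.
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("succeeded after %d attempts\n", attempts)
}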
@@ -23,7 +23,7 @@ func TestAccAWSCodeDeployApp_basic(t *testing.T) {
 			),
 		},
 		resource.TestStep{
-			Config: testAccAWSCodeDeployAppModifier,
+			Config: testAccAWSCodeDeployAppModified,
 			Check: resource.ComposeTestCheckFunc(
 				testAccCheckAWSCodeDeployAppExists("aws_codedeploy_app.foo"),
 			),
@@ -72,7 +72,7 @@ resource "aws_codedeploy_app" "foo" {
 	name = "foo"
 }`
 
-var testAccAWSCodeDeployAppModifier = `
+var testAccAWSCodeDeployAppModified = `
 resource "aws_codedeploy_app" "foo" {
 	name = "bar"
 }`
@@ -23,7 +23,7 @@ func TestAccAWSCodeDeployDeploymentGroup_basic(t *testing.T) {
 			),
 		},
 		resource.TestStep{
-			Config: testAccAWSCodeDeployDeploymentGroupModifier,
+			Config: testAccAWSCodeDeployDeploymentGroupModified,
 			Check: resource.ComposeTestCheckFunc(
 				testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo"),
 			),
@@ -133,7 +133,7 @@ resource "aws_codedeploy_deployment_group" "foo" {
 	}
 }`
 
-var testAccAWSCodeDeployDeploymentGroupModifier = `
+var testAccAWSCodeDeployDeploymentGroupModified = `
 resource "aws_codedeploy_app" "foo_app" {
 	name = "foo_app"
 }
@@ -54,7 +54,8 @@ func resourceAwsDbInstance() *schema.Resource {
 
 			"engine_version": &schema.Schema{
 				Type:     schema.TypeString,
-				Required: true,
+				Optional: true,
+				Computed: true,
 			},
 
 			"storage_encrypted": &schema.Schema{
@@ -245,8 +246,8 @@ func resourceAwsDbInstance() *schema.Resource {
 
 			"auto_minor_version_upgrade": &schema.Schema{
 				Type:     schema.TypeBool,
-				Computed: false,
 				Optional: true,
+				Default:  true,
 			},
 
 			"allow_major_version_upgrade": &schema.Schema{
@@ -293,14 +294,11 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error {
 	} else if _, ok := d.GetOk("snapshot_identifier"); ok {
 		opts := rds.RestoreDBInstanceFromDBSnapshotInput{
-			DBInstanceClass:      aws.String(d.Get("instance_class").(string)),
-			DBInstanceIdentifier: aws.String(d.Get("identifier").(string)),
-			DBSnapshotIdentifier: aws.String(d.Get("snapshot_identifier").(string)),
-			Tags:                 tags,
-		}
-
-		if attr, ok := d.GetOk("auto_minor_version_upgrade"); ok {
-			opts.AutoMinorVersionUpgrade = aws.Bool(attr.(bool))
+			DBInstanceClass:         aws.String(d.Get("instance_class").(string)),
+			DBInstanceIdentifier:    aws.String(d.Get("identifier").(string)),
+			DBSnapshotIdentifier:    aws.String(d.Get("snapshot_identifier").(string)),
+			AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)),
+			Tags: tags,
 		}
 
 		if attr, ok := d.GetOk("availability_zone"); ok {
@@ -386,17 +384,17 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error {
 	} else {
 		opts := rds.CreateDBInstanceInput{
 			AllocatedStorage:        aws.Int64(int64(d.Get("allocated_storage").(int))),
-			CopyTagsToSnapshot:      aws.Bool(d.Get("copy_tags_to_snapshot").(bool)),
 			DBName:                  aws.String(d.Get("name").(string)),
 			DBInstanceClass:         aws.String(d.Get("instance_class").(string)),
 			DBInstanceIdentifier:    aws.String(d.Get("identifier").(string)),
 			MasterUsername:          aws.String(d.Get("username").(string)),
 			MasterUserPassword:      aws.String(d.Get("password").(string)),
 			Engine:                  aws.String(d.Get("engine").(string)),
 			EngineVersion:           aws.String(d.Get("engine_version").(string)),
 			StorageEncrypted:        aws.Bool(d.Get("storage_encrypted").(bool)),
+			AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)),
 			Tags: tags,
 		}
 
 		attr := d.Get("backup_retention_period")
@@ -509,6 +507,7 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error {
 	d.Set("engine_version", v.EngineVersion)
 	d.Set("allocated_storage", v.AllocatedStorage)
 	d.Set("copy_tags_to_snapshot", v.CopyTagsToSnapshot)
+	d.Set("auto_minor_version_upgrade", v.AutoMinorVersionUpgrade)
 	d.Set("storage_type", v.StorageType)
 	d.Set("instance_class", v.DBInstanceClass)
 	d.Set("availability_zone", v.AvailabilityZone)
@@ -711,6 +710,11 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
 		req.StorageType = aws.String(d.Get("storage_type").(string))
 		requestUpdate = true
 	}
+	if d.HasChange("auto_minor_version_upgrade") {
+		d.SetPartial("auto_minor_version_upgrade")
+		req.AutoMinorVersionUpgrade = aws.Bool(d.Get("auto_minor_version_upgrade").(bool))
+		requestUpdate = true
+	}
 
 	if d.HasChange("vpc_security_group_ids") {
 		if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
@@ -31,8 +31,6 @@ func TestAccAWSDBInstance_basic(t *testing.T) {
 					resource.TestCheckResourceAttr(
 						"aws_db_instance.bar", "allocated_storage", "10"),
 					resource.TestCheckResourceAttr(
 						"aws_db_instance.bar", "engine", "mysql"),
-					resource.TestCheckResourceAttr(
-						"aws_db_instance.bar", "engine_version", "5.6.21"),
 					resource.TestCheckResourceAttr(
 						"aws_db_instance.bar", "license_model", "general-public-license"),
 					resource.TestCheckResourceAttr(
@@ -111,7 +109,7 @@ func testAccCheckAWSDBInstanceAttributes(v *rds.DBInstance) resource.TestCheckFunc {
 			return fmt.Errorf("bad engine: %#v", *v.Engine)
 		}
 
-		if *v.EngineVersion != "5.6.21" {
+		if *v.EngineVersion == "" {
 			return fmt.Errorf("bad engine_version: %#v", *v.EngineVersion)
 		}
 
@@ -59,9 +59,16 @@ func resourceAwsEcsClusterRead(d *schema.ResourceData, meta interface{}) error {
 	}
 	log.Printf("[DEBUG] Received ECS clusters: %s", out.Clusters)
 
-	d.SetId(*out.Clusters[0].ClusterArn)
-	d.Set("name", *out.Clusters[0].ClusterName)
+	for _, c := range out.Clusters {
+		if *c.ClusterName == clusterName {
+			d.SetId(*c.ClusterArn)
+			d.Set("name", c.ClusterName)
+			return nil
+		}
+	}
+
+	log.Printf("[ERR] No matching ECS Cluster found for (%s)", d.Id())
+	d.SetId("")
 	return nil
 }
@@ -156,10 +156,20 @@ func resourceAwsEcsServiceRead(d *schema.ResourceData, meta interface{}) error {
 	}
 
 	if len(out.Services) < 1 {
+		log.Printf("[DEBUG] Removing ECS service %s (%s) because it's gone", d.Get("name").(string), d.Id())
+		d.SetId("")
 		return nil
 	}
 
 	service := out.Services[0]
 
+	// Status==INACTIVE means deleted service
+	if *service.Status == "INACTIVE" {
+		log.Printf("[DEBUG] Removing ECS service %q because it's INACTIVE", *service.ServiceArn)
+		d.SetId("")
+		return nil
+	}
+
 	log.Printf("[DEBUG] Received ECS service %s", service)
 
 	d.SetId(*service.ServiceArn)
@@ -239,6 +249,12 @@ func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error {
 	if err != nil {
 		return err
 	}
 
+	if len(resp.Services) == 0 {
+		log.Printf("[DEBUG] ECS Service %q is already gone", d.Id())
+		return nil
+	}
+
 	log.Printf("[DEBUG] ECS service %s is currently %s", d.Id(), *resp.Services[0].Status)
 
 	if *resp.Services[0].Status == "INACTIVE" {
@@ -319,7 +319,7 @@ resource "aws_iam_role" "ecs_service" {
 	name = "EcsService"
 	assume_role_policy = <<EOF
 {
-  "Version": "2008-10-17",
+  "Version": "2012-10-17",
   "Statement": [
     {
       "Action": "sts:AssumeRole",
@@ -17,7 +17,6 @@ func resourceAwsEcsTaskDefinition() *schema.Resource {
 	return &schema.Resource{
 		Create: resourceAwsEcsTaskDefinitionCreate,
 		Read:   resourceAwsEcsTaskDefinitionRead,
-		Update: resourceAwsEcsTaskDefinitionUpdate,
 		Delete: resourceAwsEcsTaskDefinitionDelete,
 
 		Schema: map[string]*schema.Schema{
@@ -40,6 +39,7 @@ func resourceAwsEcsTaskDefinition() *schema.Resource {
 			"container_definitions": &schema.Schema{
 				Type:     schema.TypeString,
 				Required: true,
+				ForceNew: true,
 				StateFunc: func(v interface{}) string {
 					hash := sha1.Sum([]byte(v.(string)))
 					return hex.EncodeToString(hash[:])
@@ -49,6 +49,7 @@ func resourceAwsEcsTaskDefinition() *schema.Resource {
 			"volume": &schema.Schema{
 				Type:     schema.TypeSet,
 				Optional: true,
+				ForceNew: true,
 				Elem: &schema.Resource{
 					Schema: map[string]*schema.Schema{
 						"name": &schema.Schema{
@@ -58,7 +59,7 @@ func resourceAwsEcsTaskDefinition() *schema.Resource {
 
 						"host_path": &schema.Schema{
 							Type:     schema.TypeString,
-							Required: true,
+							Optional: true,
 						},
 					},
 				},
 			},
@@ -131,29 +132,6 @@ func resourceAwsEcsTaskDefinitionRead(d *schema.ResourceData, meta interface{}) error {
 	return nil
 }
 
-func resourceAwsEcsTaskDefinitionUpdate(d *schema.ResourceData, meta interface{}) error {
-	oldArn := d.Get("arn").(string)
-
-	log.Printf("[DEBUG] Creating new revision of task definition %q", d.Id())
-	err := resourceAwsEcsTaskDefinitionCreate(d, meta)
-	if err != nil {
-		return err
-	}
-	log.Printf("[DEBUG] New revision of %q created: %q", d.Id(), d.Get("arn").(string))
-
-	log.Printf("[DEBUG] Deregistering old revision of task definition %q: %q", d.Id(), oldArn)
-	conn := meta.(*AWSClient).ecsconn
-	_, err = conn.DeregisterTaskDefinition(&ecs.DeregisterTaskDefinitionInput{
-		TaskDefinition: aws.String(oldArn),
-	})
-	if err != nil {
-		return err
-	}
-	log.Printf("[DEBUG] Old revision of task definition deregistered: %q", oldArn)
-
-	return resourceAwsEcsTaskDefinitionRead(d, meta)
-}
-
 func resourceAwsEcsTaskDefinitionDelete(d *schema.ResourceData, meta interface{}) error {
 	conn := meta.(*AWSClient).ecsconn
 
@@ -23,7 +23,7 @@ func TestAccAWSEcsTaskDefinition_basic(t *testing.T) {
 			),
 		},
 		resource.TestStep{
-			Config: testAccAWSEcsTaskDefinitionModifier,
+			Config: testAccAWSEcsTaskDefinitionModified,
 			Check: resource.ComposeTestCheckFunc(
 				testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.jenkins"),
 			),
@@ -32,6 +32,48 @@ func TestAccAWSEcsTaskDefinition_basic(t *testing.T) {
 	})
 }
 
+// Regression for https://github.com/hashicorp/terraform/issues/2370
+func TestAccAWSEcsTaskDefinition_withScratchVolume(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSEcsTaskDefinitionDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccAWSEcsTaskDefinitionWithScratchVolume,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep"),
+				),
+			},
+		},
+	})
+}
+
+// Regression for https://github.com/hashicorp/terraform/issues/2694
+func TestAccAWSEcsTaskDefinition_withEcsService(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSEcsTaskDefinitionDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccAWSEcsTaskDefinitionWithEcsService,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep"),
+					testAccCheckAWSEcsServiceExists("aws_ecs_service.sleep-svc"),
+				),
+			},
+			resource.TestStep{
+				Config: testAccAWSEcsTaskDefinitionWithEcsServiceModified,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep"),
+					testAccCheckAWSEcsServiceExists("aws_ecs_service.sleep-svc"),
+				),
+			},
+		},
+	})
+}
+
 func testAccCheckAWSEcsTaskDefinitionDestroy(s *terraform.State) error {
 	conn := testAccProvider.Meta().(*AWSClient).ecsconn
 
@@ -116,7 +158,94 @@ TASK_DEFINITION
 }
 `
 
-var testAccAWSEcsTaskDefinitionModifier = `
+var testAccAWSEcsTaskDefinitionWithScratchVolume = `
+resource "aws_ecs_task_definition" "sleep" {
+  family = "terraform-acc-sc-volume-test"
+  container_definitions = <<TASK_DEFINITION
+[
+  {
+    "name": "sleep",
+    "image": "busybox",
+    "cpu": 10,
+    "command": ["sleep","360"],
+    "memory": 10,
+    "essential": true
+  }
+]
+TASK_DEFINITION
+
+  volume {
+    name = "database_scratch"
+  }
+}
+`
+
+var testAccAWSEcsTaskDefinitionWithEcsService = `
+resource "aws_ecs_cluster" "default" {
+  name = "terraform-acc-test"
+}
+
+resource "aws_ecs_service" "sleep-svc" {
+  name = "tf-acc-ecs-svc"
+  cluster = "${aws_ecs_cluster.default.id}"
+  task_definition = "${aws_ecs_task_definition.sleep.arn}"
+  desired_count = 1
+}
+
+resource "aws_ecs_task_definition" "sleep" {
+  family = "terraform-acc-sc-volume-test"
+  container_definitions = <<TASK_DEFINITION
+[
+  {
+    "name": "sleep",
+    "image": "busybox",
+    "cpu": 10,
+    "command": ["sleep","360"],
+    "memory": 10,
+    "essential": true
+  }
+]
+TASK_DEFINITION
+
+  volume {
+    name = "database_scratch"
+  }
+}
+`
+var testAccAWSEcsTaskDefinitionWithEcsServiceModified = `
+resource "aws_ecs_cluster" "default" {
+  name = "terraform-acc-test"
+}
+
+resource "aws_ecs_service" "sleep-svc" {
+  name = "tf-acc-ecs-svc"
+  cluster = "${aws_ecs_cluster.default.id}"
+  task_definition = "${aws_ecs_task_definition.sleep.arn}"
+  desired_count = 1
+}
+
+resource "aws_ecs_task_definition" "sleep" {
+  family = "terraform-acc-sc-volume-test"
+  container_definitions = <<TASK_DEFINITION
+[
+  {
+    "name": "sleep",
+    "image": "busybox",
+    "cpu": 20,
+    "command": ["sleep","360"],
+    "memory": 50,
+    "essential": true
+  }
+]
+TASK_DEFINITION
+
+  volume {
+    name = "database_scratch"
+  }
+}
+`
+
+var testAccAWSEcsTaskDefinitionModified = `
 resource "aws_ecs_task_definition" "jenkins" {
 	family = "terraform-acc-test"
 	container_definitions = <<TASK_DEFINITION
@@ -138,6 +138,24 @@ func resourceAwsElasticacheCluster() *schema.Resource {
 				},
 			},
 
+			"snapshot_window": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+
+			"snapshot_retention_limit": &schema.Schema{
+				Type:     schema.TypeInt,
+				Optional: true,
+				ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+					value := v.(int)
+					if value > 35 {
+						es = append(es, fmt.Errorf(
+							"snapshot retention limit cannot be more than 35 days"))
+					}
+					return
+				},
+			},
+
 			"tags": tagsSchema(),
 
 			// apply_immediately is used to determine when the update modifications
@@ -187,6 +205,14 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{}) error {
 		req.CacheParameterGroupName = aws.String(v.(string))
 	}
 
+	if v, ok := d.GetOk("snapshot_retention_limit"); ok {
+		req.SnapshotRetentionLimit = aws.Int64(int64(v.(int)))
+	}
+
+	if v, ok := d.GetOk("snapshot_window"); ok {
+		req.SnapshotWindow = aws.String(v.(string))
+	}
+
 	if v, ok := d.GetOk("maintenance_window"); ok {
 		req.PreferredMaintenanceWindow = aws.String(v.(string))
 	}
@@ -267,6 +293,8 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) error {
 	d.Set("security_group_ids", c.SecurityGroups)
 	d.Set("parameter_group_name", c.CacheParameterGroup)
 	d.Set("maintenance_window", c.PreferredMaintenanceWindow)
+	d.Set("snapshot_window", c.SnapshotWindow)
+	d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit)
 	if c.NotificationConfiguration != nil {
 		if *c.NotificationConfiguration.TopicStatus == "active" {
 			d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn)
@@ -350,6 +378,16 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{}) error {
 		requestUpdate = true
 	}
 
+	if d.HasChange("snapshot_window") {
+		req.SnapshotWindow = aws.String(d.Get("snapshot_window").(string))
+		requestUpdate = true
+	}
+
+	if d.HasChange("snapshot_retention_limit") {
+		req.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int)))
+		requestUpdate = true
+	}
+
 	if d.HasChange("num_cache_nodes") {
 		req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int)))
 		requestUpdate = true
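The ValidateFunc added above runs during validation, so an out-of-range retention limit is rejected at plan time, before any ElastiCache API call is made. The same function, lifted out of the schema and exercised standalone (a sketch for illustration only):

package main

import "fmt"

// validateSnapshotRetentionLimit mirrors the inline ValidateFunc above:
// it collects errors rather than failing hard, letting Terraform report
// every validation problem in one pass.
func validateSnapshotRetentionLimit(v interface{}, k string) (ws []string, es []error) {
	value := v.(int)
	if value > 35 {
		es = append(es, fmt.Errorf(
			"snapshot retention limit cannot be more than 35 days"))
	}
	return
}

func main() {
	_, es := validateSnapshotRetentionLimit(40, "snapshot_retention_limit")
	fmt.Println(es) // prints the single validation error
}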
@@ -33,6 +33,45 @@ func TestAccAWSElasticacheCluster_basic(t *testing.T) {
 	})
 }
 
+func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) {
+	var ec elasticache.CacheCluster
+
+	ri := genRandInt()
+	preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshots, ri, ri, ri)
+	postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfig_snapshotsUpdated, ri, ri, ri)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: preConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
+					testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
+					resource.TestCheckResourceAttr(
+						"aws_elasticache_cluster.bar", "snapshot_window", "05:00-09:00"),
+					resource.TestCheckResourceAttr(
+						"aws_elasticache_cluster.bar", "snapshot_retention_limit", "3"),
+				),
+			},
+
+			resource.TestStep{
+				Config: postConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
+					testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
+					resource.TestCheckResourceAttr(
+						"aws_elasticache_cluster.bar", "snapshot_window", "07:00-09:00"),
+					resource.TestCheckResourceAttr(
+						"aws_elasticache_cluster.bar", "snapshot_retention_limit", "7"),
+				),
+			},
+		},
+	})
+}
+
 func TestAccAWSElasticacheCluster_vpc(t *testing.T) {
 	var csg elasticache.CacheSubnetGroup
 	var ec elasticache.CacheCluster
@@ -152,6 +191,75 @@ resource "aws_elasticache_cluster" "bar" {
 }
 `, genRandInt(), genRandInt(), genRandInt())
 
+var testAccAWSElasticacheClusterConfig_snapshots = `
+provider "aws" {
+	region = "us-east-1"
+}
+resource "aws_security_group" "bar" {
+    name = "tf-test-security-group-%03d"
+    description = "tf-test-security-group-descr"
+    ingress {
+        from_port = -1
+        to_port = -1
+        protocol = "icmp"
+        cidr_blocks = ["0.0.0.0/0"]
+    }
+}
+
+resource "aws_elasticache_security_group" "bar" {
+    name = "tf-test-security-group-%03d"
+    description = "tf-test-security-group-descr"
+    security_group_names = ["${aws_security_group.bar.name}"]
+}
+
+resource "aws_elasticache_cluster" "bar" {
+    cluster_id = "tf-test-%03d"
+    engine = "redis"
+    node_type = "cache.m1.small"
+    num_cache_nodes = 1
+    port = 6379
+    parameter_group_name = "default.redis2.8"
+    security_group_names = ["${aws_elasticache_security_group.bar.name}"]
+    snapshot_window = "05:00-09:00"
+    snapshot_retention_limit = 3
+}
+`
+
+var testAccAWSElasticacheClusterConfig_snapshotsUpdated = `
+provider "aws" {
+	region = "us-east-1"
+}
+resource "aws_security_group" "bar" {
+    name = "tf-test-security-group-%03d"
+    description = "tf-test-security-group-descr"
+    ingress {
+        from_port = -1
+        to_port = -1
+        protocol = "icmp"
+        cidr_blocks = ["0.0.0.0/0"]
+    }
+}
+
+resource "aws_elasticache_security_group" "bar" {
+    name = "tf-test-security-group-%03d"
+    description = "tf-test-security-group-descr"
+    security_group_names = ["${aws_security_group.bar.name}"]
+}
+
+resource "aws_elasticache_cluster" "bar" {
+    cluster_id = "tf-test-%03d"
+    engine = "redis"
+    node_type = "cache.m1.small"
+    num_cache_nodes = 1
+    port = 6379
+    parameter_group_name = "default.redis2.8"
+    security_group_names = ["${aws_elasticache_security_group.bar.name}"]
+    snapshot_window = "07:00-09:00"
+    snapshot_retention_limit = 7
+    apply_immediately = true
+}
+`
+
 var testAccAWSElasticacheClusterInVPCConfig = fmt.Sprintf(`
 resource "aws_vpc" "foo" {
 	cidr_block = "192.168.0.0/16"
@@ -9,6 +9,7 @@ import (
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/ec2"
 	"github.com/aws/aws-sdk-go/service/elb"
 	"github.com/hashicorp/terraform/helper/hashcode"
 	"github.com/hashicorp/terraform/helper/resource"
@@ -74,6 +75,11 @@ func resourceAwsElb() *schema.Resource {
 				Computed: true,
 			},
 
+			"source_security_group_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
 			"subnets": &schema.Schema{
 				Type: schema.TypeSet,
 				Elem: &schema.Schema{Type: schema.TypeString},
@@ -101,6 +107,29 @@ func resourceAwsElb() *schema.Resource {
 				Default:  300,
 			},
 
+			"access_logs": &schema.Schema{
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"interval": &schema.Schema{
+							Type:     schema.TypeInt,
+							Optional: true,
+							Default:  60,
+						},
+						"bucket": &schema.Schema{
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"bucket_prefix": &schema.Schema{
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+					},
+				},
+				Set: resourceAwsElbAccessLogsHash,
+			},
+
 			"listener": &schema.Schema{
 				Type:     schema.TypeSet,
 				Required: true,
@@ -300,11 +329,28 @@ func resourceAwsElbRead(d *schema.ResourceData, meta interface{}) error {
 	d.Set("security_groups", lb.SecurityGroups)
 	if lb.SourceSecurityGroup != nil {
 		d.Set("source_security_group", lb.SourceSecurityGroup.GroupName)
+
+		// Manually look up the ELB Security Group ID, since it's not provided
+		var elbVpc string
+		if lb.VPCId != nil {
+			elbVpc = *lb.VPCId
+		}
+		sgId, err := sourceSGIdByName(meta, *lb.SourceSecurityGroup.GroupName, elbVpc)
+		if err != nil {
+			return fmt.Errorf("[WARN] Error looking up ELB Security Group ID: %s", err)
+		} else {
+			d.Set("source_security_group_id", sgId)
+		}
 	}
 	d.Set("subnets", lb.Subnets)
 	d.Set("idle_timeout", lbAttrs.ConnectionSettings.IdleTimeout)
 	d.Set("connection_draining", lbAttrs.ConnectionDraining.Enabled)
 	d.Set("connection_draining_timeout", lbAttrs.ConnectionDraining.Timeout)
+	if lbAttrs.AccessLog != nil {
+		if err := d.Set("access_logs", flattenAccessLog(lbAttrs.AccessLog)); err != nil {
+			return err
+		}
+	}
 
 	resp, err := elbconn.DescribeTags(&elb.DescribeTagsInput{
 		LoadBalancerNames: []*string{lb.LoadBalancerName},
@@ -405,7 +451,7 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {
 		d.SetPartial("instances")
 	}
 
-	if d.HasChange("cross_zone_load_balancing") || d.HasChange("idle_timeout") {
+	if d.HasChange("cross_zone_load_balancing") || d.HasChange("idle_timeout") || d.HasChange("access_logs") {
 		attrs := elb.ModifyLoadBalancerAttributesInput{
 			LoadBalancerName: aws.String(d.Get("name").(string)),
 			LoadBalancerAttributes: &elb.LoadBalancerAttributes{
@@ -418,6 +464,30 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {
 			},
 		}
 
+		logs := d.Get("access_logs").(*schema.Set).List()
+		if len(logs) > 1 {
+			return fmt.Errorf("Only one access logs config per ELB is supported")
+		} else if len(logs) == 1 {
+			log := logs[0].(map[string]interface{})
+			accessLog := &elb.AccessLog{
+				Enabled:      aws.Bool(true),
+				EmitInterval: aws.Int64(int64(log["interval"].(int))),
+				S3BucketName: aws.String(log["bucket"].(string)),
+			}
+
+			if log["bucket_prefix"] != "" {
+				accessLog.S3BucketPrefix = aws.String(log["bucket_prefix"].(string))
+			}
+
+			attrs.LoadBalancerAttributes.AccessLog = accessLog
+		} else if len(logs) == 0 {
+			// disable access logs
+			attrs.LoadBalancerAttributes.AccessLog = &elb.AccessLog{
+				Enabled: aws.Bool(false),
+			}
+		}
+
+		log.Printf("[DEBUG] ELB Modify Load Balancer Attributes Request: %#v", attrs)
 		_, err := elbconn.ModifyLoadBalancerAttributes(&attrs)
 		if err != nil {
 			return fmt.Errorf("Failure configuring ELB attributes: %s", err)
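With the schema above and this update path in place, ELB access logging is driven entirely from configuration; removing the block disables logging. A minimal usage sketch (the bucket name and prefix are illustrative, and the bucket must carry a policy allowing the regional ELB account to write to it, as the acceptance test below demonstrates):

resource "aws_elb" "example" {
  name               = "example-elb"
  availability_zones = ["us-west-2a"]

  listener {
    instance_port     = 8000
    instance_protocol = "http"
    lb_port           = 80
    lb_protocol       = "http"
  }

  access_logs {
    bucket        = "my-elb-logs-bucket"
    bucket_prefix = "prod"
    interval      = 60
  }
}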
@@ -550,6 +620,19 @@ func resourceAwsElbHealthCheckHash(v interface{}) int {
 	return hashcode.String(buf.String())
 }
 
+func resourceAwsElbAccessLogsHash(v interface{}) int {
+	var buf bytes.Buffer
+	m := v.(map[string]interface{})
+	buf.WriteString(fmt.Sprintf("%d-", m["interval"].(int)))
+	buf.WriteString(fmt.Sprintf("%s-",
+		strings.ToLower(m["bucket"].(string))))
+	if v, ok := m["bucket_prefix"]; ok {
+		buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(v.(string))))
+	}
+
+	return hashcode.String(buf.String())
+}
+
 func resourceAwsElbListenerHash(v interface{}) int {
 	var buf bytes.Buffer
 	m := v.(map[string]interface{})
@@ -594,3 +677,52 @@ func validateElbName(v interface{}, k string) (ws []string, errors []error) {
 	return
 
 }
+
+func sourceSGIdByName(meta interface{}, sg, vpcId string) (string, error) {
+	conn := meta.(*AWSClient).ec2conn
+	var filters []*ec2.Filter
+	var sgFilterName, sgFilterVPCID *ec2.Filter
+	sgFilterName = &ec2.Filter{
+		Name:   aws.String("group-name"),
+		Values: []*string{aws.String(sg)},
+	}
+
+	if vpcId != "" {
+		sgFilterVPCID = &ec2.Filter{
+			Name:   aws.String("vpc-id"),
+			Values: []*string{aws.String(vpcId)},
+		}
+	}
+
+	filters = append(filters, sgFilterName)
+
+	if sgFilterVPCID != nil {
+		filters = append(filters, sgFilterVPCID)
+	}
+
+	req := &ec2.DescribeSecurityGroupsInput{
+		Filters: filters,
+	}
+	resp, err := conn.DescribeSecurityGroups(req)
+	if err != nil {
+		if ec2err, ok := err.(awserr.Error); ok {
+			if ec2err.Code() == "InvalidSecurityGroupID.NotFound" ||
+				ec2err.Code() == "InvalidGroup.NotFound" {
+				resp = nil
+				err = nil
+			}
+		}
+
+		if err != nil {
+			log.Printf("Error on ELB SG look up: %s", err)
+			return "", err
+		}
+	}
+
+	if resp == nil || len(resp.SecurityGroups) == 0 {
+		return "", fmt.Errorf("No security groups found for name %s and vpc id %s", sg, vpcId)
+	}
+
+	group := resp.SecurityGroups[0]
+	return *group.GroupId, nil
+}
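Because the read path now resolves the source security group to an ID via the lookup above, the new computed `source_security_group_id` attribute can be referenced from other resources, for example to admit ELB traffic into an instance security group. A sketch under assumed resource names (`aws_security_group.instances` and `aws_elb.example` are illustrative):

resource "aws_security_group_rule" "allow_elb" {
  type                     = "ingress"
  from_port                = 8000
  to_port                  = 8000
  protocol                 = "tcp"
  security_group_id        = "${aws_security_group.instances.id}"
  source_security_group_id = "${aws_elb.example.source_security_group_id}"
}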
@@ -75,6 +75,52 @@ func TestAccAWSELB_fullCharacterRange(t *testing.T) {
 	})
 }
 
+func TestAccAWSELB_AccessLogs(t *testing.T) {
+	var conf elb.LoadBalancerDescription
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSELBDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccAWSELBAccessLogs,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSELBExists("aws_elb.foo", &conf),
+					resource.TestCheckResourceAttr(
+						"aws_elb.foo", "name", "FoobarTerraform-test123"),
+				),
+			},
+
+			resource.TestStep{
+				Config: testAccAWSELBAccessLogsOn,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSELBExists("aws_elb.foo", &conf),
+					resource.TestCheckResourceAttr(
+						"aws_elb.foo", "name", "FoobarTerraform-test123"),
+					resource.TestCheckResourceAttr(
+						"aws_elb.foo", "access_logs.#", "1"),
+					resource.TestCheckResourceAttr(
+						"aws_elb.foo", "access_logs.1713209538.bucket", "terraform-access-logs-bucket"),
+					resource.TestCheckResourceAttr(
+						"aws_elb.foo", "access_logs.1713209538.interval", "5"),
+				),
+			},
+
+			resource.TestStep{
+				Config: testAccAWSELBAccessLogs,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSELBExists("aws_elb.foo", &conf),
+					resource.TestCheckResourceAttr(
+						"aws_elb.foo", "name", "FoobarTerraform-test123"),
+					resource.TestCheckResourceAttr(
+						"aws_elb.foo", "access_logs.#", "0"),
+				),
+			},
+		},
+	})
+}
+
 func TestAccAWSELB_generatedName(t *testing.T) {
 	var conf elb.LoadBalancerDescription
 	generatedNameRegexp := regexp.MustCompile("^tf-lb-")
@@ -611,6 +657,15 @@ func testAccCheckAWSELBExists(n string, res *elb.LoadBalancerDescription) resour
 
 		*res = *describe.LoadBalancerDescriptions[0]
 
+		// Confirm source_security_group_id for ELBs in a VPC
+		// See https://github.com/hashicorp/terraform/pull/3780
+		if res.VPCId != nil {
+			sgid := rs.Primary.Attributes["source_security_group_id"]
+			if sgid == "" {
+				return fmt.Errorf("Expected to find source_security_group_id for ELB, but was empty")
+			}
+		}
+
 		return nil
 	}
 }
@@ -650,6 +705,64 @@ resource "aws_elb" "foo" {
 }
 `
 
+const testAccAWSELBAccessLogs = `
+resource "aws_elb" "foo" {
+  name = "FoobarTerraform-test123"
+  availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
+
+  listener {
+    instance_port = 8000
+    instance_protocol = "http"
+    lb_port = 80
+    lb_protocol = "http"
+  }
+}
+`
+const testAccAWSELBAccessLogsOn = `
+# an S3 bucket configured for Access logs
+# The 797873946194 is the AWS ID for us-west-2, so this test
+# must be run in us-west-2
+resource "aws_s3_bucket" "acceslogs_bucket" {
+  bucket = "terraform-access-logs-bucket"
+  acl = "private"
+  force_destroy = true
+  policy = <<EOF
+{
+  "Id": "Policy1446577137248",
+  "Statement": [
+    {
+      "Action": "s3:PutObject",
+      "Effect": "Allow",
+      "Principal": {
+        "AWS": "arn:aws:iam::797873946194:root"
+      },
+      "Resource": "arn:aws:s3:::terraform-access-logs-bucket/*",
+      "Sid": "Stmt1446575236270"
+    }
+  ],
+  "Version": "2012-10-17"
+}
+EOF
+}
+
+resource "aws_elb" "foo" {
+  name = "FoobarTerraform-test123"
+  availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
+
+  listener {
+    instance_port = 8000
+    instance_protocol = "http"
+    lb_port = 80
+    lb_protocol = "http"
+  }
+
+  access_logs {
+    interval = 5
+    bucket = "${aws_s3_bucket.acceslogs_bucket.bucket}"
+  }
+}
+`
+
 const testAccAWSELBGeneratedName = `
 resource "aws_elb" "foo" {
   availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
@@ -135,6 +135,9 @@ func removeUsersFromGroup(conn *iam.IAM, users []*string, group string) error {
 		})
 
 		if err != nil {
+			if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
+				return nil
+			}
 			return err
 		}
 	}
@@ -68,6 +68,7 @@ func resourceAwsIamSamlProviderRead(d *schema.ResourceData, meta interface{}) er
 	}
 
 	validUntil := out.ValidUntil.Format(time.RFC1123)
+	d.Set("arn", d.Id())
 	d.Set("valid_until", validUntil)
 	d.Set("saml_metadata_document", *out.SAMLMetadataDocument)
 
@@ -2,6 +2,7 @@ package aws
 
 import (
 	"fmt"
+	"log"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
@@ -14,9 +15,7 @@ func resourceAwsIamUser() *schema.Resource {
 	return &schema.Resource{
 		Create: resourceAwsIamUserCreate,
 		Read:   resourceAwsIamUserRead,
-		// There is an UpdateUser API call, but goamz doesn't support it yet.
-		// XXX but we aren't using goamz anymore.
-		//Update: resourceAwsIamUserUpdate,
+		Update: resourceAwsIamUserUpdate,
 		Delete: resourceAwsIamUserDelete,
 
 		Schema: map[string]*schema.Schema{
@@ -39,7 +38,6 @@ func resourceAwsIamUser() *schema.Resource {
 			"name": &schema.Schema{
 				Type:     schema.TypeString,
 				Required: true,
-				ForceNew: true,
 			},
 			"path": &schema.Schema{
 				Type:     schema.TypeString,
@@ -54,12 +52,14 @@ func resourceAwsIamUser() *schema.Resource {
 func resourceAwsIamUserCreate(d *schema.ResourceData, meta interface{}) error {
 	iamconn := meta.(*AWSClient).iamconn
 	name := d.Get("name").(string)
+	path := d.Get("path").(string)
 
 	request := &iam.CreateUserInput{
-		Path:     aws.String(d.Get("path").(string)),
+		Path:     aws.String(path),
 		UserName: aws.String(name),
 	}
 
+	log.Println("[DEBUG] Create IAM User request:", request)
 	createResp, err := iamconn.CreateUser(request)
 	if err != nil {
 		return fmt.Errorf("Error creating IAM User %s: %s", name, err)
@@ -69,14 +69,15 @@ func resourceAwsIamUserCreate(d *schema.ResourceData, meta interface{}) error {
 
 func resourceAwsIamUserRead(d *schema.ResourceData, meta interface{}) error {
 	iamconn := meta.(*AWSClient).iamconn
+	name := d.Get("name").(string)
 	request := &iam.GetUserInput{
-		UserName: aws.String(d.Id()),
+		UserName: aws.String(name),
 	}
 
 	getResp, err := iamconn.GetUser(request)
 	if err != nil {
 		if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" { // XXX test me
+			log.Printf("[WARN] No IAM user by name (%s) found", d.Id())
 			d.SetId("")
 			return nil
 		}
@@ -102,6 +103,32 @@ func resourceAwsIamUserReadResult(d *schema.ResourceData, user *iam.User) error
 	return nil
 }
 
+func resourceAwsIamUserUpdate(d *schema.ResourceData, meta interface{}) error {
+	if d.HasChange("name") || d.HasChange("path") {
+		iamconn := meta.(*AWSClient).iamconn
+		on, nn := d.GetChange("name")
+		_, np := d.GetChange("path")
+
+		request := &iam.UpdateUserInput{
+			UserName:    aws.String(on.(string)),
+			NewUserName: aws.String(nn.(string)),
+			NewPath:     aws.String(np.(string)),
+		}
+
+		log.Println("[DEBUG] Update IAM User request:", request)
+		_, err := iamconn.UpdateUser(request)
+		if err != nil {
+			if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
+				log.Printf("[WARN] No IAM user by name (%s) found", d.Id())
+				d.SetId("")
+				return nil
+			}
+			return fmt.Errorf("Error updating IAM User %s: %s", d.Id(), err)
+		}
+		return resourceAwsIamUserRead(d, meta)
+	}
+	return nil
+}
 func resourceAwsIamUserDelete(d *schema.ResourceData, meta interface{}) error {
 	iamconn := meta.(*AWSClient).iamconn
 
@@ -109,6 +136,7 @@ func resourceAwsIamUserDelete(d *schema.ResourceData, meta interface{}) error {
 		UserName: aws.String(d.Id()),
 	}
 
+	log.Println("[DEBUG] Delete IAM User request:", request)
 	if _, err := iamconn.DeleteUser(request); err != nil {
 		return fmt.Errorf("Error deleting IAM User %s: %s", d.Id(), err)
 	}
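Since `name` no longer forces a new resource and an Update function is wired in, changing a user's name or path becomes an in-place rename through iam.UpdateUser rather than a destroy-and-recreate. A sketch of the kind of edit that now updates in place (names are illustrative):

resource "aws_iam_user" "deploy" {
  name = "deploy-bot"    # changing this now calls UpdateUser instead of recreating
  path = "/automation/"  # likewise for path
}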
@@ -23,7 +23,14 @@ func TestAccAWSUser_basic(t *testing.T) {
 				Config: testAccAWSUserConfig,
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSUserExists("aws_iam_user.user", &conf),
-					testAccCheckAWSUserAttributes(&conf),
+					testAccCheckAWSUserAttributes(&conf, "test-user", "/"),
+				),
+			},
+			resource.TestStep{
+				Config: testAccAWSUserConfig2,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSUserExists("aws_iam_user.user", &conf),
+					testAccCheckAWSUserAttributes(&conf, "test-user2", "/path2/"),
 				),
 			},
 		},
@@ -85,13 +92,13 @@ func testAccCheckAWSUserExists(n string, res *iam.GetUserOutput) resource.TestCh
 	}
 }
 
-func testAccCheckAWSUserAttributes(user *iam.GetUserOutput) resource.TestCheckFunc {
+func testAccCheckAWSUserAttributes(user *iam.GetUserOutput, name string, path string) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
-		if *user.User.UserName != "test-user" {
+		if *user.User.UserName != name {
 			return fmt.Errorf("Bad name: %s", *user.User.UserName)
 		}
 
-		if *user.User.Path != "/" {
+		if *user.User.Path != path {
 			return fmt.Errorf("Bad path: %s", *user.User.Path)
 		}
 
@@ -105,3 +112,9 @@ resource "aws_iam_user" "user" {
 	path = "/"
 }
 `
+const testAccAWSUserConfig2 = `
+resource "aws_iam_user" "user" {
+	name = "test-user2"
+	path = "/path2/"
+}
+`
@@ -0,0 +1,281 @@
+package aws
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/firehose"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsKinesisFirehoseDeliveryStreamCreate,
+		Read:   resourceAwsKinesisFirehoseDeliveryStreamRead,
+		Update: resourceAwsKinesisFirehoseDeliveryStreamUpdate,
+		Delete: resourceAwsKinesisFirehoseDeliveryStreamDelete,
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"destination": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				StateFunc: func(v interface{}) string {
+					value := v.(string)
+					return strings.ToLower(value)
+				},
+			},
+
+			"role_arn": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"s3_bucket_arn": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"s3_prefix": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+
+			"s3_buffer_size": &schema.Schema{
+				Type:     schema.TypeInt,
+				Optional: true,
+				Default:  5,
+			},
+
+			"s3_buffer_interval": &schema.Schema{
+				Type:     schema.TypeInt,
+				Optional: true,
+				Default:  300,
+			},
+
+			"s3_data_compression": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Default:  "UNCOMPRESSED",
+			},
+
+			"arn": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+			},
+
+			"version_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+			},
+
+			"destination_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).firehoseconn
+
+	if d.Get("destination").(string) != "s3" {
+		return fmt.Errorf("[ERROR] AWS Kinesis Firehose only supports S3 destinations for the first implementation")
+	}
+
+	sn := d.Get("name").(string)
+	input := &firehose.CreateDeliveryStreamInput{
+		DeliveryStreamName: aws.String(sn),
+	}
+
+	s3_config := &firehose.S3DestinationConfiguration{
+		BucketARN: aws.String(d.Get("s3_bucket_arn").(string)),
+		RoleARN:   aws.String(d.Get("role_arn").(string)),
+		BufferingHints: &firehose.BufferingHints{
+			IntervalInSeconds: aws.Int64(int64(d.Get("s3_buffer_interval").(int))),
+			SizeInMBs:         aws.Int64(int64(d.Get("s3_buffer_size").(int))),
+		},
+		CompressionFormat: aws.String(d.Get("s3_data_compression").(string)),
+	}
+	if v, ok := d.GetOk("s3_prefix"); ok {
+		s3_config.Prefix = aws.String(v.(string))
+	}
+
+	input.S3DestinationConfiguration = s3_config
+
+	_, err := conn.CreateDeliveryStream(input)
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok {
+			return fmt.Errorf("[WARN] Error creating Kinesis Firehose Delivery Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code())
+		}
+		return err
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"CREATING"},
+		Target:     "ACTIVE",
+		Refresh:    firehoseStreamStateRefreshFunc(conn, sn),
+		Timeout:    5 * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	firehoseStream, err := stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for Kinesis Stream (%s) to become active: %s",
+			sn, err)
+	}
+
+	s := firehoseStream.(*firehose.DeliveryStreamDescription)
+	d.SetId(*s.DeliveryStreamARN)
+	d.Set("arn", s.DeliveryStreamARN)
+
+	return resourceAwsKinesisFirehoseDeliveryStreamRead(d, meta)
+}
+
+func resourceAwsKinesisFirehoseDeliveryStreamUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).firehoseconn
+
+	if d.Get("destination").(string) != "s3" {
+		return fmt.Errorf("[ERROR] AWS Kinesis Firehose only supports S3 destinations for the first implementation")
+	}
+
+	sn := d.Get("name").(string)
+	s3_config := &firehose.S3DestinationUpdate{}
+
+	if d.HasChange("role_arn") {
+		s3_config.RoleARN = aws.String(d.Get("role_arn").(string))
+	}
+
+	if d.HasChange("s3_bucket_arn") {
+		s3_config.BucketARN = aws.String(d.Get("s3_bucket_arn").(string))
+	}
+
+	if d.HasChange("s3_prefix") {
+		s3_config.Prefix = aws.String(d.Get("s3_prefix").(string))
+	}
+
+	if d.HasChange("s3_data_compression") {
+		s3_config.CompressionFormat = aws.String(d.Get("s3_data_compression").(string))
+	}
+
+	if d.HasChange("s3_buffer_interval") || d.HasChange("s3_buffer_size") {
+		bufferingHints := &firehose.BufferingHints{
+			IntervalInSeconds: aws.Int64(int64(d.Get("s3_buffer_interval").(int))),
+			SizeInMBs:         aws.Int64(int64(d.Get("s3_buffer_size").(int))),
+		}
+		s3_config.BufferingHints = bufferingHints
+	}
+
+	destOpts := &firehose.UpdateDestinationInput{
+		DeliveryStreamName:             aws.String(sn),
+		CurrentDeliveryStreamVersionId: aws.String(d.Get("version_id").(string)),
+		DestinationId:                  aws.String(d.Get("destination_id").(string)),
+		S3DestinationUpdate:            s3_config,
+	}
+
+	_, err := conn.UpdateDestination(destOpts)
+	if err != nil {
+		return fmt.Errorf(
+			"Error Updating Kinesis Firehose Delivery Stream: \"%s\"\n%s",
+			sn, err)
+	}
+
+	return resourceAwsKinesisFirehoseDeliveryStreamRead(d, meta)
+}
+
+func resourceAwsKinesisFirehoseDeliveryStreamRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).firehoseconn
+	sn := d.Get("name").(string)
+	describeOpts := &firehose.DescribeDeliveryStreamInput{
+		DeliveryStreamName: aws.String(sn),
+	}
+	resp, err := conn.DescribeDeliveryStream(describeOpts)
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok {
+			if awsErr.Code() == "ResourceNotFoundException" {
+				d.SetId("")
+				return nil
+			}
+			return fmt.Errorf("[WARN] Error reading Kinesis Firehose Delivery Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code())
+		}
+		return err
+	}
+
+	s := resp.DeliveryStreamDescription
+	d.Set("version_id", s.VersionId)
+	d.Set("arn", *s.DeliveryStreamARN)
+	if len(s.Destinations) > 0 {
+		destination := s.Destinations[0]
+		d.Set("destination_id", *destination.DestinationId)
+	}
+
+	return nil
+}
+
+func resourceAwsKinesisFirehoseDeliveryStreamDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).firehoseconn
+
+	sn := d.Get("name").(string)
+	_, err := conn.DeleteDeliveryStream(&firehose.DeleteDeliveryStreamInput{
+		DeliveryStreamName: aws.String(sn),
+	})
+
+	if err != nil {
+		return err
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"DELETING"},
+		Target:     "DESTROYED",
+		Refresh:    firehoseStreamStateRefreshFunc(conn, sn),
+		Timeout:    5 * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for Delivery Stream (%s) to be destroyed: %s",
+			sn, err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func firehoseStreamStateRefreshFunc(conn *firehose.Firehose, sn string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		describeOpts := &firehose.DescribeDeliveryStreamInput{
+			DeliveryStreamName: aws.String(sn),
+		}
+		resp, err := conn.DescribeDeliveryStream(describeOpts)
+		if err != nil {
+			if awsErr, ok := err.(awserr.Error); ok {
+				if awsErr.Code() == "ResourceNotFoundException" {
+					return 42, "DESTROYED", nil
+				}
+				return nil, awsErr.Code(), err
+			}
+			return nil, "failed", err
+		}
+
+		return resp.DeliveryStreamDescription, *resp.DeliveryStreamDescription.DeliveryStreamStatus, nil
+	}
+}
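Taken together, the resource drives the full Firehose lifecycle: create and wait for ACTIVE, update the S3 destination in place via UpdateDestination, and delete while polling until DESTROYED. A minimal usage sketch; the role ARN is a placeholder and must name an IAM role that Firehose can assume with write access to the bucket:

resource "aws_s3_bucket" "firehose_target" {
  bucket = "my-firehose-target-bucket"
  acl    = "private"
}

resource "aws_kinesis_firehose_delivery_stream" "example" {
  name                = "terraform-kinesis-firehose-example"
  destination         = "s3"
  role_arn            = "arn:aws:iam::123456789012:role/firehose_delivery_role"
  s3_bucket_arn       = "${aws_s3_bucket.firehose_target.arn}"
  s3_prefix           = "events/"
  s3_buffer_size      = 5
  s3_buffer_interval  = 300
  s3_data_compression = "GZIP"
}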
@@ -0,0 +1,189 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"math/rand"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/firehose"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSKinesisFirehoseDeliveryStream_basic(t *testing.T) {
+	var stream firehose.DeliveryStreamDescription
+
+	ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
+	config := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_basic, ri, ri)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: config,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream),
+					testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream),
+				),
+			},
+		},
+	})
+}
+
+func TestAccAWSKinesisFirehoseDeliveryStream_s3ConfigUpdates(t *testing.T) {
+	var stream firehose.DeliveryStreamDescription
+
+	ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
+	preconfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3, ri, ri)
+	postConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3Updates, ri, ri)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: preconfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream),
+					testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream),
+					resource.TestCheckResourceAttr(
+						"aws_kinesis_firehose_delivery_stream.test_stream", "s3_buffer_size", "5"),
+					resource.TestCheckResourceAttr(
+						"aws_kinesis_firehose_delivery_stream.test_stream", "s3_buffer_interval", "300"),
+					resource.TestCheckResourceAttr(
+						"aws_kinesis_firehose_delivery_stream.test_stream", "s3_data_compression", "UNCOMPRESSED"),
+				),
+			},
+
+			resource.TestStep{
+				Config: postConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream),
+					testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream),
+					resource.TestCheckResourceAttr(
+						"aws_kinesis_firehose_delivery_stream.test_stream", "s3_buffer_size", "10"),
+					resource.TestCheckResourceAttr(
+						"aws_kinesis_firehose_delivery_stream.test_stream", "s3_buffer_interval", "400"),
+					resource.TestCheckResourceAttr(
+						"aws_kinesis_firehose_delivery_stream.test_stream", "s3_data_compression", "GZIP"),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckKinesisFirehoseDeliveryStreamExists(n string, stream *firehose.DeliveryStreamDescription) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		log.Printf("State: %#v", s.RootModule().Resources)
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No Kinesis Firehose ID is set")
+		}
+
+		conn := testAccProvider.Meta().(*AWSClient).firehoseconn
+		describeOpts := &firehose.DescribeDeliveryStreamInput{
+			DeliveryStreamName: aws.String(rs.Primary.Attributes["name"]),
+		}
+		resp, err := conn.DescribeDeliveryStream(describeOpts)
+		if err != nil {
+			return err
+		}
+
+		*stream = *resp.DeliveryStreamDescription
+
+		return nil
+	}
+}
+
+func testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescription) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		if !strings.HasPrefix(*stream.DeliveryStreamName, "terraform-kinesis-firehose") {
+			return fmt.Errorf("Bad Stream name: %s", *stream.DeliveryStreamName)
+		}
+		for _, rs := range s.RootModule().Resources {
+			if rs.Type != "aws_kinesis_firehose_delivery_stream" {
+				continue
+			}
+			if *stream.DeliveryStreamARN != rs.Primary.Attributes["arn"] {
+				return fmt.Errorf("Bad Delivery Stream ARN\n\t expected: %s\n\tgot: %s\n", rs.Primary.Attributes["arn"], *stream.DeliveryStreamARN)
+			}
+		}
+		return nil
+	}
+}
+
+func testAccCheckKinesisFirehoseDeliveryStreamDestroy(s *terraform.State) error {
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "aws_kinesis_firehose_delivery_stream" {
+			continue
+		}
+		conn := testAccProvider.Meta().(*AWSClient).firehoseconn
+		describeOpts := &firehose.DescribeDeliveryStreamInput{
+			DeliveryStreamName: aws.String(rs.Primary.Attributes["name"]),
+		}
+		resp, err := conn.DescribeDeliveryStream(describeOpts)
+		if err == nil {
+			if resp.DeliveryStreamDescription != nil && *resp.DeliveryStreamDescription.DeliveryStreamStatus != "DELETING" {
+				return fmt.Errorf("Error: Delivery Stream still exists")
+			}
+		}
+
+		return nil
+
+	}
+
+	return nil
+}
+
+var testAccKinesisFirehoseDeliveryStreamConfig_basic = `
+resource "aws_s3_bucket" "bucket" {
+	bucket = "tf-test-bucket-%d"
+	acl = "private"
+}
+
+resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
+	name = "terraform-kinesis-firehose-basictest-%d"
+	destination = "s3"
+	role_arn = "arn:aws:iam::946579370547:role/firehose_delivery_role"
+	s3_bucket_arn = "${aws_s3_bucket.bucket.arn}"
+}`
+
+var testAccKinesisFirehoseDeliveryStreamConfig_s3 = `
+resource "aws_s3_bucket" "bucket" {
+	bucket = "tf-test-bucket-%d"
+	acl = "private"
+}
+
+resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
+	name = "terraform-kinesis-firehose-s3test-%d"
+	destination = "s3"
+	role_arn = "arn:aws:iam::946579370547:role/firehose_delivery_role"
+	s3_bucket_arn = "${aws_s3_bucket.bucket.arn}"
+}`
+
+var testAccKinesisFirehoseDeliveryStreamConfig_s3Updates = `
+resource "aws_s3_bucket" "bucket" {
+	bucket = "tf-test-bucket-01-%d"
+	acl = "private"
+}
+
+resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
+	name = "terraform-kinesis-firehose-s3test-%d"
+	destination = "s3"
+	role_arn = "arn:aws:iam::946579370547:role/firehose_delivery_role"
+	s3_bucket_arn = "${aws_s3_bucket.bucket.arn}"
+	s3_buffer_size = 10
+	s3_buffer_interval = 400
+	s3_data_compression = "GZIP"
+}`
@@ -13,6 +13,8 @@ import (
 	"github.com/aws/aws-sdk-go/service/lambda"
 	"github.com/mitchellh/go-homedir"
 
+	"errors"
+
 	"github.com/hashicorp/terraform/helper/schema"
 )
 
@@ -25,13 +27,28 @@ func resourceAwsLambdaFunction() *schema.Resource {
 
 		Schema: map[string]*schema.Schema{
 			"filename": &schema.Schema{
 				Type:          schema.TypeString,
-				Required:      true,
+				Optional:      true,
+				ConflictsWith: []string{"s3_bucket", "s3_key", "s3_object_version"},
+			},
+			"s3_bucket": &schema.Schema{
+				Type:          schema.TypeString,
+				Optional:      true,
+				ConflictsWith: []string{"filename"},
+			},
+			"s3_key": &schema.Schema{
+				Type:          schema.TypeString,
+				Optional:      true,
+				ConflictsWith: []string{"filename"},
+			},
+			"s3_object_version": &schema.Schema{
+				Type:          schema.TypeString,
+				Optional:      true,
+				ConflictsWith: []string{"filename"},
 			},
 			"description": &schema.Schema{
 				Type:     schema.TypeString,
 				Optional: true,
-				ForceNew: true, // TODO make this editable
 			},
 			"function_name": &schema.Schema{
 				Type:     schema.TypeString,
@@ -93,22 +110,36 @@ func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) e
 
 	log.Printf("[DEBUG] Creating Lambda Function %s with role %s", functionName, iamRole)
 
-	filename, err := homedir.Expand(d.Get("filename").(string))
-	if err != nil {
-		return err
-	}
-	zipfile, err := ioutil.ReadFile(filename)
-	if err != nil {
-		return err
-	}
-	d.Set("source_code_hash", sha256.Sum256(zipfile))
-
-	log.Printf("[DEBUG] ")
+	var functionCode *lambda.FunctionCode
+	if v, ok := d.GetOk("filename"); ok {
+		filename, err := homedir.Expand(v.(string))
+		if err != nil {
+			return err
+		}
+		zipfile, err := ioutil.ReadFile(filename)
+		if err != nil {
+			return err
+		}
+		d.Set("source_code_hash", sha256.Sum256(zipfile))
+		functionCode = &lambda.FunctionCode{
+			ZipFile: zipfile,
+		}
+	} else {
+		s3Bucket, bucketOk := d.GetOk("s3_bucket")
+		s3Key, keyOk := d.GetOk("s3_key")
+		s3ObjectVersion, versionOk := d.GetOk("s3_object_version")
+		if !bucketOk || !keyOk || !versionOk {
+			return errors.New("s3_bucket, s3_key and s3_object_version must all be set while using S3 code source")
+		}
+		functionCode = &lambda.FunctionCode{
+			S3Bucket:        aws.String(s3Bucket.(string)),
+			S3Key:           aws.String(s3Key.(string)),
+			S3ObjectVersion: aws.String(s3ObjectVersion.(string)),
+		}
+	}
 
 	params := &lambda.CreateFunctionInput{
-		Code: &lambda.FunctionCode{
-			ZipFile: zipfile,
-		},
+		Code:         functionCode,
 		Description:  aws.String(d.Get("description").(string)),
 		FunctionName: aws.String(functionName),
 		Handler:      aws.String(d.Get("handler").(string)),
@@ -118,6 +149,7 @@ func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) e
 		Timeout: aws.Int64(int64(d.Get("timeout").(int))),
 	}
 
+	var err error
 	for i := 0; i < 5; i++ {
 		_, err = conn.CreateFunction(params)
 		if awsErr, ok := err.(awserr.Error); ok {
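With `filename` now optional and the three S3 arguments added, function code can be pulled from a versioned S3 object instead of a local zip. A sketch under assumptions: the bucket, key, version, and role ARN are placeholders, and per the validation above all three S3 arguments must be set together:

resource "aws_lambda_function" "from_s3" {
  function_name     = "example-from-s3"
  role              = "arn:aws:iam::123456789012:role/lambda_exec_role"
  handler           = "index.handler"
  s3_bucket         = "my-artifacts-bucket"
  s3_key            = "builds/example.zip"
  s3_object_version = "example-object-version-id"
}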
@@ -26,10 +26,11 @@ func resourceAwsLaunchConfiguration() *schema.Resource {
 
 		Schema: map[string]*schema.Schema{
 			"name": &schema.Schema{
 				Type:          schema.TypeString,
 				Optional:      true,
 				Computed:      true,
 				ForceNew:      true,
+				ConflictsWith: []string{"name_prefix"},
 				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
 					// https://github.com/boto/botocore/blob/9f322b1/botocore/data/autoscaling/2011-01-01/service-2.json#L1932-L1939
 					value := v.(string)
@@ -41,6 +42,22 @@ func resourceAwsLaunchConfiguration() *schema.Resource {
 				},
 			},
 
+			"name_prefix": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+					// https://github.com/boto/botocore/blob/9f322b1/botocore/data/autoscaling/2011-01-01/service-2.json#L1932-L1939
+					// uuid is 26 characters, limit the prefix to 229.
+					value := v.(string)
+					if len(value) > 229 {
+						errors = append(errors, fmt.Errorf(
+							"%q cannot be longer than 229 characters, name is limited to 255", k))
+					}
+					return
+				},
+			},
+
 			"image_id": &schema.Schema{
 				Type:     schema.TypeString,
 				Required: true,
@@ -386,6 +403,8 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
 	var lcName string
 	if v, ok := d.GetOk("name"); ok {
 		lcName = v.(string)
+	} else if v, ok := d.GetOk("name_prefix"); ok {
+		lcName = resource.PrefixedUniqueId(v.(string))
 	} else {
 		lcName = resource.UniqueId()
 	}
@@ -30,6 +30,14 @@ func TestAccAWSLaunchConfiguration_basic(t *testing.T) {
 					"aws_launch_configuration.bar", "terraform-"),
 				),
 			},
+			resource.TestStep{
+				Config: testAccAWSLaunchConfigurationPrefixNameConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.baz", &conf),
+					testAccCheckAWSLaunchConfigurationGeneratedNamePrefix(
+						"aws_launch_configuration.baz", "baz-"),
+				),
+			},
 		},
 	})
 }
@@ -255,3 +263,13 @@ resource "aws_launch_configuration" "bar" {
     associate_public_ip_address = false
 }
 `
+
+const testAccAWSLaunchConfigurationPrefixNameConfig = `
+resource "aws_launch_configuration" "baz" {
+   name_prefix = "baz-"
+   image_id = "ami-21f78e11"
+   instance_type = "t1.micro"
+   user_data = "foobar-user-data-change"
+   associate_public_ip_address = false
+}
+`
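name_prefix pairs naturally with create_before_destroy: each replacement launch configuration receives a fresh unique name from PrefixedUniqueId, so the new one can exist alongside the old before the swap. A sketch (the AMI ID is illustrative, taken from the test config above):

resource "aws_launch_configuration" "web" {
  name_prefix   = "web-"
  image_id      = "ami-21f78e11"
  instance_type = "t1.micro"

  lifecycle {
    create_before_destroy = true
  }
}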
@@ -4,11 +4,12 @@ import (
 	"fmt"
 	"testing"
 
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/iam"
 	"github.com/aws/aws-sdk-go/service/opsworks"
-	"github.com/hashicorp/terraform/helper/resource"
-	"github.com/hashicorp/terraform/terraform"
 )
 
 // These tests assume the existence of predefined Opsworks IAM roles named `aws-opsworks-ec2-role`
@@ -49,7 +50,7 @@ resource "aws_opsworks_stack" "tf-acc" {
   custom_cookbooks_source {
     type = "git"
     revision = "master"
-    url = "https://github.com/awslabs/opsworks-example-cookbooks.git"
+    url = "https://github.com/aws/opsworks-example-cookbooks.git"
   }
 }
 `
@@ -129,7 +130,7 @@ resource "aws_opsworks_stack" "tf-acc" {
   custom_cookbooks_source {
     type = "git"
     revision = "master"
-    url = "https://github.com/awslabs/opsworks-example-cookbooks.git"
+    url = "https://github.com/aws/opsworks-example-cookbooks.git"
  }
 }
 `
@@ -259,7 +260,7 @@ var testAccAwsOpsworksStackCheckResourceAttrsUpdate = resource.ComposeTestCheckF
 	resource.TestCheckResourceAttr(
 		"aws_opsworks_stack.tf-acc",
 		"custom_cookbooks_source.0.url",
-		"https://github.com/awslabs/opsworks-example-cookbooks.git",
+		"https://github.com/aws/opsworks-example-cookbooks.git",
 	),
 )
@@ -28,6 +28,10 @@ func resourceAwsRoute53Record() *schema.Resource {
 				Type:     schema.TypeString,
 				Required: true,
 				ForceNew: true,
+				StateFunc: func(v interface{}) string {
+					value := v.(string)
+					return strings.ToLower(value)
+				},
 			},
 
 			"fqdn": &schema.Schema{
@@ -192,12 +196,13 @@ func resourceAwsRoute53RecordCreate(d *schema.ResourceData, meta interface{}) er
 	// Generate an ID
 	vars := []string{
 		zone,
-		d.Get("name").(string),
+		strings.ToLower(d.Get("name").(string)),
 		d.Get("type").(string),
 	}
 	if v, ok := d.GetOk("set_identifier"); ok {
 		vars = append(vars, v.(string))
 	}
 
 	d.SetId(strings.Join(vars, "_"))
 
 	// Wait until we are done
@@ -242,6 +247,8 @@ func resourceAwsRoute53RecordRead(d *schema.ResourceData, meta interface{}) erro
 		StartRecordType: aws.String(d.Get("type").(string)),
 	}
 
+	log.Printf("[DEBUG] List resource records sets for zone: %s, opts: %s",
+		zone, lopts)
 	resp, err := conn.ListResourceRecordSets(lopts)
 	if err != nil {
 		return err
@@ -251,7 +258,7 @@ func resourceAwsRoute53RecordRead(d *schema.ResourceData, meta interface{}) erro
 	found := false
 	for _, record := range resp.ResourceRecordSets {
 		name := cleanRecordName(*record.Name)
-		if FQDN(name) != FQDN(*lopts.StartRecordName) {
+		if FQDN(strings.ToLower(name)) != FQDN(strings.ToLower(*lopts.StartRecordName)) {
 			continue
 		}
 		if strings.ToUpper(*record.Type) != strings.ToUpper(*lopts.StartRecordType) {
@@ -279,6 +286,7 @@ func resourceAwsRoute53RecordRead(d *schema.ResourceData, meta interface{}) erro
 	}
 
 	if !found {
+		log.Printf("[DEBUG] No matching record found for: %s, removing from state file", en)
 		d.SetId("")
 	}
 
@@ -409,7 +417,10 @@ func resourceAwsRoute53RecordBuildSet(d *schema.ResourceData, zoneName string) (
 
 	if v, ok := d.GetOk("set_identifier"); ok {
 		rec.SetIdentifier = aws.String(v.(string))
-		rec.Weight = aws.Int64(int64(d.Get("weight").(int)))
+	}
+
+	if v, ok := d.GetOk("weight"); ok {
+		rec.Weight = aws.Int64(int64(v.(int)))
 	}
 
 	return rec, nil
@@ -440,7 +451,7 @@ func cleanRecordName(name string) string {
 // If it does not, add the zone name to form a fully qualified name
 // and keep AWS happy.
 func expandRecordName(name, zone string) string {
-	rn := strings.TrimSuffix(name, ".")
+	rn := strings.ToLower(strings.TrimSuffix(name, "."))
 	zone = strings.TrimSuffix(zone, ".")
 	if !strings.HasSuffix(rn, zone) {
 		rn = strings.Join([]string{name, zone}, ".")
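Decoupling `weight` from `set_identifier` matters because failover records need an identifier but no weight, while weighted records need both. A sketch of a weighted pair that exercises the new GetOk path (names are illustrative and assume the `aws_route53_zone.main` zone from the test configs below):

resource "aws_route53_record" "api_blue" {
  zone_id        = "${aws_route53_zone.main.zone_id}"
  name           = "api"
  type           = "CNAME"
  ttl            = "5"
  set_identifier = "blue"
  weight         = 90
  records        = ["blue.notexample.com"]
}

resource "aws_route53_record" "api_green" {
  zone_id        = "${aws_route53_zone.main.zone_id}"
  name           = "api"
  type           = "CNAME"
  ttl            = "5"
  set_identifier = "green"
  weight         = 10
  records        = ["green.notexample.com"]
}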
@@ -122,6 +122,23 @@ func TestAccAWSRoute53Record_wildcard(t *testing.T) {
 	})
 }
 
+func TestAccAWSRoute53Record_failover(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckRoute53RecordDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccRoute53FailoverCNAMERecord,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckRoute53RecordExists("aws_route53_record.www-primary"),
+					testAccCheckRoute53RecordExists("aws_route53_record.www-secondary"),
+				),
+			},
+		},
+	})
+}
+
 func TestAccAWSRoute53Record_weighted(t *testing.T) {
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheck(t) },
@@ -291,7 +308,7 @@ func testAccCheckRoute53RecordExists(n string) resource.TestCheckFunc {
 		// rec := resp.ResourceRecordSets[0]
 		for _, rec := range resp.ResourceRecordSets {
 			recName := cleanRecordName(*rec.Name)
-			if FQDN(recName) == FQDN(en) && *rec.Type == rType {
+			if FQDN(strings.ToLower(recName)) == FQDN(strings.ToLower(en)) && *rec.Type == rType {
 				return nil
 			}
 		}
@@ -306,7 +323,7 @@ resource "aws_route53_zone" "main" {
 
 resource "aws_route53_record" "default" {
 	zone_id = "${aws_route53_zone.main.zone_id}"
-	name = "www.notexample.com"
+	name = "www.NOTexamplE.com"
 	type = "A"
 	ttl = "30"
 	records = ["127.0.0.1", "127.0.0.27"]
@@ -384,6 +401,46 @@ resource "aws_route53_record" "default" {
 }
 `
 
+const testAccRoute53FailoverCNAMERecord = `
+resource "aws_route53_zone" "main" {
+	name = "notexample.com"
+}
+
+resource "aws_route53_health_check" "foo" {
+	fqdn = "dev.notexample.com"
+	port = 80
+	type = "HTTP"
+	resource_path = "/"
+	failure_threshold = "2"
+	request_interval = "30"
+
+	tags = {
+		Name = "tf-test-health-check"
+	}
+}
+
+resource "aws_route53_record" "www-primary" {
+	zone_id = "${aws_route53_zone.main.zone_id}"
+	name = "www"
+	type = "CNAME"
+	ttl = "5"
+	failover = "PRIMARY"
+	health_check_id = "${aws_route53_health_check.foo.id}"
+	set_identifier = "www-primary"
+	records = ["primary.notexample.com"]
+}
+
+resource "aws_route53_record" "www-secondary" {
+	zone_id = "${aws_route53_zone.main.zone_id}"
+	name = "www"
+	type = "CNAME"
+	ttl = "5"
+	failover = "SECONDARY"
+	set_identifier = "www-secondary"
+	records = ["secondary.notexample.com"]
+}
+`
+
 const testAccRoute53WeightedCNAMERecord = `
 resource "aws_route53_zone" "main" {
 	name = "notexample.com"
@@ -8,6 +8,7 @@ import (
 	"os"
 
 	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/mitchellh/go-homedir"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
@@ -95,7 +96,11 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro
 
 	if v, ok := d.GetOk("source"); ok {
 		source := v.(string)
-		file, err := os.Open(source)
+		path, err := homedir.Expand(source)
+		if err != nil {
+			return fmt.Errorf("Error expanding homedir in source (%s): %s", source, err)
+		}
+		file, err := os.Open(path)
 		if err != nil {
 			return fmt.Errorf("Error opening S3 bucket object source (%s): %s", source, err)
 		}
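
The homedir.Expand call added above makes `source = "~/..."` paths work when putting a bucket object. A self-contained sketch of the same expand-then-open sequence (the file name here is made up):

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/mitchellh/go-homedir"
    )

    func main() {
    	// Expand turns "~/object.txt" into "/home/user/object.txt"; absolute
    	// paths pass through unchanged, so it is safe to apply unconditionally
    	// before os.Open.
    	path, err := homedir.Expand("~/object.txt")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, "expanding homedir:", err)
    		os.Exit(1)
    	}

    	file, err := os.Open(path)
    	if err != nil {
    		fmt.Fprintln(os.Stderr, "opening source:", err)
    		os.Exit(1)
    	}
    	defer file.Close()
    }
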
@@ -430,7 +430,7 @@ func testAccCheckAWSS3BucketCors(n string, corsRules []*s3.CORSRule) resource.Te
 // within AWS
 var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
 var testAccWebsiteEndpoint = fmt.Sprintf("tf-test-bucket-%d.s3-website-us-west-2.amazonaws.com", randInt)
-var testAccAWSS3BucketPolicy = fmt.Sprintf(`{ "Version": "2008-10-17", "Statement": [ { "Sid": "", "Effect": "Allow", "Principal": { "AWS": "*" }, "Action": "s3:GetObject", "Resource": "arn:aws:s3:::tf-test-bucket-%d/*" } ] }`, randInt)
+var testAccAWSS3BucketPolicy = fmt.Sprintf(`{ "Version": "2012-10-17", "Statement": [ { "Sid": "", "Effect": "Allow", "Principal": { "AWS": "*" }, "Action": "s3:GetObject", "Resource": "arn:aws:s3:::tf-test-bucket-%d/*" } ] }`, randInt)
 
 var testAccAWSS3BucketConfig = fmt.Sprintf(`
 resource "aws_s3_bucket" "bucket" {
@@ -44,10 +44,13 @@ func resourceAwsSnsTopic() *schema.Resource {
 			"policy": &schema.Schema{
 				Type:     schema.TypeString,
 				Optional: true,
-				ForceNew: false,
 				Computed: true,
 				StateFunc: func(v interface{}) string {
-					jsonb := []byte(v.(string))
+					s, ok := v.(string)
+					if !ok || s == "" {
+						return ""
+					}
+					jsonb := []byte(s)
 					buffer := new(bytes.Buffer)
 					if err := json.Compact(buffer, jsonb); err != nil {
 						log.Printf("[WARN] Error compacting JSON for Policy in SNS Topic")
@@ -128,7 +128,7 @@ resource "aws_sns_topic" "test_topic" {
 	name = "example"
 	policy = <<EOF
 {
-  "Version": "2008-10-17",
+  "Version": "2012-10-17",
   "Id": "Policy1445931846145",
   "Statement": [
     {
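
The type-assertion guard added to the SNS `policy` StateFunc matters because the attribute is now `Computed`, so the state function can be handed an unset value. A sketch of the same normalization in isolation; unlike the StateFunc above, it falls back to the raw string instead of logging a warning:

    package main

    import (
    	"bytes"
    	"encoding/json"
    	"fmt"
    )

    // compactPolicy normalizes a JSON policy document so that
    // formatting-only differences do not show up as diffs in state.
    // An unset or non-string value maps to "".
    func compactPolicy(v interface{}) string {
    	s, ok := v.(string)
    	if !ok || s == "" {
    		return ""
    	}
    	buffer := new(bytes.Buffer)
    	if err := json.Compact(buffer, []byte(s)); err != nil {
    		return s // not valid JSON; keep the raw value
    	}
    	return buffer.String()
    }

    func main() {
    	fmt.Println(compactPolicy(`{ "Version":  "2012-10-17" }`)) // {"Version":"2012-10-17"}
    	fmt.Println(compactPolicy(nil) == "")                      // true
    }
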
@@ -136,7 +136,7 @@ resource "aws_vpc_endpoint" "second-private-s3" {
 	route_table_ids = ["${aws_route_table.default.id}"]
 	policy = <<POLICY
 {
-	"Version": "2008-10-17",
+	"Version": "2012-10-17",
 	"Statement": [
 		{
 			"Sid":"AllowAll",
@@ -176,7 +176,7 @@ resource "aws_vpc_endpoint" "second-private-s3" {
 	route_table_ids = ["${aws_route_table.default.id}"]
 	policy = <<POLICY
 {
-	"Version": "2008-10-17",
+	"Version": "2012-10-17",
 	"Statement": [
 		{
 			"Sid":"AllowAll",
@@ -44,7 +44,23 @@ func expandListeners(configured []interface{}) ([]*elb.Listener, error) {
 			l.SSLCertificateId = aws.String(v.(string))
 		}
 
-		listeners = append(listeners, l)
+		var valid bool
+		if l.SSLCertificateId != nil && *l.SSLCertificateId != "" {
+			// validate the protocol is correct
+			for _, p := range []string{"https", "ssl"} {
+				if (*l.InstanceProtocol == p) || (*l.Protocol == p) {
+					valid = true
+				}
+			}
+		} else {
+			valid = true
+		}
+
+		if valid {
+			listeners = append(listeners, l)
+		} else {
+			return nil, fmt.Errorf("[ERR] ELB Listener: ssl_certificate_id may be set only when protocol is 'https' or 'ssl'")
+		}
 	}
 
 	return listeners, nil
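
Restated as a standalone predicate, the rule the new validation enforces is: an SSL certificate may only be attached when one side of the listener speaks "https" or "ssl" (validListener is our name, not the diff's):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // validListener mirrors the check added to expandListeners above.
    func validListener(instanceProtocol, lbProtocol, sslCertificateID string) error {
    	if sslCertificateID == "" {
    		return nil // no certificate, nothing to validate
    	}
    	for _, p := range []string{"https", "ssl"} {
    		if instanceProtocol == p || lbProtocol == p {
    			return nil
    		}
    	}
    	return errors.New("ssl_certificate_id may be set only when protocol is 'https' or 'ssl'")
    }

    func main() {
    	fmt.Println(validListener("https", "https", "arn:aws:iam::123:server-certificate/x")) // <nil>
    	fmt.Println(validListener("http", "http", "arn:aws:iam::123:server-certificate/x"))   // error
    }
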
@@ -62,9 +78,13 @@ func expandEcsVolumes(configured []interface{}) ([]*ecs.Volume, error) {
 
 		l := &ecs.Volume{
 			Name: aws.String(data["name"].(string)),
-			Host: &ecs.HostVolumeProperties{
-				SourcePath: aws.String(data["host_path"].(string)),
-			},
+		}
+
+		hostPath := data["host_path"].(string)
+		if hostPath != "" {
+			l.Host = &ecs.HostVolumeProperties{
+				SourcePath: aws.String(hostPath),
+			}
 		}
 
 		volumes = append(volumes, l)
@@ -234,6 +254,30 @@ func expandElastiCacheParameters(configured []interface{}) ([]*elasticache.Param
 	return parameters, nil
 }
 
+// Flattens an access log into something that flatmap.Flatten() can handle
+func flattenAccessLog(l *elb.AccessLog) []map[string]interface{} {
+	result := make([]map[string]interface{}, 0, 1)
+
+	if l != nil && *l.Enabled {
+		r := make(map[string]interface{})
+		if l.S3BucketName != nil {
+			r["bucket"] = *l.S3BucketName
+		}
+
+		if l.S3BucketPrefix != nil {
+			r["bucket_prefix"] = *l.S3BucketPrefix
+		}
+
+		if l.EmitInterval != nil {
+			r["interval"] = *l.EmitInterval
+		}
+
+		result = append(result, r)
+	}
+
+	return result
+}
+
 // Flattens a health check into something that flatmap.Flatten()
 // can handle
 func flattenHealthCheck(check *elb.HealthCheck) []map[string]interface{} {
@@ -314,9 +358,13 @@ func flattenEcsVolumes(list []*ecs.Volume) []map[string]interface{} {
 	result := make([]map[string]interface{}, 0, len(list))
 	for _, volume := range list {
 		l := map[string]interface{}{
 			"name": *volume.Name,
-			"host_path": *volume.Host.SourcePath,
 		}
+
+		if volume.Host.SourcePath != nil {
+			l["host_path"] = *volume.Host.SourcePath
+		}
+
 		result = append(result, l)
 	}
 	return result
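
flattenAccessLog and the reworked flattenEcsVolumes share one pattern: dereference AWS SDK pointer fields only after a nil check, and emit only the map keys that were actually set. A compact sketch of that pattern with a stand-in struct (AccessLog here is a local type, not elb.AccessLog, and we add one extra nil check on Enabled for safety):

    package main

    import "fmt"

    // AccessLog stands in for elb.AccessLog: every field is a pointer
    // because the AWS API may omit it.
    type AccessLog struct {
    	Enabled        *bool
    	S3BucketName   *string
    	S3BucketPrefix *string
    	EmitInterval   *int64
    }

    // flatten emits at most one map, setting only the keys whose source
    // pointers are non-nil, so a later dereference cannot panic.
    func flatten(l *AccessLog) []map[string]interface{} {
    	result := make([]map[string]interface{}, 0, 1)
    	if l != nil && l.Enabled != nil && *l.Enabled {
    		r := make(map[string]interface{})
    		if l.S3BucketName != nil {
    			r["bucket"] = *l.S3BucketName
    		}
    		if l.S3BucketPrefix != nil {
    			r["bucket_prefix"] = *l.S3BucketPrefix
    		}
    		if l.EmitInterval != nil {
    			r["interval"] = *l.EmitInterval
    		}
    		result = append(result, r)
    	}
    	return result
    }

    func main() {
    	enabled, bucket := true, "logs-bucket"
    	fmt.Println(flatten(&AccessLog{Enabled: &enabled, S3BucketName: &bucket}))
    	fmt.Println(flatten(nil)) // []
    }
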
@@ -2,6 +2,7 @@ package aws
 
 import (
 	"reflect"
+	"strings"
 	"testing"
 
 	"github.com/aws/aws-sdk-go/aws"
@@ -37,7 +38,7 @@ func testConf() map[string]string {
 	}
 }
 
-func TestexpandIPPerms(t *testing.T) {
+func TestExpandIPPerms(t *testing.T) {
 	hash := schema.HashString
 
 	expanded := []interface{}{
@@ -287,7 +288,7 @@ func TestExpandIPPerms_nonVPC(t *testing.T) {
 	}
 }
 
-func TestexpandListeners(t *testing.T) {
+func TestExpandListeners(t *testing.T) {
 	expanded := []interface{}{
 		map[string]interface{}{
 			"instance_port": 8000,
@@ -295,6 +296,13 @@ func TestexpandListeners(t *testing.T) {
 			"instance_protocol": "http",
 			"lb_protocol":       "http",
 		},
+		map[string]interface{}{
+			"instance_port":      8000,
+			"lb_port":            80,
+			"instance_protocol":  "https",
+			"lb_protocol":        "https",
+			"ssl_certificate_id": "something",
+		},
 	}
 	listeners, err := expandListeners(expanded)
 	if err != nil {
@@ -314,10 +322,34 @@ func TestexpandListeners(t *testing.T) {
 		listeners[0],
 		expected)
 	}
 
 }
 
-func TestflattenHealthCheck(t *testing.T) {
+// this test should produce an error from expandlisteners on an invalid
+// combination
+func TestExpandListeners_invalid(t *testing.T) {
+	expanded := []interface{}{
+		map[string]interface{}{
+			"instance_port":      8000,
+			"lb_port":            80,
+			"instance_protocol":  "http",
+			"lb_protocol":        "http",
+			"ssl_certificate_id": "something",
+		},
+	}
+	_, err := expandListeners(expanded)
+	if err != nil {
+		// Check the error we got
+		if !strings.Contains(err.Error(), "ssl_certificate_id may be set only when protocol") {
+			t.Fatalf("Got error in TestExpandListeners_invalid, but not what we expected: %s", err)
+		}
+	}
+
+	if err == nil {
+		t.Fatalf("Expected TestExpandListeners_invalid to fail, but passed")
+	}
+}
+
+func TestFlattenHealthCheck(t *testing.T) {
 	cases := []struct {
 		Input  *elb.HealthCheck
 		Output []map[string]interface{}
@@ -367,7 +399,7 @@ func TestExpandStringList(t *testing.T) {
 
 }
 
-func TestexpandParameters(t *testing.T) {
+func TestExpandParameters(t *testing.T) {
 	expanded := []interface{}{
 		map[string]interface{}{
 			"name": "character_set_client",
@@ -394,11 +426,11 @@ func TestexpandParameters(t *testing.T) {
 	}
 }
 
-func TestexpandElasticacheParameters(t *testing.T) {
+func TestExpandElasticacheParameters(t *testing.T) {
 	expanded := []interface{}{
 		map[string]interface{}{
-			"name":         "character_set_client",
-			"value":        "utf8",
+			"name":         "activerehashing",
+			"value":        "yes",
 			"apply_method": "immediate",
 		},
 	}
@@ -407,7 +439,7 @@ func TestexpandElasticacheParameters(t *testing.T) {
 		t.Fatalf("bad: %#v", err)
 	}
 
-	expected := &elasticache.Parameter{
+	expected := &elasticache.ParameterNameValue{
 		ParameterName:  aws.String("activerehashing"),
 		ParameterValue: aws.String("yes"),
 	}
@@ -420,7 +452,7 @@ func TestexpandElasticacheParameters(t *testing.T) {
 	}
 }
 
-func TestflattenParameters(t *testing.T) {
+func TestFlattenParameters(t *testing.T) {
 	cases := []struct {
 		Input  []*rds.Parameter
 		Output []map[string]interface{}
@@ -449,7 +481,7 @@ func TestflattenParameters(t *testing.T) {
 	}
 }
 
-func TestflattenElasticacheParameters(t *testing.T) {
+func TestFlattenElasticacheParameters(t *testing.T) {
 	cases := []struct {
 		Input  []*elasticache.Parameter
 		Output []map[string]interface{}
@@ -478,7 +510,7 @@ func TestflattenElasticacheParameters(t *testing.T) {
 	}
 }
 
-func TestexpandInstanceString(t *testing.T) {
+func TestExpandInstanceString(t *testing.T) {
 
 	expected := []*elb.Instance{
 		&elb.Instance{InstanceId: aws.String("test-one")},
@@ -497,7 +529,7 @@ func TestexpandInstanceString(t *testing.T) {
 	}
 }
 
-func TestflattenNetworkInterfacesPrivateIPAddresses(t *testing.T) {
+func TestFlattenNetworkInterfacesPrivateIPAddresses(t *testing.T) {
 	expanded := []*ec2.NetworkInterfacePrivateIpAddress{
 		&ec2.NetworkInterfacePrivateIpAddress{PrivateIpAddress: aws.String("192.168.0.1")},
 		&ec2.NetworkInterfacePrivateIpAddress{PrivateIpAddress: aws.String("192.168.0.2")},
@@ -522,7 +554,7 @@ func TestflattenNetworkInterfacesPrivateIPAddresses(t *testing.T) {
 	}
 }
 
-func TestflattenGroupIdentifiers(t *testing.T) {
+func TestFlattenGroupIdentifiers(t *testing.T) {
 	expanded := []*ec2.GroupIdentifier{
 		&ec2.GroupIdentifier{GroupId: aws.String("sg-001")},
 		&ec2.GroupIdentifier{GroupId: aws.String("sg-002")},
@@ -543,7 +575,7 @@ func TestflattenGroupIdentifiers(t *testing.T) {
 	}
 }
 
-func TestexpandPrivateIPAddresses(t *testing.T) {
+func TestExpandPrivateIPAddresses(t *testing.T) {
 
 	ip1 := "192.168.0.1"
 	ip2 := "192.168.0.2"
@@ -567,7 +599,7 @@ func TestexpandPrivateIPAddresses(t *testing.T) {
 	}
 }
 
-func TestflattenAttachment(t *testing.T) {
+func TestFlattenAttachment(t *testing.T) {
 	expanded := &ec2.NetworkInterfaceAttachment{
 		InstanceId:  aws.String("i-00001"),
 		DeviceIndex: aws.Int64(int64(1)),
@@ -3,12 +3,10 @@ package azure
 import (
 	"encoding/xml"
 	"fmt"
-	"io/ioutil"
-	"os"
 
+	"github.com/hashicorp/terraform/helper/pathorcontents"
 	"github.com/hashicorp/terraform/helper/schema"
 	"github.com/hashicorp/terraform/terraform"
-	"github.com/mitchellh/go-homedir"
 )
 
 // Provider returns a terraform.ResourceProvider.
@@ -20,6 +18,14 @@ func Provider() terraform.ResourceProvider {
 				Optional:     true,
 				DefaultFunc:  schema.EnvDefaultFunc("AZURE_SETTINGS_FILE", nil),
 				ValidateFunc: validateSettingsFile,
+				Deprecated:   "Use the publish_settings field instead",
+			},
+
+			"publish_settings": &schema.Schema{
+				Type:         schema.TypeString,
+				Optional:     true,
+				DefaultFunc:  schema.EnvDefaultFunc("AZURE_PUBLISH_SETTINGS", nil),
+				ValidateFunc: validatePublishSettings,
 			},
 
 			"subscription_id": &schema.Schema{
@@ -64,11 +70,14 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
 		Certificate: []byte(d.Get("certificate").(string)),
 	}
 
-	settingsFile := d.Get("settings_file").(string)
-	if settingsFile != "" {
+	publishSettings := d.Get("publish_settings").(string)
+	if publishSettings == "" {
+		publishSettings = d.Get("settings_file").(string)
+	}
+	if publishSettings != "" {
 		// any errors from readSettings would have been caught at the validate
 		// step, so we can avoid handling them now
-		settings, _, _ := readSettings(settingsFile)
+		settings, _, _ := readSettings(publishSettings)
 		config.Settings = settings
 		return config.NewClientFromSettingsData()
 	}
@@ -92,37 +101,42 @@ func validateSettingsFile(v interface{}, k string) ([]string, []error) {
 	return warnings, errors
 }
 
-const settingsPathWarnMsg = `
-settings_file is not valid XML, so we are assuming it is a file path. This
-support will be removed in the future. Please update your configuration to use
-${file("filename.publishsettings")} instead.`
+func validatePublishSettings(v interface{}, k string) (ws []string, es []error) {
+	value := v.(string)
+	if value == "" {
+		return
+	}
 
-func readSettings(pathOrContents string) (s []byte, ws []string, es []error) {
 	var settings settingsData
-	if err := xml.Unmarshal([]byte(pathOrContents), &settings); err == nil {
-		s = []byte(pathOrContents)
-		return
+	if err := xml.Unmarshal([]byte(value), &settings); err != nil {
+		es = append(es, fmt.Errorf("error parsing publish_settings as XML: %s", err))
 	}
 
-	ws = append(ws, settingsPathWarnMsg)
-	path, err := homedir.Expand(pathOrContents)
-	if err != nil {
-		es = append(es, fmt.Errorf("Error expanding path: %s", err))
-		return
-	}
-
-	s, err = ioutil.ReadFile(path)
-	if err != nil {
-		es = append(es, fmt.Errorf("Could not read file '%s': %s", path, err))
-	}
 	return
 }
 
-func isFile(v string) (bool, error) {
-	if _, err := os.Stat(v); err != nil {
-		return false, err
+const settingsPathWarnMsg = `
+settings_file was provided as a file path. This support
+will be removed in the future. Please update your configuration
+to use ${file("filename.publishsettings")} instead.`
+
+func readSettings(pathOrContents string) (s []byte, ws []string, es []error) {
+	contents, wasPath, err := pathorcontents.Read(pathOrContents)
+	if err != nil {
+		es = append(es, fmt.Errorf("error reading settings_file: %s", err))
 	}
-	return true, nil
+	if wasPath {
+		ws = append(ws, settingsPathWarnMsg)
+	}
+
+	var settings settingsData
+	if err := xml.Unmarshal([]byte(contents), &settings); err != nil {
+		es = append(es, fmt.Errorf("error parsing settings_file as XML: %s", err))
+	}
+
+	s = []byte(contents)
+
+	return
 }
 
 // settingsData is a private struct used to test the unmarshalling of the
@@ -51,12 +51,12 @@ func TestProvider_impl(t *testing.T) {
 }
 
 func testAccPreCheck(t *testing.T) {
-	if v := os.Getenv("AZURE_SETTINGS_FILE"); v == "" {
+	if v := os.Getenv("AZURE_PUBLISH_SETTINGS"); v == "" {
 		subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID")
 		certificate := os.Getenv("AZURE_CERTIFICATE")
 
 		if subscriptionID == "" || certificate == "" {
-			t.Fatal("either AZURE_SETTINGS_FILE, or AZURE_SUBSCRIPTION_ID " +
+			t.Fatal("either AZURE_PUBLISH_SETTINGS, or AZURE_SUBSCRIPTION_ID " +
 				"and AZURE_CERTIFICATE must be set for acceptance tests")
 		}
 	}
@@ -78,6 +78,11 @@ func TestAzure_validateSettingsFile(t *testing.T) {
 		t.Fatalf("Error creating temporary file with XML in TestAzure_validateSettingsFile: %s", err)
 	}
 	defer os.Remove(fx.Name())
+	_, err = io.WriteString(fx, "<PublishData></PublishData>")
+	if err != nil {
+		t.Fatalf("Error writing XML File: %s", err)
+	}
+	fx.Close()
 
 	home, err := homedir.Dir()
 	if err != nil {
@@ -88,12 +93,11 @@ func TestAzure_validateSettingsFile(t *testing.T) {
 		t.Fatalf("Error creating homedir-based temporary file: %s", err)
 	}
 	defer os.Remove(fh.Name())
-
-	_, err = io.WriteString(fx, "<PublishData></PublishData>")
+	_, err = io.WriteString(fh, "<PublishData></PublishData>")
 	if err != nil {
 		t.Fatalf("Error writing XML File: %s", err)
 	}
-	fx.Close()
+	fh.Close()
 
 	r := strings.NewReplacer(home, "~")
 	homePath := r.Replace(fh.Name())
@@ -103,8 +107,8 @@ func TestAzure_validateSettingsFile(t *testing.T) {
 		W     int // expected count of warnings
 		E     int // expected count of errors
 	}{
-		{"test", 1, 1},
-		{f.Name(), 1, 0},
+		{"test", 0, 1},
+		{f.Name(), 1, 1},
 		{fx.Name(), 1, 0},
 		{homePath, 1, 0},
 		{"<PublishData></PublishData>", 0, 0},
@@ -114,10 +118,10 @@ func TestAzure_validateSettingsFile(t *testing.T) {
 		w, e := validateSettingsFile(tc.Input, "")
 
 		if len(w) != tc.W {
-			t.Errorf("Error in TestAzureValidateSettingsFile: input: %s , warnings: %#v, errors: %#v", tc.Input, w, e)
+			t.Errorf("Error in TestAzureValidateSettingsFile: input: %s , warnings: %v, errors: %v", tc.Input, w, e)
		}
 		if len(e) != tc.E {
-			t.Errorf("Error in TestAzureValidateSettingsFile: input: %s , warnings: %#v, errors: %#v", tc.Input, w, e)
+			t.Errorf("Error in TestAzureValidateSettingsFile: input: %s , warnings: %v, errors: %v", tc.Input, w, e)
 		}
 	}
 }
@@ -164,33 +168,8 @@ func TestAzure_providerConfigure(t *testing.T) {
 		err = rp.Configure(terraform.NewResourceConfig(rawConfig))
 		meta := rp.(*schema.Provider).Meta()
 		if (meta == nil) != tc.NilMeta {
-			t.Fatalf("expected NilMeta: %t, got meta: %#v", tc.NilMeta, meta)
+			t.Fatalf("expected NilMeta: %t, got meta: %#v, settings_file: %q",
+				tc.NilMeta, meta, tc.SettingsFile)
 		}
 	}
 }
-
-func TestAzure_isFile(t *testing.T) {
-	f, err := ioutil.TempFile("", "tf-test-file")
-	if err != nil {
-		t.Fatalf("Error creating temporary file with XML in TestAzure_isFile: %s", err)
-	}
-	cases := []struct {
-		Input string // String path to file
-		B     bool   // expected true/false
-		E     bool   // expect error
-	}{
-		{"test", false, true},
-		{f.Name(), true, false},
-	}
-
-	for _, tc := range cases {
-		x, y := isFile(tc.Input)
-		if tc.B != x {
-			t.Errorf("Error in TestAzure_isFile: input: %s , returned: %#v, expected: %#v", tc.Input, x, tc.B)
-		}
-
-		if tc.E != (y != nil) {
-			t.Errorf("Error in TestAzure_isFile: input: %s , returned: %#v, expected: %#v", tc.Input, y, tc.E)
-		}
-	}
-}
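
pathorcontents.Read is the helper doing the work in the rewritten readSettings: it accepts either a file path (including "~/" paths) or literal contents, and reports which interpretation it used — that boolean is what lets validateSettingsFile warn only when a path was given. A minimal usage sketch (the settings path is made up):

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/terraform/helper/pathorcontents"
    )

    func main() {
    	// contents holds the data either way; wasPath reports whether the
    	// argument was treated as a file path rather than literal contents.
    	contents, wasPath, err := pathorcontents.Read("~/credentials.publishsettings")
    	if err != nil {
    		fmt.Println("read failed:", err)
    		return
    	}
    	fmt.Println(wasPath, len(contents))
    }
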
builtin/providers/dyn/config.go (new file)
@@ -0,0 +1,28 @@
+package dyn
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/nesv/go-dynect/dynect"
+)
+
+type Config struct {
+	CustomerName string
+	Username     string
+	Password     string
+}
+
+// Client() returns a new client for accessing dyn.
+func (c *Config) Client() (*dynect.ConvenientClient, error) {
+	client := dynect.NewConvenientClient(c.CustomerName)
+	err := client.Login(c.Username, c.Password)
+
+	if err != nil {
+		return nil, fmt.Errorf("Error setting up Dyn client: %s", err)
+	}
+
+	log.Printf("[INFO] Dyn client configured for customer: %s, user: %s", c.CustomerName, c.Username)
+
+	return client, nil
+}
builtin/providers/dyn/provider.go (new file)
@@ -0,0 +1,50 @@
+package dyn
+
+import (
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+// Provider returns a terraform.ResourceProvider.
+func Provider() terraform.ResourceProvider {
+	return &schema.Provider{
+		Schema: map[string]*schema.Schema{
+			"customer_name": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				DefaultFunc: schema.EnvDefaultFunc("DYN_CUSTOMER_NAME", nil),
+				Description: "A Dyn customer name.",
+			},
+
+			"username": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				DefaultFunc: schema.EnvDefaultFunc("DYN_USERNAME", nil),
+				Description: "A Dyn username.",
+			},
+
+			"password": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				DefaultFunc: schema.EnvDefaultFunc("DYN_PASSWORD", nil),
+				Description: "The Dyn password.",
+			},
+		},
+
+		ResourcesMap: map[string]*schema.Resource{
+			"dyn_record": resourceDynRecord(),
+		},
+
+		ConfigureFunc: providerConfigure,
+	}
+}
+
+func providerConfigure(d *schema.ResourceData) (interface{}, error) {
+	config := Config{
+		CustomerName: d.Get("customer_name").(string),
+		Username:     d.Get("username").(string),
+		Password:     d.Get("password").(string),
+	}
+
+	return config.Client()
+}
builtin/providers/dyn/provider_test.go (new file)
@@ -0,0 +1,47 @@
+package dyn
+
+import (
+	"os"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+var testAccProviders map[string]terraform.ResourceProvider
+var testAccProvider *schema.Provider
+
+func init() {
+	testAccProvider = Provider().(*schema.Provider)
+	testAccProviders = map[string]terraform.ResourceProvider{
+		"dyn": testAccProvider,
+	}
+}
+
+func TestProvider(t *testing.T) {
+	if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
+
+func TestProvider_impl(t *testing.T) {
+	var _ terraform.ResourceProvider = Provider()
+}
+
+func testAccPreCheck(t *testing.T) {
+	if v := os.Getenv("DYN_CUSTOMER_NAME"); v == "" {
+		t.Fatal("DYN_CUSTOMER_NAME must be set for acceptance tests")
+	}
+
+	if v := os.Getenv("DYN_USERNAME"); v == "" {
+		t.Fatal("DYN_USERNAME must be set for acceptance tests")
+	}
+
+	if v := os.Getenv("DYN_PASSWORD"); v == "" {
+		t.Fatal("DYN_PASSWORD must be set for acceptance tests.")
+	}
+
+	if v := os.Getenv("DYN_ZONE"); v == "" {
+		t.Fatal("DYN_ZONE must be set for acceptance tests. The domain is used to create and destroy record against.")
+	}
+}
builtin/providers/dyn/resource_dyn_record.go (new file)
@@ -0,0 +1,198 @@
+package dyn
+
+import (
+	"fmt"
+	"log"
+	"sync"
+
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/nesv/go-dynect/dynect"
+)
+
+var mutex = &sync.Mutex{}
+
+func resourceDynRecord() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceDynRecordCreate,
+		Read:   resourceDynRecordRead,
+		Update: resourceDynRecordUpdate,
+		Delete: resourceDynRecordDelete,
+
+		Schema: map[string]*schema.Schema{
+			"zone": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"fqdn": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"type": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"value": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"ttl": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Default:  "0", // 0 means use zone default
+			},
+		},
+	}
+}
+
+func resourceDynRecordCreate(d *schema.ResourceData, meta interface{}) error {
+	mutex.Lock()
+
+	client := meta.(*dynect.ConvenientClient)
+
+	record := &dynect.Record{
+		Name:  d.Get("name").(string),
+		Zone:  d.Get("zone").(string),
+		Type:  d.Get("type").(string),
+		TTL:   d.Get("ttl").(string),
+		Value: d.Get("value").(string),
+	}
+	log.Printf("[DEBUG] Dyn record create configuration: %#v", record)
+
+	// create the record
+	err := client.CreateRecord(record)
+	if err != nil {
+		mutex.Unlock()
+		return fmt.Errorf("Failed to create Dyn record: %s", err)
+	}
+
+	// publish the zone
+	err = client.PublishZone(record.Zone)
+	if err != nil {
+		mutex.Unlock()
+		return fmt.Errorf("Failed to publish Dyn zone: %s", err)
+	}
+
+	// get the record ID
+	err = client.GetRecordID(record)
+	if err != nil {
+		mutex.Unlock()
+		return fmt.Errorf("%s", err)
+	}
+	d.SetId(record.ID)
+
+	mutex.Unlock()
+	return resourceDynRecordRead(d, meta)
+}
+
+func resourceDynRecordRead(d *schema.ResourceData, meta interface{}) error {
+	mutex.Lock()
+	defer mutex.Unlock()
+
+	client := meta.(*dynect.ConvenientClient)
+
+	record := &dynect.Record{
+		ID:   d.Id(),
+		Name: d.Get("name").(string),
+		Zone: d.Get("zone").(string),
+		TTL:  d.Get("ttl").(string),
+		FQDN: d.Get("fqdn").(string),
+		Type: d.Get("type").(string),
+	}
+
+	err := client.GetRecord(record)
+	if err != nil {
+		return fmt.Errorf("Couldn't find Dyn record: %s", err)
+	}
+
+	d.Set("zone", record.Zone)
+	d.Set("fqdn", record.FQDN)
+	d.Set("name", record.Name)
+	d.Set("type", record.Type)
+	d.Set("ttl", record.TTL)
+	d.Set("value", record.Value)
+
+	return nil
+}
+
+func resourceDynRecordUpdate(d *schema.ResourceData, meta interface{}) error {
+	mutex.Lock()
+
+	client := meta.(*dynect.ConvenientClient)
+
+	record := &dynect.Record{
+		Name:  d.Get("name").(string),
+		Zone:  d.Get("zone").(string),
+		TTL:   d.Get("ttl").(string),
+		Type:  d.Get("type").(string),
+		Value: d.Get("value").(string),
+	}
+	log.Printf("[DEBUG] Dyn record update configuration: %#v", record)
+
+	// update the record
+	err := client.UpdateRecord(record)
+	if err != nil {
+		mutex.Unlock()
+		return fmt.Errorf("Failed to update Dyn record: %s", err)
+	}
+
+	// publish the zone
+	err = client.PublishZone(record.Zone)
+	if err != nil {
+		mutex.Unlock()
+		return fmt.Errorf("Failed to publish Dyn zone: %s", err)
+	}
+
+	// get the record ID
+	err = client.GetRecordID(record)
+	if err != nil {
+		mutex.Unlock()
+		return fmt.Errorf("%s", err)
+	}
+	d.SetId(record.ID)
+
+	mutex.Unlock()
+	return resourceDynRecordRead(d, meta)
+}
+
+func resourceDynRecordDelete(d *schema.ResourceData, meta interface{}) error {
+	mutex.Lock()
+	defer mutex.Unlock()
+
+	client := meta.(*dynect.ConvenientClient)
+
+	record := &dynect.Record{
+		ID:   d.Id(),
+		Name: d.Get("name").(string),
+		Zone: d.Get("zone").(string),
+		FQDN: d.Get("fqdn").(string),
+		Type: d.Get("type").(string),
+	}
+
+	log.Printf("[INFO] Deleting Dyn record: %s, %s", record.FQDN, record.ID)
+
+	// delete the record
+	err := client.DeleteRecord(record)
+	if err != nil {
+		return fmt.Errorf("Failed to delete Dyn record: %s", err)
+	}
+
+	// publish the zone
+	err = client.PublishZone(record.Zone)
+	if err != nil {
+		return fmt.Errorf("Failed to publish Dyn zone: %s", err)
+	}
+
+	return nil
+}
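
Every CRUD function above serializes its API calls through the package-level mutex, presumably because a shared dynect session and the publish-zone workflow are not safe to run concurrently; Create and Update unlock manually before tail-calling Read, which takes the lock again. A sketch of the same discipline wrapped in a helper (withLock is ours, not part of the provider):

    package main

    import "sync"

    var mutex = &sync.Mutex{}

    // withLock runs f while holding the package mutex, releasing it on any
    // return path via defer instead of the manual Unlock calls used above.
    func withLock(f func() error) error {
    	mutex.Lock()
    	defer mutex.Unlock()
    	return f()
    }

    func main() {
    	_ = withLock(func() error {
    		// ... talk to the API while holding the lock ...
    		return nil
    	})
    }
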
builtin/providers/dyn/resource_dyn_record_test.go (new file)
@@ -0,0 +1,239 @@
+package dyn
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+	"github.com/nesv/go-dynect/dynect"
+)
+
+func TestAccDynRecord_Basic(t *testing.T) {
+	var record dynect.Record
+	zone := os.Getenv("DYN_ZONE")
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckDynRecordDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: fmt.Sprintf(testAccCheckDynRecordConfig_basic, zone),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckDynRecordExists("dyn_record.foobar", &record),
+					testAccCheckDynRecordAttributes(&record),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar", "name", "terraform"),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar", "zone", zone),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar", "value", "192.168.0.10"),
+				),
+			},
+		},
+	})
+}
+
+func TestAccDynRecord_Updated(t *testing.T) {
+	var record dynect.Record
+	zone := os.Getenv("DYN_ZONE")
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckDynRecordDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: fmt.Sprintf(testAccCheckDynRecordConfig_basic, zone),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckDynRecordExists("dyn_record.foobar", &record),
+					testAccCheckDynRecordAttributes(&record),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar", "name", "terraform"),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar", "zone", zone),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar", "value", "192.168.0.10"),
+				),
+			},
+			resource.TestStep{
+				Config: fmt.Sprintf(testAccCheckDynRecordConfig_new_value, zone),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckDynRecordExists("dyn_record.foobar", &record),
+					testAccCheckDynRecordAttributesUpdated(&record),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar", "name", "terraform"),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar", "zone", zone),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar", "value", "192.168.0.11"),
+				),
+			},
+		},
+	})
+}
+
+func TestAccDynRecord_Multiple(t *testing.T) {
+	var record dynect.Record
+	zone := os.Getenv("DYN_ZONE")
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckDynRecordDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: fmt.Sprintf(testAccCheckDynRecordConfig_multiple, zone, zone, zone),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckDynRecordExists("dyn_record.foobar1", &record),
+					testAccCheckDynRecordAttributes(&record),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar1", "name", "terraform1"),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar1", "zone", zone),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar1", "value", "192.168.0.10"),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar2", "name", "terraform2"),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar2", "zone", zone),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar2", "value", "192.168.1.10"),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar3", "name", "terraform3"),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar3", "zone", zone),
+					resource.TestCheckResourceAttr(
+						"dyn_record.foobar3", "value", "192.168.2.10"),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckDynRecordDestroy(s *terraform.State) error {
+	client := testAccProvider.Meta().(*dynect.ConvenientClient)
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "dyn_record" {
+			continue
+		}
+
+		foundRecord := &dynect.Record{
+			Zone: rs.Primary.Attributes["zone"],
+			ID:   rs.Primary.ID,
+			FQDN: rs.Primary.Attributes["fqdn"],
+			Type: rs.Primary.Attributes["type"],
+		}
+
+		err := client.GetRecord(foundRecord)
+
+		if err != nil {
+			return fmt.Errorf("Record still exists")
+		}
+	}
+
+	return nil
+}
+
+func testAccCheckDynRecordAttributes(record *dynect.Record) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+
+		if record.Value != "192.168.0.10" {
+			return fmt.Errorf("Bad value: %s", record.Value)
+		}
+
+		return nil
+	}
+}
+
+func testAccCheckDynRecordAttributesUpdated(record *dynect.Record) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+
+		if record.Value != "192.168.0.11" {
+			return fmt.Errorf("Bad value: %s", record.Value)
+		}
+
+		return nil
+	}
+}
+
+func testAccCheckDynRecordExists(n string, record *dynect.Record) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No Record ID is set")
+		}
+
+		client := testAccProvider.Meta().(*dynect.ConvenientClient)
+
+		foundRecord := &dynect.Record{
+			Zone: rs.Primary.Attributes["zone"],
+			ID:   rs.Primary.ID,
+			FQDN: rs.Primary.Attributes["fqdn"],
+			Type: rs.Primary.Attributes["type"],
+		}
+
+		err := client.GetRecord(foundRecord)
+
+		if err != nil {
+			return err
+		}
+
+		if foundRecord.ID != rs.Primary.ID {
+			return fmt.Errorf("Record not found")
+		}
+
+		*record = *foundRecord
+
+		return nil
+	}
+}
+
+const testAccCheckDynRecordConfig_basic = `
+resource "dyn_record" "foobar" {
+	zone = "%s"
+	name = "terraform"
+	value = "192.168.0.10"
+	type = "A"
+	ttl = 3600
+}`
+
+const testAccCheckDynRecordConfig_new_value = `
+resource "dyn_record" "foobar" {
+	zone = "%s"
+	name = "terraform"
+	value = "192.168.0.11"
+	type = "A"
+	ttl = 3600
+}`
+
+const testAccCheckDynRecordConfig_multiple = `
+resource "dyn_record" "foobar1" {
+	zone = "%s"
+	name = "terraform1"
+	value = "192.168.0.10"
+	type = "A"
+	ttl = 3600
+}
+resource "dyn_record" "foobar2" {
+	zone = "%s"
+	name = "terraform2"
+	value = "192.168.1.10"
+	type = "A"
+	ttl = 3600
+}
+resource "dyn_record" "foobar3" {
+	zone = "%s"
+	name = "terraform3"
+	value = "192.168.2.10"
+	type = "A"
+	ttl = 3600
+}`
@@ -3,13 +3,12 @@ package google
 import (
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"log"
 	"net/http"
-	"os"
 	"runtime"
 	"strings"
 
+	"github.com/hashicorp/terraform/helper/pathorcontents"
 	"github.com/hashicorp/terraform/terraform"
 	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/google"
@@ -24,7 +23,7 @@ import (
 // Config is the configuration structure used to instantiate the Google
 // provider.
 type Config struct {
-	AccountFile string
+	Credentials string
 	Project     string
 	Region      string
 
@@ -44,46 +43,17 @@ func (c *Config) loadAndValidate() error {
 		"https://www.googleapis.com/auth/devstorage.full_control",
 	}
 
-	if c.AccountFile == "" {
-		c.AccountFile = os.Getenv("GOOGLE_ACCOUNT_FILE")
-	}
-	if c.Project == "" {
-		c.Project = os.Getenv("GOOGLE_PROJECT")
-	}
-	if c.Region == "" {
-		c.Region = os.Getenv("GOOGLE_REGION")
-	}
-
 	var client *http.Client
 
-	if c.AccountFile != "" {
-		contents := c.AccountFile
+	if c.Credentials != "" {
+		contents, _, err := pathorcontents.Read(c.Credentials)
+		if err != nil {
+			return fmt.Errorf("Error loading credentials: %s", err)
+		}
+
 		// Assume account_file is a JSON string
 		if err := parseJSON(&account, contents); err != nil {
-			// If account_file was not JSON, assume it is a file path instead
-			if _, err := os.Stat(c.AccountFile); os.IsNotExist(err) {
-				return fmt.Errorf(
-					"account_file path does not exist: %s",
-					c.AccountFile)
-			}
-
-			b, err := ioutil.ReadFile(c.AccountFile)
-			if err != nil {
-				return fmt.Errorf(
-					"Error reading account_file from path '%s': %s",
-					c.AccountFile,
-					err)
-			}
-
-			contents = string(b)
-
-			if err := parseJSON(&account, contents); err != nil {
-				return fmt.Errorf(
-					"Error parsing account file '%s': %s",
-					contents,
-					err)
-			}
+			return fmt.Errorf("Error parsing credentials '%s': %s", contents, err)
 		}
 	}
 
 	// Get the token for use in our requests
@@ -5,11 +5,11 @@ import (
 	"testing"
 )
 
-const testFakeAccountFilePath = "./test-fixtures/fake_account.json"
+const testFakeCredentialsPath = "./test-fixtures/fake_account.json"
 
 func TestConfigLoadAndValidate_accountFilePath(t *testing.T) {
 	config := Config{
-		AccountFile: testFakeAccountFilePath,
+		Credentials: testFakeCredentialsPath,
 		Project:     "my-gce-project",
 		Region:      "us-central1",
 	}
@@ -21,12 +21,12 @@ func TestConfigLoadAndValidate_accountFilePath(t *testing.T) {
 }
 
 func TestConfigLoadAndValidate_accountFileJSON(t *testing.T) {
-	contents, err := ioutil.ReadFile(testFakeAccountFilePath)
+	contents, err := ioutil.ReadFile(testFakeCredentialsPath)
 	if err != nil {
 		t.Fatalf("error: %v", err)
 	}
 	config := Config{
-		AccountFile: string(contents),
+		Credentials: string(contents),
 		Project:     "my-gce-project",
 		Region:      "us-central1",
 	}
@@ -39,7 +39,7 @@ func TestConfigLoadAndValidate_accountFileJSON(t *testing.T) {
 
 func TestConfigLoadAndValidate_accountFileJSONInvalid(t *testing.T) {
 	config := Config{
-		AccountFile: "{this is not json}",
+		Credentials: "{this is not json}",
 		Project:     "my-gce-project",
 		Region:      "us-central1",
 	}
@@ -3,8 +3,8 @@ package google
 import (
     "encoding/json"
     "fmt"
-    "os"
 
+    "github.com/hashicorp/terraform/helper/pathorcontents"
     "github.com/hashicorp/terraform/helper/schema"
     "github.com/hashicorp/terraform/terraform"
 )
@@ -18,6 +18,14 @@ func Provider() terraform.ResourceProvider {
                 Optional:     true,
                 DefaultFunc:  schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil),
                 ValidateFunc: validateAccountFile,
+                Deprecated:   "Use the credentials field instead",
             },
 
+            "credentials": &schema.Schema{
+                Type:         schema.TypeString,
+                Optional:     true,
+                DefaultFunc:  schema.EnvDefaultFunc("GOOGLE_CREDENTIALS", nil),
+                ValidateFunc: validateCredentials,
+            },
+
             "project": &schema.Schema{
@@ -43,6 +51,7 @@ func Provider() terraform.ResourceProvider {
             "google_compute_global_address":         resourceComputeGlobalAddress(),
             "google_compute_global_forwarding_rule": resourceComputeGlobalForwardingRule(),
             "google_compute_http_health_check":      resourceComputeHttpHealthCheck(),
+            "google_compute_https_health_check":     resourceComputeHttpsHealthCheck(),
             "google_compute_instance":               resourceComputeInstance(),
             "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(),
             "google_compute_instance_template":      resourceComputeInstanceTemplate(),
@@ -72,8 +81,12 @@ func Provider() terraform.ResourceProvider {
 }
 
 func providerConfigure(d *schema.ResourceData) (interface{}, error) {
+    credentials := d.Get("credentials").(string)
+    if credentials == "" {
+        credentials = d.Get("account_file").(string)
+    }
     config := Config{
-        AccountFile: d.Get("account_file").(string),
+        Credentials: credentials,
         Project:     d.Get("project").(string),
         Region:      d.Get("region").(string),
     }
@@ -96,22 +109,34 @@ func validateAccountFile(v interface{}, k string) (warnings []string, errors []error) {
         return
     }
 
-    var account accountFile
-    if err := json.Unmarshal([]byte(value), &account); err != nil {
-        warnings = append(warnings, `
-account_file is not valid JSON, so we are assuming it is a file path. This
-support will be removed in the future. Please update your configuration to use
-${file("filename.json")} instead.`)
-    } else {
-        return
+    contents, wasPath, err := pathorcontents.Read(value)
+    if err != nil {
+        errors = append(errors, fmt.Errorf("Error loading Account File: %s", err))
+    }
+    if wasPath {
+        warnings = append(warnings, `account_file was provided as a path instead of
+as file contents. This support will be removed in the future. Please update
+your configuration to use ${file("filename.json")} instead.`)
     }
 
-    if _, err := os.Stat(value); err != nil {
-        errors = append(errors,
-            fmt.Errorf(
-                "account_file path could not be read from '%s': %s",
-                value,
-                err))
+    var account accountFile
+    if err := json.Unmarshal([]byte(contents), &account); err != nil {
+        errors = append(errors,
+            fmt.Errorf("account_file not valid JSON '%s': %s", contents, err))
+    }
+
+    return
+}
+
+func validateCredentials(v interface{}, k string) (warnings []string, errors []error) {
+    if v == nil || v.(string) == "" {
+        return
+    }
+    creds := v.(string)
+    var account accountFile
+    if err := json.Unmarshal([]byte(creds), &account); err != nil {
+        errors = append(errors,
+            fmt.Errorf("credentials are not valid JSON '%s': %s", creds, err))
     }
 
     return
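The pathorcontents.Read helper referenced above lets the same provider field accept either a file path or the raw contents. A minimal sketch of that pattern, assuming only the (contents string, wasPath bool, err error) signature visible in the hunk; the real helper may handle further edge cases such as "~" expansion:

package pathorcontents

import (
    "io/ioutil"
    "os"
)

// Read treats poc as a file path if a file exists at that location and
// returns its contents; otherwise poc itself is returned as the contents.
// wasPath reports which interpretation was used.
func Read(poc string) (string, bool, error) {
    if _, err := os.Stat(poc); err == nil {
        contents, err := ioutil.ReadFile(poc)
        return string(contents), true, err
    }
    return poc, false, nil
}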
@@ -29,8 +29,8 @@ func TestProvider_impl(t *testing.T) {
 }
 
 func testAccPreCheck(t *testing.T) {
-    if v := os.Getenv("GOOGLE_ACCOUNT_FILE"); v == "" {
-        t.Fatal("GOOGLE_ACCOUNT_FILE must be set for acceptance tests")
+    if v := os.Getenv("GOOGLE_CREDENTIALS"); v == "" {
+        t.Fatal("GOOGLE_CREDENTIALS must be set for acceptance tests")
     }
 
     if v := os.Getenv("GOOGLE_PROJECT"); v == "" {
builtin/providers/google/resource_compute_https_health_check.go (new file, 227 lines)
@@ -0,0 +1,227 @@
+package google
+
+import (
+    "fmt"
+    "log"
+
+    "github.com/hashicorp/terraform/helper/schema"
+    "google.golang.org/api/compute/v1"
+    "google.golang.org/api/googleapi"
+)
+
+func resourceComputeHttpsHealthCheck() *schema.Resource {
+    return &schema.Resource{
+        Create: resourceComputeHttpsHealthCheckCreate,
+        Read:   resourceComputeHttpsHealthCheckRead,
+        Delete: resourceComputeHttpsHealthCheckDelete,
+        Update: resourceComputeHttpsHealthCheckUpdate,
+
+        Schema: map[string]*schema.Schema{
+            "check_interval_sec": &schema.Schema{
+                Type:     schema.TypeInt,
+                Optional: true,
+                Default:  5,
+            },
+
+            "description": &schema.Schema{
+                Type:     schema.TypeString,
+                Optional: true,
+            },
+
+            "healthy_threshold": &schema.Schema{
+                Type:     schema.TypeInt,
+                Optional: true,
+                Default:  2,
+            },
+
+            "host": &schema.Schema{
+                Type:     schema.TypeString,
+                Optional: true,
+            },
+
+            "name": &schema.Schema{
+                Type:     schema.TypeString,
+                Required: true,
+                ForceNew: true,
+            },
+
+            "port": &schema.Schema{
+                Type:     schema.TypeInt,
+                Optional: true,
+                Default:  443,
+            },
+
+            "request_path": &schema.Schema{
+                Type:     schema.TypeString,
+                Optional: true,
+                Default:  "/",
+            },
+
+            "self_link": &schema.Schema{
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "timeout_sec": &schema.Schema{
+                Type:     schema.TypeInt,
+                Optional: true,
+                Default:  5,
+            },
+
+            "unhealthy_threshold": &schema.Schema{
+                Type:     schema.TypeInt,
+                Optional: true,
+                Default:  2,
+            },
+        },
+    }
+}
+
+func resourceComputeHttpsHealthCheckCreate(d *schema.ResourceData, meta interface{}) error {
+    config := meta.(*Config)
+
+    // Build the parameter
+    hchk := &compute.HttpsHealthCheck{
+        Name: d.Get("name").(string),
+    }
+    // Optional things
+    if v, ok := d.GetOk("description"); ok {
+        hchk.Description = v.(string)
+    }
+    if v, ok := d.GetOk("host"); ok {
+        hchk.Host = v.(string)
+    }
+    if v, ok := d.GetOk("request_path"); ok {
+        hchk.RequestPath = v.(string)
+    }
+    if v, ok := d.GetOk("check_interval_sec"); ok {
+        hchk.CheckIntervalSec = int64(v.(int))
+    }
+    if v, ok := d.GetOk("healthy_threshold"); ok {
+        hchk.HealthyThreshold = int64(v.(int))
+    }
+    if v, ok := d.GetOk("port"); ok {
+        hchk.Port = int64(v.(int))
+    }
+    if v, ok := d.GetOk("timeout_sec"); ok {
+        hchk.TimeoutSec = int64(v.(int))
+    }
+    if v, ok := d.GetOk("unhealthy_threshold"); ok {
+        hchk.UnhealthyThreshold = int64(v.(int))
+    }
+
+    log.Printf("[DEBUG] HttpsHealthCheck insert request: %#v", hchk)
+    op, err := config.clientCompute.HttpsHealthChecks.Insert(
+        config.Project, hchk).Do()
+    if err != nil {
+        return fmt.Errorf("Error creating HttpsHealthCheck: %s", err)
+    }
+
+    // It probably maybe worked, so store the ID now
+    d.SetId(hchk.Name)
+
+    err = computeOperationWaitGlobal(config, op, "Creating Https Health Check")
+    if err != nil {
+        return err
+    }
+
+    return resourceComputeHttpsHealthCheckRead(d, meta)
+}
+
+func resourceComputeHttpsHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error {
+    config := meta.(*Config)
+
+    // Build the parameter
+    hchk := &compute.HttpsHealthCheck{
+        Name: d.Get("name").(string),
+    }
+    // Optional things
+    if v, ok := d.GetOk("description"); ok {
+        hchk.Description = v.(string)
+    }
+    if v, ok := d.GetOk("host"); ok {
+        hchk.Host = v.(string)
+    }
+    if v, ok := d.GetOk("request_path"); ok {
+        hchk.RequestPath = v.(string)
+    }
+    if v, ok := d.GetOk("check_interval_sec"); ok {
+        hchk.CheckIntervalSec = int64(v.(int))
+    }
+    if v, ok := d.GetOk("healthy_threshold"); ok {
+        hchk.HealthyThreshold = int64(v.(int))
+    }
+    if v, ok := d.GetOk("port"); ok {
+        hchk.Port = int64(v.(int))
+    }
+    if v, ok := d.GetOk("timeout_sec"); ok {
+        hchk.TimeoutSec = int64(v.(int))
+    }
+    if v, ok := d.GetOk("unhealthy_threshold"); ok {
+        hchk.UnhealthyThreshold = int64(v.(int))
+    }
+
+    log.Printf("[DEBUG] HttpsHealthCheck patch request: %#v", hchk)
+    op, err := config.clientCompute.HttpsHealthChecks.Patch(
+        config.Project, hchk.Name, hchk).Do()
+    if err != nil {
+        return fmt.Errorf("Error patching HttpsHealthCheck: %s", err)
+    }
+
+    // It probably maybe worked, so store the ID now
+    d.SetId(hchk.Name)
+
+    err = computeOperationWaitGlobal(config, op, "Updating Https Health Check")
+    if err != nil {
+        return err
+    }
+
+    return resourceComputeHttpsHealthCheckRead(d, meta)
+}
+
+func resourceComputeHttpsHealthCheckRead(d *schema.ResourceData, meta interface{}) error {
+    config := meta.(*Config)
+
+    hchk, err := config.clientCompute.HttpsHealthChecks.Get(
+        config.Project, d.Id()).Do()
+    if err != nil {
+        if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+            // The resource doesn't exist anymore
+            d.SetId("")
+
+            return nil
+        }
+
+        return fmt.Errorf("Error reading HttpsHealthCheck: %s", err)
+    }
+
+    d.Set("host", hchk.Host)
+    d.Set("request_path", hchk.RequestPath)
+    d.Set("check_interval_sec", hchk.CheckIntervalSec)
+    d.Set("healthy_threshold", hchk.HealthyThreshold)
+    d.Set("port", hchk.Port)
+    d.Set("timeout_sec", hchk.TimeoutSec)
+    d.Set("unhealthy_threshold", hchk.UnhealthyThreshold)
+    d.Set("self_link", hchk.SelfLink)
+
+    return nil
+}
+
+func resourceComputeHttpsHealthCheckDelete(d *schema.ResourceData, meta interface{}) error {
+    config := meta.(*Config)
+
+    // Delete the HttpsHealthCheck
+    op, err := config.clientCompute.HttpsHealthChecks.Delete(
+        config.Project, d.Id()).Do()
+    if err != nil {
+        return fmt.Errorf("Error deleting HttpsHealthCheck: %s", err)
+    }
+
+    err = computeOperationWaitGlobal(config, op, "Deleting Https Health Check")
+    if err != nil {
+        return err
+    }
+
+    d.SetId("")
+    return nil
+}
@@ -0,0 +1,171 @@
+package google
+
+import (
+    "fmt"
+    "testing"
+
+    "github.com/hashicorp/terraform/helper/resource"
+    "github.com/hashicorp/terraform/terraform"
+    "google.golang.org/api/compute/v1"
+)
+
+func TestAccComputeHttpsHealthCheck_basic(t *testing.T) {
+    var healthCheck compute.HttpsHealthCheck
+
+    resource.Test(t, resource.TestCase{
+        PreCheck:     func() { testAccPreCheck(t) },
+        Providers:    testAccProviders,
+        CheckDestroy: testAccCheckComputeHttpsHealthCheckDestroy,
+        Steps: []resource.TestStep{
+            resource.TestStep{
+                Config: testAccComputeHttpsHealthCheck_basic,
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckComputeHttpsHealthCheckExists(
+                        "google_compute_https_health_check.foobar", &healthCheck),
+                    testAccCheckComputeHttpsHealthCheckRequestPath(
+                        "/health_check", &healthCheck),
+                    testAccCheckComputeHttpsHealthCheckThresholds(
+                        3, 3, &healthCheck),
+                ),
+            },
+        },
+    })
+}
+
+func TestAccComputeHttpsHealthCheck_update(t *testing.T) {
+    var healthCheck compute.HttpsHealthCheck
+
+    resource.Test(t, resource.TestCase{
+        PreCheck:     func() { testAccPreCheck(t) },
+        Providers:    testAccProviders,
+        CheckDestroy: testAccCheckComputeHttpsHealthCheckDestroy,
+        Steps: []resource.TestStep{
+            resource.TestStep{
+                Config: testAccComputeHttpsHealthCheck_update1,
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckComputeHttpsHealthCheckExists(
+                        "google_compute_https_health_check.foobar", &healthCheck),
+                    testAccCheckComputeHttpsHealthCheckRequestPath(
+                        "/not_default", &healthCheck),
+                    testAccCheckComputeHttpsHealthCheckThresholds(
+                        2, 2, &healthCheck),
+                ),
+            },
+            resource.TestStep{
+                Config: testAccComputeHttpsHealthCheck_update2,
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckComputeHttpsHealthCheckExists(
+                        "google_compute_https_health_check.foobar", &healthCheck),
+                    testAccCheckComputeHttpsHealthCheckRequestPath(
+                        "/", &healthCheck),
+                    testAccCheckComputeHttpsHealthCheckThresholds(
+                        10, 10, &healthCheck),
+                ),
+            },
+        },
+    })
+}
+
+func testAccCheckComputeHttpsHealthCheckDestroy(s *terraform.State) error {
+    config := testAccProvider.Meta().(*Config)
+
+    for _, rs := range s.RootModule().Resources {
+        if rs.Type != "google_compute_https_health_check" {
+            continue
+        }
+
+        _, err := config.clientCompute.HttpsHealthChecks.Get(
+            config.Project, rs.Primary.ID).Do()
+        if err == nil {
+            return fmt.Errorf("HttpsHealthCheck still exists")
+        }
+    }
+
+    return nil
+}
+
+func testAccCheckComputeHttpsHealthCheckExists(n string, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc {
+    return func(s *terraform.State) error {
+        rs, ok := s.RootModule().Resources[n]
+        if !ok {
+            return fmt.Errorf("Not found: %s", n)
+        }
+
+        if rs.Primary.ID == "" {
+            return fmt.Errorf("No ID is set")
+        }
+
+        config := testAccProvider.Meta().(*Config)
+
+        found, err := config.clientCompute.HttpsHealthChecks.Get(
+            config.Project, rs.Primary.ID).Do()
+        if err != nil {
+            return err
+        }
+
+        if found.Name != rs.Primary.ID {
+            return fmt.Errorf("HttpsHealthCheck not found")
+        }
+
+        *healthCheck = *found
+
+        return nil
+    }
+}
+
+func testAccCheckComputeHttpsHealthCheckRequestPath(path string, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc {
+    return func(s *terraform.State) error {
+        if healthCheck.RequestPath != path {
+            return fmt.Errorf("RequestPath doesn't match: expected %s, got %s", path, healthCheck.RequestPath)
+        }
+
+        return nil
+    }
+}
+
+func testAccCheckComputeHttpsHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc {
+    return func(s *terraform.State) error {
+        if healthCheck.HealthyThreshold != healthy {
+            return fmt.Errorf("HealthyThreshold doesn't match: expected %d, got %d", healthy, healthCheck.HealthyThreshold)
+        }
+
+        if healthCheck.UnhealthyThreshold != unhealthy {
+            return fmt.Errorf("UnhealthyThreshold doesn't match: expected %d, got %d", unhealthy, healthCheck.UnhealthyThreshold)
+        }
+
+        return nil
+    }
+}
+
+const testAccComputeHttpsHealthCheck_basic = `
+resource "google_compute_https_health_check" "foobar" {
+    check_interval_sec = 3
+    description = "Resource created for Terraform acceptance testing"
+    healthy_threshold = 3
+    host = "foobar"
+    name = "terraform-test"
+    port = "80"
+    request_path = "/health_check"
+    timeout_sec = 2
+    unhealthy_threshold = 3
+}
+`
+
+const testAccComputeHttpsHealthCheck_update1 = `
+resource "google_compute_https_health_check" "foobar" {
+    name = "terraform-test"
+    description = "Resource created for Terraform acceptance testing"
+    request_path = "/not_default"
+}
+`
+
+/* Change description, restore request_path to default, and change
+ * thresholds from defaults */
+const testAccComputeHttpsHealthCheck_update2 = `
+resource "google_compute_https_health_check" "foobar" {
+    name = "terraform-test"
+    description = "Resource updated for Terraform acceptance testing"
+    healthy_threshold = 10
+    unhealthy_threshold = 10
+}
+`
@@ -11,6 +11,7 @@ func canonicalizeServiceScope(scope string) string {
         "datastore":     "https://www.googleapis.com/auth/datastore",
         "logging-write": "https://www.googleapis.com/auth/logging.write",
         "monitoring":    "https://www.googleapis.com/auth/monitoring",
+        "pubsub":        "https://www.googleapis.com/auth/pubsub",
         "sql":           "https://www.googleapis.com/auth/sqlservice",
         "sql-admin":     "https://www.googleapis.com/auth/sqlservice.admin",
         "storage-full":  "https://www.googleapis.com/auth/devstorage.full_control",
@@ -22,9 +23,9 @@ func canonicalizeServiceScope(scope string) string {
         "userinfo-email": "https://www.googleapis.com/auth/userinfo.email",
     }
 
-    if matchedUrl, ok := scopeMap[scope]; ok {
-        return matchedUrl
-    } else {
-        return scope
-    }
+    if matchedURL, ok := scopeMap[scope]; ok {
+        return matchedURL
+    }
+
+    return scope
 }
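The simplified lookup above expands known aliases and passes anything unrecognized through unchanged; the new pubsub entry now participates in that expansion. A condensed, self-contained sketch of the same behavior:

package main

import "fmt"

// canonicalize mirrors the lookup above: aliases expand to full scope URLs,
// unknown values pass through unchanged.
func canonicalize(scope string) string {
    scopeMap := map[string]string{
        "pubsub": "https://www.googleapis.com/auth/pubsub",
    }
    if matchedURL, ok := scopeMap[scope]; ok {
        return matchedURL
    }
    return scope
}

func main() {
    fmt.Println(canonicalize("pubsub"))   // alias expands to the full URL
    fmt.Println(canonicalize("my-scope")) // unknown values pass through
}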
@@ -95,6 +95,7 @@ func resourceComputeInstanceV2() *schema.Resource {
                 Type:     schema.TypeSet,
                 Optional: true,
                 ForceNew: false,
+                Computed: true,
                 Elem:     &schema.Schema{Type: schema.TypeString},
                 Set:      schema.HashString,
             },
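Marking the set Computed as well as Optional means a user may omit the attribute and the provider records whatever the API reports back (for example, a default security group) instead of producing a perpetual diff; that reading of the one-line change is an inference, since the hunk carries no commentary. The combination in context, as a sketch:

package example

import "github.com/hashicorp/terraform/helper/schema"

func exampleSchema() map[string]*schema.Schema {
    return map[string]*schema.Schema{
        "security_groups": &schema.Schema{
            Type:     schema.TypeSet,
            Optional: true,
            Computed: true, // state tracks the API-assigned value when unset
            Elem:     &schema.Schema{Type: schema.TypeString},
            Set:      schema.HashString,
        },
    }
}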
@@ -38,8 +38,9 @@ func resourceComputeSecGroupV2() *schema.Resource {
                 ForceNew: false,
             },
             "rule": &schema.Schema{
-                Type:     schema.TypeList,
+                Type:     schema.TypeSet,
                 Optional: true,
+                Computed: true,
                 Elem: &schema.Resource{
                     Schema: map[string]*schema.Schema{
                         "id": &schema.Schema{
@@ -79,6 +80,7 @@ func resourceComputeSecGroupV2() *schema.Resource {
                         },
                     },
                 },
+                Set: secgroupRuleV2Hash,
             },
         },
     }
@@ -129,13 +131,10 @@ func resourceComputeSecGroupV2Read(d *schema.ResourceData, meta interface{}) error {
 
     d.Set("name", sg.Name)
     d.Set("description", sg.Description)
-    rtm := rulesToMap(sg.Rules)
-    for _, v := range rtm {
-        if v["group"] == d.Get("name") {
-            v["self"] = "1"
-        } else {
-            v["self"] = "0"
-        }
+
+    rtm, err := rulesToMap(computeClient, d, sg.Rules)
+    if err != nil {
+        return err
     }
     log.Printf("[DEBUG] rulesToMap(sg.Rules): %+v", rtm)
     d.Set("rule", rtm)
@@ -164,14 +163,11 @@ func resourceComputeSecGroupV2Update(d *schema.ResourceData, meta interface{}) error {
 
     if d.HasChange("rule") {
         oldSGRaw, newSGRaw := d.GetChange("rule")
-        oldSGRSlice, newSGRSlice := oldSGRaw.([]interface{}), newSGRaw.([]interface{})
-        oldSGRSet := schema.NewSet(secgroupRuleV2Hash, oldSGRSlice)
-        newSGRSet := schema.NewSet(secgroupRuleV2Hash, newSGRSlice)
+        oldSGRSet, newSGRSet := oldSGRaw.(*schema.Set), newSGRaw.(*schema.Set)
         secgrouprulesToAdd := newSGRSet.Difference(oldSGRSet)
         secgrouprulesToRemove := oldSGRSet.Difference(newSGRSet)
 
         log.Printf("[DEBUG] Security group rules to add: %v", secgrouprulesToAdd)
-
         log.Printf("[DEBUG] Security groups rules to remove: %v", secgrouprulesToRemove)
 
         for _, rawRule := range secgrouprulesToAdd.List() {
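Switching rule to schema.TypeSet is what makes the update path above possible: two sets can be diffed directly to produce a minimal add/remove plan. A self-contained sketch of the same Difference pattern using plain strings in place of rule maps:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/helper/schema"
)

func main() {
    oldRules := schema.NewSet(schema.HashString, []interface{}{"ssh", "icmp"})
    newRules := schema.NewSet(schema.HashString, []interface{}{"icmp", "https"})

    // Rules present only in the new configuration must be created...
    fmt.Println(newRules.Difference(oldRules).List()) // [https]
    // ...and rules present only in the old state must be deleted.
    fmt.Println(oldRules.Difference(newRules).List()) // [ssh]
}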
@@ -231,67 +227,83 @@ func resourceComputeSecGroupV2Delete(d *schema.ResourceData, meta interface{}) error {
 }
 
 func resourceSecGroupRulesV2(d *schema.ResourceData) []secgroups.CreateRuleOpts {
-    rawRules := d.Get("rule").([]interface{})
+    rawRules := d.Get("rule").(*schema.Set).List()
     createRuleOptsList := make([]secgroups.CreateRuleOpts, len(rawRules))
-    for i, raw := range rawRules {
-        rawMap := raw.(map[string]interface{})
-        groupId := rawMap["from_group_id"].(string)
-        if rawMap["self"].(bool) {
-            groupId = d.Id()
-        }
-        createRuleOptsList[i] = secgroups.CreateRuleOpts{
-            ParentGroupID: d.Id(),
-            FromPort:      rawMap["from_port"].(int),
-            ToPort:        rawMap["to_port"].(int),
-            IPProtocol:    rawMap["ip_protocol"].(string),
-            CIDR:          rawMap["cidr"].(string),
-            FromGroupID:   groupId,
-        }
+    for i, rawRule := range rawRules {
+        createRuleOptsList[i] = resourceSecGroupRuleCreateOptsV2(d, rawRule)
     }
     return createRuleOptsList
 }
 
-func resourceSecGroupRuleCreateOptsV2(d *schema.ResourceData, raw interface{}) secgroups.CreateRuleOpts {
-    rawMap := raw.(map[string]interface{})
-    groupId := rawMap["from_group_id"].(string)
-    if rawMap["self"].(bool) {
+func resourceSecGroupRuleCreateOptsV2(d *schema.ResourceData, rawRule interface{}) secgroups.CreateRuleOpts {
+    rawRuleMap := rawRule.(map[string]interface{})
+    groupId := rawRuleMap["from_group_id"].(string)
+    if rawRuleMap["self"].(bool) {
         groupId = d.Id()
     }
     return secgroups.CreateRuleOpts{
         ParentGroupID: d.Id(),
-        FromPort:      rawMap["from_port"].(int),
-        ToPort:        rawMap["to_port"].(int),
-        IPProtocol:    rawMap["ip_protocol"].(string),
-        CIDR:          rawMap["cidr"].(string),
+        FromPort:      rawRuleMap["from_port"].(int),
+        ToPort:        rawRuleMap["to_port"].(int),
+        IPProtocol:    rawRuleMap["ip_protocol"].(string),
+        CIDR:          rawRuleMap["cidr"].(string),
         FromGroupID:   groupId,
     }
 }
 
-func resourceSecGroupRuleV2(d *schema.ResourceData, raw interface{}) secgroups.Rule {
-    rawMap := raw.(map[string]interface{})
+func resourceSecGroupRuleV2(d *schema.ResourceData, rawRule interface{}) secgroups.Rule {
+    rawRuleMap := rawRule.(map[string]interface{})
     return secgroups.Rule{
-        ID:            rawMap["id"].(string),
+        ID:            rawRuleMap["id"].(string),
         ParentGroupID: d.Id(),
-        FromPort:      rawMap["from_port"].(int),
-        ToPort:        rawMap["to_port"].(int),
-        IPProtocol:    rawMap["ip_protocol"].(string),
-        IPRange:       secgroups.IPRange{CIDR: rawMap["cidr"].(string)},
+        FromPort:      rawRuleMap["from_port"].(int),
+        ToPort:        rawRuleMap["to_port"].(int),
+        IPProtocol:    rawRuleMap["ip_protocol"].(string),
+        IPRange:       secgroups.IPRange{CIDR: rawRuleMap["cidr"].(string)},
     }
 }
 
-func rulesToMap(sgrs []secgroups.Rule) []map[string]interface{} {
+func rulesToMap(computeClient *gophercloud.ServiceClient, d *schema.ResourceData, sgrs []secgroups.Rule) ([]map[string]interface{}, error) {
     sgrMap := make([]map[string]interface{}, len(sgrs))
     for i, sgr := range sgrs {
+        groupId := ""
+        self := false
+        if sgr.Group.Name != "" {
+            if sgr.Group.Name == d.Get("name").(string) {
+                self = true
+            } else {
+                // Since Nova only returns the secgroup Name (and not the ID) for the group attribute,
+                // we need to look up all security groups and match the name.
+                // Nevermind that Nova wants the ID when setting the Group *and* that multiple groups
+                // with the same name can exist...
+                allPages, err := secgroups.List(computeClient).AllPages()
+                if err != nil {
+                    return nil, err
+                }
+                securityGroups, err := secgroups.ExtractSecurityGroups(allPages)
+                if err != nil {
+                    return nil, err
+                }
+
+                for _, sg := range securityGroups {
+                    if sg.Name == sgr.Group.Name {
+                        groupId = sg.ID
+                    }
+                }
+            }
+        }
+
         sgrMap[i] = map[string]interface{}{
             "id":          sgr.ID,
             "from_port":   sgr.FromPort,
             "to_port":     sgr.ToPort,
             "ip_protocol": sgr.IPProtocol,
             "cidr":        sgr.IPRange.CIDR,
-            "group":       sgr.Group.Name,
+            "self":          self,
+            "from_group_id": groupId,
         }
     }
-    return sgrMap
+    return sgrMap, nil
 }
 
 func secgroupRuleV2Hash(v interface{}) int {
@@ -301,6 +313,8 @@ func secgroupRuleV2Hash(v interface{}) int {
     buf.WriteString(fmt.Sprintf("%d-", m["to_port"].(int)))
     buf.WriteString(fmt.Sprintf("%s-", m["ip_protocol"].(string)))
     buf.WriteString(fmt.Sprintf("%s-", m["cidr"].(string)))
+    buf.WriteString(fmt.Sprintf("%s-", m["from_group_id"].(string)))
+    buf.WriteString(fmt.Sprintf("%t-", m["self"].(bool)))
 
     return hashcode.String(buf.String())
 }
@@ -19,7 +19,7 @@ func TestAccComputeV2SecGroup_basic(t *testing.T) {
         CheckDestroy: testAccCheckComputeV2SecGroupDestroy,
         Steps: []resource.TestStep{
             resource.TestStep{
-                Config: testAccComputeV2SecGroup_basic,
+                Config: testAccComputeV2SecGroup_basic_orig,
                 Check: resource.ComposeTestCheckFunc(
                     testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.foo", &secgroup),
                 ),
@@ -28,6 +28,84 @@ func TestAccComputeV2SecGroup_basic(t *testing.T) {
     })
 }
 
+func TestAccComputeV2SecGroup_update(t *testing.T) {
+    var secgroup secgroups.SecurityGroup
+
+    resource.Test(t, resource.TestCase{
+        PreCheck:     func() { testAccPreCheck(t) },
+        Providers:    testAccProviders,
+        CheckDestroy: testAccCheckComputeV2SecGroupDestroy,
+        Steps: []resource.TestStep{
+            resource.TestStep{
+                Config: testAccComputeV2SecGroup_basic_orig,
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.foo", &secgroup),
+                ),
+            },
+            resource.TestStep{
+                Config: testAccComputeV2SecGroup_basic_update,
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.foo", &secgroup),
+                    testAccCheckComputeV2SecGroupRuleCount(t, &secgroup, 2),
+                ),
+            },
+        },
+    })
+}
+
+func TestAccComputeV2SecGroup_groupID(t *testing.T) {
+    var secgroup1, secgroup2, secgroup3 secgroups.SecurityGroup
+
+    resource.Test(t, resource.TestCase{
+        PreCheck:     func() { testAccPreCheck(t) },
+        Providers:    testAccProviders,
+        CheckDestroy: testAccCheckComputeV2SecGroupDestroy,
+        Steps: []resource.TestStep{
+            resource.TestStep{
+                Config: testAccComputeV2SecGroup_groupID_orig,
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.test_group_1", &secgroup1),
+                    testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.test_group_2", &secgroup2),
+                    testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.test_group_3", &secgroup3),
+                    testAccCheckComputeV2SecGroupGroupIDMatch(t, &secgroup1, &secgroup3),
+                ),
+            },
+            resource.TestStep{
+                Config: testAccComputeV2SecGroup_groupID_update,
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.test_group_1", &secgroup1),
+                    testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.test_group_2", &secgroup2),
+                    testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.test_group_3", &secgroup3),
+                    testAccCheckComputeV2SecGroupGroupIDMatch(t, &secgroup2, &secgroup3),
+                ),
+            },
+        },
+    })
+}
+
+func TestAccComputeV2SecGroup_self(t *testing.T) {
+    var secgroup secgroups.SecurityGroup
+
+    resource.Test(t, resource.TestCase{
+        PreCheck:     func() { testAccPreCheck(t) },
+        Providers:    testAccProviders,
+        CheckDestroy: testAccCheckComputeV2SecGroupDestroy,
+        Steps: []resource.TestStep{
+            resource.TestStep{
+                Config: testAccComputeV2SecGroup_self,
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.test_group_1", &secgroup),
+                    testAccCheckComputeV2SecGroupGroupIDMatch(t, &secgroup, &secgroup),
+                    resource.TestCheckResourceAttr(
+                        "openstack_compute_secgroup_v2.test_group_1", "rule.1118853483.self", "true"),
+                    resource.TestCheckResourceAttr(
+                        "openstack_compute_secgroup_v2.test_group_1", "rule.1118853483.from_group_id", ""),
+                ),
+            },
+        },
+    })
+}
+
 func testAccCheckComputeV2SecGroupDestroy(s *terraform.State) error {
     config := testAccProvider.Meta().(*Config)
     computeClient, err := config.computeV2Client(OS_REGION_NAME)
@@ -81,10 +159,148 @@ func testAccCheckComputeV2SecGroupExists(t *testing.T, n string, secgroup *secgroups.SecurityGroup) resource.TestCheckFunc {
     }
 }
 
-var testAccComputeV2SecGroup_basic = fmt.Sprintf(`
-resource "openstack_compute_secgroup_v2" "foo" {
-    region = "%s"
-    name = "test_group_1"
-    description = "first test security group"
-}`,
-    OS_REGION_NAME)
+func testAccCheckComputeV2SecGroupRuleCount(t *testing.T, secgroup *secgroups.SecurityGroup, count int) resource.TestCheckFunc {
+    return func(s *terraform.State) error {
+        if len(secgroup.Rules) != count {
+            return fmt.Errorf("Security group rule count does not match. Expected %d, got %d", count, len(secgroup.Rules))
+        }
+
+        return nil
+    }
+}
+
+func testAccCheckComputeV2SecGroupGroupIDMatch(t *testing.T, sg1, sg2 *secgroups.SecurityGroup) resource.TestCheckFunc {
+    return func(s *terraform.State) error {
+        if len(sg2.Rules) == 1 {
+            if sg1.Name != sg2.Rules[0].Group.Name || sg1.TenantID != sg2.Rules[0].Group.TenantID {
+                return fmt.Errorf("%s was not correctly applied to %s", sg1.Name, sg2.Name)
+            }
+        } else {
+            return fmt.Errorf("%s rule count is incorrect", sg2.Name)
+        }
+
+        return nil
+    }
+}
+
+var testAccComputeV2SecGroup_basic_orig = fmt.Sprintf(`
+resource "openstack_compute_secgroup_v2" "foo" {
+    name = "test_group_1"
+    description = "first test security group"
+    rule {
+        from_port = 22
+        to_port = 22
+        ip_protocol = "tcp"
+        cidr = "0.0.0.0/0"
+    }
+    rule {
+        from_port = 1
+        to_port = 65535
+        ip_protocol = "udp"
+        cidr = "0.0.0.0/0"
+    }
+    rule {
+        from_port = -1
+        to_port = -1
+        ip_protocol = "icmp"
+        cidr = "0.0.0.0/0"
+    }
+}`)
+
+var testAccComputeV2SecGroup_basic_update = fmt.Sprintf(`
+resource "openstack_compute_secgroup_v2" "foo" {
+    name = "test_group_1"
+    description = "first test security group"
+    rule {
+        from_port = 2200
+        to_port = 2200
+        ip_protocol = "tcp"
+        cidr = "0.0.0.0/0"
+    }
+    rule {
+        from_port = -1
+        to_port = -1
+        ip_protocol = "icmp"
+        cidr = "0.0.0.0/0"
+    }
+}`)
+
+var testAccComputeV2SecGroup_groupID_orig = fmt.Sprintf(`
+resource "openstack_compute_secgroup_v2" "test_group_1" {
+    name = "test_group_1"
+    description = "first test security group"
+    rule {
+        from_port = 22
+        to_port = 22
+        ip_protocol = "tcp"
+        cidr = "0.0.0.0/0"
+    }
+}
+
+resource "openstack_compute_secgroup_v2" "test_group_2" {
+    name = "test_group_2"
+    description = "second test security group"
+    rule {
+        from_port = -1
+        to_port = -1
+        ip_protocol = "icmp"
+        cidr = "0.0.0.0/0"
+    }
+}
+
+resource "openstack_compute_secgroup_v2" "test_group_3" {
+    name = "test_group_3"
+    description = "third test security group"
+    rule {
+        from_port = 80
+        to_port = 80
+        ip_protocol = "tcp"
+        from_group_id = "${openstack_compute_secgroup_v2.test_group_1.id}"
+    }
+}`)
+
+var testAccComputeV2SecGroup_groupID_update = fmt.Sprintf(`
+resource "openstack_compute_secgroup_v2" "test_group_1" {
+    name = "test_group_1"
+    description = "first test security group"
+    rule {
+        from_port = 22
+        to_port = 22
+        ip_protocol = "tcp"
+        cidr = "0.0.0.0/0"
+    }
+}
+
+resource "openstack_compute_secgroup_v2" "test_group_2" {
+    name = "test_group_2"
+    description = "second test security group"
+    rule {
+        from_port = -1
+        to_port = -1
+        ip_protocol = "icmp"
+        cidr = "0.0.0.0/0"
+    }
+}
+
+resource "openstack_compute_secgroup_v2" "test_group_3" {
+    name = "test_group_3"
+    description = "third test security group"
+    rule {
+        from_port = 80
+        to_port = 80
+        ip_protocol = "tcp"
+        from_group_id = "${openstack_compute_secgroup_v2.test_group_2.id}"
+    }
+}`)
+
+var testAccComputeV2SecGroup_self = fmt.Sprintf(`
+resource "openstack_compute_secgroup_v2" "test_group_1" {
+    name = "test_group_1"
+    description = "first test security group"
+    rule {
+        from_port = 22
+        to_port = 22
+        ip_protocol = "tcp"
+        self = true
+    }
+}`)
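The rule.1118853483.self path asserted in the _self test above is how set elements are addressed in state: the middle segment is the element's hash, here the output of secgroupRuleV2Hash, so it changes whenever any hashed field (now including from_group_id and self) changes. Deriving such a key, as a sketch inside the same package; the map must contain every field the hash function actually reads, only some of which appear in the hunks above, so treat the field set as illustrative:

func exampleRuleStateKey() string {
    rule := map[string]interface{}{
        "from_port":     22,
        "to_port":       22,
        "ip_protocol":   "tcp",
        "cidr":          "",
        "from_group_id": "",
        "self":          true,
    }
    // e.g. "rule.1118853483.self" for the rule in the _self test config
    return fmt.Sprintf("rule.%d.self", secgroupRuleV2Hash(rule))
}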
@@ -117,51 +117,53 @@ func TestAccNetworkingV2Network_fullstack(t *testing.T) {
     var subnet subnets.Subnet
 
     var testAccNetworkingV2Network_fullstack = fmt.Sprintf(`
     resource "openstack_networking_network_v2" "foo" {
         region = "%s"
         name = "network_1"
         admin_state_up = "true"
     }
 
     resource "openstack_networking_subnet_v2" "foo" {
         region = "%s"
         name = "subnet_1"
        network_id = "${openstack_networking_network_v2.foo.id}"
         cidr = "192.168.199.0/24"
         ip_version = 4
     }
 
     resource "openstack_compute_secgroup_v2" "foo" {
         region = "%s"
         name = "secgroup_1"
         description = "a security group"
         rule {
             from_port = 22
             to_port = 22
             ip_protocol = "tcp"
             cidr = "0.0.0.0/0"
         }
     }
 
     resource "openstack_networking_port_v2" "foo" {
         region = "%s"
         name = "port_1"
         network_id = "${openstack_networking_network_v2.foo.id}"
         admin_state_up = "true"
-        security_groups = ["${openstack_compute_secgroup_v2.foo.id}"]
-
-        depends_on = ["openstack_networking_subnet_v2.foo"]
-    }
+        security_group_ids = ["${openstack_compute_secgroup_v2.foo.id}"]
+        fixed_ip {
+            "subnet_id" = "${openstack_networking_subnet_v2.foo.id}"
+            "ip_address" = "192.168.199.23"
+        }
+    }
 
     resource "openstack_compute_instance_v2" "foo" {
         region = "%s"
         name = "terraform-test"
         security_groups = ["${openstack_compute_secgroup_v2.foo.name}"]
 
         network {
             port = "${openstack_networking_port_v2.foo.id}"
         }
     }`, region, region, region, region, region)
 
     resource.Test(t, resource.TestCase{
         PreCheck: func() { testAccPreCheck(t) },
@@ -3,7 +3,6 @@ package openstack
 import (
     "fmt"
     "log"
-    "strconv"
     "time"
 
     "github.com/hashicorp/terraform/helper/hashcode"
@@ -39,7 +38,7 @@ func resourceNetworkingPortV2() *schema.Resource {
                 ForceNew: true,
             },
             "admin_state_up": &schema.Schema{
-                Type:     schema.TypeString,
+                Type:     schema.TypeBool,
                 Optional: true,
                 ForceNew: false,
                 Computed: true,
@@ -62,7 +61,7 @@ func resourceNetworkingPortV2() *schema.Resource {
                 ForceNew: true,
                 Computed: true,
             },
-            "security_groups": &schema.Schema{
+            "security_group_ids": &schema.Schema{
                 Type:     schema.TypeSet,
                 Optional: true,
                 ForceNew: false,
@@ -78,6 +77,23 @@ func resourceNetworkingPortV2() *schema.Resource {
                 ForceNew: true,
                 Computed: true,
             },
+            "fixed_ip": &schema.Schema{
+                Type:     schema.TypeList,
+                Optional: true,
+                ForceNew: false,
+                Elem: &schema.Resource{
+                    Schema: map[string]*schema.Schema{
+                        "subnet_id": &schema.Schema{
+                            Type:     schema.TypeString,
+                            Required: true,
+                        },
+                        "ip_address": &schema.Schema{
+                            Type:     schema.TypeString,
+                            Required: true,
+                        },
+                    },
+                },
+            },
         },
     }
 }
@@ -98,6 +114,7 @@ func resourceNetworkingPortV2Create(d *schema.ResourceData, meta interface{}) error {
         DeviceOwner:    d.Get("device_owner").(string),
         SecurityGroups: resourcePortSecurityGroupsV2(d),
         DeviceID:       d.Get("device_id").(string),
+        FixedIPs:       resourcePortFixedIpsV2(d),
     }
 
     log.Printf("[DEBUG] Create Options: %#v", createOpts)
@@ -139,13 +156,14 @@ func resourceNetworkingPortV2Read(d *schema.ResourceData, meta interface{}) error {
     log.Printf("[DEBUG] Retreived Port %s: %+v", d.Id(), p)
 
     d.Set("name", p.Name)
-    d.Set("admin_state_up", strconv.FormatBool(p.AdminStateUp))
+    d.Set("admin_state_up", p.AdminStateUp)
     d.Set("network_id", p.NetworkID)
     d.Set("mac_address", p.MACAddress)
     d.Set("tenant_id", p.TenantID)
     d.Set("device_owner", p.DeviceOwner)
-    d.Set("security_groups", p.SecurityGroups)
+    d.Set("security_group_ids", p.SecurityGroups)
     d.Set("device_id", p.DeviceID)
+    d.Set("fixed_ip", p.FixedIPs)
 
     return nil
 }
@@ -171,7 +189,7 @@ func resourceNetworkingPortV2Update(d *schema.ResourceData, meta interface{}) error {
         updateOpts.DeviceOwner = d.Get("device_owner").(string)
     }
 
-    if d.HasChange("security_groups") {
+    if d.HasChange("security_group_ids") {
         updateOpts.SecurityGroups = resourcePortSecurityGroupsV2(d)
     }
 
@@ -179,6 +197,10 @@ func resourceNetworkingPortV2Update(d *schema.ResourceData, meta interface{}) error {
         updateOpts.DeviceID = d.Get("device_id").(string)
     }
 
+    if d.HasChange("fixed_ip") {
+        updateOpts.FixedIPs = resourcePortFixedIpsV2(d)
+    }
+
     log.Printf("[DEBUG] Updating Port %s with options: %+v", d.Id(), updateOpts)
 
     _, err = ports.Update(networkingClient, d.Id(), updateOpts).Extract()
@@ -215,7 +237,7 @@ func resourceNetworkingPortV2Delete(d *schema.ResourceData, meta interface{}) error {
 }
 
 func resourcePortSecurityGroupsV2(d *schema.ResourceData) []string {
-    rawSecurityGroups := d.Get("security_groups").(*schema.Set)
+    rawSecurityGroups := d.Get("security_group_ids").(*schema.Set)
     groups := make([]string, rawSecurityGroups.Len())
     for i, raw := range rawSecurityGroups.List() {
         groups[i] = raw.(string)
@@ -223,10 +245,24 @@ func resourcePortSecurityGroupsV2(d *schema.ResourceData) []string {
     return groups
 }
 
+func resourcePortFixedIpsV2(d *schema.ResourceData) []ports.IP {
+    rawIP := d.Get("fixed_ip").([]interface{})
+    ip := make([]ports.IP, len(rawIP))
+    for i, raw := range rawIP {
+        rawMap := raw.(map[string]interface{})
+        ip[i] = ports.IP{
+            SubnetID:  rawMap["subnet_id"].(string),
+            IPAddress: rawMap["ip_address"].(string),
+        }
+    }
+
+    return ip
+}
+
 func resourcePortAdminStateUpV2(d *schema.ResourceData) *bool {
     value := false
 
-    if raw, ok := d.GetOk("admin_state_up"); ok && raw == "true" {
+    if raw, ok := d.GetOk("admin_state_up"); ok && raw == true {
         value = true
     }
 
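resourcePortAdminStateUpV2 returns *bool rather than bool, and with the schema type now TypeBool the raw == true comparison receives a real boolean instead of the old "true" string. The pointer is what lets a request distinguish "leave the server-side default alone" (nil) from an explicit value; a minimal sketch of that optional-field pattern, with an illustrative options struct standing in for gophercloud's real one:

package main

import "fmt"

// UpdateOpts is illustrative; option structs for this kind of API use the
// same *bool convention so that a nil field is simply omitted from the
// request rather than sent as false.
type UpdateOpts struct {
    AdminStateUp *bool
}

func main() {
    up := true
    explicit := UpdateOpts{AdminStateUp: &up} // send admin_state_up=true
    untouched := UpdateOpts{}                 // nil: leave the setting alone

    fmt.Println(*explicit.AdminStateUp, untouched.AdminStateUp == nil)
}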
@@ -10,6 +10,7 @@ import (
     "github.com/rackspace/gophercloud/openstack/networking/v2/networks"
     "github.com/rackspace/gophercloud/openstack/networking/v2/ports"
+    "github.com/rackspace/gophercloud/openstack/networking/v2/subnets"
 )
 
 func TestAccNetworkingV2Port_basic(t *testing.T) {
@@ -17,6 +18,7 @@ func TestAccNetworkingV2Port_basic(t *testing.T) {
 
     var network networks.Network
     var port ports.Port
+    var subnet subnets.Subnet
 
     var testAccNetworkingV2Port_basic = fmt.Sprintf(`
     resource "openstack_networking_network_v2" "foo" {
@@ -25,12 +27,24 @@ func TestAccNetworkingV2Port_basic(t *testing.T) {
         admin_state_up = "true"
     }
 
+    resource "openstack_networking_subnet_v2" "foo" {
+        region = "%s"
+        name = "subnet_1"
+        network_id = "${openstack_networking_network_v2.foo.id}"
+        cidr = "192.168.199.0/24"
+        ip_version = 4
+    }
+
     resource "openstack_networking_port_v2" "foo" {
         region = "%s"
         name = "port_1"
         network_id = "${openstack_networking_network_v2.foo.id}"
         admin_state_up = "true"
-    }`, region, region)
+        fixed_ip {
+            subnet_id = "${openstack_networking_subnet_v2.foo.id}"
+            ip_address = "192.168.199.23"
+        }
+    }`, region, region, region)
 
     resource.Test(t, resource.TestCase{
         PreCheck: func() { testAccPreCheck(t) },
@@ -40,6 +54,7 @@ func TestAccNetworkingV2Port_basic(t *testing.T) {
             resource.TestStep{
                 Config: testAccNetworkingV2Port_basic,
                 Check: resource.ComposeTestCheckFunc(
+                    testAccCheckNetworkingV2SubnetExists(t, "openstack_networking_subnet_v2.foo", &subnet),
                     testAccCheckNetworkingV2NetworkExists(t, "openstack_networking_network_v2.foo", &network),
                     testAccCheckNetworkingV2PortExists(t, "openstack_networking_port_v2.foo", &port),
                 ),
@@ -33,7 +33,12 @@ func resourceNetworkingRouterInterfaceV2() *schema.Resource {
             },
             "subnet_id": &schema.Schema{
                 Type:     schema.TypeString,
-                Required: true,
+                Optional: true,
+                ForceNew: true,
+            },
+            "port_id": &schema.Schema{
+                Type:     schema.TypeString,
+                Optional: true,
                 ForceNew: true,
             },
         },
@@ -49,6 +54,7 @@ func resourceNetworkingRouterInterfaceV2Create(d *schema.ResourceData, meta interface{}) error {
 
     createOpts := routers.InterfaceOpts{
         SubnetID: d.Get("subnet_id").(string),
+        PortID:   d.Get("port_id").(string),
     }
 
     log.Printf("[DEBUG] Create Options: %#v", createOpts)
@@ -148,6 +154,7 @@ func waitForRouterInterfaceDelete(networkingClient *gophercloud.ServiceClient, d *schema.ResourceData) {
 
     removeOpts := routers.InterfaceOpts{
         SubnetID: d.Get("subnet_id").(string),
+        PortID:   d.Get("port_id").(string),
     }
 
     r, err := ports.Get(networkingClient, routerInterfaceId).Extract()
@@ -7,18 +7,53 @@ import (
     "github.com/hashicorp/terraform/helper/resource"
     "github.com/hashicorp/terraform/terraform"
 
+    "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers"
+    "github.com/rackspace/gophercloud/openstack/networking/v2/networks"
     "github.com/rackspace/gophercloud/openstack/networking/v2/ports"
+    "github.com/rackspace/gophercloud/openstack/networking/v2/subnets"
 )
 
-func TestAccNetworkingV2RouterInterface_basic(t *testing.T) {
+func TestAccNetworkingV2RouterInterface_basic_subnet(t *testing.T) {
+    var network networks.Network
+    var router routers.Router
+    var subnet subnets.Subnet
+
     resource.Test(t, resource.TestCase{
         PreCheck:     func() { testAccPreCheck(t) },
         Providers:    testAccProviders,
         CheckDestroy: testAccCheckNetworkingV2RouterInterfaceDestroy,
         Steps: []resource.TestStep{
             resource.TestStep{
-                Config: testAccNetworkingV2RouterInterface_basic,
+                Config: testAccNetworkingV2RouterInterface_basic_subnet,
                 Check: resource.ComposeTestCheckFunc(
+                    testAccCheckNetworkingV2NetworkExists(t, "openstack_networking_network_v2.network_1", &network),
+                    testAccCheckNetworkingV2SubnetExists(t, "openstack_networking_subnet_v2.subnet_1", &subnet),
+                    testAccCheckNetworkingV2RouterExists(t, "openstack_networking_router_v2.router_1", &router),
+                    testAccCheckNetworkingV2RouterInterfaceExists(t, "openstack_networking_router_interface_v2.int_1"),
+                ),
+            },
+        },
+    })
+}
+
+func TestAccNetworkingV2RouterInterface_basic_port(t *testing.T) {
+    var network networks.Network
+    var port ports.Port
+    var router routers.Router
+    var subnet subnets.Subnet
+
+    resource.Test(t, resource.TestCase{
+        PreCheck:     func() { testAccPreCheck(t) },
+        Providers:    testAccProviders,
+        CheckDestroy: testAccCheckNetworkingV2RouterInterfaceDestroy,
+        Steps: []resource.TestStep{
+            resource.TestStep{
+                Config: testAccNetworkingV2RouterInterface_basic_port,
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckNetworkingV2NetworkExists(t, "openstack_networking_network_v2.network_1", &network),
+                    testAccCheckNetworkingV2SubnetExists(t, "openstack_networking_subnet_v2.subnet_1", &subnet),
+                    testAccCheckNetworkingV2RouterExists(t, "openstack_networking_router_v2.router_1", &router),
+                    testAccCheckNetworkingV2PortExists(t, "openstack_networking_port_v2.port_1", &port),
                     testAccCheckNetworkingV2RouterInterfaceExists(t, "openstack_networking_router_interface_v2.int_1"),
                 ),
             },
@@ -77,24 +112,56 @@ func testAccCheckNetworkingV2RouterInterfaceExists(t *testing.T, n string) resource.TestCheckFunc {
     }
 }
 
-var testAccNetworkingV2RouterInterface_basic = fmt.Sprintf(`
+var testAccNetworkingV2RouterInterface_basic_subnet = fmt.Sprintf(`
 resource "openstack_networking_router_v2" "router_1" {
     name = "router_1"
     admin_state_up = "true"
 }
 
 resource "openstack_networking_router_interface_v2" "int_1" {
     subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
     router_id = "${openstack_networking_router_v2.router_1.id}"
 }
 
 resource "openstack_networking_network_v2" "network_1" {
     name = "network_1"
     admin_state_up = "true"
 }
 
 resource "openstack_networking_subnet_v2" "subnet_1" {
     network_id = "${openstack_networking_network_v2.network_1.id}"
     cidr = "192.168.199.0/24"
     ip_version = 4
 }`)
+
+var testAccNetworkingV2RouterInterface_basic_port = fmt.Sprintf(`
+resource "openstack_networking_router_v2" "router_1" {
+    name = "router_1"
+    admin_state_up = "true"
+}
+
+resource "openstack_networking_router_interface_v2" "int_1" {
+    router_id = "${openstack_networking_router_v2.router_1.id}"
+    port_id = "${openstack_networking_port_v2.port_1.id}"
+}
+
+resource "openstack_networking_network_v2" "network_1" {
+    name = "network_1"
+    admin_state_up = "true"
+}
+
+resource "openstack_networking_subnet_v2" "subnet_1" {
+    network_id = "${openstack_networking_network_v2.network_1.id}"
+    cidr = "192.168.199.0/24"
+    ip_version = 4
+}
+
+resource "openstack_networking_port_v2" "port_1" {
+    name = "port_1"
+    network_id = "${openstack_networking_network_v2.network_1.id}"
+    admin_state_up = "true"
+    fixed_ip {
+        subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
+        ip_address = "192.168.199.1"
+    }
+}`)
@@ -158,7 +158,7 @@ func resourcePacketDeviceCreate(d *schema.ResourceData, meta interface{}) error
 	log.Printf("[INFO] Device ID: %s", d.Id())
 
-	_, err = WaitForDeviceAttribute(d, "active", []string{"provisioning"}, "state", meta)
+	_, err = WaitForDeviceAttribute(d, "active", []string{"queued", "provisioning"}, "state", meta)
 	if err != nil {
 		return fmt.Errorf(
 			"Error waiting for device (%s) to become ready: %s", d.Id(), err)
@@ -4,7 +4,6 @@ import (
 	"crypto/sha256"
 	"encoding/hex"
 	"fmt"
-	"io/ioutil"
 	"log"
 	"os"
 	"path/filepath"
@@ -12,8 +11,8 @@ import (
 	"github.com/hashicorp/terraform/config"
 	"github.com/hashicorp/terraform/config/lang"
 	"github.com/hashicorp/terraform/config/lang/ast"
+	"github.com/hashicorp/terraform/helper/pathorcontents"
 	"github.com/hashicorp/terraform/helper/schema"
-	"github.com/mitchellh/go-homedir"
 )
 
 func resource() *schema.Resource {
@@ -24,13 +23,23 @@ func resource() *schema.Resource {
 		Read: Read,
 
 		Schema: map[string]*schema.Schema{
+			"template": &schema.Schema{
+				Type:          schema.TypeString,
+				Optional:      true,
+				Description:   "Contents of the template",
+				ForceNew:      true,
+				ConflictsWith: []string{"filename"},
+			},
 			"filename": &schema.Schema{
 				Type:        schema.TypeString,
-				Required:    true,
+				Optional:    true,
 				Description: "file to read template from",
 				ForceNew:    true,
 				// Make a "best effort" attempt to relativize the file path.
 				StateFunc: func(v interface{}) string {
+					if v == nil || v.(string) == "" {
+						return ""
+					}
 					pwd, err := os.Getwd()
 					if err != nil {
 						return v.(string)
@@ -41,6 +50,8 @@ func resource() *schema.Resource {
 					}
 					return rel
 				},
+				Deprecated:    "Use the 'template' attribute instead.",
+				ConflictsWith: []string{"template"},
 			},
 			"vars": &schema.Schema{
 				Type: schema.TypeMap,
@@ -96,23 +107,21 @@ func Read(d *schema.ResourceData, meta interface{}) error {
 
 type templateRenderError error
 
-var readfile func(string) ([]byte, error) = ioutil.ReadFile // testing hook
-
 func render(d *schema.ResourceData) (string, error) {
+	template := d.Get("template").(string)
 	filename := d.Get("filename").(string)
 	vars := d.Get("vars").(map[string]interface{})
 
-	path, err := homedir.Expand(filename)
+	if template == "" && filename != "" {
+		template = filename
+	}
+
+	contents, _, err := pathorcontents.Read(template)
 	if err != nil {
 		return "", err
 	}
 
-	buf, err := readfile(path)
-	if err != nil {
-		return "", err
-	}
-
-	rendered, err := execute(string(buf), vars)
+	rendered, err := execute(contents, vars)
 	if err != nil {
 		return "", templateRenderError(
 			fmt.Errorf("failed to render %v: %v", filename, err),
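
For illustration only (not part of this commit): with the schema change above, a template_file can receive its template inline instead of via the now-deprecated `filename` attribute. A minimal sketch, assuming a `greeting.tpl` file next to the configuration:

    resource "template_file" "example" {
      # New style: pass the template contents directly; file() reads them in.
      template = "${file("greeting.tpl")}"

      vars {
        name = "world"
      }
    }

    output "rendered" {
      value = "${template_file.example.rendered}"
    }

Because render() falls back from `template` to `filename`, existing configurations keep working while emitting the deprecation warning.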
@@ -26,15 +26,10 @@ func TestTemplateRendering(t *testing.T) {
 
 	for _, tt := range cases {
 		r.Test(t, r.TestCase{
-			PreCheck: func() {
-				readfile = func(string) ([]byte, error) {
-					return []byte(tt.template), nil
-				}
-			},
 			Providers: testProviders,
 			Steps: []r.TestStep{
 				r.TestStep{
-					Config: testTemplateConfig(tt.vars),
+					Config: testTemplateConfig(tt.template, tt.vars),
 					Check: func(s *terraform.State) error {
 						got := s.RootModule().Outputs["rendered"]
 						if tt.want != got {
@@ -62,14 +57,7 @@ func TestTemplateVariableChange(t *testing.T) {
 	var testSteps []r.TestStep
 	for i, step := range steps {
 		testSteps = append(testSteps, r.TestStep{
-			PreConfig: func(template string) func() {
-				return func() {
-					readfile = func(string) ([]byte, error) {
-						return []byte(template), nil
-					}
-				}
-			}(step.template),
-			Config: testTemplateConfig(step.vars),
+			Config: testTemplateConfig(step.template, step.vars),
 			Check: func(i int, want string) r.TestCheckFunc {
 				return func(s *terraform.State) error {
 					got := s.RootModule().Outputs["rendered"]
@@ -88,14 +76,13 @@ func TestTemplateVariableChange(t *testing.T) {
 	})
 }
 
-func testTemplateConfig(vars string) string {
-	return `
+func testTemplateConfig(template, vars string) string {
+	return fmt.Sprintf(`
 resource "template_file" "t0" {
-	filename = "mock"
-	vars = ` + vars + `
+	template = "%s"
+	vars = %s
 }
 output "rendered" {
 	value = "${template_file.t0.rendered}"
-}
-`
+}`, template, vars)
 }
@@ -8,7 +8,7 @@ import (
 	"io"
 	"log"
 	"os"
-	"path"
+	"path/filepath"
 	"regexp"
 	"strings"
 	"text/template"
@@ -16,6 +16,7 @@ import (
 
 	"github.com/hashicorp/terraform/communicator"
 	"github.com/hashicorp/terraform/communicator/remote"
+	"github.com/hashicorp/terraform/helper/pathorcontents"
 	"github.com/hashicorp/terraform/terraform"
 	"github.com/mitchellh/go-homedir"
 	"github.com/mitchellh/go-linereader"
@@ -79,18 +80,22 @@ type Provisioner struct {
 	OSType               string   `mapstructure:"os_type"`
 	PreventSudo          bool     `mapstructure:"prevent_sudo"`
 	RunList              []string `mapstructure:"run_list"`
-	SecretKeyPath        string   `mapstructure:"secret_key_path"`
+	SecretKey            string   `mapstructure:"secret_key"`
 	ServerURL            string   `mapstructure:"server_url"`
 	SkipInstall          bool     `mapstructure:"skip_install"`
 	SSLVerifyMode        string   `mapstructure:"ssl_verify_mode"`
 	ValidationClientName string   `mapstructure:"validation_client_name"`
-	ValidationKeyPath    string   `mapstructure:"validation_key_path"`
+	ValidationKey        string   `mapstructure:"validation_key"`
 	Version              string   `mapstructure:"version"`
 
 	installChefClient func(terraform.UIOutput, communicator.Communicator) error
 	createConfigFiles func(terraform.UIOutput, communicator.Communicator) error
 	runChefClient     func(terraform.UIOutput, communicator.Communicator) error
 	useSudo           bool
+
+	// Deprecated Fields
+	SecretKeyPath     string `mapstructure:"secret_key_path"`
+	ValidationKeyPath string `mapstructure:"validation_key_path"`
 }
 
 // ResourceProvisioner represents a generic chef provisioner
@@ -189,8 +194,9 @@ func (r *ResourceProvisioner) Validate(c *terraform.ResourceConfig) (ws []string
 	if p.ValidationClientName == "" {
 		es = append(es, fmt.Errorf("Key not found: validation_client_name"))
 	}
-	if p.ValidationKeyPath == "" {
-		es = append(es, fmt.Errorf("Key not found: validation_key_path"))
+	if p.ValidationKey == "" && p.ValidationKeyPath == "" {
+		es = append(es, fmt.Errorf(
+			"One of validation_key or the deprecated validation_key_path must be provided"))
 	}
 	if p.UsePolicyfile && p.PolicyName == "" {
 		es = append(es, fmt.Errorf("Policyfile enabled but key not found: policy_name"))
@@ -198,6 +204,14 @@ func (r *ResourceProvisioner) Validate(c *terraform.ResourceConfig) (ws []string
 	if p.UsePolicyfile && p.PolicyGroup == "" {
 		es = append(es, fmt.Errorf("Policyfile enabled but key not found: policy_group"))
 	}
+	if p.ValidationKeyPath != "" {
+		ws = append(ws, "validation_key_path is deprecated, please use "+
+			"validation_key instead and load the key contents via file()")
+	}
+	if p.SecretKeyPath != "" {
+		ws = append(ws, "secret_key_path is deprecated, please use "+
+			"secret_key instead and load the key contents via file()")
+	}
 
 	return ws, es
 }
@@ -247,20 +261,12 @@ func (r *ResourceProvisioner) decodeConfig(c *terraform.ResourceConfig) (*Provis
 		p.OhaiHints[i] = hintPath
 	}
 
-	if p.ValidationKeyPath != "" {
-		keyPath, err := homedir.Expand(p.ValidationKeyPath)
-		if err != nil {
-			return nil, fmt.Errorf("Error expanding the validation key path: %v", err)
-		}
-		p.ValidationKeyPath = keyPath
+	if p.ValidationKey == "" && p.ValidationKeyPath != "" {
+		p.ValidationKey = p.ValidationKeyPath
 	}
 
-	if p.SecretKeyPath != "" {
-		keyPath, err := homedir.Expand(p.SecretKeyPath)
-		if err != nil {
-			return nil, fmt.Errorf("Error expanding the secret key path: %v", err)
-		}
-		p.SecretKeyPath = keyPath
+	if p.SecretKey == "" && p.SecretKeyPath != "" {
+		p.SecretKey = p.SecretKeyPath
 	}
 
 	if attrs, ok := c.Config["attributes"]; ok {
@@ -316,7 +322,7 @@ func (p *Provisioner) runChefClientFunc(
 	chefCmd string,
 	confDir string) func(terraform.UIOutput, communicator.Communicator) error {
 	return func(o terraform.UIOutput, comm communicator.Communicator) error {
-		fb := path.Join(confDir, firstBoot)
+		fb := filepath.Join(confDir, firstBoot)
 		var cmd string
 
 		// Policyfiles do not support chef environments, so don't pass the `-E` flag.
@@ -331,8 +337,8 @@ func (p *Provisioner) runChefClientFunc(
 			return fmt.Errorf("Error creating logfile directory %s: %v", logfileDir, err)
 		}
 
-		logFile := path.Join(logfileDir, p.NodeName)
-		f, err := os.Create(path.Join(logFile))
+		logFile := filepath.Join(logfileDir, p.NodeName)
+		f, err := os.Create(filepath.Join(logFile))
 		if err != nil {
 			return fmt.Errorf("Error creating logfile %s: %v", logFile, err)
 		}
@@ -348,7 +354,7 @@ func (p *Provisioner) runChefClientFunc(
 
 // Output implementation of terraform.UIOutput interface
 func (p *Provisioner) Output(output string) {
-	logFile := path.Join(logfileDir, p.NodeName)
+	logFile := filepath.Join(logfileDir, p.NodeName)
 	f, err := os.OpenFile(logFile, os.O_APPEND|os.O_WRONLY, 0666)
 	if err != nil {
 		log.Printf("Error creating logfile %s: %v", logFile, err)
@@ -376,28 +382,25 @@ func (p *Provisioner) deployConfigFiles(
 	o terraform.UIOutput,
 	comm communicator.Communicator,
 	confDir string) error {
-	// Open the validation key file
-	f, err := os.Open(p.ValidationKeyPath)
+	contents, _, err := pathorcontents.Read(p.ValidationKey)
 	if err != nil {
 		return err
 	}
-	defer f.Close()
+	f := strings.NewReader(contents)
 
 	// Copy the validation key to the new instance
-	if err := comm.Upload(path.Join(confDir, validationKey), f); err != nil {
+	if err := comm.Upload(filepath.Join(confDir, validationKey), f); err != nil {
 		return fmt.Errorf("Uploading %s failed: %v", validationKey, err)
 	}
 
-	if p.SecretKeyPath != "" {
-		// Open the secret key file
-		s, err := os.Open(p.SecretKeyPath)
+	if p.SecretKey != "" {
+		contents, _, err := pathorcontents.Read(p.SecretKey)
 		if err != nil {
 			return err
 		}
-		defer s.Close()
+		s := strings.NewReader(contents)
 
 		// Copy the secret key to the new instance
-		if err := comm.Upload(path.Join(confDir, secretKey), s); err != nil {
+		if err := comm.Upload(filepath.Join(confDir, secretKey), s); err != nil {
 			return fmt.Errorf("Uploading %s failed: %v", secretKey, err)
 		}
 	}
@@ -417,7 +420,7 @@ func (p *Provisioner) deployConfigFiles(
 	}
 
 	// Copy the client config to the new instance
-	if err := comm.Upload(path.Join(confDir, clienrb), &buf); err != nil {
+	if err := comm.Upload(filepath.Join(confDir, clienrb), &buf); err != nil {
 		return fmt.Errorf("Uploading %s failed: %v", clienrb, err)
 	}
 
@@ -446,7 +449,7 @@ func (p *Provisioner) deployConfigFiles(
 	}
 
 	// Copy the first-boot.json to the new instance
-	if err := comm.Upload(path.Join(confDir, firstBoot), bytes.NewReader(d)); err != nil {
+	if err := comm.Upload(filepath.Join(confDir, firstBoot), bytes.NewReader(d)); err != nil {
 		return fmt.Errorf("Uploading %s failed: %v", firstBoot, err)
 	}
 
@@ -466,8 +469,8 @@ func (p *Provisioner) deployOhaiHints(
 		defer f.Close()
 
 		// Copy the hint to the new instance
-		if err := comm.Upload(path.Join(hintDir, path.Base(hint)), f); err != nil {
-			return fmt.Errorf("Uploading %s failed: %v", path.Base(hint), err)
+		if err := comm.Upload(filepath.Join(hintDir, filepath.Base(hint)), f); err != nil {
+			return fmt.Errorf("Uploading %s failed: %v", filepath.Base(hint), err)
 		}
 	}
 
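
For illustration only: a hypothetical chef provisioner block using the new contents-based attributes. As the deprecation warnings above suggest, key contents can be loaded with file():

    provisioner "chef" {
      server_url             = "https://chef.local"
      validation_client_name = "validator"
      # New style: pass key contents rather than paths.
      validation_key         = "${file("validator.pem")}"
      secret_key             = "${file("encrypted_data_bag_secret")}"
      run_list               = ["cookbook::recipe"]
      node_name              = "node1"
    }

Since pathorcontents.Read accepts either a path or literal contents, the old `validation_key_path`/`secret_key_path` values are simply copied into the new fields and still resolve.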
@@ -22,7 +22,7 @@ func TestResourceProvider_Validate_good(t *testing.T) {
 		"run_list":               []interface{}{"cookbook::recipe"},
 		"server_url":             "https://chef.local",
 		"validation_client_name": "validator",
-		"validation_key_path":    "validator.pem",
+		"validation_key":         "contentsofsomevalidator.pem",
 	})
 	r := new(ResourceProvisioner)
 	warn, errs := r.Validate(c)
@@ -111,11 +111,27 @@ func (c *ApplyCommand) Run(args []string) int {
 		return 1
 	}
 	if !destroyForce && c.Destroy {
+		// Default destroy message
+		desc := "Terraform will delete all your managed infrastructure.\n" +
+			"There is no undo. Only 'yes' will be accepted to confirm."
+
+		// If targets are specified, list those to user
+		if c.Meta.targets != nil {
+			var descBuffer bytes.Buffer
+			descBuffer.WriteString("Terraform will delete the following infrastructure:\n")
+			for _, target := range c.Meta.targets {
+				descBuffer.WriteString("\t")
+				descBuffer.WriteString(target)
+				descBuffer.WriteString("\n")
+			}
+			descBuffer.WriteString("There is no undo. Only 'yes' will be accepted to confirm")
+			desc = descBuffer.String()
+		}
+
 		v, err := c.UIInput().Input(&terraform.InputOpts{
 			Id:    "destroy",
 			Query: "Do you really want to destroy?",
-			Description: "Terraform will delete all your managed infrastructure.\n" +
-				"There is no undo. Only 'yes' will be accepted to confirm.",
+			Description: desc,
 		})
 		if err != nil {
 			c.Ui.Error(fmt.Sprintf("Error asking for confirmation: %s", err))
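
For illustration, when `-target` flags are set the confirmation description built above reads roughly as follows (the resource addresses are hypothetical):

    Terraform will delete the following infrastructure:
        aws_instance.web
        aws_instance.db
    There is no undo. Only 'yes' will be accepted to confirm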
@@ -1,4 +1,4 @@
-// generated by stringer -type=countHookAction hook_count_action.go; DO NOT EDIT
+// Code generated by "stringer -type=countHookAction hook_count_action.go"; DO NOT EDIT
 
 package command
 
|
|||||||
c.Ui.Error(err.Error())
|
c.Ui.Error(err.Error())
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
if !validateContext(ctx, c.Ui) {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
if err := ctx.Input(c.InputMode()); err != nil {
|
if err := ctx.Input(c.InputMode()); err != nil {
|
||||||
c.Ui.Error(fmt.Sprintf("Error configuring: %s", err))
|
c.Ui.Error(fmt.Sprintf("Error configuring: %s", err))
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !validateContext(ctx, c.Ui) {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
if refresh {
|
if refresh {
|
||||||
c.Ui.Output("Refreshing Terraform state prior to plan...\n")
|
c.Ui.Output("Refreshing Terraform state prior to plan...\n")
|
||||||
state, err := ctx.Refresh()
|
state, err := ctx.Refresh()
|
||||||
|
@@ -1,6 +1,7 @@
 package command
 
 import (
+	"bytes"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -330,6 +331,30 @@ func TestPlan_vars(t *testing.T) {
 	}
 }
 
+func TestPlan_varsUnset(t *testing.T) {
+	// Disable test mode so input would be asked
+	test = false
+	defer func() { test = true }()
+
+	defaultInputReader = bytes.NewBufferString("bar\n")
+
+	p := testProvider()
+	ui := new(cli.MockUi)
+	c := &PlanCommand{
+		Meta: Meta{
+			ContextOpts: testCtxConfig(p),
+			Ui:          ui,
+		},
+	}
+
+	args := []string{
+		testFixturePath("plan-vars"),
+	}
+	if code := c.Run(args); code != 0 {
+		t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
+	}
+}
+
 func TestPlan_varFile(t *testing.T) {
 	varFilePath := testTempFile(t)
 	if err := ioutil.WriteFile(varFilePath, []byte(planVarFile), 0644); err != nil {
@@ -93,7 +93,7 @@ func (c *Communicator) Connect(o terraform.UIOutput) (err error) {
 				"  SSH Agent: %t",
 			c.connInfo.Host, c.connInfo.User,
 			c.connInfo.Password != "",
-			c.connInfo.KeyFile != "",
+			c.connInfo.PrivateKey != "",
 			c.connInfo.Agent,
 		))
 
@@ -107,7 +107,7 @@ func (c *Communicator) Connect(o terraform.UIOutput) (err error) {
 				"  SSH Agent: %t",
 				c.connInfo.BastionHost, c.connInfo.BastionUser,
 				c.connInfo.BastionPassword != "",
-				c.connInfo.BastionKeyFile != "",
+				c.connInfo.BastionPrivateKey != "",
 				c.connInfo.Agent,
 			))
 		}
@@ -3,14 +3,13 @@ package ssh
 import (
 	"encoding/pem"
 	"fmt"
-	"io/ioutil"
 	"log"
 	"net"
 	"os"
 	"time"
 
+	"github.com/hashicorp/terraform/helper/pathorcontents"
 	"github.com/hashicorp/terraform/terraform"
-	"github.com/mitchellh/go-homedir"
 	"github.com/mitchellh/mapstructure"
 	"golang.org/x/crypto/ssh"
 	"golang.org/x/crypto/ssh/agent"
@@ -37,7 +36,7 @@ const (
 type connectionInfo struct {
 	User       string
 	Password   string
-	KeyFile    string `mapstructure:"key_file"`
+	PrivateKey string `mapstructure:"private_key"`
 	Host       string
 	Port       int
 	Agent      bool
@@ -45,11 +44,15 @@ type connectionInfo struct {
 	ScriptPath string        `mapstructure:"script_path"`
 	TimeoutVal time.Duration `mapstructure:"-"`
 
 	BastionUser       string `mapstructure:"bastion_user"`
 	BastionPassword   string `mapstructure:"bastion_password"`
-	BastionKeyFile    string `mapstructure:"bastion_key_file"`
+	BastionPrivateKey string `mapstructure:"bastion_private_key"`
 	BastionHost       string `mapstructure:"bastion_host"`
 	BastionPort       int    `mapstructure:"bastion_port"`
+
+	// Deprecated
+	KeyFile        string `mapstructure:"key_file"`
+	BastionKeyFile string `mapstructure:"bastion_key_file"`
 }
 
 // parseConnectionInfo is used to convert the ConnInfo of the InstanceState into
@@ -92,6 +95,15 @@ func parseConnectionInfo(s *terraform.InstanceState) (*connectionInfo, error) {
 		connInfo.TimeoutVal = DefaultTimeout
 	}
 
+	// Load deprecated fields; we can handle either path or contents in
+	// underlying implementation.
+	if connInfo.PrivateKey == "" && connInfo.KeyFile != "" {
+		connInfo.PrivateKey = connInfo.KeyFile
+	}
+	if connInfo.BastionPrivateKey == "" && connInfo.BastionKeyFile != "" {
+		connInfo.BastionPrivateKey = connInfo.BastionKeyFile
+	}
+
 	// Default all bastion config attrs to their non-bastion counterparts
 	if connInfo.BastionHost != "" {
 		if connInfo.BastionUser == "" {
@@ -100,8 +112,8 @@ func parseConnectionInfo(s *terraform.InstanceState) (*connectionInfo, error) {
 		if connInfo.BastionPassword == "" {
 			connInfo.BastionPassword = connInfo.Password
 		}
-		if connInfo.BastionKeyFile == "" {
-			connInfo.BastionKeyFile = connInfo.KeyFile
+		if connInfo.BastionPrivateKey == "" {
+			connInfo.BastionPrivateKey = connInfo.PrivateKey
 		}
 		if connInfo.BastionPort == 0 {
 			connInfo.BastionPort = connInfo.Port
@@ -130,10 +142,10 @@ func prepareSSHConfig(connInfo *connectionInfo) (*sshConfig, error) {
 	}
 
 	sshConf, err := buildSSHClientConfig(sshClientConfigOpts{
 		user:       connInfo.User,
-		keyFile:    connInfo.KeyFile,
+		privateKey: connInfo.PrivateKey,
 		password:   connInfo.Password,
 		sshAgent:   sshAgent,
 	})
 	if err != nil {
 		return nil, err
@@ -142,10 +154,10 @@ func prepareSSHConfig(connInfo *connectionInfo) (*sshConfig, error) {
 	var bastionConf *ssh.ClientConfig
 	if connInfo.BastionHost != "" {
 		bastionConf, err = buildSSHClientConfig(sshClientConfigOpts{
 			user:       connInfo.BastionUser,
-			keyFile:    connInfo.BastionKeyFile,
+			privateKey: connInfo.BastionPrivateKey,
 			password:   connInfo.BastionPassword,
 			sshAgent:   sshAgent,
 		})
 		if err != nil {
 			return nil, err
@@ -169,10 +181,10 @@ func prepareSSHConfig(connInfo *connectionInfo) (*sshConfig, error) {
 }
 
 type sshClientConfigOpts struct {
-	keyFile    string
+	privateKey string
 	password   string
 	sshAgent   *sshAgent
 	user       string
 }
 
 func buildSSHClientConfig(opts sshClientConfigOpts) (*ssh.ClientConfig, error) {
@@ -180,8 +192,8 @@ func buildSSHClientConfig(opts sshClientConfigOpts) (*ssh.ClientConfig, error) {
 		User: opts.user,
 	}
 
-	if opts.keyFile != "" {
-		pubKeyAuth, err := readPublicKeyFromPath(opts.keyFile)
+	if opts.privateKey != "" {
+		pubKeyAuth, err := readPrivateKey(opts.privateKey)
 		if err != nil {
 			return nil, err
 		}
@@ -201,31 +213,27 @@ func buildSSHClientConfig(opts sshClientConfigOpts) (*ssh.ClientConfig, error) {
 	return conf, nil
 }
 
-func readPublicKeyFromPath(path string) (ssh.AuthMethod, error) {
-	fullPath, err := homedir.Expand(path)
+func readPrivateKey(pk string) (ssh.AuthMethod, error) {
+	key, _, err := pathorcontents.Read(pk)
 	if err != nil {
-		return nil, fmt.Errorf("Failed to expand home directory: %s", err)
-	}
-	key, err := ioutil.ReadFile(fullPath)
-	if err != nil {
-		return nil, fmt.Errorf("Failed to read key file %q: %s", path, err)
+		return nil, fmt.Errorf("Failed to read private key %q: %s", pk, err)
 	}
 
 	// We parse the private key on our own first so that we can
 	// show a nicer error if the private key has a password.
-	block, _ := pem.Decode(key)
+	block, _ := pem.Decode([]byte(key))
 	if block == nil {
-		return nil, fmt.Errorf("Failed to read key %q: no key found", path)
+		return nil, fmt.Errorf("Failed to read key %q: no key found", pk)
 	}
 	if block.Headers["Proc-Type"] == "4,ENCRYPTED" {
 		return nil, fmt.Errorf(
 			"Failed to read key %q: password protected keys are\n"+
-				"not supported. Please decrypt the key prior to use.", path)
+				"not supported. Please decrypt the key prior to use.", pk)
 	}
 
-	signer, err := ssh.ParsePrivateKey(key)
+	signer, err := ssh.ParsePrivateKey([]byte(key))
 	if err != nil {
-		return nil, fmt.Errorf("Failed to parse key file %q: %s", path, err)
+		return nil, fmt.Errorf("Failed to parse key file %q: %s", pk, err)
 	}
 
 	return ssh.PublicKeys(signer), nil
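
For illustration only: with these changes a connection block supplies the key material itself. A sketch with hypothetical host and key values:

    connection {
      type = "ssh"
      user = "root"
      host = "203.0.113.10"
      # New style: private_key carries the key contents; because
      # pathorcontents.Read also accepts a path, deprecated key_file
      # values continue to work.
      private_key = "${file("~/.ssh/id_rsa")}"
    }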
@@ -10,13 +10,13 @@ func TestProvisioner_connInfo(t *testing.T) {
 	r := &terraform.InstanceState{
 		Ephemeral: terraform.EphemeralState{
 			ConnInfo: map[string]string{
 				"type":     "ssh",
 				"user":     "root",
 				"password": "supersecret",
-				"key_file": "/my/key/file.pem",
+				"private_key": "someprivatekeycontents",
 				"host":     "127.0.0.1",
 				"port":     "22",
 				"timeout":  "30s",
 
 				"bastion_host": "127.0.1.1",
 			},
@@ -34,7 +34,7 @@ func TestProvisioner_connInfo(t *testing.T) {
 	if conf.Password != "supersecret" {
 		t.Fatalf("bad: %v", conf)
 	}
-	if conf.KeyFile != "/my/key/file.pem" {
+	if conf.PrivateKey != "someprivatekeycontents" {
 		t.Fatalf("bad: %v", conf)
 	}
 	if conf.Host != "127.0.0.1" {
@@ -61,7 +61,31 @@ func TestProvisioner_connInfo(t *testing.T) {
 	if conf.BastionPassword != "supersecret" {
 		t.Fatalf("bad: %v", conf)
 	}
-	if conf.BastionKeyFile != "/my/key/file.pem" {
+	if conf.BastionPrivateKey != "someprivatekeycontents" {
+		t.Fatalf("bad: %v", conf)
+	}
+}
+
+func TestProvisioner_connInfoLegacy(t *testing.T) {
+	r := &terraform.InstanceState{
+		Ephemeral: terraform.EphemeralState{
+			ConnInfo: map[string]string{
+				"type":     "ssh",
+				"key_file": "/my/key/file.pem",
+
+				"bastion_host": "127.0.1.1",
+			},
+		},
+	}
+
+	conf, err := parseConnectionInfo(r)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if conf.PrivateKey != "/my/key/file.pem" {
+		t.Fatalf("bad: %v", conf)
+	}
+	if conf.BastionPrivateKey != "/my/key/file.pem" {
 		t.Fatalf("bad: %v", conf)
 	}
 }
@@ -76,6 +76,13 @@ type SelfVariable struct {
 	key string
 }
 
+// SimpleVariable is an unprefixed variable, which can show up when users have
+// strings they are passing down to resources that use interpolation
+// internally. The template_file resource is an example of this.
+type SimpleVariable struct {
+	Key string
+}
+
 // A UserVariable is a variable that is referencing a user variable
 // that is inputted from outside the configuration. This looks like
 // "${var.foo}"
@@ -97,6 +104,8 @@ func NewInterpolatedVariable(v string) (InterpolatedVariable, error) {
 		return NewUserVariable(v)
 	} else if strings.HasPrefix(v, "module.") {
 		return NewModuleVariable(v)
+	} else if !strings.ContainsRune(v, '.') {
+		return NewSimpleVariable(v)
 	} else {
 		return NewResourceVariable(v)
 	}
@@ -227,6 +236,18 @@ func (v *SelfVariable) GoString() string {
 	return fmt.Sprintf("*%#v", *v)
 }
 
+func NewSimpleVariable(key string) (*SimpleVariable, error) {
+	return &SimpleVariable{key}, nil
+}
+
+func (v *SimpleVariable) FullKey() string {
+	return v.Key
+}
+
+func (v *SimpleVariable) GoString() string {
+	return fmt.Sprintf("*%#v", *v)
+}
+
 func NewUserVariable(key string) (*UserVariable, error) {
 	name := key[len("var."):]
 	elem := ""
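
For illustration: the kind of unprefixed variable SimpleVariable now accepts, as it might appear in a template passed to template_file (names hypothetical). The `${role}` below has no `var.` or `module.` prefix and is resolved later by the resource's own interpolation rather than by Terraform core:

    resource "template_file" "init" {
      template = "hostname web-${role}"

      vars {
        role = "frontend"
      }
    }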
@@ -25,6 +25,7 @@ func init() {
 		"cidrhost":    interpolationFuncCidrHost(),
 		"cidrnetmask": interpolationFuncCidrNetmask(),
 		"cidrsubnet":  interpolationFuncCidrSubnet(),
+		"coalesce":    interpolationFuncCoalesce(),
 		"compact":     interpolationFuncCompact(),
 		"concat":      interpolationFuncConcat(),
 		"element":     interpolationFuncElement(),
@@ -145,6 +146,30 @@ func interpolationFuncCidrSubnet() ast.Function {
 	}
 }
 
+// interpolationFuncCoalesce implements the "coalesce" function that
+// returns the first non null / empty string from the provided input
+func interpolationFuncCoalesce() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeString},
+		ReturnType:   ast.TypeString,
+		Variadic:     true,
+		VariadicType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			if len(args) < 2 {
+				return nil, fmt.Errorf("must provide at least two arguments")
+			}
+			for _, arg := range args {
+				argument := arg.(string)
+
+				if argument != "" {
+					return argument, nil
+				}
+			}
+			return "", nil
+		},
+	}
+}
+
 // interpolationFuncConcat implements the "concat" function that
 // concatenates multiple strings. This isn't actually necessary anymore
 // since our language supports string concat natively, but for backwards
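
For illustration, usage of the new coalesce() interpolation (variable names hypothetical). It returns the first non-empty string argument and, as the callback enforces, requires at least two arguments:

    variable "override_name" {
      default = ""
    }

    output "effective_name" {
      # Evaluates to "fallback" whenever override_name is left empty.
      value = "${coalesce(var.override_name, "fallback")}"
    }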
@@ -147,6 +147,33 @@ func TestInterpolateFuncCidrSubnet(t *testing.T) {
 	})
 }
 
+func TestInterpolateFuncCoalesce(t *testing.T) {
+	testFunction(t, testFunctionConfig{
+		Cases: []testFunctionCase{
+			{
+				`${coalesce("first", "second", "third")}`,
+				"first",
+				false,
+			},
+			{
+				`${coalesce("", "second", "third")}`,
+				"second",
+				false,
+			},
+			{
+				`${coalesce("", "", "")}`,
+				"",
+				false,
+			},
+			{
+				`${coalesce("foo")}`,
+				nil,
+				true,
+			},
+		},
+	})
+}
+
 func TestInterpolateFuncDeprecatedConcat(t *testing.T) {
 	testFunction(t, testFunctionConfig{
 		Cases: []testFunctionCase{
@@ -1,4 +1,4 @@
-// generated by stringer -type=Type; DO NOT EDIT
+// Code generated by "stringer -type=Type"; DO NOT EDIT
 
 package ast
 
@@ -25,7 +25,7 @@ func LoadJSON(raw json.RawMessage) (*Config, error) {
 
 	// Start building the result
 	hclConfig := &hclConfigurable{
-		Object: obj,
+		Root: obj,
 	}
 
 	return hclConfig.Config()
@ -5,15 +5,15 @@ import (
|
|||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
|
||||||
"github.com/hashicorp/hcl"
|
"github.com/hashicorp/hcl"
|
||||||
hclobj "github.com/hashicorp/hcl/hcl"
|
"github.com/hashicorp/hcl/hcl/ast"
|
||||||
"github.com/mitchellh/mapstructure"
|
"github.com/mitchellh/mapstructure"
|
||||||
)
|
)
|
||||||
|
|
||||||
// hclConfigurable is an implementation of configurable that knows
|
// hclConfigurable is an implementation of configurable that knows
|
||||||
// how to turn HCL configuration into a *Config object.
|
// how to turn HCL configuration into a *Config object.
|
||||||
type hclConfigurable struct {
|
type hclConfigurable struct {
|
||||||
File string
|
File string
|
||||||
Object *hclobj.Object
|
Root *ast.File
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *hclConfigurable) Config() (*Config, error) {
|
func (t *hclConfigurable) Config() (*Config, error) {
|
||||||
@ -36,7 +36,13 @@ func (t *hclConfigurable) Config() (*Config, error) {
|
|||||||
Variable map[string]*hclVariable
|
Variable map[string]*hclVariable
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := hcl.DecodeObject(&rawConfig, t.Object); err != nil {
|
// Top-level item should be the object list
|
||||||
|
list, ok := t.Root.Node.(*ast.ObjectList)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := hcl.DecodeObject(&rawConfig, list); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -73,7 +79,7 @@ func (t *hclConfigurable) Config() (*Config, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Get Atlas configuration
|
// Get Atlas configuration
|
||||||
if atlas := t.Object.Get("atlas", false); atlas != nil {
|
if atlas := list.Filter("atlas"); len(atlas.Items) > 0 {
|
||||||
var err error
|
var err error
|
||||||
config.Atlas, err = loadAtlasHcl(atlas)
|
config.Atlas, err = loadAtlasHcl(atlas)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -82,7 +88,7 @@ func (t *hclConfigurable) Config() (*Config, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Build the modules
|
// Build the modules
|
||||||
if modules := t.Object.Get("module", false); modules != nil {
|
if modules := list.Filter("module"); len(modules.Items) > 0 {
|
||||||
var err error
|
var err error
|
||||||
config.Modules, err = loadModulesHcl(modules)
|
config.Modules, err = loadModulesHcl(modules)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -91,7 +97,7 @@ func (t *hclConfigurable) Config() (*Config, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Build the provider configs
|
// Build the provider configs
|
||||||
if providers := t.Object.Get("provider", false); providers != nil {
|
if providers := list.Filter("provider"); len(providers.Items) > 0 {
|
||||||
var err error
|
var err error
|
||||||
config.ProviderConfigs, err = loadProvidersHcl(providers)
|
config.ProviderConfigs, err = loadProvidersHcl(providers)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -100,7 +106,7 @@ func (t *hclConfigurable) Config() (*Config, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Build the resources
|
// Build the resources
|
||||||
if resources := t.Object.Get("resource", false); resources != nil {
|
if resources := list.Filter("resource"); len(resources.Items) > 0 {
|
||||||
var err error
|
var err error
|
||||||
config.Resources, err = loadResourcesHcl(resources)
|
config.Resources, err = loadResourcesHcl(resources)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -109,7 +115,7 @@ func (t *hclConfigurable) Config() (*Config, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Build the outputs
|
// Build the outputs
|
||||||
if outputs := t.Object.Get("output", false); outputs != nil {
|
if outputs := list.Filter("output"); len(outputs.Items) > 0 {
|
||||||
var err error
|
var err error
|
||||||
config.Outputs, err = loadOutputsHcl(outputs)
|
config.Outputs, err = loadOutputsHcl(outputs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -118,8 +124,13 @@ func (t *hclConfigurable) Config() (*Config, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Check for invalid keys
|
// Check for invalid keys
|
||||||
for _, elem := range t.Object.Elem(true) {
|
for _, item := range list.Items {
|
||||||
k := elem.Key
|
if len(item.Keys) == 0 {
|
||||||
|
// Not sure how this would happen, but let's avoid a panic
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
k := item.Keys[0].Token.Value().(string)
|
||||||
if _, ok := validKeys[k]; ok {
|
if _, ok := validKeys[k]; ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -133,8 +144,6 @@ func (t *hclConfigurable) Config() (*Config, error) {
|
|||||||
// loadFileHcl is a fileLoaderFunc that knows how to read HCL
|
// loadFileHcl is a fileLoaderFunc that knows how to read HCL
|
||||||
// files and turn them into hclConfigurables.
|
// files and turn them into hclConfigurables.
|
||||||
func loadFileHcl(root string) (configurable, []string, error) {
|
func loadFileHcl(root string) (configurable, []string, error) {
|
||||||
var obj *hclobj.Object = nil
|
|
||||||
|
|
||||||
// Read the HCL file and prepare for parsing
|
// Read the HCL file and prepare for parsing
|
||||||
d, err := ioutil.ReadFile(root)
|
d, err := ioutil.ReadFile(root)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -143,7 +152,7 @@ func loadFileHcl(root string) (configurable, []string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Parse it
|
// Parse it
|
||||||
obj, err = hcl.Parse(string(d))
|
hclRoot, err := hcl.Parse(string(d))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf(
|
return nil, nil, fmt.Errorf(
|
||||||
"Error parsing %s: %s", root, err)
|
"Error parsing %s: %s", root, err)
|
||||||
@ -151,8 +160,8 @@ func loadFileHcl(root string) (configurable, []string, error) {
|
|||||||
|
|
||||||
// Start building the result
|
// Start building the result
|
||||||
result := &hclConfigurable{
|
result := &hclConfigurable{
|
||||||
File: root,
|
File: root,
|
||||||
Object: obj,
|
Root: hclRoot,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Dive in, find the imports. This is disabled for now since
|
// Dive in, find the imports. This is disabled for now since
|
||||||
@ -200,9 +209,16 @@ func loadFileHcl(root string) (configurable, []string, error) {
|
|||||||
|
|
||||||
// Given a handle to a HCL object, this transforms it into the Atlas
|
// Given a handle to a HCL object, this transforms it into the Atlas
|
||||||
// configuration.
|
// configuration.
|
||||||
func loadAtlasHcl(obj *hclobj.Object) (*AtlasConfig, error) {
|
func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) {
|
||||||
|
if len(list.Items) > 1 {
|
||||||
|
return nil, fmt.Errorf("only one 'atlas' block allowed")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get our one item
|
||||||
|
item := list.Items[0]
|
||||||
|
|
||||||
var config AtlasConfig
|
var config AtlasConfig
|
||||||
if err := hcl.DecodeObject(&config, obj); err != nil {
|
if err := hcl.DecodeObject(&config, item.Val); err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, fmt.Errorf(
|
||||||
"Error reading atlas config: %s",
|
"Error reading atlas config: %s",
|
||||||
err)
|
err)
|
||||||
@ -217,18 +233,10 @@ func loadAtlasHcl(obj *hclobj.Object) (*AtlasConfig, error) {
|
|||||||
// The resulting modules may not be unique, but each module
|
// The resulting modules may not be unique, but each module
|
||||||
// represents exactly one module definition in the HCL configuration.
|
// represents exactly one module definition in the HCL configuration.
|
||||||
// We leave it up to another pass to merge them together.
|
// We leave it up to another pass to merge them together.
|
||||||
func loadModulesHcl(os *hclobj.Object) ([]*Module, error) {
|
func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
|
||||||
var allNames []*hclobj.Object
|
list = list.Children()
|
||||||
|
if len(list.Items) == 0 {
|
||||||
// See loadResourcesHcl for why this exists. Don't touch this.
|
return nil, nil
|
||||||
for _, o1 := range os.Elem(false) {
|
|
||||||
// Iterate the inner to get the list of types
|
|
||||||
for _, o2 := range o1.Elem(true) {
|
|
||||||
// Iterate all of this type to get _all_ the types
|
|
||||||
for _, o3 := range o2.Elem(false) {
|
|
||||||
allNames = append(allNames, o3)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Where all the results will go
|
// Where all the results will go
|
||||||
@ -236,11 +244,18 @@ func loadModulesHcl(os *hclobj.Object) ([]*Module, error) {
|
|||||||
|
|
||||||
// Now go over all the types and their children in order to get
|
// Now go over all the types and their children in order to get
|
||||||
// all of the actual resources.
|
// all of the actual resources.
|
||||||
for _, obj := range allNames {
|
for _, item := range list.Items {
|
||||||
k := obj.Key
|
k := item.Keys[0].Token.Value().(string)
|
||||||
|
|
||||||
|
var listVal *ast.ObjectList
|
||||||
|
if ot, ok := item.Val.(*ast.ObjectType); ok {
|
||||||
|
listVal = ot.List
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("module '%s': should be an object", k)
|
||||||
|
}
|
||||||
|
|
||||||
var config map[string]interface{}
|
var config map[string]interface{}
|
||||||
if err := hcl.DecodeObject(&config, obj); err != nil {
|
if err := hcl.DecodeObject(&config, item.Val); err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, fmt.Errorf(
|
||||||
"Error reading config for %s: %s",
|
"Error reading config for %s: %s",
|
||||||
k,
|
k,
|
||||||
@ -260,8 +275,8 @@ func loadModulesHcl(os *hclobj.Object) ([]*Module, error) {
|
|||||||
|
|
||||||
// If we have a count, then figure it out
|
// If we have a count, then figure it out
|
||||||
var source string
|
var source string
|
||||||
if o := obj.Get("source", false); o != nil {
|
if o := listVal.Filter("source"); len(o.Items) > 0 {
|
||||||
err = hcl.DecodeObject(&source, o)
|
err = hcl.DecodeObject(&source, o.Items[0].Val)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, fmt.Errorf(
|
||||||
"Error parsing source for %s: %s",
|
"Error parsing source for %s: %s",
|
||||||
@ -282,27 +297,19 @@ func loadModulesHcl(os *hclobj.Object) ([]*Module, error) {
|
|||||||
|
|
||||||
// LoadOutputsHcl recurses into the given HCL object and turns
|
// LoadOutputsHcl recurses into the given HCL object and turns
|
||||||
// it into a mapping of outputs.
|
// it into a mapping of outputs.
|
||||||
func loadOutputsHcl(os *hclobj.Object) ([]*Output, error) {
|
func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) {
|
||||||
objects := make(map[string]*hclobj.Object)
|
list = list.Children()
|
||||||
|
if len(list.Items) == 0 {
|
||||||
// Iterate over all the "output" blocks and get the keys along with
|
|
||||||
// their raw configuration objects. We'll parse those later.
|
|
||||||
for _, o1 := range os.Elem(false) {
|
|
||||||
for _, o2 := range o1.Elem(true) {
|
|
||||||
objects[o2.Key] = o2
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(objects) == 0 {
|
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Go through each object and turn it into an actual result.
|
// Go through each object and turn it into an actual result.
|
||||||
result := make([]*Output, 0, len(objects))
|
result := make([]*Output, 0, len(list.Items))
|
||||||
for n, o := range objects {
|
for _, item := range list.Items {
|
||||||
var config map[string]interface{}
|
n := item.Keys[0].Token.Value().(string)
|
||||||
|
|
||||||
if err := hcl.DecodeObject(&config, o); err != nil {
|
var config map[string]interface{}
|
||||||
|
if err := hcl.DecodeObject(&config, item.Val); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -325,27 +332,26 @@ func loadOutputsHcl(os *hclobj.Object) ([]*Output, error) {
|
|||||||
|
|
||||||
// LoadProvidersHcl recurses into the given HCL object and turns
|
// LoadProvidersHcl recurses into the given HCL object and turns
|
||||||
// it into a mapping of provider configs.
|
// it into a mapping of provider configs.
|
||||||
func loadProvidersHcl(os *hclobj.Object) ([]*ProviderConfig, error) {
|
func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
|
||||||
var objects []*hclobj.Object
|
list = list.Children()
|
||||||
|
if len(list.Items) == 0 {
|
||||||
// Iterate over all the "provider" blocks and get the keys along with
|
|
||||||
// their raw configuration objects. We'll parse those later.
|
|
||||||
for _, o1 := range os.Elem(false) {
|
|
||||||
for _, o2 := range o1.Elem(true) {
|
|
||||||
objects = append(objects, o2)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(objects) == 0 {
|
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Go through each object and turn it into an actual result.
|
// Go through each object and turn it into an actual result.
|
||||||
result := make([]*ProviderConfig, 0, len(objects))
|
result := make([]*ProviderConfig, 0, len(list.Items))
|
||||||
for _, o := range objects {
|
for _, item := range list.Items {
|
||||||
var config map[string]interface{}
|
n := item.Keys[0].Token.Value().(string)
|
||||||
|
|
||||||
if err := hcl.DecodeObject(&config, o); err != nil {
|
var listVal *ast.ObjectList
|
||||||
|
if ot, ok := item.Val.(*ast.ObjectType); ok {
|
||||||
|
listVal = ot.List
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("module '%s': should be an object", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
var config map[string]interface{}
|
||||||
|
if err := hcl.DecodeObject(&config, item.Val); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -355,24 +361,24 @@ func loadProvidersHcl(os *hclobj.Object) ([]*ProviderConfig, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, fmt.Errorf(
|
||||||
"Error reading config for provider config %s: %s",
|
"Error reading config for provider config %s: %s",
|
||||||
o.Key,
|
n,
|
||||||
err)
|
err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we have an alias field, then add those in
|
// If we have an alias field, then add those in
|
||||||
var alias string
|
var alias string
|
||||||
if a := o.Get("alias", false); a != nil {
|
if a := listVal.Filter("alias"); len(a.Items) > 0 {
|
||||||
err := hcl.DecodeObject(&alias, a)
|
err := hcl.DecodeObject(&alias, a.Items[0].Val)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, fmt.Errorf(
|
||||||
"Error reading alias for provider[%s]: %s",
|
"Error reading alias for provider[%s]: %s",
|
||||||
o.Key,
|
n,
|
||||||
err)
|
err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
result = append(result, &ProviderConfig{
|
result = append(result, &ProviderConfig{
|
||||||
Name: o.Key,
|
Name: n,
|
||||||
Alias: alias,
|
Alias: alias,
|
||||||
RawConfig: rawConfig,
|
RawConfig: rawConfig,
|
||||||
})
|
})
|
||||||
@@ -387,27 +393,10 @@ func loadProvidersHcl(os *hclobj.Object) ([]*ProviderConfig, error) {
 // The resulting resources may not be unique, but each resource
 // represents exactly one resource definition in the HCL configuration.
 // We leave it up to another pass to merge them together.
-func loadResourcesHcl(os *hclobj.Object) ([]*Resource, error) {
-	var allTypes []*hclobj.Object
-
-	// HCL object iteration is really nasty. Below is likely to make
-	// no sense to anyone approaching this code. Luckily, it is very heavily
-	// tested. If working on a bug fix or feature, we recommend writing a
-	// test first then doing whatever you want to the code below. If you
-	// break it, the tests will catch it. Likewise, if you change this,
-	// MAKE SURE you write a test for your change, because its fairly impossible
-	// to reason about this mess.
-	//
-	// Functionally, what the code does below is get the libucl.Objects
-	// for all the TYPES, such as "aws_security_group".
-	for _, o1 := range os.Elem(false) {
-		// Iterate the inner to get the list of types
-		for _, o2 := range o1.Elem(true) {
-			// Iterate all of this type to get _all_ the types
-			for _, o3 := range o2.Elem(false) {
-				allTypes = append(allTypes, o3)
-			}
-		}
+func loadResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
+	list = list.Children()
+	if len(list.Items) == 0 {
+		return nil, nil
 	}

 	// Where all the results will go
@@ -415,191 +404,178 @@ func loadResourcesHcl(os *hclobj.Object) ([]*Resource, error) {

 	// Now go over all the types and their children in order to get
 	// all of the actual resources.
-	for _, t := range allTypes {
-		for _, obj := range t.Elem(true) {
-			k := obj.Key
-
-			var config map[string]interface{}
-			if err := hcl.DecodeObject(&config, obj); err != nil {
-				return nil, fmt.Errorf(
-					"Error reading config for %s[%s]: %s",
-					t.Key,
-					k,
-					err)
-			}
-
-			// Remove the fields we handle specially
-			delete(config, "connection")
-			delete(config, "count")
-			delete(config, "depends_on")
-			delete(config, "provisioner")
-			delete(config, "provider")
-			delete(config, "lifecycle")
-
-			rawConfig, err := NewRawConfig(config)
-			if err != nil {
-				return nil, fmt.Errorf(
-					"Error reading config for %s[%s]: %s",
-					t.Key,
-					k,
-					err)
-			}
-
-			// If we have a count, then figure it out
-			var count string = "1"
-			if o := obj.Get("count", false); o != nil {
-				err = hcl.DecodeObject(&count, o)
-				if err != nil {
-					return nil, fmt.Errorf(
-						"Error parsing count for %s[%s]: %s",
-						t.Key,
-						k,
-						err)
-				}
-			}
-			countConfig, err := NewRawConfig(map[string]interface{}{
-				"count": count,
-			})
-			if err != nil {
-				return nil, err
-			}
-			countConfig.Key = "count"
-
-			// If we have depends fields, then add those in
-			var dependsOn []string
-			if o := obj.Get("depends_on", false); o != nil {
-				err := hcl.DecodeObject(&dependsOn, o)
-				if err != nil {
-					return nil, fmt.Errorf(
-						"Error reading depends_on for %s[%s]: %s",
-						t.Key,
-						k,
-						err)
-				}
-			}
-
-			// If we have connection info, then parse those out
-			var connInfo map[string]interface{}
-			if o := obj.Get("connection", false); o != nil {
-				err := hcl.DecodeObject(&connInfo, o)
-				if err != nil {
-					return nil, fmt.Errorf(
-						"Error reading connection info for %s[%s]: %s",
-						t.Key,
-						k,
-						err)
-				}
-			}
-
-			// If we have provisioners, then parse those out
-			var provisioners []*Provisioner
-			if os := obj.Get("provisioner", false); os != nil {
-				var err error
-				provisioners, err = loadProvisionersHcl(os, connInfo)
-				if err != nil {
-					return nil, fmt.Errorf(
-						"Error reading provisioners for %s[%s]: %s",
-						t.Key,
-						k,
-						err)
-				}
-			}
-
-			// If we have a provider, then parse it out
-			var provider string
-			if o := obj.Get("provider", false); o != nil {
-				err := hcl.DecodeObject(&provider, o)
-				if err != nil {
-					return nil, fmt.Errorf(
-						"Error reading provider for %s[%s]: %s",
-						t.Key,
-						k,
-						err)
-				}
-			}
-
-			// Check if the resource should be re-created before
-			// destroying the existing instance
-			var lifecycle ResourceLifecycle
-			if o := obj.Get("lifecycle", false); o != nil {
-				var raw map[string]interface{}
-				if err = hcl.DecodeObject(&raw, o); err != nil {
-					return nil, fmt.Errorf(
-						"Error parsing lifecycle for %s[%s]: %s",
-						t.Key,
-						k,
-						err)
-				}
-
-				if err := mapstructure.WeakDecode(raw, &lifecycle); err != nil {
-					return nil, fmt.Errorf(
-						"Error parsing lifecycle for %s[%s]: %s",
-						t.Key,
-						k,
-						err)
-				}
-			}
-
-			result = append(result, &Resource{
-				Name:         k,
-				Type:         t.Key,
-				RawCount:     countConfig,
-				RawConfig:    rawConfig,
-				Provisioners: provisioners,
-				Provider:     provider,
-				DependsOn:    dependsOn,
-				Lifecycle:    lifecycle,
-			})
+	for _, item := range list.Items {
+		if len(item.Keys) != 2 {
+			// TODO: bad error message
+			return nil, fmt.Errorf("resource needs exactly 2 names")
 		}
+
+		t := item.Keys[0].Token.Value().(string)
+		k := item.Keys[1].Token.Value().(string)
+
+		var listVal *ast.ObjectList
+		if ot, ok := item.Val.(*ast.ObjectType); ok {
+			listVal = ot.List
+		} else {
+			return nil, fmt.Errorf("resources %s[%s]: should be an object", t, k)
+		}
+
+		var config map[string]interface{}
+		if err := hcl.DecodeObject(&config, item.Val); err != nil {
+			return nil, fmt.Errorf(
+				"Error reading config for %s[%s]: %s",
+				t,
+				k,
+				err)
+		}
+
+		// Remove the fields we handle specially
+		delete(config, "connection")
+		delete(config, "count")
+		delete(config, "depends_on")
+		delete(config, "provisioner")
+		delete(config, "provider")
+		delete(config, "lifecycle")
+
+		rawConfig, err := NewRawConfig(config)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"Error reading config for %s[%s]: %s",
+				t,
+				k,
+				err)
+		}
+
+		// If we have a count, then figure it out
+		var count string = "1"
+		if o := listVal.Filter("count"); len(o.Items) > 0 {
+			err = hcl.DecodeObject(&count, o.Items[0].Val)
+			if err != nil {
+				return nil, fmt.Errorf(
+					"Error parsing count for %s[%s]: %s",
+					t,
+					k,
+					err)
+			}
+		}
+		countConfig, err := NewRawConfig(map[string]interface{}{
+			"count": count,
+		})
+		if err != nil {
+			return nil, err
+		}
+		countConfig.Key = "count"
+
+		// If we have depends fields, then add those in
+		var dependsOn []string
+		if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
+			err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
+			if err != nil {
+				return nil, fmt.Errorf(
+					"Error reading depends_on for %s[%s]: %s",
+					t,
+					k,
+					err)
+			}
+		}
+
+		// If we have connection info, then parse those out
+		var connInfo map[string]interface{}
+		if o := listVal.Filter("connection"); len(o.Items) > 0 {
+			err := hcl.DecodeObject(&connInfo, o.Items[0].Val)
+			if err != nil {
+				return nil, fmt.Errorf(
+					"Error reading connection info for %s[%s]: %s",
+					t,
+					k,
+					err)
+			}
+		}
+
+		// If we have provisioners, then parse those out
+		var provisioners []*Provisioner
+		if os := listVal.Filter("provisioner"); len(os.Items) > 0 {
+			var err error
+			provisioners, err = loadProvisionersHcl(os, connInfo)
+			if err != nil {
+				return nil, fmt.Errorf(
+					"Error reading provisioners for %s[%s]: %s",
+					t,
+					k,
+					err)
+			}
+		}
+
+		// If we have a provider, then parse it out
+		var provider string
+		if o := listVal.Filter("provider"); len(o.Items) > 0 {
+			err := hcl.DecodeObject(&provider, o.Items[0].Val)
+			if err != nil {
+				return nil, fmt.Errorf(
+					"Error reading provider for %s[%s]: %s",
+					t,
+					k,
+					err)
+			}
+		}
+
+		// Check if the resource should be re-created before
+		// destroying the existing instance
+		var lifecycle ResourceLifecycle
+		if o := listVal.Filter("lifecycle"); len(o.Items) > 0 {
+			var raw map[string]interface{}
+			if err = hcl.DecodeObject(&raw, o.Items[0].Val); err != nil {
+				return nil, fmt.Errorf(
+					"Error parsing lifecycle for %s[%s]: %s",
+					t,
+					k,
+					err)
+			}
+
+			if err := mapstructure.WeakDecode(raw, &lifecycle); err != nil {
+				return nil, fmt.Errorf(
+					"Error parsing lifecycle for %s[%s]: %s",
+					t,
+					k,
+					err)
+			}
+		}
+
+		result = append(result, &Resource{
+			Name:         k,
+			Type:         t,
+			RawCount:     countConfig,
+			RawConfig:    rawConfig,
+			Provisioners: provisioners,
+			Provider:     provider,
+			DependsOn:    dependsOn,
+			Lifecycle:    lifecycle,
+		})
 	}

 	return result, nil
 }

-func loadProvisionersHcl(os *hclobj.Object, connInfo map[string]interface{}) ([]*Provisioner, error) {
-	pos := make([]*hclobj.Object, 0, int(os.Len()))
-
-	// Accumulate all the actual provisioner configuration objects. We
-	// have to iterate twice here:
-	//
-	//  1. The first iteration is of the list of `provisioner` blocks.
-	//  2. The second iteration is of the dictionary within the
-	//      provisioner which will have only one element which is the
-	//      type of provisioner to use along with tis config.
-	//
-	// In JSON it looks kind of like this:
-	//
-	//   [
-	//     {
-	//       "shell": {
-	//         ...
-	//       }
-	//     }
-	//   ]
-	//
-	for _, o1 := range os.Elem(false) {
-		for _, o2 := range o1.Elem(true) {
-
-			switch o1.Type {
-			case hclobj.ValueTypeList:
-				for _, o3 := range o2.Elem(true) {
-					pos = append(pos, o3)
-				}
-			case hclobj.ValueTypeObject:
-				pos = append(pos, o2)
-			}
-		}
-	}
-
-	// Short-circuit if there are no items
-	if len(pos) == 0 {
+func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) {
+	list = list.Children()
+	if len(list.Items) == 0 {
 		return nil, nil
 	}

-	result := make([]*Provisioner, 0, len(pos))
-	for _, po := range pos {
+	// Go through each object and turn it into an actual result.
+	result := make([]*Provisioner, 0, len(list.Items))
+	for _, item := range list.Items {
+		n := item.Keys[0].Token.Value().(string)
+
+		var listVal *ast.ObjectList
+		if ot, ok := item.Val.(*ast.ObjectType); ok {
+			listVal = ot.List
+		} else {
+			return nil, fmt.Errorf("provisioner '%s': should be an object", n)
+		}
+
 		var config map[string]interface{}
-		if err := hcl.DecodeObject(&config, po); err != nil {
+		if err := hcl.DecodeObject(&config, item.Val); err != nil {
 			return nil, err
 		}

@@ -614,8 +590,8 @@ func loadProvisionersHcl(os *hclobj.Object, connInfo map[string]interface{}) ([]
 		// Check if we have a provisioner-level connection
 		// block that overrides the resource-level
 		var subConnInfo map[string]interface{}
-		if o := po.Get("connection", false); o != nil {
-			err := hcl.DecodeObject(&subConnInfo, o)
+		if o := listVal.Filter("connection"); len(o.Items) > 0 {
+			err := hcl.DecodeObject(&subConnInfo, o.Items[0].Val)
 			if err != nil {
 				return nil, err
 			}
@@ -640,7 +616,7 @@ func loadProvisionersHcl(os *hclobj.Object, connInfo map[string]interface{}) ([]
 		}

 		result = append(result, &Provisioner{
-			Type:      po.Key,
+			Type:      n,
 			RawConfig: rawConfig,
 			ConnInfo:  connRaw,
 		})
@@ -45,6 +45,31 @@ func TestLoadFile_badType(t *testing.T) {
 	}
 }

+func TestLoadFileHeredoc(t *testing.T) {
+	c, err := LoadFile(filepath.Join(fixtureDir, "heredoc.tf"))
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if c == nil {
+		t.Fatal("config should not be nil")
+	}
+
+	if c.Dir != "" {
+		t.Fatalf("bad: %#v", c.Dir)
+	}
+
+	actual := providerConfigsStr(c.ProviderConfigs)
+	if actual != strings.TrimSpace(heredocProvidersStr) {
+		t.Fatalf("bad:\n%s", actual)
+	}
+
+	actual = resourcesStr(c.Resources)
+	if actual != strings.TrimSpace(heredocResourcesStr) {
+		t.Fatalf("bad:\n%s", actual)
+	}
+}
+
 func TestLoadFileBasic(t *testing.T) {
 	c, err := LoadFile(filepath.Join(fixtureDir, "basic.tf"))
 	if err != nil {
@@ -532,6 +557,20 @@ func TestLoad_temporary_files(t *testing.T) {
 	}
 }

+const heredocProvidersStr = `
+aws
+  access_key
+  secret_key
+`
+
+const heredocResourcesStr = `
+aws_iam_policy[policy] (x1)
+  description
+  name
+  path
+  policy
+`
+
 const basicOutputsStr = `
 web_ip
   vars
24	config/test-fixtures/heredoc.tf	Normal file
@@ -0,0 +1,24 @@
+provider "aws" {
+    access_key = "foo"
+    secret_key = "bar"
+}
+
+resource "aws_iam_policy" "policy" {
+    name = "test_policy"
+    path = "/"
+    description = "My test policy"
+    policy = <<EOF
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Action": [
+        "ec2:Describe*"
+      ],
+      "Effect": "Allow",
+      "Resource": "*"
+    }
+  ]
+}
+EOF
+}
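This fixture exercises HCL's heredoc syntax end to end through the loader. A minimal sketch of the same round trip (assuming the `hcl.Decode` convenience entry point; the config literal is illustrative, not the fixture above):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

const config = "policy = <<EOF\n{\"Version\": \"2012-10-17\"}\nEOF\n"

func main() {
	var out struct {
		Policy string `hcl:"policy"`
	}
	// Decode parses and decodes in one step; the heredoc body becomes the
	// plain string value of the attribute.
	if err := hcl.Decode(&out, config); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", out.Policy)
}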
@@ -23,7 +23,7 @@ module APIs

 module AWS
   def self.path
-    @path ||= Pathname(`go list -f '{{.Dir}}' github.com/awslabs/aws-sdk-go/aws`.chomp).parent
+    @path ||= Pathname(`go list -f '{{.Dir}}' github.com/aws/aws-sdk-go/aws`.chomp).parent
   end

   def self.api_json_files
@@ -1,46 +0,0 @@
-package depgraph
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/terraform/digraph"
-)
-
-// Dependency is used to create a directed edge between two nouns.
-// One noun may depend on another and provide version constraints
-// that cannot be violated
-type Dependency struct {
-	Name        string
-	Meta        interface{}
-	Constraints []Constraint
-	Source      *Noun
-	Target      *Noun
-}
-
-// Constraint is used by dependencies to allow arbitrary constraints
-// between nouns
-type Constraint interface {
-	Satisfied(head, tail *Noun) (bool, error)
-}
-
-// Head returns the source, or dependent noun
-func (d *Dependency) Head() digraph.Node {
-	return d.Source
-}
-
-// Tail returns the target, or depended upon noun
-func (d *Dependency) Tail() digraph.Node {
-	return d.Target
-}
-
-func (d *Dependency) GoString() string {
-	return fmt.Sprintf(
-		"*Dependency{Name: %s, Source: %s, Target: %s}",
-		d.Name,
-		d.Source.Name,
-		d.Target.Name)
-}
-
-func (d *Dependency) String() string {
-	return d.Name
-}
@@ -1,379 +0,0 @@
-// The depgraph package is used to create and model a dependency graph
-// of nouns. Each noun can represent a service, server, application,
-// network switch, etc. Nouns can depend on other nouns, and provide
-// versioning constraints. Nouns can also have various meta data that
-// may be relevant to their construction or configuration.
-package depgraph
-
-import (
-	"bytes"
-	"fmt"
-	"sort"
-	"strings"
-	"sync"
-
-	"github.com/hashicorp/terraform/digraph"
-)
-
-// WalkFunc is the type used for the callback for Walk.
-type WalkFunc func(*Noun) error
-
-// Graph is used to represent a dependency graph.
-type Graph struct {
-	Name  string
-	Meta  interface{}
-	Nouns []*Noun
-	Root  *Noun
-}
-
-// ValidateError implements the Error interface but provides
-// additional information on a validation error.
-type ValidateError struct {
-	// If set, then the graph is missing a single root, on which
-	// there are no depdendencies
-	MissingRoot bool
-
-	// Unreachable are nodes that could not be reached from
-	// the root noun.
-	Unreachable []*Noun
-
-	// Cycles are groups of strongly connected nodes, which
-	// form a cycle. This is disallowed.
-	Cycles [][]*Noun
-}
-
-func (v *ValidateError) Error() string {
-	var msgs []string
-
-	if v.MissingRoot {
-		msgs = append(msgs, "The graph has no single root")
-	}
-
-	for _, n := range v.Unreachable {
-		msgs = append(msgs, fmt.Sprintf(
-			"Unreachable node: %s", n.Name))
-	}
-
-	for _, c := range v.Cycles {
-		cycleNodes := make([]string, len(c))
-		for i, n := range c {
-			cycleNodes[i] = n.Name
-		}
-
-		msgs = append(msgs, fmt.Sprintf(
-			"Cycle: %s", strings.Join(cycleNodes, " -> ")))
-	}
-
-	for i, m := range msgs {
-		msgs[i] = fmt.Sprintf("* %s", m)
-	}
-
-	return fmt.Sprintf(
-		"The dependency graph is not valid:\n\n%s",
-		strings.Join(msgs, "\n"))
-}
-
-// ConstraintError is used to return detailed violation
-// information from CheckConstraints
-type ConstraintError struct {
-	Violations []*Violation
-}
-
-func (c *ConstraintError) Error() string {
-	return fmt.Sprintf("%d constraint violations", len(c.Violations))
-}
-
-// Violation is used to pass along information about
-// a constraint violation
-type Violation struct {
-	Source     *Noun
-	Target     *Noun
-	Dependency *Dependency
-	Constraint Constraint
-	Err        error
-}
-
-func (v *Violation) Error() string {
-	return fmt.Sprintf("Constraint %v between %v and %v violated: %v",
-		v.Constraint, v.Source, v.Target, v.Err)
-}
-
-// CheckConstraints walks the graph and ensures that all
-// user imposed constraints are satisfied.
-func (g *Graph) CheckConstraints() error {
-	// Ensure we have a root
-	if g.Root == nil {
-		return fmt.Errorf("Graph must be validated before checking constraint violations")
-	}
-
-	// Create a constraint error
-	cErr := &ConstraintError{}
-
-	// Walk from the root
-	digraph.DepthFirstWalk(g.Root, func(n digraph.Node) bool {
-		noun := n.(*Noun)
-		for _, dep := range noun.Deps {
-			target := dep.Target
-			for _, constraint := range dep.Constraints {
-				ok, err := constraint.Satisfied(noun, target)
-				if ok {
-					continue
-				}
-				violation := &Violation{
-					Source:     noun,
-					Target:     target,
-					Dependency: dep,
-					Constraint: constraint,
-					Err:        err,
-				}
-				cErr.Violations = append(cErr.Violations, violation)
-			}
-		}
-		return true
-	})
-
-	if cErr.Violations != nil {
-		return cErr
-	}
-	return nil
-}
-
-// Noun returns the noun with the given name, or nil if it cannot be found.
-func (g *Graph) Noun(name string) *Noun {
-	for _, n := range g.Nouns {
-		if n.Name == name {
-			return n
-		}
-	}
-
-	return nil
-}
-
-// String generates a little ASCII string of the graph, useful in
-// debugging output.
-func (g *Graph) String() string {
-	var buf bytes.Buffer
-
-	// Alphabetize the output based on the noun name
-	keys := make([]string, 0, len(g.Nouns))
-	mapping := make(map[string]*Noun)
-	for _, n := range g.Nouns {
-		mapping[n.Name] = n
-		keys = append(keys, n.Name)
-	}
-	sort.Strings(keys)
-
-	if g.Root != nil {
-		buf.WriteString(fmt.Sprintf("root: %s\n", g.Root.Name))
-	} else {
-		buf.WriteString("root: <unknown>\n")
-	}
-	for _, k := range keys {
-		n := mapping[k]
-		buf.WriteString(fmt.Sprintf("%s\n", n.Name))
-
-		// Alphabetize the dependency names
-		depKeys := make([]string, 0, len(n.Deps))
-		depMapping := make(map[string]*Dependency)
-		for _, d := range n.Deps {
-			depMapping[d.Target.Name] = d
-			depKeys = append(depKeys, d.Target.Name)
-		}
-		sort.Strings(depKeys)
-
-		for _, k := range depKeys {
-			dep := depMapping[k]
-			buf.WriteString(fmt.Sprintf(
-				"  %s -> %s\n",
-				dep.Source,
-				dep.Target))
-		}
-	}
-
-	return buf.String()
-}
-
-// Validate is used to ensure that a few properties of the graph are not violated:
-// 1) There must be a single "root", or source on which nothing depends.
-// 2) All nouns in the graph must be reachable from the root
-// 3) The graph must be cycle free, meaning there are no cicular dependencies
-func (g *Graph) Validate() error {
-	// Convert to node list
-	nodes := make([]digraph.Node, len(g.Nouns))
-	for i, n := range g.Nouns {
-		nodes[i] = n
-	}
-
-	// Create a validate erro
-	vErr := &ValidateError{}
-
-	// Search for all the sources, if we have only 1, it must be the root
-	if sources := digraph.Sources(nodes); len(sources) != 1 {
-		vErr.MissingRoot = true
-		goto CHECK_CYCLES
-	} else {
-		g.Root = sources[0].(*Noun)
-	}
-
-	// Check reachability
-	if unreached := digraph.Unreachable(g.Root, nodes); len(unreached) > 0 {
-		vErr.Unreachable = make([]*Noun, len(unreached))
-		for i, u := range unreached {
-			vErr.Unreachable[i] = u.(*Noun)
-		}
-	}
-
-CHECK_CYCLES:
-	// Check for cycles
-	if cycles := digraph.StronglyConnectedComponents(nodes, true); len(cycles) > 0 {
-		vErr.Cycles = make([][]*Noun, len(cycles))
-		for i, cycle := range cycles {
-			group := make([]*Noun, len(cycle))
-			for j, n := range cycle {
-				group[j] = n.(*Noun)
-			}
-			vErr.Cycles[i] = group
-		}
-	}
-
-	// Check for loops to yourself
-	for _, n := range g.Nouns {
-		for _, d := range n.Deps {
-			if d.Source == d.Target {
-				vErr.Cycles = append(vErr.Cycles, []*Noun{n})
-			}
-		}
-	}
-
-	// Return the detailed error
-	if vErr.MissingRoot || vErr.Unreachable != nil || vErr.Cycles != nil {
-		return vErr
-	}
-	return nil
-}
-
-// Walk will walk the tree depth-first (dependency first) and call
-// the callback.
-//
-// The callbacks will be called in parallel, so if you need non-parallelism,
-// then introduce a lock in your callback.
-func (g *Graph) Walk(fn WalkFunc) error {
-	// Set so we don't callback for a single noun multiple times
-	var seenMapL sync.RWMutex
-	seenMap := make(map[*Noun]chan struct{})
-	seenMap[g.Root] = make(chan struct{})
-
-	// Keep track of what nodes errored.
-	var errMapL sync.RWMutex
-	errMap := make(map[*Noun]struct{})
-
-	// Build the list of things to visit
-	tovisit := make([]*Noun, 1, len(g.Nouns))
-	tovisit[0] = g.Root
-
-	// Spawn off all our goroutines to walk the tree
-	errCh := make(chan error)
-	for len(tovisit) > 0 {
-		// Grab the current thing to use
-		n := len(tovisit)
-		current := tovisit[n-1]
-		tovisit = tovisit[:n-1]
-
-		// Go through each dependency and run that first
-		for _, dep := range current.Deps {
-			if _, ok := seenMap[dep.Target]; !ok {
-				seenMapL.Lock()
-				seenMap[dep.Target] = make(chan struct{})
-				seenMapL.Unlock()
-				tovisit = append(tovisit, dep.Target)
-			}
-		}
-
-		// Spawn off a goroutine to execute our callback once
-		// all our dependencies are satisfied.
-		go func(current *Noun) {
-			seenMapL.RLock()
-			closeCh := seenMap[current]
-			seenMapL.RUnlock()
-
-			defer close(closeCh)
-
-			// Wait for all our dependencies
-			for _, dep := range current.Deps {
-				seenMapL.RLock()
-				ch := seenMap[dep.Target]
-				seenMapL.RUnlock()
-
-				// Wait for the dep to be run
-				<-ch
-
-				// Check if any dependencies errored. If so,
-				// then return right away, we won't walk it.
-				errMapL.RLock()
-				_, errOk := errMap[dep.Target]
-				errMapL.RUnlock()
-				if errOk {
-					return
-				}
-			}
-
-			// Call our callback!
-			if err := fn(current); err != nil {
-				errMapL.Lock()
-				errMap[current] = struct{}{}
-				errMapL.Unlock()
-
-				errCh <- err
-			}
-		}(current)
-	}
-
-	// Aggregate channel that is closed when all goroutines finish
-	doneCh := make(chan struct{})
-	go func() {
-		defer close(doneCh)
-
-		for _, ch := range seenMap {
-			<-ch
-		}
-	}()
-
-	// Wait for finish OR an error
-	select {
-	case <-doneCh:
-		return nil
-	case err := <-errCh:
-		// Drain the error channel
-		go func() {
-			for _ = range errCh {
-				// Nothing
-			}
-		}()
-
-		// Wait for the goroutines to end
-		<-doneCh
-		close(errCh)
-
-		return err
-	}
-}
-
-// DependsOn returns the set of nouns that have a
-// dependency on a given noun. This can be used to find
-// the incoming edges to a noun.
-func (g *Graph) DependsOn(n *Noun) []*Noun {
-	var incoming []*Noun
-OUTER:
-	for _, other := range g.Nouns {
-		if other == n {
-			continue
-		}
-		for _, d := range other.Deps {
-			if d.Target == n {
-				incoming = append(incoming, other)
-				continue OUTER
-			}
-		}
-	}
-	return incoming
-}
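The deleted Walk above coordinates parallel, dependency-first execution with one channel per noun: a node's channel is closed when its callback returns, and every goroutine blocks on its dependencies' channels before running. A self-contained sketch of just that pattern (names here are illustrative, not from the deleted package):

package main

import (
	"fmt"
	"sync"
)

type node struct {
	name string
	deps []*node
	done chan struct{}
}

// walk runs fn over every node concurrently, but only after all of a
// node's dependencies have finished, mirroring the channel-per-node
// scheme of the removed Graph.Walk.
func walk(nodes []*node, fn func(*node)) {
	for _, n := range nodes {
		n.done = make(chan struct{})
	}
	var wg sync.WaitGroup
	for _, n := range nodes {
		wg.Add(1)
		go func(n *node) {
			defer wg.Done()
			defer close(n.done)
			for _, d := range n.deps {
				<-d.done // wait for every dependency first
			}
			fn(n)
		}(n)
	}
	wg.Wait()
}

func main() {
	b := &node{name: "b"}
	a := &node{name: "a", deps: []*node{b}}
	walk([]*node{a, b}, func(n *node) { fmt.Println("visit", n.name) })
}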
@@ -1,467 +0,0 @@
-package depgraph
-
-import (
-	"fmt"
-	"reflect"
-	"sort"
-	"strings"
-	"sync"
-	"testing"
-)
-
-// ParseNouns is used to parse a string in the format of:
-// a -> b ; edge name
-// b -> c
-// Into a series of nouns and dependencies
-func ParseNouns(s string) map[string]*Noun {
-	lines := strings.Split(s, "\n")
-	nodes := make(map[string]*Noun)
-	for _, line := range lines {
-		var edgeName string
-		if idx := strings.Index(line, ";"); idx >= 0 {
-			edgeName = strings.Trim(line[idx+1:], " \t\r\n")
-			line = line[:idx]
-		}
-		parts := strings.SplitN(line, "->", 2)
-		if len(parts) != 2 {
-			continue
-		}
-		head_name := strings.Trim(parts[0], " \t\r\n")
-		tail_name := strings.Trim(parts[1], " \t\r\n")
-		head := nodes[head_name]
-		if head == nil {
-			head = &Noun{Name: head_name}
-			nodes[head_name] = head
-		}
-		tail := nodes[tail_name]
-		if tail == nil {
-			tail = &Noun{Name: tail_name}
-			nodes[tail_name] = tail
-		}
-		edge := &Dependency{
-			Name:   edgeName,
-			Source: head,
-			Target: tail,
-		}
-		head.Deps = append(head.Deps, edge)
-	}
-	return nodes
-}
-
-func NounMapToList(m map[string]*Noun) []*Noun {
-	list := make([]*Noun, 0, len(m))
-	for _, n := range m {
-		list = append(list, n)
-	}
-	return list
-}
-
-func TestGraph_Noun(t *testing.T) {
-	nodes := ParseNouns(`a -> b
-a -> c
-b -> d
-b -> e
-c -> d
-c -> e`)
-
-	g := &Graph{
-		Name:  "Test",
-		Nouns: NounMapToList(nodes),
-	}
-
-	n := g.Noun("a")
-	if n == nil {
-		t.Fatal("should not be nil")
-	}
-	if n.Name != "a" {
-		t.Fatalf("bad: %#v", n)
-	}
-}
-
-func TestGraph_String(t *testing.T) {
-	nodes := ParseNouns(`a -> b
-a -> c
-b -> d
-b -> e
-c -> d
-c -> e`)
-
-	g := &Graph{
-		Name:  "Test",
-		Nouns: NounMapToList(nodes),
-		Root:  nodes["a"],
-	}
-	actual := g.String()
-
-	expected := `
-root: a
-a
-  a -> b
-  a -> c
-b
-  b -> d
-  b -> e
-c
-  c -> d
-  c -> e
-d
-e
-`
-
-	actual = strings.TrimSpace(actual)
-	expected = strings.TrimSpace(expected)
-	if actual != expected {
-		t.Fatalf("bad:\n%s\n!=\n%s", actual, expected)
-	}
-}
-
-func TestGraph_Validate(t *testing.T) {
-	nodes := ParseNouns(`a -> b
-a -> c
-b -> d
-b -> e
-c -> d
-c -> e`)
-	list := NounMapToList(nodes)
-
-	g := &Graph{Name: "Test", Nouns: list}
-	if err := g.Validate(); err != nil {
-		t.Fatalf("err: %v", err)
-	}
-}
-
-func TestGraph_Validate_Cycle(t *testing.T) {
-	nodes := ParseNouns(`a -> b
-a -> c
-b -> d
-d -> b`)
-	list := NounMapToList(nodes)
-
-	g := &Graph{Name: "Test", Nouns: list}
-	err := g.Validate()
-	if err == nil {
-		t.Fatalf("expected err")
-	}
-
-	vErr, ok := err.(*ValidateError)
-	if !ok {
-		t.Fatalf("expected validate error")
-	}
-
-	if len(vErr.Cycles) != 1 {
-		t.Fatalf("expected cycles")
-	}
-
-	cycle := vErr.Cycles[0]
-	cycleNodes := make([]string, len(cycle))
-	for i, c := range cycle {
-		cycleNodes[i] = c.Name
-	}
-	sort.Strings(cycleNodes)
-
-	if cycleNodes[0] != "b" {
-		t.Fatalf("bad: %v", cycle)
-	}
-	if cycleNodes[1] != "d" {
-		t.Fatalf("bad: %v", cycle)
-	}
-}
-
-func TestGraph_Validate_MultiRoot(t *testing.T) {
-	nodes := ParseNouns(`a -> b
-c -> d`)
-	list := NounMapToList(nodes)
-
-	g := &Graph{Name: "Test", Nouns: list}
-	err := g.Validate()
-	if err == nil {
-		t.Fatalf("expected err")
-	}
-
-	vErr, ok := err.(*ValidateError)
-	if !ok {
-		t.Fatalf("expected validate error")
-	}
-
-	if !vErr.MissingRoot {
-		t.Fatalf("expected missing root")
-	}
-}
-
-func TestGraph_Validate_NoRoot(t *testing.T) {
-	nodes := ParseNouns(`a -> b
-b -> a`)
-	list := NounMapToList(nodes)
-
-	g := &Graph{Name: "Test", Nouns: list}
-	err := g.Validate()
-	if err == nil {
-		t.Fatalf("expected err")
-	}
-
-	vErr, ok := err.(*ValidateError)
-	if !ok {
-		t.Fatalf("expected validate error")
-	}
-
-	if !vErr.MissingRoot {
-		t.Fatalf("expected missing root")
-	}
-}
-
-func TestGraph_Validate_Unreachable(t *testing.T) {
-	nodes := ParseNouns(`a -> b
-a -> c
-b -> d
-x -> x`)
-	list := NounMapToList(nodes)
-
-	g := &Graph{Name: "Test", Nouns: list}
-	err := g.Validate()
-	if err == nil {
-		t.Fatalf("expected err")
-	}
-
-	vErr, ok := err.(*ValidateError)
-	if !ok {
-		t.Fatalf("expected validate error")
-	}
-
-	if len(vErr.Unreachable) != 1 {
-		t.Fatalf("expected unreachable")
-	}
-
-	if vErr.Unreachable[0].Name != "x" {
-		t.Fatalf("bad: %v", vErr.Unreachable[0])
-	}
-}
-
-type VersionMeta int
-type VersionConstraint struct {
-	Min int
-	Max int
-}
-
-func (v *VersionConstraint) Satisfied(head, tail *Noun) (bool, error) {
-	vers := int(tail.Meta.(VersionMeta))
-	if vers < v.Min {
-		return false, fmt.Errorf("version %d below minimum %d",
-			vers, v.Min)
-	} else if vers > v.Max {
-		return false, fmt.Errorf("version %d above maximum %d",
-			vers, v.Max)
-	}
-	return true, nil
-}
-
-func (v *VersionConstraint) String() string {
-	return "version"
-}
-
-func TestGraph_ConstraintViolation(t *testing.T) {
-	nodes := ParseNouns(`a -> b
-a -> c
-b -> d
-b -> e
-c -> d
-c -> e`)
-	list := NounMapToList(nodes)
-
-	// Add a version constraint
-	vers := &VersionConstraint{1, 3}
-
-	// Introduce some constraints
-	depB := nodes["a"].Deps[0]
-	depB.Constraints = []Constraint{vers}
-	depC := nodes["a"].Deps[1]
-	depC.Constraints = []Constraint{vers}
-
-	// Add some versions
-	nodes["b"].Meta = VersionMeta(0)
-	nodes["c"].Meta = VersionMeta(4)
-
-	g := &Graph{Name: "Test", Nouns: list}
-	err := g.Validate()
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-
-	err = g.CheckConstraints()
-	if err == nil {
-		t.Fatalf("Expected err")
-	}
-
-	cErr, ok := err.(*ConstraintError)
-	if !ok {
-		t.Fatalf("expected constraint error")
-	}
-
-	if len(cErr.Violations) != 2 {
-		t.Fatalf("expected 2 violations: %v", cErr)
-	}
-
-	if cErr.Violations[0].Error() != "Constraint version between a and b violated: version 0 below minimum 1" {
-		t.Fatalf("err: %v", cErr.Violations[0])
-	}
-
-	if cErr.Violations[1].Error() != "Constraint version between a and c violated: version 4 above maximum 3" {
-		t.Fatalf("err: %v", cErr.Violations[1])
-	}
-}
-
-func TestGraph_Constraint_NoViolation(t *testing.T) {
-	nodes := ParseNouns(`a -> b
-a -> c
-b -> d
-b -> e
-c -> d
-c -> e`)
-	list := NounMapToList(nodes)
-
-	// Add a version constraint
-	vers := &VersionConstraint{1, 3}
-
-	// Introduce some constraints
-	depB := nodes["a"].Deps[0]
-	depB.Constraints = []Constraint{vers}
-	depC := nodes["a"].Deps[1]
-	depC.Constraints = []Constraint{vers}
-
-	// Add some versions
-	nodes["b"].Meta = VersionMeta(2)
-	nodes["c"].Meta = VersionMeta(3)
-
-	g := &Graph{Name: "Test", Nouns: list}
-	err := g.Validate()
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-
-	err = g.CheckConstraints()
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-}
-
-func TestGraphWalk(t *testing.T) {
-	nodes := ParseNouns(`a -> b
-a -> c
-b -> d
-b -> e
-c -> d
-c -> e`)
-	list := NounMapToList(nodes)
-	g := &Graph{Name: "Test", Nouns: list}
-	if err := g.Validate(); err != nil {
-		t.Fatalf("err: %s", err)
-	}
-
-	var namesLock sync.Mutex
-	names := make([]string, 0, 0)
-	err := g.Walk(func(n *Noun) error {
-		namesLock.Lock()
-		defer namesLock.Unlock()
-		names = append(names, n.Name)
-		return nil
-	})
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
-
-	expected := [][]string{
-		{"e", "d", "c", "b", "a"},
-		{"e", "d", "b", "c", "a"},
-		{"d", "e", "c", "b", "a"},
-		{"d", "e", "b", "c", "a"},
-	}
-	found := false
-	for _, expect := range expected {
-		if reflect.DeepEqual(expect, names) {
-			found = true
-			break
-		}
-	}
-	if !found {
-		t.Fatalf("bad: %#v", names)
-	}
-}
-
-func TestGraphWalk_error(t *testing.T) {
-	nodes := ParseNouns(`a -> b
-b -> c
-a -> d
-a -> e
-e -> f
-f -> g
-g -> h`)
-	list := NounMapToList(nodes)
-	g := &Graph{Name: "Test", Nouns: list}
-	if err := g.Validate(); err != nil {
-		t.Fatalf("err: %s", err)
-	}
-
-	// We repeat this a lot because sometimes timing causes
-	// a false positive.
-	for i := 0; i < 100; i++ {
-		var lock sync.Mutex
-		var walked []string
-		err := g.Walk(func(n *Noun) error {
-			lock.Lock()
-			defer lock.Unlock()
-
-			walked = append(walked, n.Name)
-
-			if n.Name == "b" {
-				return fmt.Errorf("foo")
-			}
-
-			return nil
-		})
-		if err == nil {
-			t.Fatal("should error")
-		}
-
-		sort.Strings(walked)
-
-		expected := []string{"b", "c", "d", "e", "f", "g", "h"}
-		if !reflect.DeepEqual(walked, expected) {
-			t.Fatalf("bad: %#v", walked)
-		}
-	}
-}
-
-func TestGraph_DependsOn(t *testing.T) {
-	nodes := ParseNouns(`a -> b
-a -> c
-b -> d
-b -> e
-c -> d
-c -> e`)
-
-	g := &Graph{
-		Name:  "Test",
-		Nouns: NounMapToList(nodes),
-	}
-
-	dNoun := g.Noun("d")
-	incoming := g.DependsOn(dNoun)
-
-	if len(incoming) != 2 {
-		t.Fatalf("bad: %#v", incoming)
-	}
-
-	var hasB, hasC bool
-	for _, in := range incoming {
-		switch in.Name {
-		case "b":
-			hasB = true
-		case "c":
-			hasC = true
-		default:
-			t.Fatalf("Bad: %#v", in)
-		}
-	}
-	if !hasB || !hasC {
-		t.Fatalf("missing incoming edge")
-	}
-}
@@ -1,33 +0,0 @@
-package depgraph
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/terraform/digraph"
-)
-
-// Nouns are the key structure of the dependency graph. They can
-// be used to represent all objects in the graph. They are linked
-// by depedencies.
-type Noun struct {
-	Name string // Opaque name
-	Meta interface{}
-	Deps []*Dependency
-}
-
-// Edges returns the out-going edges of a Noun
-func (n *Noun) Edges() []digraph.Edge {
-	edges := make([]digraph.Edge, len(n.Deps))
-	for idx, dep := range n.Deps {
-		edges[idx] = dep
-	}
-	return edges
-}
-
-func (n *Noun) GoString() string {
-	return fmt.Sprintf("*%#v", *n)
-}
-
-func (n *Noun) String() string {
-	return n.Name
-}
@@ -13,7 +13,7 @@ resource "aws_s3_bucket" "prod" {
   acl = "private"
   policy = <<POLICY
 {
-  "Version": "2008-10-17",
+  "Version": "2012-10-17",
   "Statement": [
     {
       "Sid": "AllowTest",
40	helper/pathorcontents/read.go	Normal file
@@ -0,0 +1,40 @@
+// Helpers for dealing with file paths and their contents
+package pathorcontents
+
+import (
+	"io/ioutil"
+	"os"
+
+	"github.com/mitchellh/go-homedir"
+)
+
+// If the argument is a path, Read loads it and returns the contents,
+// otherwise the argument is assumed to be the desired contents and is simply
+// returned.
+//
+// The boolean second return value can be called `wasPath` - it indicates if a
+// path was detected and a file loaded.
+func Read(poc string) (string, bool, error) {
+	if len(poc) == 0 {
+		return poc, false, nil
+	}
+
+	path := poc
+	if path[0] == '~' {
+		var err error
+		path, err = homedir.Expand(path)
+		if err != nil {
+			return path, true, err
+		}
+	}
+
+	if _, err := os.Stat(path); err == nil {
+		contents, err := ioutil.ReadFile(path)
+		if err != nil {
+			return string(contents), true, err
+		}
+		return string(contents), true, nil
+	}
+
+	return poc, false, nil
+}
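The new helper lets arguments accept either a file path or inline contents (as the ssh `private_key` change in this release does). A hedged usage sketch (the key material and path below are placeholders, and the import path assumes this repository's layout):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/helper/pathorcontents"
)

func main() {
	// Inline contents pass through untouched; wasPath reports false.
	contents, wasPath, err := pathorcontents.Read("-----BEGIN RSA PRIVATE KEY-----")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(wasPath, contents)

	// An existing file path (including "~" paths) is expanded and loaded;
	// a non-existent path simply falls through as literal contents.
	contents, wasPath, err = pathorcontents.Read("~/.ssh/id_rsa")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(wasPath, len(contents))
}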
140	helper/pathorcontents/read_test.go	Normal file
@@ -0,0 +1,140 @@
+package pathorcontents
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/mitchellh/go-homedir"
+)
+
+func TestRead_Path(t *testing.T) {
+	isPath := true
+	f, cleanup := testTempFile(t)
+	defer cleanup()
+
+	if _, err := io.WriteString(f, "foobar"); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	f.Close()
+
+	contents, wasPath, err := Read(f.Name())
+
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	if wasPath != isPath {
+		t.Fatalf("expected wasPath: %t, got %t", isPath, wasPath)
+	}
+	if contents != "foobar" {
+		t.Fatalf("expected contents %s, got %s", "foobar", contents)
+	}
+}
+
+func TestRead_TildePath(t *testing.T) {
+	isPath := true
+	home, err := homedir.Dir()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	f, cleanup := testTempFile(t, home)
+	defer cleanup()
+
+	if _, err := io.WriteString(f, "foobar"); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	f.Close()
+
+	r := strings.NewReplacer(home, "~")
+	homePath := r.Replace(f.Name())
+	contents, wasPath, err := Read(homePath)
+
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	if wasPath != isPath {
+		t.Fatalf("expected wasPath: %t, got %t", isPath, wasPath)
+	}
+	if contents != "foobar" {
+		t.Fatalf("expected contents %s, got %s", "foobar", contents)
+	}
+}
+
+func TestRead_PathNoPermission(t *testing.T) {
+	isPath := true
+	f, cleanup := testTempFile(t)
+	defer cleanup()
+
+	if _, err := io.WriteString(f, "foobar"); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	f.Close()
+
+	if err := os.Chmod(f.Name(), 0); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	contents, wasPath, err := Read(f.Name())
+
+	if err == nil {
+		t.Fatal("Expected error, got none!")
+	}
+	if wasPath != isPath {
+		t.Fatalf("expected wasPath: %t, got %t", isPath, wasPath)
+	}
+	if contents != "" {
+		t.Fatalf("expected contents %s, got %s", "", contents)
+	}
+}
+
+func TestRead_Contents(t *testing.T) {
+	isPath := false
+	input := "hello"
+
+	contents, wasPath, err := Read(input)
+
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	if wasPath != isPath {
+		t.Fatalf("expected wasPath: %t, got %t", isPath, wasPath)
+	}
+	if contents != input {
+		t.Fatalf("expected contents %s, got %s", input, contents)
+	}
+}
+
+func TestRead_TildeContents(t *testing.T) {
+	isPath := false
+	input := "~/hello/notafile"
+
+	contents, wasPath, err := Read(input)
+
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	if wasPath != isPath {
+		t.Fatalf("expected wasPath: %t, got %t", isPath, wasPath)
+	}
+	if contents != input {
+		t.Fatalf("expected contents %s, got %s", input, contents)
+	}
+}
+
+// Returns an open tempfile based at baseDir and a function to clean it up.
+func testTempFile(t *testing.T, baseDir ...string) (*os.File, func()) {
+	base := ""
+	if len(baseDir) == 1 {
+		base = baseDir[0]
+	}
+	f, err := ioutil.TempFile(base, "tf")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	return f, func() {
+		os.Remove(f.Name())
+	}
+}
@@ -1,4 +1,4 @@
-// generated by stringer -type=getSource resource_data_get_source.go; DO NOT EDIT
+// Code generated by "stringer -type=getSource resource_data_get_source.go"; DO NOT EDIT

 package schema

@@ -1,4 +1,4 @@
-// generated by stringer -type=ValueType valuetype.go; DO NOT EDIT
+// Code generated by "stringer -type=ValueType valuetype.go"; DO NOT EDIT

 package schema

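These headers change to the "Code generated ... DO NOT EDIT" form that newer versions of `stringer` emit. As a hedged illustration only (this directive is a typical setup, not necessarily the one this repo uses), such files are usually regenerated from a go:generate directive placed next to the type definition:

//go:generate stringer -type=ValueType valuetype.go

// ValueType is the enum the generated String() method covers.
type ValueType int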
@@ -1,14 +0,0 @@
-package url
-
-import (
-	"net/url"
-)
-
-// Parse parses rawURL into a URL structure.
-// The rawURL may be relative or absolute.
-//
-// Parse is a wrapper for the Go stdlib net/url Parse function, but returns
-// Windows "safe" URLs on Windows platforms.
-func Parse(rawURL string) (*url.URL, error) {
-	return parse(rawURL)
-}
@@ -1,88 +0,0 @@
-package url
-
-import (
-	"runtime"
-	"testing"
-)
-
-type parseTest struct {
-	rawURL string
-	scheme string
-	host   string
-	path   string
-	str    string
-	err    bool
-}
-
-var parseTests = []parseTest{
-	{
-		rawURL: "/foo/bar",
-		scheme: "",
-		host:   "",
-		path:   "/foo/bar",
-		str:    "/foo/bar",
-		err:    false,
-	},
-	{
-		rawURL: "file:///dir/",
-		scheme: "file",
-		host:   "",
-		path:   "/dir/",
-		str:    "file:///dir/",
-		err:    false,
-	},
-}
-
-var winParseTests = []parseTest{
-	{
-		rawURL: `C:\`,
-		scheme: ``,
-		host:   ``,
-		path:   `C:/`,
-		str:    `C:/`,
-		err:    false,
-	},
-	{
-		rawURL: `file://C:\`,
-		scheme: `file`,
-		host:   ``,
-		path:   `C:/`,
-		str:    `file://C:/`,
-		err:    false,
-	},
-	{
-		rawURL: `file:///C:\`,
-		scheme: `file`,
-		host:   ``,
-		path:   `C:/`,
-		str:    `file://C:/`,
-		err:    false,
-	},
-}
-
-func TestParse(t *testing.T) {
-	if runtime.GOOS == "windows" {
-		parseTests = append(parseTests, winParseTests...)
-	}
-	for i, pt := range parseTests {
-		url, err := Parse(pt.rawURL)
-		if err != nil && !pt.err {
-			t.Errorf("test %d: unexpected error: %s", i, err)
-		}
-		if err == nil && pt.err {
-			t.Errorf("test %d: expected an error", i)
-		}
-		if url.Scheme != pt.scheme {
-			t.Errorf("test %d: expected Scheme = %q, got %q", i, pt.scheme, url.Scheme)
-		}
-		if url.Host != pt.host {
-			t.Errorf("test %d: expected Host = %q, got %q", i, pt.host, url.Host)
-		}
-		if url.Path != pt.path {
-			t.Errorf("test %d: expected Path = %q, got %q", i, pt.path, url.Path)
-		}
-		if url.String() != pt.str {
-			t.Errorf("test %d: expected url.String() = %q, got %q", i, pt.str, url.String())
-		}
-	}
-}
@@ -1,11 +0,0 @@
-// +build !windows
-
-package url
-
-import (
-	"net/url"
-)
-
-func parse(rawURL string) (*url.URL, error) {
-	return url.Parse(rawURL)
-}
@@ -1,40 +0,0 @@
-package url
-
-import (
-	"fmt"
-	"net/url"
-	"path/filepath"
-	"strings"
-)
-
-func parse(rawURL string) (*url.URL, error) {
-	// Make sure we're using "/" since URLs are "/"-based.
-	rawURL = filepath.ToSlash(rawURL)
-
-	u, err := url.Parse(rawURL)
-	if err != nil {
-		return nil, err
-	}
-
-	if len(rawURL) > 1 && rawURL[1] == ':' {
-		// Assume we're dealing with a drive letter file path where the drive
-		// letter has been parsed into the URL Scheme, and the rest of the path
-		// has been parsed into the URL Path without the leading ':' character.
-		u.Path = fmt.Sprintf("%s:%s", string(rawURL[0]), u.Path)
-		u.Scheme = ""
-	}
-
-	if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") {
-		// Assume we're dealing with a drive letter file path where the drive
-		// letter has been parsed into the URL Host.
-		u.Path = fmt.Sprintf("%s%s", u.Host, u.Path)
-		u.Host = ""
-	}
-
-	// Remove leading slash for absolute file paths.
-	if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' {
-		u.Path = u.Path[1:]
-	}
-
-	return u, err
-}
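The deleted Windows shim existed because stock `net/url` treats a drive letter as a URL scheme. A small sketch of the stdlib behavior the shim corrected (the path is a placeholder; the commented output is what `net/url` produces for this input, to the best of my understanding):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// net/url reads the drive letter as a scheme, leaving Path bare.
	u, err := url.Parse("c:/tmp/terraform.tfstate")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Scheme) // "c"
	fmt.Println(u.Path)   // "/tmp/terraform.tfstate"
}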
@@ -1,5 +1,8 @@
 #!/bin/bash

+# Switch to the stable-website branch
+git checkout stable-website
+
 # Set the tmpdir
 if [ -z "$TMPDIR" ]; then
   TMPDIR="/tmp"
@@ -2851,6 +2851,55 @@ func TestContext2Apply_outputInvalid(t *testing.T) {
 	}
 }

+func TestContext2Apply_outputAdd(t *testing.T) {
+	m1 := testModule(t, "apply-output-add-before")
+	p1 := testProvider("aws")
+	p1.ApplyFn = testApplyFn
+	p1.DiffFn = testDiffFn
+	ctx1 := testContext2(t, &ContextOpts{
+		Module: m1,
+		Providers: map[string]ResourceProviderFactory{
+			"aws": testProviderFuncFixed(p1),
+		},
+	})
+
+	if _, err := ctx1.Plan(); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	state1, err := ctx1.Apply()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	m2 := testModule(t, "apply-output-add-after")
+	p2 := testProvider("aws")
+	p2.ApplyFn = testApplyFn
+	p2.DiffFn = testDiffFn
+	ctx2 := testContext2(t, &ContextOpts{
+		Module: m2,
+		Providers: map[string]ResourceProviderFactory{
+			"aws": testProviderFuncFixed(p2),
+		},
+		State: state1,
+	})
+
+	if _, err := ctx2.Plan(); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	state2, err := ctx2.Apply()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	actual := strings.TrimSpace(state2.String())
+	expected := strings.TrimSpace(testTerraformApplyOutputAddStr)
+	if actual != expected {
+		t.Fatalf("bad: \n%s", actual)
+	}
+}
+
 func TestContext2Apply_outputList(t *testing.T) {
 	m := testModule(t, "apply-output-list")
 	p := testProvider("aws")
@@ -1627,6 +1627,53 @@ STATE:
 	}
 }
 
+func TestContext2Plan_targetedOrphan(t *testing.T) {
+	m := testModule(t, "plan-targeted-orphan")
+	p := testProvider("aws")
+	p.DiffFn = testDiffFn
+	ctx := testContext2(t, &ContextOpts{
+		Module: m,
+		Providers: map[string]ResourceProviderFactory{
+			"aws": testProviderFuncFixed(p),
+		},
+		State: &State{
+			Modules: []*ModuleState{
+				&ModuleState{
+					Path: rootModulePath,
+					Resources: map[string]*ResourceState{
+						"aws_instance.orphan": &ResourceState{
+							Type: "aws_instance",
+							Primary: &InstanceState{
+								ID: "i-789xyz",
+							},
+						},
+					},
+				},
+			},
+		},
+		Destroy: true,
+		Targets: []string{"aws_instance.orphan"},
+	})
+
+	plan, err := ctx.Plan()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	actual := strings.TrimSpace(plan.String())
+	expected := strings.TrimSpace(`DIFF:
+
+DESTROY: aws_instance.orphan
+
+STATE:
+
+aws_instance.orphan:
+  ID = i-789xyz`)
+	if actual != expected {
+		t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual)
+	}
+}
+
 func TestContext2Plan_provider(t *testing.T) {
 	m := testModule(t, "plan-provider")
 	p := testProvider("aws")
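A small aside on the comparison idiom these plan tests use: both sides go through strings.TrimSpace so that a trailing newline from the stringer, or incidental blank lines at the edges of the backtick literal, don't cause spurious mismatches. A minimal illustration, with invented values:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A raw string literal opened on its own line begins with "\n";
	// TrimSpace removes it along with any trailing whitespace.
	expected := strings.TrimSpace(`
DIFF:

DESTROY: aws_instance.orphan`)

	// Simulated plan.String() output carrying a trailing newline.
	actual := "DIFF:\n\nDESTROY: aws_instance.orphan\n"

	fmt.Println(strings.TrimSpace(actual) == expected) // prints: true
}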
@@ -105,9 +105,8 @@ func (b *BuiltinGraphBuilder) Steps(path []string) []GraphTransformer {
 		// Create all our resources from the configuration and state
 		&ConfigTransformer{Module: b.Root},
 		&OrphanTransformer{
 			State:  b.State,
 			Module: b.Root,
-			Targeting: len(b.Targets) > 0,
 		},
 
 		// Output-related transformations
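For readers outside the codebase: this hunk drops the Targeting flag from the OrphanTransformer's construction, and each entry in the Steps slice is a GraphTransformer that the builder applies in order. A minimal sketch of that pattern follows; the one-method interface matches this era of the code, while the Graph type and addNode transformer are simplified stand-ins invented for the demo:

package main

import "fmt"

// Graph is a simplified stand-in for terraform's dag-backed graph.
type Graph struct {
	nodes []string
}

// GraphTransformer is the one-method hook each Steps entry implements.
type GraphTransformer interface {
	Transform(*Graph) error
}

// addNode is a toy transformer standing in for ConfigTransformer,
// OrphanTransformer, and friends.
type addNode struct{ name string }

func (t *addNode) Transform(g *Graph) error {
	g.nodes = append(g.nodes, t.name)
	return nil
}

func main() {
	steps := []GraphTransformer{
		&addNode{"aws_instance.orphan"},
		&addNode{"output.example"},
	}

	g := &Graph{}
	// The builder runs each transformer in order, stopping on the first error.
	for _, step := range steps {
		if err := step.Transform(g); err != nil {
			fmt.Println("transform failed:", err)
			return
		}
	}
	fmt.Println(g.nodes)
}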
Some files were not shown because too many files have changed in this diff.