mirror of
https://github.com/opentofu/opentofu.git
synced 2025-02-25 18:45:20 -06:00
Merge branch 'master' into brandontosch/GH-11874
This commit is contained in:
commit
0168829ecb
13
.travis.yml
13
.travis.yml
@ -1,7 +1,20 @@
|
||||
dist: trusty
|
||||
sudo: false
|
||||
language: go
|
||||
go:
|
||||
- 1.8
|
||||
|
||||
env:
|
||||
- CONSUL_VERSION=0.7.5 TF_CONSUL_TEST=1 GOMAXPROCS=4
|
||||
|
||||
# Fetch consul for the backend and provider tests
|
||||
before_install:
|
||||
- curl -sLo consul.zip https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip
|
||||
- unzip consul.zip
|
||||
- mkdir ~/bin
|
||||
- mv consul ~/bin
|
||||
- export PATH="~/bin:$PATH"
|
||||
|
||||
install:
|
||||
# This script is used by the Travis build to install a cookie for
|
||||
# go.googlesource.com so rate limits are higher when using `go get` to fetch
|
||||
|
244
CHANGELOG.md
244
CHANGELOG.md
@ -1,18 +1,244 @@
|
||||
**TEMPORARY NOTE:** The "master" branch CHANGELOG also includes any changes
|
||||
in the branch "0-8-stable". The "master" branch is currently a development
|
||||
branch for the next major version of Terraform.
|
||||
## 0.9.2 (unreleased)
|
||||
|
||||
## 0.9.0-beta3 (unreleased)
|
||||
FEATURES:
|
||||
|
||||
BACKWARDS INCOMPATIBILITIES / NOTES:
|
||||
|
||||
* provider/aws: `aws_codebuild_project` renamed `timeout` to `build_timeout` [GH-12503]
|
||||
* provider/azurem: `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` now store the hash of custom_data instead of all custom_data [GH-12214]
|
||||
* **New Resource:** `aws_api_gateway_usage_plan` [GH-12542]
|
||||
* **New Resource:** `aws_api_gateway_usage_plan_key` [GH-12851]
|
||||
* **New Resource:** `github_repository_webhook` [GH-12924]
|
||||
* **New Interpolation:** `substr` [GH-12870]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* provider/azurerm: store only hash of `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` custom_data - reduces size of state [GH-12214]
|
||||
* core: fix `ignore_changes` causing fields to be removed during apply [GH-12897]
|
||||
* core: add `-force-copy` option to `terraform init` to suppress prompts for copying state [GH-12939]
|
||||
* helper/acctest: Add NewSSHKeyPair function [GH-12894]
|
||||
* provider/alicloud: simplify validators [GH-12982]
|
||||
* provider/aws: Added support for EMR AutoScalingRole [GH-12823]
|
||||
* provider/aws: Add `name_prefix` to `aws_autoscaling_group` and `aws_elb` resources [GH-12629]
|
||||
* provider/aws: Updated default configuration manager version in `aws_opsworks_stack` [GH-12979]
|
||||
* provider/aws: Added aws_api_gateway_api_key value attribute [GH-9462]
|
||||
* provider/aws: Allow aws_alb subnets to change [GH-12850]
|
||||
* provider/aws: Support Attachment of ALB Target Groups to Autoscaling Groups [GH-12855]
|
||||
* provider/azurerm: Add support for setting the primary network interface [GH-11290]
|
||||
* provider/cloudstack: Add `zone_id` to `cloudstack_ipaddress` resource [GH-11306]
|
||||
* provider/consul: Add support for basic auth to the provider [GH-12679]
|
||||
* provider/dnsimple: Allow dnsimple_record.priority attribute to be set [GH-12843]
|
||||
* provider/google: Add support for service_account, metadata, and image_type fields in GKE cluster config [GH-12743]
|
||||
* provider/google: Add local ssd count support for container clusters [GH-12281]
|
||||
* provider/ignition: ignition_filesystem, explicit option to create the filesystem [GH-12980]
|
||||
* provider/ns1: Ensure provider checks for credentials [GH-12920]
|
||||
* provider/openstack: Adding Timeouts to Blockstorage Resources [GH-12862]
|
||||
* provider/openstack: Adding Timeouts to FWaaS v1 Resources [GH-12863]
|
||||
* provider/openstack: Adding Timeouts to Image v2 and LBaaS v2 Resources [GH-12865]
|
||||
* provider/openstack: Adding Timeouts to Network Resources [GH-12866]
|
||||
* provider/openstack: Adding Timeouts to LBaaS v1 Resources [GH-12867]
|
||||
* provider/pagerduty: Validate credentials [GH-12854]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* core: Remove legacy remote state configuration on state migration. This fixes errors when saving plans. [GH-12888]
|
||||
* provider/arukas: Default timeout for launching container increased to 15mins (was 10mins) [GH-12849]
|
||||
* provider/aws: Fix flattened cloudfront lambda function associations to be a set not a slice [GH-11984]
|
||||
* provider/aws: Consider ACTIVE as pending state during ECS svc deletion [GH-12986]
|
||||
* provider/aws: Deprecate the usage of Api Gateway Key Stages in favor of Usage Plans [GH-12883]
|
||||
* provider/aws: prevent panic in resourceAwsSsmDocumentRead [GH-12891]
|
||||
* provider/aws: Prevent panic when setting AWS CodeBuild Source to state [GH-12915]
|
||||
* provider/aws: Only call replace Iam Instance Profile on existing machines [GH-12922]
|
||||
* provider/aws: Increase AWS AMI Destroy timeout [GH-12943]
|
||||
* provider/aws: Set aws_vpc ipv6 for associated only [GH-12899]
|
||||
* provider/aws: Fix AWS ECS placement strategy spread fields [GH-12998]
|
||||
* provider/aws: Specify that aws_network_acl_rule requires a cidr block [GH-13013]
|
||||
* provider/google: turn compute_instance_group.instances into a set [GH-12790]
|
||||
* provider/mysql: recreate user/grant if user/grant got deleted manually [GH-12791]
|
||||
|
||||
## 0.9.1 (March 17, 2017)
|
||||
|
||||
BACKWARDS INCOMPATIBILITIES / NOTES:
|
||||
|
||||
* provider/pagerduty: the deprecated `name_regex` field has been removed from vendor data source ([#12396](https://github.com/hashicorp/terraform/issues/12396))
|
||||
|
||||
FEATURES:
|
||||
|
||||
* **New Provider:** `kubernetes` ([#12372](https://github.com/hashicorp/terraform/issues/12372))
|
||||
* **New Resource:** `kubernetes_namespace` ([#12372](https://github.com/hashicorp/terraform/issues/12372))
|
||||
* **New Resource:** `kubernetes_config_map` ([#12753](https://github.com/hashicorp/terraform/issues/12753))
|
||||
* **New Data Source:** `dns_a_record_set` ([#12744](https://github.com/hashicorp/terraform/issues/12744))
|
||||
* **New Data Source:** `dns_cname_record_set` ([#12744](https://github.com/hashicorp/terraform/issues/12744))
|
||||
* **New Data Source:** `dns_txt_record_set` ([#12744](https://github.com/hashicorp/terraform/issues/12744))
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* command/init: `-backend-config` accepts `key=value` pairs
|
||||
* provider/aws: Improved error when failing to get S3 tags ([#12759](https://github.com/hashicorp/terraform/issues/12759))
|
||||
* provider/aws: Validate CIDR Blocks in SG and SG rule resources ([#12765](https://github.com/hashicorp/terraform/issues/12765))
|
||||
* provider/aws: Add KMS key tag support ([#12243](https://github.com/hashicorp/terraform/issues/12243))
|
||||
* provider/aws: Allow `name_prefix` to be used with various IAM resources ([#12658](https://github.com/hashicorp/terraform/issues/12658))
|
||||
* provider/openstack: Add timeout support for Compute resources ([#12794](https://github.com/hashicorp/terraform/issues/12794))
|
||||
* provider/scaleway: expose public IPv6 information on scaleway_server ([#12748](https://github.com/hashicorp/terraform/issues/12748))
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* core: Fix panic when an undefined module is referenced ([#12793](https://github.com/hashicorp/terraform/issues/12793))
|
||||
* core: Fix regression from 0.8.x when using a data source in a module ([#12837](https://github.com/hashicorp/terraform/issues/12837))
|
||||
* command/apply: Applies from plans with backends set will reuse the backend rather than local ([#12785](https://github.com/hashicorp/terraform/issues/12785))
|
||||
* command/init: Changing only `-backend-config` detects changes and reconfigures ([#12776](https://github.com/hashicorp/terraform/issues/12776))
|
||||
* command/init: Fix legacy backend init error that could occur when upgrading ([#12818](https://github.com/hashicorp/terraform/issues/12818))
|
||||
* command/push: Detect local state and error properly ([#12773](https://github.com/hashicorp/terraform/issues/12773))
|
||||
* command/refresh: Allow empty and non-existent state ([#12777](https://github.com/hashicorp/terraform/issues/12777))
|
||||
* provider/aws: Get the aws_lambda_function attributes when there are greater than 50 versions of a function ([#11745](https://github.com/hashicorp/terraform/issues/11745))
|
||||
* provider/aws: Correctly check for nil cidr_block in aws_network_acl ([#12735](https://github.com/hashicorp/terraform/issues/12735))
|
||||
* provider/aws: Stop setting weight property on route53_record read ([#12756](https://github.com/hashicorp/terraform/issues/12756))
|
||||
* provider/google: Fix the Google provider asking for account_file input on every run ([#12729](https://github.com/hashicorp/terraform/issues/12729))
|
||||
* provider/profitbricks: Prevent panic on profitbricks volume ([#12819](https://github.com/hashicorp/terraform/issues/12819))
|
||||
|
||||
|
||||
## 0.9.0 (March 15, 2017)
|
||||
|
||||
**This is the complete 0.8.8 to 0.9 CHANGELOG. Below this section we also have a 0.9.0-beta2 to 0.9.0 final CHANGELOG.**
|
||||
|
||||
BACKWARDS INCOMPATIBILITIES / NOTES:
|
||||
|
||||
* provider/aws: `aws_codebuild_project` renamed `timeout` to `build_timeout` ([#12503](https://github.com/hashicorp/terraform/issues/12503))
|
||||
* provider/azurem: `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` now store the hash of custom_data instead of all custom_data ([#12214](https://github.com/hashicorp/terraform/issues/12214))
|
||||
* provider/azurerm: scale_sets `os_profile_master_password` now marked as sensitive
|
||||
* provider/azurerm: sql_server `administrator_login_password` now marked as sensitive
|
||||
* provider/dnsimple: Provider has been upgraded to APIv2 therefore, you will need to use the APIv2 auth token
|
||||
* provider/google: storage buckets have been updated with the new storage classes. The old classes will continue working as before, but should be migrated as soon as possible, as there's no guarantee they'll continue working forever. ([#12044](https://github.com/hashicorp/terraform/issues/12044))
|
||||
* provider/google: compute_instance, compute_instance_template, and compute_disk all have a subtly changed logic when specifying an image family as the image; in 0.8.x they would pin to the latest image in the family when the resource is created; in 0.9.x they pass the family to the API and use its behaviour. New input formats are also supported. ([#12223](https://github.com/hashicorp/terraform/issues/12223))
|
||||
* provider/google: removed the unused and deprecated region field from google_compute_backend_service ([#12663](https://github.com/hashicorp/terraform/issues/12663))
|
||||
* provider/google: removed the deprecated account_file field for the Google Cloud provider ([#12668](https://github.com/hashicorp/terraform/issues/12668))
|
||||
* provider/google: removed the deprecated fields from google_project ([#12659](https://github.com/hashicorp/terraform/issues/12659))
|
||||
|
||||
FEATURES:
|
||||
|
||||
* **Remote Backends:** This is a successor to "remote state" and includes
|
||||
file-based configuration, an improved setup process (just run `terraform init`),
|
||||
no more local caching of remote state, and more. ([#11286](https://github.com/hashicorp/terraform/issues/11286))
|
||||
* **Destroy Provisioners:** Provisioners can now be configured to run
|
||||
on resource destruction. ([#11329](https://github.com/hashicorp/terraform/issues/11329))
|
||||
* **State Locking:** State will be automatically locked when supported by the backend.
|
||||
Backends supporting locking in this release are Local, S3 (via DynamoDB), and Consul. ([#11187](https://github.com/hashicorp/terraform/issues/11187))
|
||||
* **State Environments:** You can now create named "environments" for states. This allows you to manage distinct infrastructure resources from the same configuration.
|
||||
* **New Provider:** `Circonus` ([#12578](https://github.com/hashicorp/terraform/issues/12578))
|
||||
* **New Data Source:** `openstack_networking_network_v2` ([#12304](https://github.com/hashicorp/terraform/issues/12304))
|
||||
* **New Resource:** `aws_iam_account_alias` ([#12648](https://github.com/hashicorp/terraform/issues/12648))
|
||||
* **New Resource:** `datadog_downtime` ([#10994](https://github.com/hashicorp/terraform/issues/10994))
|
||||
* **New Resource:** `ns1_notifylist` ([#12373](https://github.com/hashicorp/terraform/issues/12373))
|
||||
* **New Resource:** `google_container_node_pool` ([#11802](https://github.com/hashicorp/terraform/issues/11802))
|
||||
* **New Resource:** `rancher_certificate` ([#12717](https://github.com/hashicorp/terraform/issues/12717))
|
||||
* **New Resource:** `rancher_host` ([#11545](https://github.com/hashicorp/terraform/issues/11545))
|
||||
* helper/schema: Added Timeouts to allow Provider/Resource developers to expose configurable timeouts for actions ([#12311](https://github.com/hashicorp/terraform/issues/12311))
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* core: Data source values can now be used as part of a `count` calculation. ([#11482](https://github.com/hashicorp/terraform/issues/11482))
|
||||
* core: "terraformrc" can contain env var references with $FOO ([#11929](https://github.com/hashicorp/terraform/issues/11929))
|
||||
* core: report all errors encountered during config validation ([#12383](https://github.com/hashicorp/terraform/issues/12383))
|
||||
* command: CLI args can be specified via env vars. Specify `TF_CLI_ARGS` or `TF_CLI_ARGS_name` (where name is the name of a command) to specify additional CLI args ([#11922](https://github.com/hashicorp/terraform/issues/11922))
|
||||
* command/init: previous behavior is retained, but init now also configures
|
||||
the new remote backends as well as downloads modules. It is the single
|
||||
command to initialize a new or existing Terraform configuration.
|
||||
* command: Display resource state ID in refresh/plan/destroy output ([#12261](https://github.com/hashicorp/terraform/issues/12261))
|
||||
* provider/aws: AWS Lambda DeadLetterConfig support ([#12188](https://github.com/hashicorp/terraform/issues/12188))
|
||||
* provider/aws: Return errors from Elastic Beanstalk ([#12425](https://github.com/hashicorp/terraform/issues/12425))
|
||||
* provider/aws: Set aws_db_cluster to snapshot by default ([#11668](https://github.com/hashicorp/terraform/issues/11668))
|
||||
* provider/aws: Enable final snapshots for aws_rds_cluster by default ([#11694](https://github.com/hashicorp/terraform/issues/11694))
|
||||
* provider/aws: Enable snapshotting by default on aws_redshift_cluster ([#11695](https://github.com/hashicorp/terraform/issues/11695))
|
||||
* provider/aws: Add support for ACM certificates to `api_gateway_domain_name` ([#12592](https://github.com/hashicorp/terraform/issues/12592))
|
||||
* provider/aws: Add support for IPv6 to aws\_security\_group\_rule ([#12645](https://github.com/hashicorp/terraform/issues/12645))
|
||||
* provider/aws: Add IPv6 Support to aws\_route\_table ([#12640](https://github.com/hashicorp/terraform/issues/12640))
|
||||
* provider/aws: Add support for IPv6 to aws\_network\_acl\_rule ([#12644](https://github.com/hashicorp/terraform/issues/12644))
|
||||
* provider/aws: Add support for IPv6 to aws\_default\_route\_table ([#12642](https://github.com/hashicorp/terraform/issues/12642))
|
||||
* provider/aws: Add support for IPv6 to aws\_network\_acl ([#12641](https://github.com/hashicorp/terraform/issues/12641))
|
||||
* provider/aws: Add support for IPv6 in aws\_route ([#12639](https://github.com/hashicorp/terraform/issues/12639))
|
||||
* provider/aws: Add support for IPv6 to aws\_security\_group ([#12655](https://github.com/hashicorp/terraform/issues/12655))
|
||||
* provider/aws: Add replace\_unhealthy\_instances to spot\_fleet\_request ([#12681](https://github.com/hashicorp/terraform/issues/12681))
|
||||
* provider/aws: Remove restriction on running aws\_opsworks\_* on us-east-1 ([#12688](https://github.com/hashicorp/terraform/issues/12688))
|
||||
* provider/aws: Improve error message on S3 Bucket Object deletion ([#12712](https://github.com/hashicorp/terraform/issues/12712))
|
||||
* provider/aws: Add log message about if changes are being applied now or later ([#12691](https://github.com/hashicorp/terraform/issues/12691))
|
||||
* provider/azurerm: Mark the azurerm_scale_set machine password as sensitive ([#11982](https://github.com/hashicorp/terraform/issues/11982))
|
||||
* provider/azurerm: Mark the azurerm_sql_server admin password as sensitive ([#12004](https://github.com/hashicorp/terraform/issues/12004))
|
||||
* provider/azurerm: Add support for managed availability sets. ([#12532](https://github.com/hashicorp/terraform/issues/12532))
|
||||
* provider/azurerm: Add support for extensions on virtual machine scale sets ([#12124](https://github.com/hashicorp/terraform/issues/12124))
|
||||
* provider/dnsimple: Upgrade DNSimple provider to API v2 ([#10760](https://github.com/hashicorp/terraform/issues/10760))
|
||||
* provider/docker: added support for linux capabilities ([#12045](https://github.com/hashicorp/terraform/issues/12045))
|
||||
* provider/fastly: Add Fastly SSL validation fields ([#12578](https://github.com/hashicorp/terraform/issues/12578))
|
||||
* provider/ignition: Migrate all of the ignition resources to data sources ([#11851](https://github.com/hashicorp/terraform/issues/11851))
|
||||
* provider/openstack: Set Availability Zone in Instances ([#12610](https://github.com/hashicorp/terraform/issues/12610))
|
||||
* provider/openstack: Force Deletion of Instances ([#12689](https://github.com/hashicorp/terraform/issues/12689))
|
||||
* provider/rancher: Better comparison of compose files ([#12561](https://github.com/hashicorp/terraform/issues/12561))
|
||||
* provider/azurerm: store only hash of `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` custom_data - reduces size of state ([#12214](https://github.com/hashicorp/terraform/issues/12214))
|
||||
* provider/vault: read vault token from `~/.vault-token` as a fallback for the
|
||||
`VAULT_TOKEN` environment variable. ([#11529](https://github.com/hashicorp/terraform/issues/11529))
|
||||
* provisioners: All provisioners now respond very quickly to interrupts for
|
||||
fast cancellation. ([#10934](https://github.com/hashicorp/terraform/issues/10934))
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* core: targeting will remove untargeted providers ([#12050](https://github.com/hashicorp/terraform/issues/12050))
|
||||
* core: doing a map lookup in a resource config with a computed set no longer crashes ([#12210](https://github.com/hashicorp/terraform/issues/12210))
|
||||
* provider/aws: Fixes issue for aws_lb_ssl_negotiation_policy of already deleted ELB ([#12360](https://github.com/hashicorp/terraform/issues/12360))
|
||||
* provider/aws: Populate the iam_instance_profile uniqueId ([#12449](https://github.com/hashicorp/terraform/issues/12449))
|
||||
* provider/aws: Only send iops when creating io1 devices ([#12392](https://github.com/hashicorp/terraform/issues/12392))
|
||||
* provider/aws: Fix spurious aws_spot_fleet_request diffs ([#12437](https://github.com/hashicorp/terraform/issues/12437))
|
||||
* provider/aws: Changing volumes in ECS task definition should force new revision ([#11403](https://github.com/hashicorp/terraform/issues/11403))
|
||||
* provider/aws: Ignore whitespace in json diff for aws_dms_replication_task options ([#12380](https://github.com/hashicorp/terraform/issues/12380))
|
||||
* provider/aws: Check spot instance is running before trying to attach volumes ([#12459](https://github.com/hashicorp/terraform/issues/12459))
|
||||
* provider/aws: Add the IPV6 cidr block to the vpc datasource ([#12529](https://github.com/hashicorp/terraform/issues/12529))
|
||||
* provider/aws: Error on trying to recreate an existing customer gateway ([#12501](https://github.com/hashicorp/terraform/issues/12501))
|
||||
* provider/aws: Prevent aws_dms_replication_task panic ([#12539](https://github.com/hashicorp/terraform/issues/12539))
|
||||
* provider/aws: output the task definition name when errors occur during refresh ([#12609](https://github.com/hashicorp/terraform/issues/12609))
|
||||
* provider/aws: Refresh iam saml provider from state on 404 ([#12602](https://github.com/hashicorp/terraform/issues/12602))
|
||||
* provider/aws: Add address, port, hosted_zone_id and endpoint for aws_db_instance datasource ([#12623](https://github.com/hashicorp/terraform/issues/12623))
|
||||
* provider/aws: Allow recreation of `aws_opsworks_user_profile` when the `user_arn` is changed ([#12595](https://github.com/hashicorp/terraform/issues/12595))
|
||||
* provider/aws: Guard clause to prevent panic on ELB connectionSettings ([#12685](https://github.com/hashicorp/terraform/issues/12685))
|
||||
* provider/azurerm: bug fix to prevent crashes during azurerm_container_service provisioning ([#12516](https://github.com/hashicorp/terraform/issues/12516))
|
||||
* provider/cobbler: Fix Profile Repos ([#12452](https://github.com/hashicorp/terraform/issues/12452))
|
||||
* provider/datadog: Update to datadog_monitor to use default values ([#12497](https://github.com/hashicorp/terraform/issues/12497))
|
||||
* provider/datadog: Default notify_no_data on datadog_monitor to false ([#11903](https://github.com/hashicorp/terraform/issues/11903))
|
||||
* provider/google: Correct the incorrect instance group manager URL returned from GKE ([#4336](https://github.com/hashicorp/terraform/issues/4336))
|
||||
* provider/google: Fix a plan/apply cycle in IAM policies ([#12387](https://github.com/hashicorp/terraform/issues/12387))
|
||||
* provider/google: Fix a plan/apply cycle in forwarding rules when only a single port is specified ([#12662](https://github.com/hashicorp/terraform/issues/12662))
|
||||
* provider/google: Minor correction : "Deleting disk" message in Delete method ([#12521](https://github.com/hashicorp/terraform/issues/12521))
|
||||
* provider/mysql: Avoid crash on un-interpolated provider cfg ([#12391](https://github.com/hashicorp/terraform/issues/12391))
|
||||
* provider/ns1: Fix incorrect schema (causing crash) for 'ns1_user.notify' ([#12721](https://github.com/hashicorp/terraform/issues/12721))
|
||||
* provider/openstack: Handle cases where volumes are disabled ([#12374](https://github.com/hashicorp/terraform/issues/12374))
|
||||
* provider/openstack: Toggle Creation of Default Security Group Rules ([#12119](https://github.com/hashicorp/terraform/issues/12119))
|
||||
* provider/openstack: Change Port fixed_ip to a Set ([#12613](https://github.com/hashicorp/terraform/issues/12613))
|
||||
* provider/openstack: Add network_id to Network data source ([#12615](https://github.com/hashicorp/terraform/issues/12615))
|
||||
* provider/openstack: Check for ErrDefault500 when creating/deleting pool member ([#12664](https://github.com/hashicorp/terraform/issues/12664))
|
||||
* provider/rancher: Apply the set value for finish_upgrade to set to prevent recurring plans ([#12545](https://github.com/hashicorp/terraform/issues/12545))
|
||||
* provider/scaleway: work around API concurrency issue ([#12707](https://github.com/hashicorp/terraform/issues/12707))
|
||||
* provider/statuscake: use default status code list when updating test ([#12375](https://github.com/hashicorp/terraform/issues/12375))
|
||||
|
||||
## 0.9.0 from 0.9.0-beta2 (March 15, 2017)
|
||||
|
||||
**This only includes changes from 0.9.0-beta2 to 0.9.0 final. The section above has the complete 0.8.x to 0.9.0 CHANGELOG.**
|
||||
|
||||
FEATURES:
|
||||
|
||||
* **New Provider:** `Circonus` ([#12578](https://github.com/hashicorp/terraform/issues/12578))
|
||||
|
||||
BACKWARDS INCOMPATIBILITIES / NOTES:
|
||||
|
||||
* provider/aws: `aws_codebuild_project` renamed `timeout` to `build_timeout` ([#12503](https://github.com/hashicorp/terraform/issues/12503))
|
||||
* provider/azurem: `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` now store the hash of custom_data instead of all custom_data ([#12214](https://github.com/hashicorp/terraform/issues/12214))
|
||||
* provider/google: compute_instance, compute_instance_template, and compute_disk all have a subtly changed logic when specifying an image family as the image; in 0.8.x they would pin to the latest image in the family when the resource is created; in 0.9.x they pass the family to the API and use its behaviour. New input formats are also supported. ([#12223](https://github.com/hashicorp/terraform/issues/12223))
|
||||
* provider/google: removed the unused and deprecated region field from google_compute_backend_service ([#12663](https://github.com/hashicorp/terraform/issues/12663))
|
||||
* provider/google: removed the deprecated account_file field for the Google Cloud provider ([#12668](https://github.com/hashicorp/terraform/issues/12668))
|
||||
* provider/google: removed the deprecated fields from google_project ([#12659](https://github.com/hashicorp/terraform/issues/12659))
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* provider/azurerm: store only hash of `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` custom_data - reduces size of state ([#12214](https://github.com/hashicorp/terraform/issues/12214))
|
||||
* report all errors encountered during config validation ([#12383](https://github.com/hashicorp/terraform/issues/12383))
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* provider/google: Correct the incorrect instance group manager URL returned from GKE ([#4336](https://github.com/hashicorp/terraform/issues/4336))
|
||||
* provider/google: Fix a plan/apply cycle in IAM policies ([#12387](https://github.com/hashicorp/terraform/issues/12387))
|
||||
* provider/google: Fix a plan/apply cycle in forwarding rules when only a single port is specified ([#12662](https://github.com/hashicorp/terraform/issues/12662))
|
||||
|
||||
## 0.9.0-beta2 (March 2, 2017)
|
||||
|
||||
BACKWARDS INCOMPATIBILITIES / NOTES:
|
||||
|
4
Makefile
4
Makefile
@ -38,10 +38,10 @@ plugin-dev: generate
|
||||
mv $(GOPATH)/bin/$(PLUGIN) $(GOPATH)/bin/terraform-$(PLUGIN)
|
||||
|
||||
# test runs the unit tests
|
||||
test:# fmtcheck errcheck generate
|
||||
test: fmtcheck errcheck generate
|
||||
go test -i $(TEST) || exit 1
|
||||
echo $(TEST) | \
|
||||
xargs -t -n4 go test $(TESTARGS) -timeout=30s -parallel=4
|
||||
xargs -t -n4 go test $(TESTARGS) -timeout=60s -parallel=4
|
||||
|
||||
# testacc runs acceptance tests
|
||||
testacc: fmtcheck generate
|
||||
|
@ -1,7 +1,7 @@
|
||||
Terraform
|
||||
=========
|
||||
|
||||
- Website: http://www.terraform.io
|
||||
- Website: https://www.terraform.io
|
||||
- [](https://gitter.im/hashicorp-terraform/Lobby)
|
||||
- Mailing list: [Google Groups](http://groups.google.com/group/terraform-tool)
|
||||
|
||||
@ -29,7 +29,7 @@ All documentation is available on the [Terraform website](http://www.terraform.i
|
||||
Developing Terraform
|
||||
--------------------
|
||||
|
||||
If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.7+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
|
||||
If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.8+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
|
||||
|
||||
For local dev first make sure Go is properly installed, including setting up a [GOPATH](http://golang.org/doc/code.html#GOPATH). You will also need to add `$GOPATH/bin` to your `$PATH`.
|
||||
|
||||
|
@ -110,8 +110,8 @@ func (b *Backend) init() {
|
||||
"address": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: defaultAtlasServer,
|
||||
Description: schemaDescriptions["address"],
|
||||
DefaultFunc: schema.EnvDefaultFunc("ATLAS_ADDRESS", defaultAtlasServer),
|
||||
},
|
||||
},
|
||||
|
||||
|
@ -1,12 +1,49 @@
|
||||
package atlas
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/config"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestImpl(t *testing.T) {
|
||||
var _ backend.Backend = new(Backend)
|
||||
var _ backend.CLI = new(Backend)
|
||||
}
|
||||
|
||||
func TestConfigure_envAddr(t *testing.T) {
|
||||
defer os.Setenv("ATLAS_ADDRESS", os.Getenv("ATLAS_ADDRESS"))
|
||||
os.Setenv("ATLAS_ADDRESS", "http://foo.com")
|
||||
|
||||
b := &Backend{}
|
||||
err := b.Configure(terraform.NewResourceConfig(config.TestRawConfig(t, map[string]interface{}{
|
||||
"name": "foo/bar",
|
||||
})))
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if b.stateClient.Server != "http://foo.com" {
|
||||
t.Fatalf("bad: %#v", b.stateClient)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigure_envToken(t *testing.T) {
|
||||
defer os.Setenv("ATLAS_TOKEN", os.Getenv("ATLAS_TOKEN"))
|
||||
os.Setenv("ATLAS_TOKEN", "foo")
|
||||
|
||||
b := &Backend{}
|
||||
err := b.Configure(terraform.NewResourceConfig(config.TestRawConfig(t, map[string]interface{}{
|
||||
"name": "foo/bar",
|
||||
})))
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if b.stateClient.AccessToken != "foo" {
|
||||
t.Fatalf("bad: %#v", b.stateClient)
|
||||
}
|
||||
}
|
||||
|
@ -12,6 +12,7 @@ import (
|
||||
backendlocal "github.com/hashicorp/terraform/backend/local"
|
||||
backendconsul "github.com/hashicorp/terraform/backend/remote-state/consul"
|
||||
backendinmem "github.com/hashicorp/terraform/backend/remote-state/inmem"
|
||||
backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3"
|
||||
)
|
||||
|
||||
// backends is the list of available backends. This is a global variable
|
||||
@ -36,6 +37,7 @@ func init() {
|
||||
"local": func() backend.Backend { return &backendlocal.Local{} },
|
||||
"consul": func() backend.Backend { return backendconsul.New() },
|
||||
"inmem": func() backend.Backend { return backendinmem.New() },
|
||||
"s3": func() backend.Backend { return backendS3.New() },
|
||||
}
|
||||
|
||||
// Add the legacy remote backends that haven't yet been converted to
|
||||
|
@ -127,7 +127,7 @@ func (b *Local) States() ([]string, error) {
|
||||
// the listing always start with "default"
|
||||
envs := []string{backend.DefaultStateName}
|
||||
|
||||
entries, err := ioutil.ReadDir(DefaultEnvDir)
|
||||
entries, err := ioutil.ReadDir(b.stateEnvDir())
|
||||
// no error if there's no envs configured
|
||||
if os.IsNotExist(err) {
|
||||
return envs, nil
|
||||
@ -166,7 +166,7 @@ func (b *Local) DeleteState(name string) error {
|
||||
}
|
||||
|
||||
delete(b.states, name)
|
||||
return os.RemoveAll(filepath.Join(DefaultEnvDir, name))
|
||||
return os.RemoveAll(filepath.Join(b.stateEnvDir(), name))
|
||||
}
|
||||
|
||||
func (b *Local) State(name string) (state.State, error) {
|
||||
@ -320,17 +320,12 @@ func (b *Local) StatePaths(name string) (string, string, string) {
|
||||
name = backend.DefaultStateName
|
||||
}
|
||||
|
||||
envDir := DefaultEnvDir
|
||||
if b.StateEnvDir != "" {
|
||||
envDir = b.StateEnvDir
|
||||
}
|
||||
|
||||
if name == backend.DefaultStateName {
|
||||
if statePath == "" {
|
||||
statePath = DefaultStateFilename
|
||||
}
|
||||
} else {
|
||||
statePath = filepath.Join(envDir, name, DefaultStateFilename)
|
||||
statePath = filepath.Join(b.stateEnvDir(), name, DefaultStateFilename)
|
||||
}
|
||||
|
||||
if stateOutPath == "" {
|
||||
@ -353,12 +348,7 @@ func (b *Local) createState(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
envDir := DefaultEnvDir
|
||||
if b.StateEnvDir != "" {
|
||||
envDir = b.StateEnvDir
|
||||
}
|
||||
|
||||
stateDir := filepath.Join(envDir, name)
|
||||
stateDir := filepath.Join(b.stateEnvDir(), name)
|
||||
s, err := os.Stat(stateDir)
|
||||
if err == nil && s.IsDir() {
|
||||
// no need to check for os.IsNotExist, since that is covered by os.MkdirAll
|
||||
@ -374,6 +364,15 @@ func (b *Local) createState(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// stateEnvDir returns the directory where state environments are stored.
|
||||
func (b *Local) stateEnvDir() string {
|
||||
if b.StateEnvDir != "" {
|
||||
return b.StateEnvDir
|
||||
}
|
||||
|
||||
return DefaultEnvDir
|
||||
}
|
||||
|
||||
// currentStateName returns the name of the current named state as set in the
|
||||
// configuration files.
|
||||
// If there are no configured environments, currentStateName returns "default"
|
||||
|
@ -110,6 +110,12 @@ func (b *Local) opPlan(
|
||||
// Write the backend if we have one
|
||||
plan.Backend = op.PlanOutBackend
|
||||
|
||||
// This works around a bug (#12871) which is no longer possible to
|
||||
// trigger but will exist for already corrupted upgrades.
|
||||
if plan.Backend != nil && plan.State != nil {
|
||||
plan.State.Remote = nil
|
||||
}
|
||||
|
||||
log.Printf("[INFO] backend/local: writing plan output to: %s", path)
|
||||
f, err := os.Create(path)
|
||||
if err == nil {
|
||||
|
@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
@ -22,24 +23,17 @@ func (b *Local) opRefresh(
|
||||
if b.Backend == nil {
|
||||
if _, err := os.Stat(b.StatePath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
runningOp.Err = fmt.Errorf(
|
||||
"The Terraform state file for your infrastructure does not\n"+
|
||||
"exist. The 'refresh' command only works and only makes sense\n"+
|
||||
"when there is existing state that Terraform is managing. Please\n"+
|
||||
"double-check the value given below and try again. If you\n"+
|
||||
"haven't created infrastructure with Terraform yet, use the\n"+
|
||||
"'terraform apply' command.\n\n"+
|
||||
"Path: %s",
|
||||
b.StatePath)
|
||||
return
|
||||
err = nil
|
||||
}
|
||||
|
||||
runningOp.Err = fmt.Errorf(
|
||||
"There was an error reading the Terraform state that is needed\n"+
|
||||
"for refreshing. The path and error are shown below.\n\n"+
|
||||
"Path: %s\n\nError: %s",
|
||||
b.StatePath, err)
|
||||
return
|
||||
if err != nil {
|
||||
runningOp.Err = fmt.Errorf(
|
||||
"There was an error reading the Terraform state that is needed\n"+
|
||||
"for refreshing. The path and error are shown below.\n\n"+
|
||||
"Path: %s\n\nError: %s",
|
||||
b.StatePath, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -74,6 +68,12 @@ func (b *Local) opRefresh(
|
||||
|
||||
// Set our state
|
||||
runningOp.State = opState.State()
|
||||
if runningOp.State.Empty() || !runningOp.State.HasResources() {
|
||||
if b.CLI != nil {
|
||||
b.CLI.Output(b.Colorize().Color(
|
||||
strings.TrimSpace(refreshNoState) + "\n"))
|
||||
}
|
||||
}
|
||||
|
||||
// Perform operation and write the resulting state to the running op
|
||||
newState, err := tfCtx.Refresh()
|
||||
@ -93,3 +93,11 @@ func (b *Local) opRefresh(
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
const refreshNoState = `
|
||||
[reset][bold][yellow]Empty or non-existent state file.[reset][yellow]
|
||||
|
||||
Refresh will do nothing. Refresh does not error or return an erroneous
|
||||
exit status because many automation scripts use refresh, plan, then apply
|
||||
and may not have a state file yet for the first run.
|
||||
`
|
||||
|
@ -20,6 +20,12 @@ func TestLocal_impl(t *testing.T) {
|
||||
var _ backend.CLI = new(Local)
|
||||
}
|
||||
|
||||
func TestLocal_backend(t *testing.T) {
|
||||
defer testTmpDir(t)()
|
||||
b := &Local{}
|
||||
backend.TestBackend(t, b, b)
|
||||
}
|
||||
|
||||
func checkState(t *testing.T, path, expected string) {
|
||||
// Read the state
|
||||
f, err := os.Open(path)
|
||||
|
@ -21,6 +21,7 @@ func TestLocal(t *testing.T) *Local {
|
||||
StatePath: filepath.Join(tempDir, "state.tfstate"),
|
||||
StateOutPath: filepath.Join(tempDir, "state.tfstate"),
|
||||
StateBackupPath: filepath.Join(tempDir, "state.tfstate.bak"),
|
||||
StateEnvDir: filepath.Join(tempDir, "state.tfstate.d"),
|
||||
ContextOpts: &terraform.ContextOpts{},
|
||||
}
|
||||
}
|
||||
|
@ -53,6 +53,20 @@ func New() backend.Backend {
|
||||
Description: "HTTP Auth in the format of 'username:password'",
|
||||
Default: "", // To prevent input
|
||||
},
|
||||
|
||||
"gzip": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Description: "Compress the state data using gzip",
|
||||
Default: false,
|
||||
},
|
||||
|
||||
"lock": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Description: "Lock state access",
|
||||
Default: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@ -64,13 +78,18 @@ func New() backend.Backend {
|
||||
type Backend struct {
|
||||
*schema.Backend
|
||||
|
||||
// The fields below are set from configure
|
||||
configData *schema.ResourceData
|
||||
lock bool
|
||||
}
|
||||
|
||||
func (b *Backend) configure(ctx context.Context) error {
|
||||
// Grab the resource data
|
||||
b.configData = schema.FromContextBackendConfig(ctx)
|
||||
|
||||
// Store the lock information
|
||||
b.lock = b.configData.Get("lock").(bool)
|
||||
|
||||
// Initialize a client to test config
|
||||
_, err := b.clientRaw()
|
||||
return err
|
||||
|
@ -56,7 +56,7 @@ func (b *Backend) States() ([]string, error) {
|
||||
}
|
||||
|
||||
func (b *Backend) DeleteState(name string) error {
|
||||
if name == backend.DefaultStateName {
|
||||
if name == backend.DefaultStateName || name == "" {
|
||||
return fmt.Errorf("can't delete default state")
|
||||
}
|
||||
|
||||
@ -85,27 +85,39 @@ func (b *Backend) State(name string) (state.State, error) {
|
||||
// Determine the path of the data
|
||||
path := b.path(name)
|
||||
|
||||
// Determine whether to gzip or not
|
||||
gzip := b.configData.Get("gzip").(bool)
|
||||
|
||||
// Build the state client
|
||||
stateMgr := &remote.State{
|
||||
var stateMgr state.State = &remote.State{
|
||||
Client: &RemoteClient{
|
||||
Client: client,
|
||||
Path: path,
|
||||
GZip: gzip,
|
||||
},
|
||||
}
|
||||
|
||||
// If we're not locking, disable it
|
||||
if !b.lock {
|
||||
stateMgr = &state.LockDisabled{Inner: stateMgr}
|
||||
}
|
||||
|
||||
// Get the locker, which we know always exists
|
||||
stateMgrLocker := stateMgr.(state.Locker)
|
||||
|
||||
// Grab a lock, we use this to write an empty state if one doesn't
|
||||
// exist already. We have to write an empty state as a sentinel value
|
||||
// so States() knows it exists.
|
||||
lockInfo := state.NewLockInfo()
|
||||
lockInfo.Operation = "init"
|
||||
lockId, err := stateMgr.Lock(lockInfo)
|
||||
lockId, err := stateMgrLocker.Lock(lockInfo)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to lock state in Consul: %s", err)
|
||||
}
|
||||
|
||||
// Local helper function so we can call it multiple places
|
||||
lockUnlock := func(parent error) error {
|
||||
if err := stateMgr.Unlock(lockId); err != nil {
|
||||
if err := stateMgrLocker.Unlock(lockId); err != nil {
|
||||
return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err)
|
||||
}
|
||||
|
||||
|
@ -2,10 +2,12 @@ package consul
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/testutil"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
)
|
||||
|
||||
@ -13,19 +15,80 @@ func TestBackend_impl(t *testing.T) {
|
||||
var _ backend.Backend = new(Backend)
|
||||
}
|
||||
|
||||
func TestBackend(t *testing.T) {
|
||||
addr := os.Getenv("CONSUL_HTTP_ADDR")
|
||||
if addr == "" {
|
||||
t.Log("consul tests require CONSUL_HTTP_ADDR")
|
||||
func newConsulTestServer(t *testing.T) *testutil.TestServer {
|
||||
skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_CONSUL_TEST") == ""
|
||||
if skip {
|
||||
t.Log("consul server tests require setting TF_ACC or TF_CONSUL_TEST")
|
||||
t.Skip()
|
||||
}
|
||||
|
||||
// Get the backend
|
||||
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": addr,
|
||||
"path": fmt.Sprintf("tf-unit/%s", time.Now().String()),
|
||||
srv := testutil.NewTestServerConfig(t, func(c *testutil.TestServerConfig) {
|
||||
c.LogLevel = "warn"
|
||||
|
||||
if !testing.Verbose() {
|
||||
c.Stdout = ioutil.Discard
|
||||
c.Stderr = ioutil.Discard
|
||||
}
|
||||
})
|
||||
|
||||
return srv
|
||||
}
|
||||
|
||||
func TestBackend(t *testing.T) {
|
||||
srv := newConsulTestServer(t)
|
||||
defer srv.Stop()
|
||||
|
||||
path := fmt.Sprintf("tf-unit/%s", time.Now().String())
|
||||
|
||||
// Get the backend. We need two to test locking.
|
||||
b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
})
|
||||
|
||||
b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
})
|
||||
|
||||
// Test
|
||||
backend.TestBackend(t, b)
|
||||
backend.TestBackend(t, b1, b2)
|
||||
}
|
||||
|
||||
func TestBackend_lockDisabled(t *testing.T) {
|
||||
srv := newConsulTestServer(t)
|
||||
defer srv.Stop()
|
||||
|
||||
path := fmt.Sprintf("tf-unit/%s", time.Now().String())
|
||||
|
||||
// Get the backend. We need two to test locking.
|
||||
b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
"lock": false,
|
||||
})
|
||||
|
||||
b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path + "different", // Diff so locking test would fail if it was locking
|
||||
"lock": false,
|
||||
})
|
||||
|
||||
// Test
|
||||
backend.TestBackend(t, b1, b2)
|
||||
}
|
||||
|
||||
func TestBackend_gzip(t *testing.T) {
|
||||
srv := newConsulTestServer(t)
|
||||
defer srv.Stop()
|
||||
|
||||
// Get the backend
|
||||
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": fmt.Sprintf("tf-unit/%s", time.Now().String()),
|
||||
"gzip": true,
|
||||
})
|
||||
|
||||
// Test
|
||||
backend.TestBackend(t, b, nil)
|
||||
}
|
||||
|
@ -1,6 +1,8 @@
|
||||
package consul
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/md5"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
@ -22,6 +24,7 @@ const (
|
||||
type RemoteClient struct {
|
||||
Client *consulapi.Client
|
||||
Path string
|
||||
GZip bool
|
||||
|
||||
consulLock *consulapi.Lock
|
||||
lockCh <-chan struct{}
|
||||
@ -36,18 +39,37 @@ func (c *RemoteClient) Get() (*remote.Payload, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
payload := pair.Value
|
||||
// If the payload starts with 0x1f, it's gzip, not json
|
||||
if len(pair.Value) >= 1 && pair.Value[0] == '\x1f' {
|
||||
if data, err := uncompressState(pair.Value); err == nil {
|
||||
payload = data
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
md5 := md5.Sum(pair.Value)
|
||||
return &remote.Payload{
|
||||
Data: pair.Value,
|
||||
Data: payload,
|
||||
MD5: md5[:],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *RemoteClient) Put(data []byte) error {
|
||||
payload := data
|
||||
if c.GZip {
|
||||
if compressedState, err := compressState(data); err == nil {
|
||||
payload = compressedState
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
kv := c.Client.KV()
|
||||
_, err := kv.Put(&consulapi.KVPair{
|
||||
Key: c.Path,
|
||||
Value: data,
|
||||
Value: payload,
|
||||
}, nil)
|
||||
return err
|
||||
}
|
||||
@ -177,3 +199,31 @@ func (c *RemoteClient) Unlock(id string) error {
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func compressState(data []byte) ([]byte, error) {
|
||||
b := new(bytes.Buffer)
|
||||
gz := gzip.NewWriter(b)
|
||||
if _, err := gz.Write(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := gz.Flush(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := gz.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func uncompressState(data []byte) ([]byte, error) {
|
||||
b := new(bytes.Buffer)
|
||||
gz, err := gzip.NewReader(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.ReadFrom(gz)
|
||||
if err := gz.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
@ -2,7 +2,6 @@ package consul
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -16,15 +15,12 @@ func TestRemoteClient_impl(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRemoteClient(t *testing.T) {
|
||||
addr := os.Getenv("CONSUL_HTTP_ADDR")
|
||||
if addr == "" {
|
||||
t.Log("consul tests require CONSUL_HTTP_ADDR")
|
||||
t.Skip()
|
||||
}
|
||||
srv := newConsulTestServer(t)
|
||||
defer srv.Stop()
|
||||
|
||||
// Get the backend
|
||||
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": addr,
|
||||
"address": srv.HTTPAddr,
|
||||
"path": fmt.Sprintf("tf-unit/%s", time.Now().String()),
|
||||
})
|
||||
|
||||
@ -38,18 +34,54 @@ func TestRemoteClient(t *testing.T) {
|
||||
remote.TestClient(t, state.(*remote.State).Client)
|
||||
}
|
||||
|
||||
func TestConsul_stateLock(t *testing.T) {
|
||||
addr := os.Getenv("CONSUL_HTTP_ADDR")
|
||||
if addr == "" {
|
||||
t.Log("consul lock tests require CONSUL_HTTP_ADDR")
|
||||
t.Skip()
|
||||
// test the gzip functionality of the client
|
||||
func TestRemoteClient_gzipUpgrade(t *testing.T) {
|
||||
srv := newConsulTestServer(t)
|
||||
defer srv.Stop()
|
||||
|
||||
statePath := fmt.Sprintf("tf-unit/%s", time.Now().String())
|
||||
|
||||
// Get the backend
|
||||
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": statePath,
|
||||
})
|
||||
|
||||
// Grab the client
|
||||
state, err := b.State(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
// Test
|
||||
remote.TestClient(t, state.(*remote.State).Client)
|
||||
|
||||
// create a new backend with gzip
|
||||
b = backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": statePath,
|
||||
"gzip": true,
|
||||
})
|
||||
|
||||
// Grab the client
|
||||
state, err = b.State(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
// Test
|
||||
remote.TestClient(t, state.(*remote.State).Client)
|
||||
}
|
||||
|
||||
func TestConsul_stateLock(t *testing.T) {
|
||||
srv := newConsulTestServer(t)
|
||||
defer srv.Stop()
|
||||
|
||||
path := fmt.Sprintf("tf-unit/%s", time.Now().String())
|
||||
|
||||
// create 2 instances to get 2 remote.Clients
|
||||
sA, err := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": addr,
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
}).State(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
@ -57,7 +89,7 @@ func TestConsul_stateLock(t *testing.T) {
|
||||
}
|
||||
|
||||
sB, err := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"address": addr,
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
}).State(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
|
198
backend/remote-state/s3/backend.go
Normal file
198
backend/remote-state/s3/backend.go
Normal file
@ -0,0 +1,198 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
||||
multierror "github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
|
||||
terraformAWS "github.com/hashicorp/terraform/builtin/providers/aws"
|
||||
)
|
||||
|
||||
// New creates a new backend for S3 remote state.
|
||||
func New() backend.Backend {
|
||||
s := &schema.Backend{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"bucket": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Description: "The name of the S3 bucket",
|
||||
},
|
||||
|
||||
"key": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Description: "The path to the state file inside the bucket",
|
||||
},
|
||||
|
||||
"region": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Description: "The region of the S3 bucket.",
|
||||
DefaultFunc: schema.EnvDefaultFunc("AWS_DEFAULT_REGION", nil),
|
||||
},
|
||||
|
||||
"endpoint": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "A custom endpoint for the S3 API",
|
||||
DefaultFunc: schema.EnvDefaultFunc("AWS_S3_ENDPOINT", ""),
|
||||
},
|
||||
|
||||
"encrypt": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Description: "Whether to enable server side encryption of the state file",
|
||||
Default: false,
|
||||
},
|
||||
|
||||
"acl": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Canned ACL to be applied to the state file",
|
||||
Default: "",
|
||||
},
|
||||
|
||||
"access_key": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "AWS access key",
|
||||
Default: "",
|
||||
},
|
||||
|
||||
"secret_key": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "AWS secret key",
|
||||
Default: "",
|
||||
},
|
||||
|
||||
"kms_key_id": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The ARN of a KMS Key to use for encrypting the state",
|
||||
Default: "",
|
||||
},
|
||||
|
||||
"lock_table": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "DynamoDB table for state locking",
|
||||
Default: "",
|
||||
},
|
||||
|
||||
"profile": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "AWS profile name",
|
||||
Default: "",
|
||||
},
|
||||
|
||||
"shared_credentials_file": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Path to a shared credentials file",
|
||||
Default: "",
|
||||
},
|
||||
|
||||
"token": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "MFA token",
|
||||
Default: "",
|
||||
},
|
||||
|
||||
"role_arn": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The role to be assumed",
|
||||
Default: "",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := &Backend{Backend: s}
|
||||
result.Backend.ConfigureFunc = result.configure
|
||||
return result
|
||||
}
|
||||
|
||||
type Backend struct {
|
||||
*schema.Backend
|
||||
|
||||
// The fields below are set from configure
|
||||
s3Client *s3.S3
|
||||
dynClient *dynamodb.DynamoDB
|
||||
|
||||
bucketName string
|
||||
keyName string
|
||||
serverSideEncryption bool
|
||||
acl string
|
||||
kmsKeyID string
|
||||
lockTable string
|
||||
}
|
||||
|
||||
func (b *Backend) configure(ctx context.Context) error {
|
||||
if b.s3Client != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Grab the resource data
|
||||
data := schema.FromContextBackendConfig(ctx)
|
||||
|
||||
b.bucketName = data.Get("bucket").(string)
|
||||
b.keyName = data.Get("key").(string)
|
||||
b.serverSideEncryption = data.Get("encrypt").(bool)
|
||||
b.acl = data.Get("acl").(string)
|
||||
b.kmsKeyID = data.Get("kms_key_id").(string)
|
||||
b.lockTable = data.Get("lock_table").(string)
|
||||
|
||||
var errs []error
|
||||
creds, err := terraformAWS.GetCredentials(&terraformAWS.Config{
|
||||
AccessKey: data.Get("access_key").(string),
|
||||
SecretKey: data.Get("secret_key").(string),
|
||||
Token: data.Get("token").(string),
|
||||
Profile: data.Get("profile").(string),
|
||||
CredsFilename: data.Get("shared_credentials_file").(string),
|
||||
AssumeRoleARN: data.Get("role_arn").(string),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Call Get to check for credential provider. If nothing found, we'll get an
|
||||
// error, and we can present it nicely to the user
|
||||
_, err = creds.Get()
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
|
||||
errs = append(errs, fmt.Errorf(`No valid credential sources found for AWS S3 remote.
|
||||
Please see https://www.terraform.io/docs/state/remote/s3.html for more information on
|
||||
providing credentials for the AWS S3 remote`))
|
||||
} else {
|
||||
errs = append(errs, fmt.Errorf("Error loading credentials for AWS S3 remote: %s", err))
|
||||
}
|
||||
return &multierror.Error{Errors: errs}
|
||||
}
|
||||
|
||||
endpoint := data.Get("endpoint").(string)
|
||||
region := data.Get("region").(string)
|
||||
|
||||
awsConfig := &aws.Config{
|
||||
Credentials: creds,
|
||||
Endpoint: aws.String(endpoint),
|
||||
Region: aws.String(region),
|
||||
HTTPClient: cleanhttp.DefaultClient(),
|
||||
}
|
||||
sess := session.New(awsConfig)
|
||||
b.s3Client = s3.New(sess)
|
||||
b.dynClient = dynamodb.New(sess)
|
||||
|
||||
return nil
|
||||
}
|
159
backend/remote-state/s3/backend_state.go
Normal file
159
backend/remote-state/s3/backend_state.go
Normal file
@ -0,0 +1,159 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
const (
|
||||
// This will be used as directory name, the odd looking colon is simply to
|
||||
// reduce the chance of name conflicts with existing objects.
|
||||
keyEnvPrefix = "env:"
|
||||
)
|
||||
|
||||
func (b *Backend) States() ([]string, error) {
|
||||
params := &s3.ListObjectsInput{
|
||||
Bucket: &b.bucketName,
|
||||
Prefix: aws.String(keyEnvPrefix + "/"),
|
||||
}
|
||||
|
||||
resp, err := b.s3Client.ListObjects(params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var envs []string
|
||||
for _, obj := range resp.Contents {
|
||||
env := keyEnv(*obj.Key)
|
||||
if env != "" {
|
||||
envs = append(envs, env)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(envs)
|
||||
envs = append([]string{backend.DefaultStateName}, envs...)
|
||||
return envs, nil
|
||||
}
|
||||
|
||||
// extract the env name from the S3 key
|
||||
func keyEnv(key string) string {
|
||||
parts := strings.Split(key, "/")
|
||||
if len(parts) < 3 {
|
||||
// no env here
|
||||
return ""
|
||||
}
|
||||
|
||||
if parts[0] != keyEnvPrefix {
|
||||
// not our key, so ignore
|
||||
return ""
|
||||
}
|
||||
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
func (b *Backend) DeleteState(name string) error {
|
||||
if name == backend.DefaultStateName || name == "" {
|
||||
return fmt.Errorf("can't delete default state")
|
||||
}
|
||||
|
||||
params := &s3.DeleteObjectInput{
|
||||
Bucket: &b.bucketName,
|
||||
Key: aws.String(b.path(name)),
|
||||
}
|
||||
|
||||
_, err := b.s3Client.DeleteObject(params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Backend) State(name string) (state.State, error) {
|
||||
client := &RemoteClient{
|
||||
s3Client: b.s3Client,
|
||||
dynClient: b.dynClient,
|
||||
bucketName: b.bucketName,
|
||||
path: b.path(name),
|
||||
serverSideEncryption: b.serverSideEncryption,
|
||||
acl: b.acl,
|
||||
kmsKeyID: b.kmsKeyID,
|
||||
lockTable: b.lockTable,
|
||||
}
|
||||
|
||||
stateMgr := &remote.State{Client: client}
|
||||
|
||||
//if this isn't the default state name, we need to create the object so
|
||||
//it's listed by States.
|
||||
if name != backend.DefaultStateName {
|
||||
// take a lock on this state while we write it
|
||||
lockInfo := state.NewLockInfo()
|
||||
lockInfo.Operation = "init"
|
||||
lockId, err := client.Lock(lockInfo)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to lock s3 state: %s", err)
|
||||
}
|
||||
|
||||
// Local helper function so we can call it multiple places
|
||||
lockUnlock := func(parent error) error {
|
||||
if err := stateMgr.Unlock(lockId); err != nil {
|
||||
return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err)
|
||||
}
|
||||
return parent
|
||||
}
|
||||
|
||||
// Grab the value
|
||||
if err := stateMgr.RefreshState(); err != nil {
|
||||
err = lockUnlock(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If we have no state, we have to create an empty state
|
||||
if v := stateMgr.State(); v == nil {
|
||||
if err := stateMgr.WriteState(terraform.NewState()); err != nil {
|
||||
err = lockUnlock(err)
|
||||
return nil, err
|
||||
}
|
||||
if err := stateMgr.PersistState(); err != nil {
|
||||
err = lockUnlock(err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Unlock, the state should now be initialized
|
||||
if err := lockUnlock(nil); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return stateMgr, nil
|
||||
}
|
||||
|
||||
func (b *Backend) client() *RemoteClient {
|
||||
return &RemoteClient{}
|
||||
}
|
||||
|
||||
func (b *Backend) path(name string) string {
|
||||
if name == backend.DefaultStateName {
|
||||
return b.keyName
|
||||
}
|
||||
|
||||
return strings.Join([]string{keyEnvPrefix, name, b.keyName}, "/")
|
||||
}
|
||||
|
||||
const errStateUnlock = `
|
||||
Error unlocking S3 state. Lock ID: %s
|
||||
|
||||
Error: %s
|
||||
|
||||
You may have to force-unlock this state in order to use it again.
|
||||
`
|
213
backend/remote-state/s3/backend_test.go
Normal file
213
backend/remote-state/s3/backend_test.go
Normal file
@ -0,0 +1,213 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
)
|
||||
|
||||
// verify that we are doing ACC tests or the S3 tests specifically
|
||||
func testACC(t *testing.T) {
|
||||
skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_S3_TEST") == ""
|
||||
if skip {
|
||||
t.Log("s3 backend tests require setting TF_ACC or TF_S3_TEST")
|
||||
t.Skip()
|
||||
}
|
||||
if os.Getenv("AWS_DEFAULT_REGION") == "" {
|
||||
os.Setenv("AWS_DEFAULT_REGION", "us-west-2")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackend_impl(t *testing.T) {
|
||||
var _ backend.Backend = new(Backend)
|
||||
}
|
||||
|
||||
func TestBackendConfig(t *testing.T) {
|
||||
// This test just instantiates the client. Shouldn't make any actual
|
||||
// requests nor incur any costs.
|
||||
|
||||
config := map[string]interface{}{
|
||||
"region": "us-west-1",
|
||||
"bucket": "tf-test",
|
||||
"key": "state",
|
||||
"encrypt": true,
|
||||
"access_key": "ACCESS_KEY",
|
||||
"secret_key": "SECRET_KEY",
|
||||
"lock_table": "dynamoTable",
|
||||
}
|
||||
|
||||
b := backend.TestBackendConfig(t, New(), config).(*Backend)
|
||||
|
||||
if *b.s3Client.Config.Region != "us-west-1" {
|
||||
t.Fatalf("Incorrect region was populated")
|
||||
}
|
||||
if b.bucketName != "tf-test" {
|
||||
t.Fatalf("Incorrect bucketName was populated")
|
||||
}
|
||||
if b.keyName != "state" {
|
||||
t.Fatalf("Incorrect keyName was populated")
|
||||
}
|
||||
|
||||
credentials, err := b.s3Client.Config.Credentials.Get()
|
||||
if err != nil {
|
||||
t.Fatalf("Error when requesting credentials")
|
||||
}
|
||||
if credentials.AccessKeyID != "ACCESS_KEY" {
|
||||
t.Fatalf("Incorrect Access Key Id was populated")
|
||||
}
|
||||
if credentials.SecretAccessKey != "SECRET_KEY" {
|
||||
t.Fatalf("Incorrect Secret Access Key was populated")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackend(t *testing.T) {
|
||||
testACC(t)
|
||||
|
||||
bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
|
||||
keyName := "testState"
|
||||
|
||||
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"bucket": bucketName,
|
||||
"key": keyName,
|
||||
"encrypt": true,
|
||||
}).(*Backend)
|
||||
|
||||
createS3Bucket(t, b.s3Client, bucketName)
|
||||
defer deleteS3Bucket(t, b.s3Client, bucketName)
|
||||
|
||||
backend.TestBackend(t, b, nil)
|
||||
}
|
||||
|
||||
func TestBackendLocked(t *testing.T) {
|
||||
testACC(t)
|
||||
|
||||
bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
|
||||
keyName := "testState"
|
||||
|
||||
b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"bucket": bucketName,
|
||||
"key": keyName,
|
||||
"encrypt": true,
|
||||
"lock_table": bucketName,
|
||||
}).(*Backend)
|
||||
|
||||
b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"bucket": bucketName,
|
||||
"key": keyName,
|
||||
"encrypt": true,
|
||||
"lock_table": bucketName,
|
||||
}).(*Backend)
|
||||
|
||||
createS3Bucket(t, b1.s3Client, bucketName)
|
||||
defer deleteS3Bucket(t, b1.s3Client, bucketName)
|
||||
createDynamoDBTable(t, b1.dynClient, bucketName)
|
||||
defer deleteDynamoDBTable(t, b1.dynClient, bucketName)
|
||||
|
||||
backend.TestBackend(t, b1, b2)
|
||||
}
|
||||
|
||||
func createS3Bucket(t *testing.T, s3Client *s3.S3, bucketName string) {
|
||||
createBucketReq := &s3.CreateBucketInput{
|
||||
Bucket: &bucketName,
|
||||
}
|
||||
|
||||
// Be clear about what we're doing in case the user needs to clean
|
||||
// this up later.
|
||||
t.Logf("creating S3 bucket %s in %s", bucketName, *s3Client.Config.Region)
|
||||
_, err := s3Client.CreateBucket(createBucketReq)
|
||||
if err != nil {
|
||||
t.Fatal("failed to create test S3 bucket:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func deleteS3Bucket(t *testing.T, s3Client *s3.S3, bucketName string) {
|
||||
warning := "WARNING: Failed to delete the test S3 bucket. It may have been left in your AWS account and may incur storage charges. (error was %s)"
|
||||
|
||||
// first we have to get rid of the env objects, or we can't delete the bucket
|
||||
resp, err := s3Client.ListObjects(&s3.ListObjectsInput{Bucket: &bucketName})
|
||||
if err != nil {
|
||||
t.Logf(warning, err)
|
||||
return
|
||||
}
|
||||
for _, obj := range resp.Contents {
|
||||
if _, err := s3Client.DeleteObject(&s3.DeleteObjectInput{Bucket: &bucketName, Key: obj.Key}); err != nil {
|
||||
// this will need cleanup no matter what, so just warn and exit
|
||||
t.Logf(warning, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := s3Client.DeleteBucket(&s3.DeleteBucketInput{Bucket: &bucketName}); err != nil {
|
||||
t.Logf(warning, err)
|
||||
}
|
||||
}
|
||||
|
||||
// create the dynamoDB table, and wait until we can query it.
|
||||
func createDynamoDBTable(t *testing.T, dynClient *dynamodb.DynamoDB, tableName string) {
|
||||
createInput := &dynamodb.CreateTableInput{
|
||||
AttributeDefinitions: []*dynamodb.AttributeDefinition{
|
||||
{
|
||||
AttributeName: aws.String("LockID"),
|
||||
AttributeType: aws.String("S"),
|
||||
},
|
||||
},
|
||||
KeySchema: []*dynamodb.KeySchemaElement{
|
||||
{
|
||||
AttributeName: aws.String("LockID"),
|
||||
KeyType: aws.String("HASH"),
|
||||
},
|
||||
},
|
||||
ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
|
||||
ReadCapacityUnits: aws.Int64(5),
|
||||
WriteCapacityUnits: aws.Int64(5),
|
||||
},
|
||||
TableName: aws.String(tableName),
|
||||
}
|
||||
|
||||
_, err := dynClient.CreateTable(createInput)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// now wait until it's ACTIVE
|
||||
start := time.Now()
|
||||
time.Sleep(time.Second)
|
||||
|
||||
describeInput := &dynamodb.DescribeTableInput{
|
||||
TableName: aws.String(tableName),
|
||||
}
|
||||
|
||||
for {
|
||||
resp, err := dynClient.DescribeTable(describeInput)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if *resp.Table.TableStatus == "ACTIVE" {
|
||||
return
|
||||
}
|
||||
|
||||
if time.Since(start) > time.Minute {
|
||||
t.Fatalf("timed out creating DynamoDB table %s", tableName)
|
||||
}
|
||||
|
||||
time.Sleep(3 * time.Second)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func deleteDynamoDBTable(t *testing.T, dynClient *dynamodb.DynamoDB, tableName string) {
|
||||
params := &dynamodb.DeleteTableInput{
|
||||
TableName: aws.String(tableName),
|
||||
}
|
||||
_, err := dynClient.DeleteTable(params)
|
||||
if err != nil {
|
||||
t.Logf("WARNING: Failed to delete the test DynamoDB table %q. It has been left in your AWS account and may incur charges. (error was %s)", tableName, err)
|
||||
}
|
||||
}
|
@ -1,4 +1,4 @@
|
||||
package remote
|
||||
package s3
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@ -6,127 +6,32 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
multierror "github.com/hashicorp/go-multierror"
|
||||
uuid "github.com/hashicorp/go-uuid"
|
||||
terraformAws "github.com/hashicorp/terraform/builtin/providers/aws"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
)
|
||||
|
||||
func s3Factory(conf map[string]string) (Client, error) {
|
||||
bucketName, ok := conf["bucket"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("missing 'bucket' configuration")
|
||||
}
|
||||
|
||||
keyName, ok := conf["key"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("missing 'key' configuration")
|
||||
}
|
||||
|
||||
endpoint, ok := conf["endpoint"]
|
||||
if !ok {
|
||||
endpoint = os.Getenv("AWS_S3_ENDPOINT")
|
||||
}
|
||||
|
||||
regionName, ok := conf["region"]
|
||||
if !ok {
|
||||
regionName = os.Getenv("AWS_DEFAULT_REGION")
|
||||
if regionName == "" {
|
||||
return nil, fmt.Errorf(
|
||||
"missing 'region' configuration or AWS_DEFAULT_REGION environment variable")
|
||||
}
|
||||
}
|
||||
|
||||
serverSideEncryption := false
|
||||
if raw, ok := conf["encrypt"]; ok {
|
||||
v, err := strconv.ParseBool(raw)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"'encrypt' field couldn't be parsed as bool: %s", err)
|
||||
}
|
||||
|
||||
serverSideEncryption = v
|
||||
}
|
||||
|
||||
acl := ""
|
||||
if raw, ok := conf["acl"]; ok {
|
||||
acl = raw
|
||||
}
|
||||
kmsKeyID := conf["kms_key_id"]
|
||||
|
||||
var errs []error
|
||||
creds, err := terraformAws.GetCredentials(&terraformAws.Config{
|
||||
AccessKey: conf["access_key"],
|
||||
SecretKey: conf["secret_key"],
|
||||
Token: conf["token"],
|
||||
Profile: conf["profile"],
|
||||
CredsFilename: conf["shared_credentials_file"],
|
||||
AssumeRoleARN: conf["role_arn"],
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Call Get to check for credential provider. If nothing found, we'll get an
|
||||
// error, and we can present it nicely to the user
|
||||
_, err = creds.Get()
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
|
||||
errs = append(errs, fmt.Errorf(`No valid credential sources found for AWS S3 remote.
|
||||
Please see https://www.terraform.io/docs/state/remote/s3.html for more information on
|
||||
providing credentials for the AWS S3 remote`))
|
||||
} else {
|
||||
errs = append(errs, fmt.Errorf("Error loading credentials for AWS S3 remote: %s", err))
|
||||
}
|
||||
return nil, &multierror.Error{Errors: errs}
|
||||
}
|
||||
|
||||
awsConfig := &aws.Config{
|
||||
Credentials: creds,
|
||||
Endpoint: aws.String(endpoint),
|
||||
Region: aws.String(regionName),
|
||||
HTTPClient: cleanhttp.DefaultClient(),
|
||||
}
|
||||
sess := session.New(awsConfig)
|
||||
nativeClient := s3.New(sess)
|
||||
dynClient := dynamodb.New(sess)
|
||||
|
||||
return &S3Client{
|
||||
nativeClient: nativeClient,
|
||||
bucketName: bucketName,
|
||||
keyName: keyName,
|
||||
serverSideEncryption: serverSideEncryption,
|
||||
acl: acl,
|
||||
kmsKeyID: kmsKeyID,
|
||||
dynClient: dynClient,
|
||||
lockTable: conf["lock_table"],
|
||||
}, nil
|
||||
}
|
||||
|
||||
type S3Client struct {
|
||||
nativeClient *s3.S3
|
||||
type RemoteClient struct {
|
||||
s3Client *s3.S3
|
||||
dynClient *dynamodb.DynamoDB
|
||||
bucketName string
|
||||
keyName string
|
||||
path string
|
||||
serverSideEncryption bool
|
||||
acl string
|
||||
kmsKeyID string
|
||||
dynClient *dynamodb.DynamoDB
|
||||
lockTable string
|
||||
}
|
||||
|
||||
func (c *S3Client) Get() (*Payload, error) {
|
||||
output, err := c.nativeClient.GetObject(&s3.GetObjectInput{
|
||||
func (c *RemoteClient) Get() (*remote.Payload, error) {
|
||||
output, err := c.s3Client.GetObject(&s3.GetObjectInput{
|
||||
Bucket: &c.bucketName,
|
||||
Key: &c.keyName,
|
||||
Key: &c.path,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
@ -148,7 +53,7 @@ func (c *S3Client) Get() (*Payload, error) {
|
||||
return nil, fmt.Errorf("Failed to read remote state: %s", err)
|
||||
}
|
||||
|
||||
payload := &Payload{
|
||||
payload := &remote.Payload{
|
||||
Data: buf.Bytes(),
|
||||
}
|
||||
|
||||
@ -160,7 +65,7 @@ func (c *S3Client) Get() (*Payload, error) {
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
func (c *S3Client) Put(data []byte) error {
|
||||
func (c *RemoteClient) Put(data []byte) error {
|
||||
contentType := "application/json"
|
||||
contentLength := int64(len(data))
|
||||
|
||||
@ -169,7 +74,7 @@ func (c *S3Client) Put(data []byte) error {
|
||||
ContentLength: &contentLength,
|
||||
Body: bytes.NewReader(data),
|
||||
Bucket: &c.bucketName,
|
||||
Key: &c.keyName,
|
||||
Key: &c.path,
|
||||
}
|
||||
|
||||
if c.serverSideEncryption {
|
||||
@ -187,28 +92,28 @@ func (c *S3Client) Put(data []byte) error {
|
||||
|
||||
log.Printf("[DEBUG] Uploading remote state to S3: %#v", i)
|
||||
|
||||
if _, err := c.nativeClient.PutObject(i); err == nil {
|
||||
if _, err := c.s3Client.PutObject(i); err == nil {
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf("Failed to upload state: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *S3Client) Delete() error {
|
||||
_, err := c.nativeClient.DeleteObject(&s3.DeleteObjectInput{
|
||||
func (c *RemoteClient) Delete() error {
|
||||
_, err := c.s3Client.DeleteObject(&s3.DeleteObjectInput{
|
||||
Bucket: &c.bucketName,
|
||||
Key: &c.keyName,
|
||||
Key: &c.path,
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *S3Client) Lock(info *state.LockInfo) (string, error) {
|
||||
func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
|
||||
if c.lockTable == "" {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
stateName := fmt.Sprintf("%s/%s", c.bucketName, c.keyName)
|
||||
stateName := fmt.Sprintf("%s/%s", c.bucketName, c.path)
|
||||
info.Path = stateName
|
||||
|
||||
if info.ID == "" {
|
||||
@ -245,10 +150,10 @@ func (c *S3Client) Lock(info *state.LockInfo) (string, error) {
|
||||
return info.ID, nil
|
||||
}
|
||||
|
||||
func (c *S3Client) getLockInfo() (*state.LockInfo, error) {
|
||||
func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {
|
||||
getParams := &dynamodb.GetItemInput{
|
||||
Key: map[string]*dynamodb.AttributeValue{
|
||||
"LockID": {S: aws.String(fmt.Sprintf("%s/%s", c.bucketName, c.keyName))},
|
||||
"LockID": {S: aws.String(fmt.Sprintf("%s/%s", c.bucketName, c.path))},
|
||||
},
|
||||
ProjectionExpression: aws.String("LockID, Info"),
|
||||
TableName: aws.String(c.lockTable),
|
||||
@ -273,7 +178,7 @@ func (c *S3Client) getLockInfo() (*state.LockInfo, error) {
|
||||
return lockInfo, nil
|
||||
}
|
||||
|
||||
func (c *S3Client) Unlock(id string) error {
|
||||
func (c *RemoteClient) Unlock(id string) error {
|
||||
if c.lockTable == "" {
|
||||
return nil
|
||||
}
|
||||
@ -297,7 +202,7 @@ func (c *S3Client) Unlock(id string) error {
|
||||
|
||||
params := &dynamodb.DeleteItemInput{
|
||||
Key: map[string]*dynamodb.AttributeValue{
|
||||
"LockID": {S: aws.String(fmt.Sprintf("%s/%s", c.bucketName, c.keyName))},
|
||||
"LockID": {S: aws.String(fmt.Sprintf("%s/%s", c.bucketName, c.path))},
|
||||
},
|
||||
TableName: aws.String(c.lockTable),
|
||||
}
|
76
backend/remote-state/s3/client_test.go
Normal file
76
backend/remote-state/s3/client_test.go
Normal file
@ -0,0 +1,76 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
)
|
||||
|
||||
func TestRemoteClient_impl(t *testing.T) {
|
||||
var _ remote.Client = new(RemoteClient)
|
||||
var _ remote.ClientLocker = new(RemoteClient)
|
||||
}
|
||||
|
||||
func TestRemoteClient(t *testing.T) {
|
||||
testACC(t)
|
||||
|
||||
bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
|
||||
keyName := "testState"
|
||||
|
||||
b := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"bucket": bucketName,
|
||||
"key": keyName,
|
||||
"encrypt": true,
|
||||
}).(*Backend)
|
||||
|
||||
state, err := b.State(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
createS3Bucket(t, b.s3Client, bucketName)
|
||||
defer deleteS3Bucket(t, b.s3Client, bucketName)
|
||||
|
||||
remote.TestClient(t, state.(*remote.State).Client)
|
||||
}
|
||||
|
||||
func TestRemoteClientLocks(t *testing.T) {
|
||||
testACC(t)
|
||||
|
||||
bucketName := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
|
||||
keyName := "testState"
|
||||
|
||||
b1 := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"bucket": bucketName,
|
||||
"key": keyName,
|
||||
"encrypt": true,
|
||||
"lock_table": bucketName,
|
||||
}).(*Backend)
|
||||
|
||||
b2 := backend.TestBackendConfig(t, New(), map[string]interface{}{
|
||||
"bucket": bucketName,
|
||||
"key": keyName,
|
||||
"encrypt": true,
|
||||
"lock_table": bucketName,
|
||||
}).(*Backend)
|
||||
|
||||
s1, err := b1.State(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s2, err := b2.State(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
createS3Bucket(t, b1.s3Client, bucketName)
|
||||
defer deleteS3Bucket(t, b1.s3Client, bucketName)
|
||||
createDynamoDBTable(t, b1.dynClient, bucketName)
|
||||
defer deleteDynamoDBTable(t, b1.dynClient, bucketName)
|
||||
|
||||
remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client)
|
||||
}
|
@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/config"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
@ -40,8 +41,15 @@ func TestBackendConfig(t *testing.T, b Backend, c map[string]interface{}) Backen
|
||||
// assumed to already be configured. This will test state functionality.
|
||||
// If the backend reports it doesn't support multi-state by returning the
|
||||
// error ErrNamedStatesNotSupported, then it will not test that.
|
||||
func TestBackend(t *testing.T, b Backend) {
|
||||
testBackendStates(t, b)
|
||||
//
|
||||
// If you want to test locking, two backends must be given. If b2 is nil,
|
||||
// then state lockign won't be tested.
|
||||
func TestBackend(t *testing.T, b1, b2 Backend) {
|
||||
testBackendStates(t, b1)
|
||||
|
||||
if b2 != nil {
|
||||
testBackendStateLock(t, b1, b2)
|
||||
}
|
||||
}
|
||||
|
||||
func testBackendStates(t *testing.T, b Backend) {
|
||||
@ -57,53 +65,109 @@ func testBackendStates(t *testing.T, b Backend) {
|
||||
}
|
||||
|
||||
// Create a couple states
|
||||
fooState, err := b.State("foo")
|
||||
foo, err := b.State("foo")
|
||||
if err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
if err := fooState.RefreshState(); err != nil {
|
||||
if err := foo.RefreshState(); err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
if v := fooState.State(); v.HasResources() {
|
||||
if v := foo.State(); v.HasResources() {
|
||||
t.Fatalf("should be empty: %s", v)
|
||||
}
|
||||
|
||||
barState, err := b.State("bar")
|
||||
bar, err := b.State("bar")
|
||||
if err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
if err := barState.RefreshState(); err != nil {
|
||||
if err := bar.RefreshState(); err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
if v := barState.State(); v.HasResources() {
|
||||
if v := bar.State(); v.HasResources() {
|
||||
t.Fatalf("should be empty: %s", v)
|
||||
}
|
||||
|
||||
// Verify they are distinct states
|
||||
// Verify they are distinct states that can be read back from storage
|
||||
{
|
||||
s := barState.State()
|
||||
s.Lineage = "bar"
|
||||
if err := barState.WriteState(s); err != nil {
|
||||
// start with a fresh state, and record the lineage being
|
||||
// written to "bar"
|
||||
barState := terraform.NewState()
|
||||
barLineage := barState.Lineage
|
||||
|
||||
// the foo lineage should be distinct from bar, and unchanged after
|
||||
// modifying bar
|
||||
fooState := terraform.NewState()
|
||||
fooLineage := fooState.Lineage
|
||||
|
||||
// write a known state to foo
|
||||
if err := foo.WriteState(fooState); err != nil {
|
||||
t.Fatal("error writing foo state:", err)
|
||||
}
|
||||
if err := foo.PersistState(); err != nil {
|
||||
t.Fatal("error persisting foo state:", err)
|
||||
}
|
||||
|
||||
// write a distinct known state to bar
|
||||
if err := bar.WriteState(barState); err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
if err := barState.PersistState(); err != nil {
|
||||
if err := bar.PersistState(); err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
|
||||
if err := fooState.RefreshState(); err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
// verify that foo is unchanged with the existing state manager
|
||||
if err := foo.RefreshState(); err != nil {
|
||||
t.Fatal("error refreshing foo:", err)
|
||||
}
|
||||
if v := fooState.State(); v.Lineage == "bar" {
|
||||
t.Fatalf("bad: %#v", v)
|
||||
fooState = foo.State()
|
||||
switch {
|
||||
case fooState == nil:
|
||||
t.Fatal("nil state read from foo")
|
||||
case fooState.Lineage == barLineage:
|
||||
t.Fatalf("bar lineage read from foo: %#v", fooState)
|
||||
case fooState.Lineage != fooLineage:
|
||||
t.Fatal("foo lineage alterred")
|
||||
}
|
||||
|
||||
// fetch foo again from the backend
|
||||
foo, err = b.State("foo")
|
||||
if err != nil {
|
||||
t.Fatal("error re-fetching state:", err)
|
||||
}
|
||||
if err := foo.RefreshState(); err != nil {
|
||||
t.Fatal("error refreshing foo:", err)
|
||||
}
|
||||
fooState = foo.State()
|
||||
switch {
|
||||
case fooState == nil:
|
||||
t.Fatal("nil state read from foo")
|
||||
case fooState.Lineage != fooLineage:
|
||||
t.Fatal("incorrect state returned from backend")
|
||||
}
|
||||
|
||||
// fetch the bar again from the backend
|
||||
bar, err = b.State("bar")
|
||||
if err != nil {
|
||||
t.Fatal("error re-fetching state:", err)
|
||||
}
|
||||
if err := bar.RefreshState(); err != nil {
|
||||
t.Fatal("error refreshing bar:", err)
|
||||
}
|
||||
barState = bar.State()
|
||||
switch {
|
||||
case barState == nil:
|
||||
t.Fatal("nil state read from bar")
|
||||
case barState.Lineage != barLineage:
|
||||
t.Fatal("incorrect state returned from backend")
|
||||
}
|
||||
}
|
||||
|
||||
// Verify we can now list them
|
||||
{
|
||||
// we determined that named stated are supported earlier
|
||||
states, err := b.States()
|
||||
if err == ErrNamedStatesNotSupported {
|
||||
t.Logf("TestBackend: named states not supported in %T, skipping", b)
|
||||
return
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sort.Strings(states)
|
||||
@ -138,3 +202,77 @@ func testBackendStates(t *testing.T, b Backend) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testBackendStateLock(t *testing.T, b1, b2 Backend) {
|
||||
// Get the default state for each
|
||||
b1StateMgr, err := b1.State(DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
if err := b1StateMgr.RefreshState(); err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
|
||||
// Fast exit if this doesn't support locking at all
|
||||
if _, ok := b1StateMgr.(state.Locker); !ok {
|
||||
t.Logf("TestBackend: backend %T doesn't support state locking, not testing", b1)
|
||||
return
|
||||
}
|
||||
|
||||
t.Logf("TestBackend: testing state locking for %T", b1)
|
||||
|
||||
b2StateMgr, err := b2.State(DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatalf("error: %s", err)
|
||||
}
|
||||
if err := b2StateMgr.RefreshState(); err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
|
||||
// Reassign so its obvious whats happening
|
||||
lockerA := b1StateMgr.(state.Locker)
|
||||
lockerB := b2StateMgr.(state.Locker)
|
||||
|
||||
infoA := state.NewLockInfo()
|
||||
infoA.Operation = "test"
|
||||
infoA.Who = "clientA"
|
||||
|
||||
infoB := state.NewLockInfo()
|
||||
infoB.Operation = "test"
|
||||
infoB.Who = "clientB"
|
||||
|
||||
lockIDA, err := lockerA.Lock(infoA)
|
||||
if err != nil {
|
||||
t.Fatal("unable to get initial lock:", err)
|
||||
}
|
||||
|
||||
// If the lock ID is blank, assume locking is disabled
|
||||
if lockIDA == "" {
|
||||
t.Logf("TestBackend: %T: empty string returned for lock, assuming disabled", b1)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = lockerB.Lock(infoB)
|
||||
if err == nil {
|
||||
lockerA.Unlock(lockIDA)
|
||||
t.Fatal("client B obtained lock while held by client A")
|
||||
}
|
||||
|
||||
if err := lockerA.Unlock(lockIDA); err != nil {
|
||||
t.Fatal("error unlocking client A", err)
|
||||
}
|
||||
|
||||
lockIDB, err := lockerB.Lock(infoB)
|
||||
if err != nil {
|
||||
t.Fatal("unable to obtain lock from client B")
|
||||
}
|
||||
|
||||
if lockIDB == lockIDA {
|
||||
t.Fatalf("duplicate lock IDs: %q", lockIDB)
|
||||
}
|
||||
|
||||
if err = lockerB.Unlock(lockIDB); err != nil {
|
||||
t.Fatal("error unlocking client B:", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -2,26 +2,17 @@ package alicloud
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/denverdino/aliyungo/common"
|
||||
"github.com/denverdino/aliyungo/ecs"
|
||||
"github.com/denverdino/aliyungo/slb"
|
||||
"regexp"
|
||||
"github.com/hashicorp/terraform/helper/validation"
|
||||
)
|
||||
|
||||
// common
|
||||
func validateInstancePort(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
if value < 1 || value > 65535 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must be a valid instance port between 1 and 65535",
|
||||
k))
|
||||
return
|
||||
}
|
||||
return
|
||||
return validation.IntBetween(1, 65535)(v, k)
|
||||
}
|
||||
|
||||
func validateInstanceProtocol(v interface{}, k string) (ws []string, errors []error) {
|
||||
@ -37,12 +28,11 @@ func validateInstanceProtocol(v interface{}, k string) (ws []string, errors []er
|
||||
|
||||
// ecs
|
||||
func validateDiskCategory(v interface{}, k string) (ws []string, errors []error) {
|
||||
category := ecs.DiskCategory(v.(string))
|
||||
if category != ecs.DiskCategoryCloud && category != ecs.DiskCategoryCloudEfficiency && category != ecs.DiskCategoryCloudSSD {
|
||||
errors = append(errors, fmt.Errorf("%s must be one of %s %s %s", k, ecs.DiskCategoryCloud, ecs.DiskCategoryCloudEfficiency, ecs.DiskCategoryCloudSSD))
|
||||
}
|
||||
|
||||
return
|
||||
return validation.StringInSlice([]string{
|
||||
string(ecs.DiskCategoryCloud),
|
||||
string(ecs.DiskCategoryCloudEfficiency),
|
||||
string(ecs.DiskCategoryCloudSSD),
|
||||
}, false)(v, k)
|
||||
}
|
||||
|
||||
func validateInstanceName(v interface{}, k string) (ws []string, errors []error) {
|
||||
@ -59,12 +49,7 @@ func validateInstanceName(v interface{}, k string) (ws []string, errors []error)
|
||||
}
|
||||
|
||||
func validateInstanceDescription(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(string)
|
||||
if len(value) < 2 || len(value) > 256 {
|
||||
errors = append(errors, fmt.Errorf("%q cannot be longer than 256 characters", k))
|
||||
|
||||
}
|
||||
return
|
||||
return validation.StringLenBetween(2, 256)(v, k)
|
||||
}
|
||||
|
||||
func validateDiskName(v interface{}, k string) (ws []string, errors []error) {
|
||||
@ -86,12 +71,7 @@ func validateDiskName(v interface{}, k string) (ws []string, errors []error) {
|
||||
}
|
||||
|
||||
func validateDiskDescription(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(string)
|
||||
if len(value) < 2 || len(value) > 256 {
|
||||
errors = append(errors, fmt.Errorf("%q cannot be longer than 256 characters", k))
|
||||
|
||||
}
|
||||
return
|
||||
return validation.StringLenBetween(2, 128)(v, k)
|
||||
}
|
||||
|
||||
//security group
|
||||
@ -109,225 +89,114 @@ func validateSecurityGroupName(v interface{}, k string) (ws []string, errors []e
|
||||
}
|
||||
|
||||
func validateSecurityGroupDescription(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(string)
|
||||
if len(value) < 2 || len(value) > 256 {
|
||||
errors = append(errors, fmt.Errorf("%q cannot be longer than 256 characters", k))
|
||||
|
||||
}
|
||||
return
|
||||
return validation.StringLenBetween(2, 256)(v, k)
|
||||
}
|
||||
|
||||
func validateSecurityRuleType(v interface{}, k string) (ws []string, errors []error) {
|
||||
rt := GroupRuleDirection(v.(string))
|
||||
if rt != GroupRuleIngress && rt != GroupRuleEgress {
|
||||
errors = append(errors, fmt.Errorf("%s must be one of %s %s", k, GroupRuleIngress, GroupRuleEgress))
|
||||
}
|
||||
|
||||
return
|
||||
return validation.StringInSlice([]string{
|
||||
string(GroupRuleIngress),
|
||||
string(GroupRuleEgress),
|
||||
}, false)(v, k)
|
||||
}
|
||||
|
||||
func validateSecurityRuleIpProtocol(v interface{}, k string) (ws []string, errors []error) {
|
||||
pt := GroupRuleIpProtocol(v.(string))
|
||||
if pt != GroupRuleTcp && pt != GroupRuleUdp && pt != GroupRuleIcmp && pt != GroupRuleGre && pt != GroupRuleAll {
|
||||
errors = append(errors, fmt.Errorf("%s must be one of %s %s %s %s %s", k,
|
||||
GroupRuleTcp, GroupRuleUdp, GroupRuleIcmp, GroupRuleGre, GroupRuleAll))
|
||||
}
|
||||
|
||||
return
|
||||
return validation.StringInSlice([]string{
|
||||
string(GroupRuleTcp),
|
||||
string(GroupRuleUdp),
|
||||
string(GroupRuleIcmp),
|
||||
string(GroupRuleGre),
|
||||
string(GroupRuleAll),
|
||||
}, false)(v, k)
|
||||
}
|
||||
|
||||
func validateSecurityRuleNicType(v interface{}, k string) (ws []string, errors []error) {
|
||||
pt := GroupRuleNicType(v.(string))
|
||||
if pt != GroupRuleInternet && pt != GroupRuleIntranet {
|
||||
errors = append(errors, fmt.Errorf("%s must be one of %s %s", k, GroupRuleInternet, GroupRuleIntranet))
|
||||
}
|
||||
|
||||
return
|
||||
return validation.StringInSlice([]string{
|
||||
string(GroupRuleInternet),
|
||||
string(GroupRuleIntranet),
|
||||
}, false)(v, k)
|
||||
}
|
||||
|
||||
func validateSecurityRulePolicy(v interface{}, k string) (ws []string, errors []error) {
|
||||
pt := GroupRulePolicy(v.(string))
|
||||
if pt != GroupRulePolicyAccept && pt != GroupRulePolicyDrop {
|
||||
errors = append(errors, fmt.Errorf("%s must be one of %s %s", k, GroupRulePolicyAccept, GroupRulePolicyDrop))
|
||||
}
|
||||
|
||||
return
|
||||
return validation.StringInSlice([]string{
|
||||
string(GroupRulePolicyAccept),
|
||||
string(GroupRulePolicyDrop),
|
||||
}, false)(v, k)
|
||||
}
|
||||
|
||||
func validateSecurityPriority(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
if value < 1 || value > 100 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must be a valid authorization policy priority between 1 and 100",
|
||||
k))
|
||||
return
|
||||
}
|
||||
return
|
||||
return validation.IntBetween(1, 100)(v, k)
|
||||
}
|
||||
|
||||
// validateCIDRNetworkAddress ensures that the string value is a valid CIDR that
|
||||
// represents a network address - it adds an error otherwise
|
||||
func validateCIDRNetworkAddress(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(string)
|
||||
_, ipnet, err := net.ParseCIDR(value)
|
||||
if err != nil {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid CIDR, got error parsing: %s", k, err))
|
||||
return
|
||||
}
|
||||
|
||||
if ipnet == nil || value != ipnet.String() {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid network CIDR, expected %q, got %q",
|
||||
k, ipnet, value))
|
||||
}
|
||||
|
||||
return
|
||||
return validation.CIDRNetwork(0, 32)(v, k)
|
||||
}
|
||||
|
||||
func validateRouteEntryNextHopType(v interface{}, k string) (ws []string, errors []error) {
|
||||
nht := ecs.NextHopType(v.(string))
|
||||
if nht != ecs.NextHopIntance && nht != ecs.NextHopTunnel {
|
||||
errors = append(errors, fmt.Errorf("%s must be one of %s %s", k,
|
||||
ecs.NextHopIntance, ecs.NextHopTunnel))
|
||||
}
|
||||
|
||||
return
|
||||
return validation.StringInSlice([]string{
|
||||
string(ecs.NextHopIntance),
|
||||
string(ecs.NextHopTunnel),
|
||||
}, false)(v, k)
|
||||
}
|
||||
|
||||
func validateSwitchCIDRNetworkAddress(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(string)
|
||||
_, ipnet, err := net.ParseCIDR(value)
|
||||
if err != nil {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid CIDR, got error parsing: %s", k, err))
|
||||
return
|
||||
}
|
||||
|
||||
if ipnet == nil || value != ipnet.String() {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid network CIDR, expected %q, got %q",
|
||||
k, ipnet, value))
|
||||
return
|
||||
}
|
||||
|
||||
mark, _ := strconv.Atoi(strings.Split(ipnet.String(), "/")[1])
|
||||
if mark < 16 || mark > 29 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a network CIDR which mark between 16 and 29",
|
||||
k))
|
||||
}
|
||||
|
||||
return
|
||||
return validation.CIDRNetwork(16, 29)(v, k)
|
||||
}
|
||||
|
||||
// validateIoOptimized ensures that the string value is a valid IoOptimized that
|
||||
// represents a IoOptimized - it adds an error otherwise
|
||||
func validateIoOptimized(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
ioOptimized := ecs.IoOptimized(value)
|
||||
if ioOptimized != ecs.IoOptimizedNone &&
|
||||
ioOptimized != ecs.IoOptimizedOptimized {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid IoOptimized, expected %s or %s, got %q",
|
||||
k, ecs.IoOptimizedNone, ecs.IoOptimizedOptimized, ioOptimized))
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
return validation.StringInSlice([]string{
|
||||
"",
|
||||
string(ecs.IoOptimizedNone),
|
||||
string(ecs.IoOptimizedOptimized),
|
||||
}, false)(v, k)
|
||||
}
|
||||
|
||||
// validateInstanceNetworkType ensures that the string value is a classic or vpc
|
||||
func validateInstanceNetworkType(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
network := InstanceNetWork(value)
|
||||
if network != ClassicNet &&
|
||||
network != VpcNet {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid InstanceNetworkType, expected %s or %s, go %q",
|
||||
k, ClassicNet, VpcNet, network))
|
||||
}
|
||||
}
|
||||
return
|
||||
return validation.StringInSlice([]string{
|
||||
"",
|
||||
string(ClassicNet),
|
||||
string(VpcNet),
|
||||
}, false)(v, k)
|
||||
}
|
||||
|
||||
func validateInstanceChargeType(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
chargeType := common.InstanceChargeType(value)
|
||||
if chargeType != common.PrePaid &&
|
||||
chargeType != common.PostPaid {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid InstanceChargeType, expected %s or %s, got %q",
|
||||
k, common.PrePaid, common.PostPaid, chargeType))
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
return validation.StringInSlice([]string{
|
||||
"",
|
||||
string(common.PrePaid),
|
||||
string(common.PostPaid),
|
||||
}, false)(v, k)
|
||||
}
|
||||
|
||||
func validateInternetChargeType(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
chargeType := common.InternetChargeType(value)
|
||||
if chargeType != common.PayByBandwidth &&
|
||||
chargeType != common.PayByTraffic {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid InstanceChargeType, expected %s or %s, got %q",
|
||||
k, common.PayByBandwidth, common.PayByTraffic, chargeType))
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
return validation.StringInSlice([]string{
|
||||
"",
|
||||
string(common.PayByBandwidth),
|
||||
string(common.PayByTraffic),
|
||||
}, false)(v, k)
|
||||
}
|
||||
|
||||
func validateInternetMaxBandWidthOut(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
if value < 1 || value > 100 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must be a valid internet bandwidth out between 1 and 1000",
|
||||
k))
|
||||
return
|
||||
}
|
||||
return
|
||||
return validation.IntBetween(1, 100)(v, k)
|
||||
}
|
||||
|
||||
// SLB
|
||||
func validateSlbName(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
if len(value) < 1 || len(value) > 80 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must be a valid load balancer name characters between 1 and 80",
|
||||
k))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
return validation.StringLenBetween(0, 80)(v, k)
|
||||
}
|
||||
|
||||
func validateSlbInternetChargeType(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
chargeType := common.InternetChargeType(value)
|
||||
|
||||
if chargeType != "paybybandwidth" &&
|
||||
chargeType != "paybytraffic" {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid InstanceChargeType, expected %s or %s, got %q",
|
||||
k, "paybybandwidth", "paybytraffic", value))
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
return validation.StringInSlice([]string{
|
||||
"paybybandwidth",
|
||||
"paybytraffic",
|
||||
}, false)(v, k)
|
||||
}
|
||||
|
||||
func validateSlbBandwidth(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
if value < 1 || value > 1000 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must be a valid load balancer bandwidth between 1 and 1000",
|
||||
k))
|
||||
return
|
||||
}
|
||||
return
|
||||
return validation.IntBetween(1, 1000)(v, k)
|
||||
}
|
||||
|
||||
func validateSlbListenerBandwidth(v interface{}, k string) (ws []string, errors []error) {
|
||||
@ -342,67 +211,23 @@ func validateSlbListenerBandwidth(v interface{}, k string) (ws []string, errors
|
||||
}
|
||||
|
||||
func validateSlbListenerScheduler(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
scheduler := slb.SchedulerType(value)
|
||||
|
||||
if scheduler != "wrr" && scheduler != "wlc" {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid SchedulerType, expected %s or %s, got %q",
|
||||
k, "wrr", "wlc", value))
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
return validation.StringInSlice([]string{"wrr", "wlc"}, false)(v, k)
|
||||
}
|
||||
|
||||
func validateSlbListenerStickySession(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
flag := slb.FlagType(value)
|
||||
|
||||
if flag != "on" && flag != "off" {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid StickySession, expected %s or %s, got %q",
|
||||
k, "on", "off", value))
|
||||
}
|
||||
}
|
||||
return
|
||||
return validation.StringInSlice([]string{"", "on", "off"}, false)(v, k)
|
||||
}
|
||||
|
||||
func validateSlbListenerStickySessionType(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
flag := slb.StickySessionType(value)
|
||||
|
||||
if flag != "insert" && flag != "server" {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid StickySessionType, expected %s or %s, got %q",
|
||||
k, "insert", "server", value))
|
||||
}
|
||||
}
|
||||
return
|
||||
return validation.StringInSlice([]string{"", "insert", "server"}, false)(v, k)
|
||||
}
|
||||
|
||||
func validateSlbListenerCookie(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
flag := slb.StickySessionType(value)
|
||||
|
||||
if flag != "insert" && flag != "server" {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid StickySessionType, expected %s or %s, got %q",
|
||||
k, "insert", "server", value))
|
||||
}
|
||||
}
|
||||
return
|
||||
return validation.StringInSlice([]string{"", "insert", "server"}, false)(v, k)
|
||||
}
|
||||
|
||||
func validateSlbListenerPersistenceTimeout(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
if value < 0 || value > 86400 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must be a valid load balancer persistence timeout between 0 and 86400",
|
||||
k))
|
||||
return
|
||||
}
|
||||
return
|
||||
return validation.IntBetween(0, 86400)(v, k)
|
||||
}
|
||||
|
||||
//data source validate func
|
||||
@ -419,19 +244,14 @@ func validateNameRegex(v interface{}, k string) (ws []string, errors []error) {
|
||||
}
|
||||
|
||||
func validateImageOwners(v interface{}, k string) (ws []string, errors []error) {
|
||||
if value := v.(string); value != "" {
|
||||
owners := ecs.ImageOwnerAlias(value)
|
||||
if owners != ecs.ImageOwnerSystem &&
|
||||
owners != ecs.ImageOwnerSelf &&
|
||||
owners != ecs.ImageOwnerOthers &&
|
||||
owners != ecs.ImageOwnerMarketplace &&
|
||||
owners != ecs.ImageOwnerDefault {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must contain a valid Image owner , expected %s, %s, %s, %s or %s, got %q",
|
||||
k, ecs.ImageOwnerSystem, ecs.ImageOwnerSelf, ecs.ImageOwnerOthers, ecs.ImageOwnerMarketplace, ecs.ImageOwnerDefault, owners))
|
||||
}
|
||||
}
|
||||
return
|
||||
return validation.StringInSlice([]string{
|
||||
"",
|
||||
string(ecs.ImageOwnerSystem),
|
||||
string(ecs.ImageOwnerSelf),
|
||||
string(ecs.ImageOwnerOthers),
|
||||
string(ecs.ImageOwnerMarketplace),
|
||||
string(ecs.ImageOwnerDefault),
|
||||
}, false)(v, k)
|
||||
}
|
||||
|
||||
func validateRegion(v interface{}, k string) (ws []string, errors []error) {
|
||||
|
@ -35,7 +35,7 @@ func Provider() terraform.ResourceProvider {
|
||||
"timeout": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc(JSONTimeoutParamName, "600"),
|
||||
DefaultFunc: schema.EnvDefaultFunc(JSONTimeoutParamName, "900"),
|
||||
},
|
||||
},
|
||||
ResourcesMap: map[string]*schema.Resource{
|
||||
|
@ -2,10 +2,11 @@ package arukas
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
API "github.com/arukasio/cli"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
API "github.com/arukasio/cli"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func resourceArukasContainer() *schema.Resource {
|
||||
@ -169,11 +170,27 @@ func resourceArukasContainerCreate(d *schema.ResourceData, meta interface{}) err
|
||||
return err
|
||||
}
|
||||
|
||||
if err := sleepUntilUp(client, appSet.Container.ID, client.Timeout); err != nil {
|
||||
d.SetId(appSet.Container.ID)
|
||||
|
||||
stateConf := &resource.StateChangeConf{
|
||||
Target: []string{"running"},
|
||||
Pending: []string{"stopped", "booting"},
|
||||
Timeout: client.Timeout,
|
||||
Refresh: func() (interface{}, string, error) {
|
||||
var container API.Container
|
||||
err := client.Get(&container, fmt.Sprintf("/containers/%s", appSet.Container.ID))
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
return container, container.StatusText, nil
|
||||
},
|
||||
}
|
||||
_, err := stateConf.WaitForState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId(appSet.Container.ID)
|
||||
return resourceArukasContainerRead(d, meta)
|
||||
}
|
||||
|
||||
@ -270,24 +287,3 @@ func resourceArukasContainerDelete(d *schema.ResourceData, meta interface{}) err
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func sleepUntilUp(client *ArukasClient, containerID string, timeout time.Duration) error {
|
||||
current := 0 * time.Second
|
||||
interval := 5 * time.Second
|
||||
for {
|
||||
var container API.Container
|
||||
if err := client.Get(&container, fmt.Sprintf("/containers/%s", containerID)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if container.IsRunning {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(interval)
|
||||
current += interval
|
||||
|
||||
if timeout > 0 && current > timeout {
|
||||
return fmt.Errorf("Timeout: sleepUntilUp")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -3,6 +3,7 @@ package arukas
|
||||
import (
|
||||
"fmt"
|
||||
API "github.com/arukasio/cli"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"testing"
|
||||
@ -10,17 +11,21 @@ import (
|
||||
|
||||
func TestAccArukasContainer_Basic(t *testing.T) {
|
||||
var container API.Container
|
||||
randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
|
||||
name := fmt.Sprintf("terraform_acc_test_%s", randString)
|
||||
endpoint := fmt.Sprintf("terraform-acc-test-endpoint-%s", randString)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckArukasContainerDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccCheckArukasContainerConfig_basic,
|
||||
Config: testAccCheckArukasContainerConfig_basic(randString),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckArukasContainerExists("arukas_container.foobar", &container),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "name", "terraform_for_arukas_test_foobar"),
|
||||
"arukas_container.foobar", "name", name),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "image", "nginx:latest"),
|
||||
resource.TestCheckResourceAttr(
|
||||
@ -28,7 +33,7 @@ func TestAccArukasContainer_Basic(t *testing.T) {
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "memory", "256"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "endpoint", "terraform-for-arukas-test-endpoint"),
|
||||
"arukas_container.foobar", "endpoint", endpoint),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.#", "1"),
|
||||
resource.TestCheckResourceAttr(
|
||||
@ -51,17 +56,23 @@ func TestAccArukasContainer_Basic(t *testing.T) {
|
||||
|
||||
func TestAccArukasContainer_Update(t *testing.T) {
|
||||
var container API.Container
|
||||
randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
|
||||
name := fmt.Sprintf("terraform_acc_test_%s", randString)
|
||||
updatedName := fmt.Sprintf("terraform_acc_test_update_%s", randString)
|
||||
endpoint := fmt.Sprintf("terraform-acc-test-endpoint-%s", randString)
|
||||
updatedEndpoint := fmt.Sprintf("terraform-acc-test-endpoint-update-%s", randString)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckArukasContainerDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccCheckArukasContainerConfig_basic,
|
||||
Config: testAccCheckArukasContainerConfig_basic(randString),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckArukasContainerExists("arukas_container.foobar", &container),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "name", "terraform_for_arukas_test_foobar"),
|
||||
"arukas_container.foobar", "name", name),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "image", "nginx:latest"),
|
||||
resource.TestCheckResourceAttr(
|
||||
@ -69,7 +80,7 @@ func TestAccArukasContainer_Update(t *testing.T) {
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "memory", "256"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "endpoint", "terraform-for-arukas-test-endpoint"),
|
||||
"arukas_container.foobar", "endpoint", endpoint),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.#", "1"),
|
||||
resource.TestCheckResourceAttr(
|
||||
@ -87,11 +98,11 @@ func TestAccArukasContainer_Update(t *testing.T) {
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccCheckArukasContainerConfig_update,
|
||||
Config: testAccCheckArukasContainerConfig_update(randString),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckArukasContainerExists("arukas_container.foobar", &container),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "name", "terraform_for_arukas_test_foobar_upd"),
|
||||
"arukas_container.foobar", "name", updatedName),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "image", "nginx:latest"),
|
||||
resource.TestCheckResourceAttr(
|
||||
@ -99,7 +110,7 @@ func TestAccArukasContainer_Update(t *testing.T) {
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "memory", "512"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "endpoint", "terraform-for-arukas-test-endpoint-upd"),
|
||||
"arukas_container.foobar", "endpoint", updatedEndpoint),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "ports.#", "2"),
|
||||
resource.TestCheckResourceAttr(
|
||||
@ -130,17 +141,20 @@ func TestAccArukasContainer_Update(t *testing.T) {
|
||||
|
||||
func TestAccArukasContainer_Minimum(t *testing.T) {
|
||||
var container API.Container
|
||||
randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
|
||||
name := fmt.Sprintf("terraform_acc_test_minimum_%s", randString)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckArukasContainerDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccCheckArukasContainerConfig_minimum,
|
||||
Config: testAccCheckArukasContainerConfig_minimum(randString),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckArukasContainerExists("arukas_container.foobar", &container),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "name", "terraform_for_arukas_test_foobar"),
|
||||
"arukas_container.foobar", "name", name),
|
||||
resource.TestCheckResourceAttr(
|
||||
"arukas_container.foobar", "image", "nginx:latest"),
|
||||
resource.TestCheckResourceAttr(
|
||||
@ -163,13 +177,15 @@ func TestAccArukasContainer_Minimum(t *testing.T) {
|
||||
|
||||
func TestAccArukasContainer_Import(t *testing.T) {
|
||||
resourceName := "arukas_container.foobar"
|
||||
randString := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckArukasContainerDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccCheckArukasContainerConfig_basic,
|
||||
Config: testAccCheckArukasContainerConfig_basic(randString),
|
||||
},
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
@ -227,13 +243,14 @@ func testAccCheckArukasContainerDestroy(s *terraform.State) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
const testAccCheckArukasContainerConfig_basic = `
|
||||
func testAccCheckArukasContainerConfig_basic(randString string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "arukas_container" "foobar" {
|
||||
name = "terraform_for_arukas_test_foobar"
|
||||
name = "terraform_acc_test_%s"
|
||||
image = "nginx:latest"
|
||||
instances = 1
|
||||
memory = 256
|
||||
endpoint = "terraform-for-arukas-test-endpoint"
|
||||
endpoint = "terraform-acc-test-endpoint-%s"
|
||||
ports = {
|
||||
protocol = "tcp"
|
||||
number = "80"
|
||||
@ -242,15 +259,17 @@ resource "arukas_container" "foobar" {
|
||||
key = "key"
|
||||
value = "value"
|
||||
}
|
||||
}`
|
||||
}`, randString, randString)
|
||||
}
|
||||
|
||||
const testAccCheckArukasContainerConfig_update = `
|
||||
func testAccCheckArukasContainerConfig_update(randString string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "arukas_container" "foobar" {
|
||||
name = "terraform_for_arukas_test_foobar_upd"
|
||||
name = "terraform_acc_test_update_%s"
|
||||
image = "nginx:latest"
|
||||
instances = 2
|
||||
memory = 512
|
||||
endpoint = "terraform-for-arukas-test-endpoint-upd"
|
||||
endpoint = "terraform-acc-test-endpoint-update-%s"
|
||||
ports = {
|
||||
protocol = "tcp"
|
||||
number = "80"
|
||||
@ -267,13 +286,16 @@ resource "arukas_container" "foobar" {
|
||||
key = "key_upd"
|
||||
value = "value_upd"
|
||||
}
|
||||
}`
|
||||
}`, randString, randString)
|
||||
}
|
||||
|
||||
const testAccCheckArukasContainerConfig_minimum = `
|
||||
func testAccCheckArukasContainerConfig_minimum(randString string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "arukas_container" "foobar" {
|
||||
name = "terraform_for_arukas_test_foobar"
|
||||
name = "terraform_acc_test_minimum_%s"
|
||||
image = "nginx:latest"
|
||||
ports = {
|
||||
number = "80"
|
||||
}
|
||||
}`
|
||||
}`, randString)
|
||||
}
|
||||
|
@ -134,7 +134,7 @@ func GetCredentials(c *Config) (*awsCredentials.Credentials, error) {
|
||||
if usedEndpoint == "" {
|
||||
usedEndpoint = "default location"
|
||||
}
|
||||
log.Printf("[WARN] Ignoring AWS metadata API endpoint at %s "+
|
||||
log.Printf("[INFO] Ignoring AWS metadata API endpoint at %s "+
|
||||
"as it doesn't return any instance-id", usedEndpoint)
|
||||
}
|
||||
}
|
||||
|
@ -1,7 +1,6 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
@ -10,13 +9,9 @@ import (
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
awsCredentials "github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
)
|
||||
@ -28,9 +23,14 @@ func TestAWSGetAccountInfo_shouldBeValid_fromEC2Role(t *testing.T) {
|
||||
awsTs := awsEnv(t)
|
||||
defer awsTs()
|
||||
|
||||
iamEndpoints := []*iamEndpoint{}
|
||||
ts, iamConn, stsConn := getMockedAwsIamStsApi(iamEndpoints)
|
||||
defer ts()
|
||||
closeEmpty, emptySess, err := getMockedAwsApiSession("zero", []*awsMockEndpoint{})
|
||||
defer closeEmpty()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
iamConn := iam.New(emptySess)
|
||||
stsConn := sts.New(emptySess)
|
||||
|
||||
part, id, err := GetAccountInfo(iamConn, stsConn, ec2rolecreds.ProviderName)
|
||||
if err != nil {
|
||||
@ -55,14 +55,24 @@ func TestAWSGetAccountInfo_shouldBeValid_EC2RoleHasPriority(t *testing.T) {
|
||||
awsTs := awsEnv(t)
|
||||
defer awsTs()
|
||||
|
||||
iamEndpoints := []*iamEndpoint{
|
||||
iamEndpoints := []*awsMockEndpoint{
|
||||
{
|
||||
Request: &iamRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
|
||||
Response: &iamResponse{200, iamResponse_GetUser_valid, "text/xml"},
|
||||
Request: &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
|
||||
Response: &awsMockResponse{200, iamResponse_GetUser_valid, "text/xml"},
|
||||
},
|
||||
}
|
||||
ts, iamConn, stsConn := getMockedAwsIamStsApi(iamEndpoints)
|
||||
defer ts()
|
||||
closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints)
|
||||
defer closeIam()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
iamConn := iam.New(iamSess)
|
||||
closeSts, stsSess, err := getMockedAwsApiSession("STS", []*awsMockEndpoint{})
|
||||
defer closeSts()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
stsConn := sts.New(stsSess)
|
||||
|
||||
part, id, err := GetAccountInfo(iamConn, stsConn, ec2rolecreds.ProviderName)
|
||||
if err != nil {
|
||||
@ -81,15 +91,26 @@ func TestAWSGetAccountInfo_shouldBeValid_EC2RoleHasPriority(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAWSGetAccountInfo_shouldBeValid_fromIamUser(t *testing.T) {
|
||||
iamEndpoints := []*iamEndpoint{
|
||||
iamEndpoints := []*awsMockEndpoint{
|
||||
{
|
||||
Request: &iamRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
|
||||
Response: &iamResponse{200, iamResponse_GetUser_valid, "text/xml"},
|
||||
Request: &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
|
||||
Response: &awsMockResponse{200, iamResponse_GetUser_valid, "text/xml"},
|
||||
},
|
||||
}
|
||||
|
||||
ts, iamConn, stsConn := getMockedAwsIamStsApi(iamEndpoints)
|
||||
defer ts()
|
||||
closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints)
|
||||
defer closeIam()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
closeSts, stsSess, err := getMockedAwsApiSession("STS", []*awsMockEndpoint{})
|
||||
defer closeSts()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
iamConn := iam.New(iamSess)
|
||||
stsConn := sts.New(stsSess)
|
||||
|
||||
part, id, err := GetAccountInfo(iamConn, stsConn, "")
|
||||
if err != nil {
|
||||
@ -108,18 +129,32 @@ func TestAWSGetAccountInfo_shouldBeValid_fromIamUser(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAWSGetAccountInfo_shouldBeValid_fromGetCallerIdentity(t *testing.T) {
|
||||
iamEndpoints := []*iamEndpoint{
|
||||
iamEndpoints := []*awsMockEndpoint{
|
||||
{
|
||||
Request: &iamRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
|
||||
Response: &iamResponse{403, iamResponse_GetUser_unauthorized, "text/xml"},
|
||||
},
|
||||
{
|
||||
Request: &iamRequest{"POST", "/", "Action=GetCallerIdentity&Version=2011-06-15"},
|
||||
Response: &iamResponse{200, stsResponse_GetCallerIdentity_valid, "text/xml"},
|
||||
Request: &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
|
||||
Response: &awsMockResponse{403, iamResponse_GetUser_unauthorized, "text/xml"},
|
||||
},
|
||||
}
|
||||
ts, iamConn, stsConn := getMockedAwsIamStsApi(iamEndpoints)
|
||||
defer ts()
|
||||
closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints)
|
||||
defer closeIam()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stsEndpoints := []*awsMockEndpoint{
|
||||
{
|
||||
Request: &awsMockRequest{"POST", "/", "Action=GetCallerIdentity&Version=2011-06-15"},
|
||||
Response: &awsMockResponse{200, stsResponse_GetCallerIdentity_valid, "text/xml"},
|
||||
},
|
||||
}
|
||||
closeSts, stsSess, err := getMockedAwsApiSession("STS", stsEndpoints)
|
||||
defer closeSts()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
iamConn := iam.New(iamSess)
|
||||
stsConn := sts.New(stsSess)
|
||||
|
||||
part, id, err := GetAccountInfo(iamConn, stsConn, "")
|
||||
if err != nil {
|
||||
@ -138,22 +173,36 @@ func TestAWSGetAccountInfo_shouldBeValid_fromGetCallerIdentity(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAWSGetAccountInfo_shouldBeValid_fromIamListRoles(t *testing.T) {
|
||||
iamEndpoints := []*iamEndpoint{
|
||||
iamEndpoints := []*awsMockEndpoint{
|
||||
{
|
||||
Request: &iamRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
|
||||
Response: &iamResponse{403, iamResponse_GetUser_unauthorized, "text/xml"},
|
||||
Request: &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
|
||||
Response: &awsMockResponse{403, iamResponse_GetUser_unauthorized, "text/xml"},
|
||||
},
|
||||
{
|
||||
Request: &iamRequest{"POST", "/", "Action=GetCallerIdentity&Version=2011-06-15"},
|
||||
Response: &iamResponse{403, stsResponse_GetCallerIdentity_unauthorized, "text/xml"},
|
||||
},
|
||||
{
|
||||
Request: &iamRequest{"POST", "/", "Action=ListRoles&MaxItems=1&Version=2010-05-08"},
|
||||
Response: &iamResponse{200, iamResponse_ListRoles_valid, "text/xml"},
|
||||
Request: &awsMockRequest{"POST", "/", "Action=ListRoles&MaxItems=1&Version=2010-05-08"},
|
||||
Response: &awsMockResponse{200, iamResponse_ListRoles_valid, "text/xml"},
|
||||
},
|
||||
}
|
||||
ts, iamConn, stsConn := getMockedAwsIamStsApi(iamEndpoints)
|
||||
defer ts()
|
||||
closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints)
|
||||
defer closeIam()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stsEndpoints := []*awsMockEndpoint{
|
||||
{
|
||||
Request: &awsMockRequest{"POST", "/", "Action=GetCallerIdentity&Version=2011-06-15"},
|
||||
Response: &awsMockResponse{403, stsResponse_GetCallerIdentity_unauthorized, "text/xml"},
|
||||
},
|
||||
}
|
||||
closeSts, stsSess, err := getMockedAwsApiSession("STS", stsEndpoints)
|
||||
defer closeSts()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
iamConn := iam.New(iamSess)
|
||||
stsConn := sts.New(stsSess)
|
||||
|
||||
part, id, err := GetAccountInfo(iamConn, stsConn, "")
|
||||
if err != nil {
|
||||
@ -172,18 +221,30 @@ func TestAWSGetAccountInfo_shouldBeValid_fromIamListRoles(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAWSGetAccountInfo_shouldBeValid_federatedRole(t *testing.T) {
|
||||
iamEndpoints := []*iamEndpoint{
|
||||
iamEndpoints := []*awsMockEndpoint{
|
||||
{
|
||||
Request: &iamRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
|
||||
Response: &iamResponse{400, iamResponse_GetUser_federatedFailure, "text/xml"},
|
||||
Request: &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
|
||||
Response: &awsMockResponse{400, iamResponse_GetUser_federatedFailure, "text/xml"},
|
||||
},
|
||||
{
|
||||
Request: &iamRequest{"POST", "/", "Action=ListRoles&MaxItems=1&Version=2010-05-08"},
|
||||
Response: &iamResponse{200, iamResponse_ListRoles_valid, "text/xml"},
|
||||
Request: &awsMockRequest{"POST", "/", "Action=ListRoles&MaxItems=1&Version=2010-05-08"},
|
||||
Response: &awsMockResponse{200, iamResponse_ListRoles_valid, "text/xml"},
|
||||
},
|
||||
}
|
||||
ts, iamConn, stsConn := getMockedAwsIamStsApi(iamEndpoints)
|
||||
defer ts()
|
||||
closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints)
|
||||
defer closeIam()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
closeSts, stsSess, err := getMockedAwsApiSession("STS", []*awsMockEndpoint{})
|
||||
defer closeSts()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
iamConn := iam.New(iamSess)
|
||||
stsConn := sts.New(stsSess)
|
||||
|
||||
part, id, err := GetAccountInfo(iamConn, stsConn, "")
|
||||
if err != nil {
|
||||
@ -202,18 +263,30 @@ func TestAWSGetAccountInfo_shouldBeValid_federatedRole(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAWSGetAccountInfo_shouldError_unauthorizedFromIam(t *testing.T) {
|
||||
iamEndpoints := []*iamEndpoint{
|
||||
iamEndpoints := []*awsMockEndpoint{
|
||||
{
|
||||
Request: &iamRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
|
||||
Response: &iamResponse{403, iamResponse_GetUser_unauthorized, "text/xml"},
|
||||
Request: &awsMockRequest{"POST", "/", "Action=GetUser&Version=2010-05-08"},
|
||||
Response: &awsMockResponse{403, iamResponse_GetUser_unauthorized, "text/xml"},
|
||||
},
|
||||
{
|
||||
Request: &iamRequest{"POST", "/", "Action=ListRoles&MaxItems=1&Version=2010-05-08"},
|
||||
Response: &iamResponse{403, iamResponse_ListRoles_unauthorized, "text/xml"},
|
||||
Request: &awsMockRequest{"POST", "/", "Action=ListRoles&MaxItems=1&Version=2010-05-08"},
|
||||
Response: &awsMockResponse{403, iamResponse_ListRoles_unauthorized, "text/xml"},
|
||||
},
|
||||
}
|
||||
ts, iamConn, stsConn := getMockedAwsIamStsApi(iamEndpoints)
|
||||
defer ts()
|
||||
closeIam, iamSess, err := getMockedAwsApiSession("IAM", iamEndpoints)
|
||||
defer closeIam()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
closeSts, stsSess, err := getMockedAwsApiSession("STS", []*awsMockEndpoint{})
|
||||
defer closeSts()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
iamConn := iam.New(iamSess)
|
||||
stsConn := sts.New(stsSess)
|
||||
|
||||
part, id, err := GetAccountInfo(iamConn, stsConn, "")
|
||||
if err == nil {
|
||||
@ -697,51 +770,6 @@ func invalidAwsEnv(t *testing.T) func() {
|
||||
return ts.Close
|
||||
}
|
||||
|
||||
// getMockedAwsIamStsApi establishes a httptest server to simulate behaviour
|
||||
// of a real AWS' IAM & STS server
|
||||
func getMockedAwsIamStsApi(endpoints []*iamEndpoint) (func(), *iam.IAM, *sts.STS) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(r.Body)
|
||||
requestBody := buf.String()
|
||||
|
||||
log.Printf("[DEBUG] Received API %q request to %q: %s",
|
||||
r.Method, r.RequestURI, requestBody)
|
||||
|
||||
for _, e := range endpoints {
|
||||
if r.Method == e.Request.Method && r.RequestURI == e.Request.Uri && requestBody == e.Request.Body {
|
||||
log.Printf("[DEBUG] Mock API responding with %d: %s", e.Response.StatusCode, e.Response.Body)
|
||||
|
||||
w.WriteHeader(e.Response.StatusCode)
|
||||
w.Header().Set("Content-Type", e.Response.ContentType)
|
||||
w.Header().Set("X-Amzn-Requestid", "1b206dd1-f9a8-11e5-becf-051c60f11c4a")
|
||||
w.Header().Set("Date", time.Now().Format(time.RFC1123))
|
||||
|
||||
fmt.Fprintln(w, e.Response.Body)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
w.WriteHeader(400)
|
||||
return
|
||||
}))
|
||||
|
||||
sc := awsCredentials.NewStaticCredentials("accessKey", "secretKey", "")
|
||||
|
||||
sess, err := session.NewSession(&aws.Config{
|
||||
Credentials: sc,
|
||||
Region: aws.String("us-east-1"),
|
||||
Endpoint: aws.String(ts.URL),
|
||||
CredentialsChainVerboseErrors: aws.Bool(true),
|
||||
})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error creating AWS Session: %s", err))
|
||||
}
|
||||
iamConn := iam.New(sess)
|
||||
stsConn := sts.New(sess)
|
||||
return ts.Close, iamConn, stsConn
|
||||
}
|
||||
|
||||
func getEnv() *currentEnv {
|
||||
// Grab any existing AWS keys and preserve. In some tests we'll unset these, so
|
||||
// we need to have them and restore them after
|
||||
@ -790,23 +818,6 @@ const metadataApiRoutes = `
|
||||
}
|
||||
`
|
||||
|
||||
type iamEndpoint struct {
|
||||
Request *iamRequest
|
||||
Response *iamResponse
|
||||
}
|
||||
|
||||
type iamRequest struct {
|
||||
Method string
|
||||
Uri string
|
||||
Body string
|
||||
}
|
||||
|
||||
type iamResponse struct {
|
||||
StatusCode int
|
||||
Body string
|
||||
ContentType string
|
||||
}
|
||||
|
||||
const iamResponse_GetUser_valid = `<GetUserResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
|
||||
<GetUserResult>
|
||||
<User>
|
||||
|
@ -443,10 +443,10 @@ func expandLambdaFunctionAssociation(lf map[string]interface{}) *cloudfront.Lamb
|
||||
return &lfa
|
||||
}
|
||||
|
||||
func flattenLambdaFunctionAssociations(lfa *cloudfront.LambdaFunctionAssociations) []interface{} {
|
||||
s := make([]interface{}, len(lfa.Items))
|
||||
for i, v := range lfa.Items {
|
||||
s[i] = flattenLambdaFunctionAssociation(v)
|
||||
func flattenLambdaFunctionAssociations(lfa *cloudfront.LambdaFunctionAssociations) *schema.Set {
|
||||
s := schema.NewSet(lambdaFunctionAssociationHash, []interface{}{})
|
||||
for _, v := range lfa.Items {
|
||||
s.Add(flattenLambdaFunctionAssociation(v))
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
@ -364,14 +364,8 @@ func TestCloudFrontStructure_flattenCacheBehavior(t *testing.T) {
|
||||
t.Fatalf("Expected out[target_origin_id] to be myS3Origin, got %v", out["target_origin_id"])
|
||||
}
|
||||
|
||||
// the flattened lambda function associations are a slice of maps,
|
||||
// where as the default cache behavior LFAs are a set. Here we double check
|
||||
// that and conver the slice to a set, and use Set's Equal() method to check
|
||||
// equality
|
||||
var outSet *schema.Set
|
||||
if outSlice, ok := out["lambda_function_association"].([]interface{}); ok {
|
||||
outSet = schema.NewSet(lambdaFunctionAssociationHash, outSlice)
|
||||
} else {
|
||||
var outSet, ok = out["lambda_function_association"].(*schema.Set)
|
||||
if !ok {
|
||||
t.Fatalf("out['lambda_function_association'] is not a slice as expected: %#v", out["lambda_function_association"])
|
||||
}
|
||||
|
||||
@ -496,7 +490,7 @@ func TestCloudFrontStructure_flattenlambdaFunctionAssociations(t *testing.T) {
|
||||
lfa := expandLambdaFunctionAssociations(in.List())
|
||||
out := flattenLambdaFunctionAssociations(lfa)
|
||||
|
||||
if reflect.DeepEqual(in.List(), out) != true {
|
||||
if reflect.DeepEqual(in.List(), out.List()) != true {
|
||||
t.Fatalf("Expected out to be %v, got %v", in, out)
|
||||
}
|
||||
}
|
||||
|
@ -136,6 +136,7 @@ type AWSClient struct {
|
||||
r53conn *route53.Route53
|
||||
partition string
|
||||
accountid string
|
||||
supportedplatforms []string
|
||||
region string
|
||||
rdsconn *rds.RDS
|
||||
iamconn *iam.IAM
|
||||
@ -224,10 +225,7 @@ func (c *Config) Client() (interface{}, error) {
|
||||
return nil, errwrap.Wrapf("Error creating AWS session: {{err}}", err)
|
||||
}
|
||||
|
||||
// Removes the SDK Version handler, so we only have the provider User-Agent
|
||||
// Ex: "User-Agent: APN/1.0 HashiCorp/1.0 Terraform/0.7.9-dev"
|
||||
sess.Handlers.Build.Remove(request.NamedHandler{Name: "core.SDKVersionUserAgentHandler"})
|
||||
sess.Handlers.Build.PushFrontNamed(addTerraformVersionToUserAgent)
|
||||
sess.Handlers.Build.PushBackNamed(addTerraformVersionToUserAgent)
|
||||
|
||||
if extraDebug := os.Getenv("TERRAFORM_AWS_AUTHFAILURE_DEBUG"); extraDebug != "" {
|
||||
sess.Handlers.UnmarshalError.PushFrontNamed(debugAuthFailure)
|
||||
@ -272,6 +270,17 @@ func (c *Config) Client() (interface{}, error) {
|
||||
return nil, authErr
|
||||
}
|
||||
|
||||
client.ec2conn = ec2.New(awsEc2Sess)
|
||||
|
||||
supportedPlatforms, err := GetSupportedEC2Platforms(client.ec2conn)
|
||||
if err != nil {
|
||||
// We intentionally fail *silently* because there's a chance
|
||||
// user just doesn't have ec2:DescribeAccountAttributes permissions
|
||||
log.Printf("[WARN] Unable to get supported EC2 platforms: %s", err)
|
||||
} else {
|
||||
client.supportedplatforms = supportedPlatforms
|
||||
}
|
||||
|
||||
client.acmconn = acm.New(sess)
|
||||
client.apigateway = apigateway.New(sess)
|
||||
client.appautoscalingconn = applicationautoscaling.New(sess)
|
||||
@ -290,7 +299,6 @@ func (c *Config) Client() (interface{}, error) {
|
||||
client.codepipelineconn = codepipeline.New(sess)
|
||||
client.dsconn = directoryservice.New(sess)
|
||||
client.dynamodbconn = dynamodb.New(dynamoSess)
|
||||
client.ec2conn = ec2.New(awsEc2Sess)
|
||||
client.ecrconn = ecr.New(sess)
|
||||
client.ecsconn = ecs.New(sess)
|
||||
client.efsconn = efs.New(sess)
|
||||
@ -308,7 +316,7 @@ func (c *Config) Client() (interface{}, error) {
|
||||
client.kmsconn = kms.New(sess)
|
||||
client.lambdaconn = lambda.New(sess)
|
||||
client.lightsailconn = lightsail.New(usEast1Sess)
|
||||
client.opsworksconn = opsworks.New(usEast1Sess)
|
||||
client.opsworksconn = opsworks.New(sess)
|
||||
client.r53conn = route53.New(usEast1Sess)
|
||||
client.rdsconn = rds.New(sess)
|
||||
client.redshiftconn = redshift.New(sess)
|
||||
@ -389,6 +397,34 @@ func (c *Config) ValidateAccountId(accountId string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetSupportedEC2Platforms(conn *ec2.EC2) ([]string, error) {
|
||||
attrName := "supported-platforms"
|
||||
|
||||
input := ec2.DescribeAccountAttributesInput{
|
||||
AttributeNames: []*string{aws.String(attrName)},
|
||||
}
|
||||
attributes, err := conn.DescribeAccountAttributes(&input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var platforms []string
|
||||
for _, attr := range attributes.AccountAttributes {
|
||||
if *attr.AttributeName == attrName {
|
||||
for _, v := range attr.AttributeValues {
|
||||
platforms = append(platforms, *v.AttributeValue)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(platforms) == 0 {
|
||||
return nil, fmt.Errorf("No EC2 platforms detected")
|
||||
}
|
||||
|
||||
return platforms, nil
|
||||
}
|
||||
|
||||
// addTerraformVersionToUserAgent is a named handler that will add Terraform's
|
||||
// version information to requests made by the AWS SDK.
|
||||
var addTerraformVersionToUserAgent = request.NamedHandler{
|
||||
|
118
builtin/providers/aws/config_test.go
Normal file
118
builtin/providers/aws/config_test.go
Normal file
@ -0,0 +1,118 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
awsCredentials "github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
)
|
||||
|
||||
func TestGetSupportedEC2Platforms(t *testing.T) {
|
||||
ec2Endpoints := []*awsMockEndpoint{
|
||||
&awsMockEndpoint{
|
||||
Request: &awsMockRequest{"POST", "/", "Action=DescribeAccountAttributes&" +
|
||||
"AttributeName.1=supported-platforms&Version=2016-11-15"},
|
||||
Response: &awsMockResponse{200, test_ec2_describeAccountAttributes_response, "text/xml"},
|
||||
},
|
||||
}
|
||||
closeFunc, sess, err := getMockedAwsApiSession("EC2", ec2Endpoints)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer closeFunc()
|
||||
conn := ec2.New(sess)
|
||||
|
||||
platforms, err := GetSupportedEC2Platforms(conn)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, received: %s", err)
|
||||
}
|
||||
expectedPlatforms := []string{"VPC", "EC2"}
|
||||
if !reflect.DeepEqual(platforms, expectedPlatforms) {
|
||||
t.Fatalf("Received platforms: %q\nExpected: %q\n", platforms, expectedPlatforms)
|
||||
}
|
||||
}
|
||||
|
||||
// getMockedAwsApiSession establishes a httptest server to simulate behaviour
|
||||
// of a real AWS API server
|
||||
func getMockedAwsApiSession(svcName string, endpoints []*awsMockEndpoint) (func(), *session.Session, error) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(r.Body)
|
||||
requestBody := buf.String()
|
||||
|
||||
log.Printf("[DEBUG] Received %s API %q request to %q: %s",
|
||||
svcName, r.Method, r.RequestURI, requestBody)
|
||||
|
||||
for _, e := range endpoints {
|
||||
if r.Method == e.Request.Method && r.RequestURI == e.Request.Uri && requestBody == e.Request.Body {
|
||||
log.Printf("[DEBUG] Mocked %s API responding with %d: %s",
|
||||
svcName, e.Response.StatusCode, e.Response.Body)
|
||||
|
||||
w.WriteHeader(e.Response.StatusCode)
|
||||
w.Header().Set("Content-Type", e.Response.ContentType)
|
||||
w.Header().Set("X-Amzn-Requestid", "1b206dd1-f9a8-11e5-becf-051c60f11c4a")
|
||||
w.Header().Set("Date", time.Now().Format(time.RFC1123))
|
||||
|
||||
fmt.Fprintln(w, e.Response.Body)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
w.WriteHeader(400)
|
||||
return
|
||||
}))
|
||||
|
||||
sc := awsCredentials.NewStaticCredentials("accessKey", "secretKey", "")
|
||||
|
||||
sess, err := session.NewSession(&aws.Config{
|
||||
Credentials: sc,
|
||||
Region: aws.String("us-east-1"),
|
||||
Endpoint: aws.String(ts.URL),
|
||||
CredentialsChainVerboseErrors: aws.Bool(true),
|
||||
})
|
||||
|
||||
return ts.Close, sess, err
|
||||
}
|
||||
|
||||
type awsMockEndpoint struct {
|
||||
Request *awsMockRequest
|
||||
Response *awsMockResponse
|
||||
}
|
||||
|
||||
type awsMockRequest struct {
|
||||
Method string
|
||||
Uri string
|
||||
Body string
|
||||
}
|
||||
|
||||
type awsMockResponse struct {
|
||||
StatusCode int
|
||||
Body string
|
||||
ContentType string
|
||||
}
|
||||
|
||||
var test_ec2_describeAccountAttributes_response = `<DescribeAccountAttributesResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
|
||||
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
|
||||
<accountAttributeSet>
|
||||
<item>
|
||||
<attributeName>supported-platforms</attributeName>
|
||||
<attributeValueSet>
|
||||
<item>
|
||||
<attributeValue>VPC</attributeValue>
|
||||
</item>
|
||||
<item>
|
||||
<attributeValue>EC2</attributeValue>
|
||||
</item>
|
||||
</attributeValueSet>
|
||||
</item>
|
||||
</accountAttributeSet>
|
||||
</DescribeAccountAttributesResponse>`
|
@ -20,6 +20,11 @@ func dataSourceAwsDbInstance() *schema.Resource {
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"address": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"allocated_storage": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
@ -82,6 +87,11 @@ func dataSourceAwsDbInstance() *schema.Resource {
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"endpoint": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"engine": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
@ -92,6 +102,11 @@ func dataSourceAwsDbInstance() *schema.Resource {
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"hosted_zone_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"iops": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
@ -133,6 +148,11 @@ func dataSourceAwsDbInstance() *schema.Resource {
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
|
||||
"port": {
|
||||
Type: schema.TypeInt,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"preferred_backup_window": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
@ -232,6 +252,10 @@ func dataSourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error
|
||||
d.Set("master_username", dbInstance.MasterUsername)
|
||||
d.Set("monitoring_interval", dbInstance.MonitoringInterval)
|
||||
d.Set("monitoring_role_arn", dbInstance.MonitoringRoleArn)
|
||||
d.Set("address", dbInstance.Endpoint.Address)
|
||||
d.Set("port", dbInstance.Endpoint.Port)
|
||||
d.Set("hosted_zone_id", dbInstance.Endpoint.HostedZoneId)
|
||||
d.Set("endpoint", fmt.Sprintf("%s:%d", *dbInstance.Endpoint.Address, *dbInstance.Endpoint.Port))
|
||||
|
||||
var optionGroups []string
|
||||
for _, v := range dbInstance.OptionGroupMemberships {
|
||||
|
@ -28,6 +28,25 @@ func TestAccAWSDataDbInstance_basic(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSDataDbInstance_endpoint(t *testing.T) {
|
||||
rInt := acctest.RandInt()
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSDBInstanceConfigWithDataSource(rInt),
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "address"),
|
||||
resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "port"),
|
||||
resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "hosted_zone_id"),
|
||||
resource.TestCheckResourceAttrSet("data.aws_db_instance.bar", "endpoint"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccAWSDBInstanceConfigWithDataSource(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_db_instance" "bar" {
|
||||
|
@ -51,7 +51,7 @@ func dataSourceAwsEcsTaskDefinitionRead(d *schema.ResourceData, meta interface{}
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("Failed getting task definition %s %q", err, d.Get("task_definition").(string))
|
||||
}
|
||||
|
||||
taskDefinition := *desc.TaskDefinition
|
||||
|
@ -4,69 +4,52 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccDataSourceAwsRoute53Zone(t *testing.T) {
|
||||
rInt := acctest.RandInt()
|
||||
publicResourceName := "aws_route53_zon.test"
|
||||
publicDomain := fmt.Sprintf("terraformtestacchz-%d.com.", rInt)
|
||||
privateResourceName := "aws_route53_zone.test_private"
|
||||
privateDomain := fmt.Sprintf("test.acc-%d.", rInt)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccDataSourceAwsRoute53ZoneConfig,
|
||||
{
|
||||
Config: testAccDataSourceAwsRoute53ZoneConfig(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccDataSourceAwsRoute53ZoneCheck("data.aws_route53_zone.by_zone_id"),
|
||||
testAccDataSourceAwsRoute53ZoneCheck("data.aws_route53_zone.by_name"),
|
||||
testAccDataSourceAwsRoute53ZoneCheckPrivate("data.aws_route53_zone.by_vpc"),
|
||||
testAccDataSourceAwsRoute53ZoneCheckPrivate("data.aws_route53_zone.by_tag"),
|
||||
testAccDataSourceAwsRoute53ZoneCheck(
|
||||
publicResourceName, "data.aws_route53_zone.by_zone_id", publicDomain),
|
||||
testAccDataSourceAwsRoute53ZoneCheck(
|
||||
publicResourceName, "data.aws_route53_zone.by_name", publicDomain),
|
||||
testAccDataSourceAwsRoute53ZoneCheck(
|
||||
privateResourceName, "data.aws_route53_zone.by_vpc", privateDomain),
|
||||
testAccDataSourceAwsRoute53ZoneCheck(
|
||||
privateResourceName, "data.aws_route53_zone.by_tag", privateDomain),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccDataSourceAwsRoute53ZoneCheck(name string) resource.TestCheckFunc {
|
||||
// rsName for the name of the created resource
|
||||
// dsName for the name of the created data source
|
||||
// zName for the name of the domain
|
||||
func testAccDataSourceAwsRoute53ZoneCheck(rsName, dsName, zName string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[name]
|
||||
rs, ok := s.RootModule().Resources[rsName]
|
||||
if !ok {
|
||||
return fmt.Errorf("root module has no resource called %s", name)
|
||||
return fmt.Errorf("root module has no resource called %s", rsName)
|
||||
}
|
||||
|
||||
hostedZone, ok := s.RootModule().Resources["aws_route53_zone.test"]
|
||||
hostedZone, ok := s.RootModule().Resources[dsName]
|
||||
if !ok {
|
||||
return fmt.Errorf("can't find aws_hosted_zone.test in state")
|
||||
}
|
||||
attr := rs.Primary.Attributes
|
||||
if attr["id"] != hostedZone.Primary.Attributes["id"] {
|
||||
return fmt.Errorf(
|
||||
"id is %s; want %s",
|
||||
attr["id"],
|
||||
hostedZone.Primary.Attributes["id"],
|
||||
)
|
||||
}
|
||||
|
||||
if attr["name"] != "terraformtestacchz.com." {
|
||||
return fmt.Errorf(
|
||||
"Route53 Zone name is %s; want terraformtestacchz.com.",
|
||||
attr["name"],
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccDataSourceAwsRoute53ZoneCheckPrivate(name string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[name]
|
||||
if !ok {
|
||||
return fmt.Errorf("root module has no resource called %s", name)
|
||||
}
|
||||
|
||||
hostedZone, ok := s.RootModule().Resources["aws_route53_zone.test_private"]
|
||||
if !ok {
|
||||
return fmt.Errorf("can't find aws_hosted_zone.test in state")
|
||||
return fmt.Errorf("can't find zone %q in state", dsName)
|
||||
}
|
||||
|
||||
attr := rs.Primary.Attributes
|
||||
@ -78,56 +61,54 @@ func testAccDataSourceAwsRoute53ZoneCheckPrivate(name string) resource.TestCheck
|
||||
)
|
||||
}
|
||||
|
||||
if attr["name"] != "test.acc." {
|
||||
return fmt.Errorf(
|
||||
"Route53 Zone name is %s; want test.acc.",
|
||||
attr["name"],
|
||||
)
|
||||
if attr["name"] != zName {
|
||||
return fmt.Errorf("Route53 Zone name is %q; want %q", attr["name"], zName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
const testAccDataSourceAwsRoute53ZoneConfig = `
|
||||
func testAccDataSourceAwsRoute53ZoneConfig(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
provider "aws" {
|
||||
region = "us-east-2"
|
||||
}
|
||||
|
||||
provider "aws" {
|
||||
region = "us-east-2"
|
||||
}
|
||||
resource "aws_vpc" "test" {
|
||||
cidr_block = "172.16.0.0/16"
|
||||
}
|
||||
|
||||
resource "aws_vpc" "test" {
|
||||
cidr_block = "172.16.0.0/16"
|
||||
}
|
||||
resource "aws_route53_zone" "test_private" {
|
||||
name = "test.acc-%d."
|
||||
vpc_id = "${aws_vpc.test.id}"
|
||||
tags {
|
||||
Environment = "dev-%d"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_route53_zone" "test_private" {
|
||||
name = "test.acc."
|
||||
vpc_id = "${aws_vpc.test.id}"
|
||||
tags {
|
||||
Environment = "dev"
|
||||
}
|
||||
}
|
||||
data "aws_route53_zone" "by_vpc" {
|
||||
name = "${aws_route53_zone.test_private.name}"
|
||||
vpc_id = "${aws_vpc.test.id}"
|
||||
}
|
||||
data "aws_route53_zone" "by_vpc" {
|
||||
name = "${aws_route53_zone.test_private.name}"
|
||||
vpc_id = "${aws_vpc.test.id}"
|
||||
}
|
||||
|
||||
data "aws_route53_zone" "by_tag" {
|
||||
name = "${aws_route53_zone.test_private.name}"
|
||||
private_zone = true
|
||||
tags {
|
||||
Environment = "dev"
|
||||
}
|
||||
}
|
||||
data "aws_route53_zone" "by_tag" {
|
||||
name = "${aws_route53_zone.test_private.name}"
|
||||
private_zone = true
|
||||
tags {
|
||||
Environment = "dev-%d"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_route53_zone" "test" {
|
||||
name = "terraformtestacchz.com."
|
||||
}
|
||||
data "aws_route53_zone" "by_zone_id" {
|
||||
zone_id = "${aws_route53_zone.test.zone_id}"
|
||||
}
|
||||
resource "aws_route53_zone" "test" {
|
||||
name = "terraformtestacchz-%d.com."
|
||||
}
|
||||
|
||||
data "aws_route53_zone" "by_name" {
|
||||
name = "${data.aws_route53_zone.by_zone_id.name}"
|
||||
}
|
||||
data "aws_route53_zone" "by_zone_id" {
|
||||
zone_id = "${aws_route53_zone.test.zone_id}"
|
||||
}
|
||||
|
||||
`
|
||||
data "aws_route53_zone" "by_name" {
|
||||
name = "${data.aws_route53_zone.by_zone_id.name}"
|
||||
}`, rInt, rInt, rInt, rInt)
|
||||
}
|
||||
|
@ -41,6 +41,16 @@ func dataSourceAwsRouteTable() *schema.Resource {
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"ipv6_cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"egress_only_gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
@ -177,6 +187,12 @@ func dataSourceRoutesRead(ec2Routes []*ec2.Route) []map[string]interface{} {
|
||||
if r.DestinationCidrBlock != nil {
|
||||
m["cidr_block"] = *r.DestinationCidrBlock
|
||||
}
|
||||
if r.DestinationIpv6CidrBlock != nil {
|
||||
m["ipv6_cidr_block"] = *r.DestinationIpv6CidrBlock
|
||||
}
|
||||
if r.EgressOnlyInternetGatewayId != nil {
|
||||
m["egress_only_gateway_id"] = *r.EgressOnlyInternetGatewayId
|
||||
}
|
||||
if r.GatewayId != nil {
|
||||
m["gateway_id"] = *r.GatewayId
|
||||
}
|
||||
|
@ -14,7 +14,7 @@ func TestAccDataSourceAwsRouteTable_basic(t *testing.T) {
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccDataSourceAwsRouteTableGroupConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccDataSourceAwsRouteTableCheck("data.aws_route_table.by_tag"),
|
||||
@ -33,7 +33,7 @@ func TestAccDataSourceAwsRouteTable_main(t *testing.T) {
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccDataSourceAwsRouteTableMainRoute,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccDataSourceAwsRouteTableCheckMain("data.aws_route_table.by_filter"),
|
||||
|
@ -2,24 +2,30 @@ package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccDataSourceAwsVpc_basic(t *testing.T) {
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
rInt := rand.Intn(16)
|
||||
cidr := fmt.Sprintf("172.%d.0.0/16", rInt)
|
||||
tag := fmt.Sprintf("terraform-testacc-vpc-data-source-%d", rInt)
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccDataSourceAwsVpcConfig,
|
||||
Config: testAccDataSourceAwsVpcConfig(cidr, tag),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id"),
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_cidr"),
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_tag"),
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_filter"),
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id", cidr, tag),
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_cidr", cidr, tag),
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_tag", cidr, tag),
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_filter", cidr, tag),
|
||||
),
|
||||
},
|
||||
},
|
||||
@ -27,14 +33,18 @@ func TestAccDataSourceAwsVpc_basic(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAccDataSourceAwsVpc_ipv6Associated(t *testing.T) {
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
rInt := rand.Intn(16)
|
||||
cidr := fmt.Sprintf("172.%d.0.0/16", rInt)
|
||||
tag := fmt.Sprintf("terraform-testacc-vpc-data-source-%d", rInt)
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccDataSourceAwsVpcConfigIpv6,
|
||||
Config: testAccDataSourceAwsVpcConfigIpv6(cidr, tag),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id"),
|
||||
testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id", cidr, tag),
|
||||
resource.TestCheckResourceAttrSet(
|
||||
"data.aws_vpc.by_id", "ipv6_association_id"),
|
||||
resource.TestCheckResourceAttrSet(
|
||||
@ -45,7 +55,7 @@ func TestAccDataSourceAwsVpc_ipv6Associated(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func testAccDataSourceAwsVpcCheck(name string) resource.TestCheckFunc {
|
||||
func testAccDataSourceAwsVpcCheck(name, cidr, tag string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[name]
|
||||
if !ok {
|
||||
@ -67,10 +77,10 @@ func testAccDataSourceAwsVpcCheck(name string) resource.TestCheckFunc {
|
||||
)
|
||||
}
|
||||
|
||||
if attr["cidr_block"] != "172.16.0.0/16" {
|
||||
return fmt.Errorf("bad cidr_block %s", attr["cidr_block"])
|
||||
if attr["cidr_block"] != cidr {
|
||||
return fmt.Errorf("bad cidr_block %s, expected: %s", attr["cidr_block"], cidr)
|
||||
}
|
||||
if attr["tags.Name"] != "terraform-testacc-vpc-data-source" {
|
||||
if attr["tags.Name"] != tag {
|
||||
return fmt.Errorf("bad Name tag %s", attr["tags.Name"])
|
||||
}
|
||||
|
||||
@ -78,35 +88,37 @@ func testAccDataSourceAwsVpcCheck(name string) resource.TestCheckFunc {
|
||||
}
|
||||
}
|
||||
|
||||
const testAccDataSourceAwsVpcConfigIpv6 = `
|
||||
func testAccDataSourceAwsVpcConfigIpv6(cidr, tag string) string {
|
||||
return fmt.Sprintf(`
|
||||
provider "aws" {
|
||||
region = "us-west-2"
|
||||
}
|
||||
|
||||
resource "aws_vpc" "test" {
|
||||
cidr_block = "172.16.0.0/16"
|
||||
cidr_block = "%s"
|
||||
assign_generated_ipv6_cidr_block = true
|
||||
|
||||
tags {
|
||||
Name = "terraform-testacc-vpc-data-source"
|
||||
Name = "%s"
|
||||
}
|
||||
}
|
||||
|
||||
data "aws_vpc" "by_id" {
|
||||
id = "${aws_vpc.test.id}"
|
||||
}`, cidr, tag)
|
||||
}
|
||||
`
|
||||
|
||||
const testAccDataSourceAwsVpcConfig = `
|
||||
func testAccDataSourceAwsVpcConfig(cidr, tag string) string {
|
||||
return fmt.Sprintf(`
|
||||
provider "aws" {
|
||||
region = "us-west-2"
|
||||
}
|
||||
|
||||
resource "aws_vpc" "test" {
|
||||
cidr_block = "172.16.0.0/16"
|
||||
cidr_block = "%s"
|
||||
|
||||
tags {
|
||||
Name = "terraform-testacc-vpc-data-source"
|
||||
Name = "%s"
|
||||
}
|
||||
}
|
||||
|
||||
@ -129,5 +141,5 @@ data "aws_vpc" "by_filter" {
|
||||
name = "cidr"
|
||||
values = ["${aws_vpc.test.cidr_block}"]
|
||||
}
|
||||
}`, cidr, tag)
|
||||
}
|
||||
`
|
||||
|
@ -0,0 +1,30 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccAWSAPIGatewayUsagePlan_importBasic(t *testing.T) {
|
||||
resourceName := "aws_api_gateway_usage_plan.main"
|
||||
rName := acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanBasicConfig(rName),
|
||||
},
|
||||
|
||||
{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
31
builtin/providers/aws/import_aws_iam_account_alias_test.go
Normal file
31
builtin/providers/aws/import_aws_iam_account_alias_test.go
Normal file
@ -0,0 +1,31 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccAWSIAMAccountAlias_importBasic(t *testing.T) {
|
||||
resourceName := "aws_iam_account_alias.test"
|
||||
|
||||
rstring := acctest.RandString(5)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSIAMAccountAliasDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAWSIAMAccountAliasConfig(rstring),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
@ -23,11 +23,11 @@ func TestAccAWSNetworkAcl_importBasic(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSNetworkAclEgressNIngressConfig,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
ResourceName: "aws_network_acl.bar",
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
|
@ -51,6 +51,7 @@ func resourceAwsRouteTableImportState(
|
||||
d.SetType("aws_route")
|
||||
d.Set("route_table_id", id)
|
||||
d.Set("destination_cidr_block", route.DestinationCidrBlock)
|
||||
d.Set("destination_ipv6_cidr_block", route.DestinationIpv6CidrBlock)
|
||||
d.SetId(routeIDHash(d, route))
|
||||
results = append(results, d)
|
||||
}
|
||||
|
@ -23,11 +23,11 @@ func TestAccAWSRouteTable_importBasic(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckRouteTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccRouteTableConfig,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
ResourceName: "aws_route_table.foo",
|
||||
ImportState: true,
|
||||
ImportStateCheck: checkFn,
|
||||
@ -51,11 +51,11 @@ func TestAccAWSRouteTable_complex(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckRouteTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccRouteTableConfig_complexImport,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
ResourceName: "aws_route_table.mod",
|
||||
ImportState: true,
|
||||
ImportStateCheck: checkFn,
|
||||
|
@ -66,13 +66,20 @@ func resourceAwsSecurityGroupImportStatePerm(sg *ec2.SecurityGroup, ruleType str
|
||||
p := &ec2.IpPermission{
|
||||
FromPort: perm.FromPort,
|
||||
IpProtocol: perm.IpProtocol,
|
||||
IpRanges: perm.IpRanges,
|
||||
PrefixListIds: perm.PrefixListIds,
|
||||
ToPort: perm.ToPort,
|
||||
|
||||
UserIdGroupPairs: []*ec2.UserIdGroupPair{pair},
|
||||
}
|
||||
|
||||
if perm.Ipv6Ranges != nil {
|
||||
p.Ipv6Ranges = perm.Ipv6Ranges
|
||||
}
|
||||
|
||||
if perm.IpRanges != nil {
|
||||
p.IpRanges = perm.IpRanges
|
||||
}
|
||||
|
||||
r, err := resourceAwsSecurityGroupImportStatePermPair(sg, ruleType, p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -23,11 +23,39 @@ func TestAccAWSSecurityGroup_importBasic(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfig,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
ResourceName: "aws_security_group.web",
|
||||
ImportState: true,
|
||||
ImportStateCheck: checkFn,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSSecurityGroup_importIpv6(t *testing.T) {
|
||||
checkFn := func(s []*terraform.InstanceState) error {
|
||||
// Expect 3: group, 2 rules
|
||||
if len(s) != 3 {
|
||||
return fmt.Errorf("expected 3 states: %#v", s)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfigIpv6,
|
||||
},
|
||||
|
||||
{
|
||||
ResourceName: "aws_security_group.web",
|
||||
ImportState: true,
|
||||
ImportStateCheck: checkFn,
|
||||
@ -42,11 +70,11 @@ func TestAccAWSSecurityGroup_importSelf(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfig_importSelf,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
ResourceName: "aws_security_group.allow_all",
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
@ -61,11 +89,11 @@ func TestAccAWSSecurityGroup_importSourceSecurityGroup(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSSecurityGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSSecurityGroupConfig_importSourceSecurityGroup,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
ResourceName: "aws_security_group.test_group_1",
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
|
@ -32,7 +32,14 @@ func expandNetworkAclEntries(configured []interface{}, entryType string) ([]*ec2
|
||||
Egress: aws.Bool(entryType == "egress"),
|
||||
RuleAction: aws.String(data["action"].(string)),
|
||||
RuleNumber: aws.Int64(int64(data["rule_no"].(int))),
|
||||
CidrBlock: aws.String(data["cidr_block"].(string)),
|
||||
}
|
||||
|
||||
if v, ok := data["ipv6_cidr_block"]; ok {
|
||||
e.Ipv6CidrBlock = aws.String(v.(string))
|
||||
}
|
||||
|
||||
if v, ok := data["cidr_block"]; ok {
|
||||
e.CidrBlock = aws.String(v.(string))
|
||||
}
|
||||
|
||||
// Specify additional required fields for ICMP
|
||||
@ -55,14 +62,24 @@ func flattenNetworkAclEntries(list []*ec2.NetworkAclEntry) []map[string]interfac
|
||||
entries := make([]map[string]interface{}, 0, len(list))
|
||||
|
||||
for _, entry := range list {
|
||||
entries = append(entries, map[string]interface{}{
|
||||
"from_port": *entry.PortRange.From,
|
||||
"to_port": *entry.PortRange.To,
|
||||
"action": *entry.RuleAction,
|
||||
"rule_no": *entry.RuleNumber,
|
||||
"protocol": *entry.Protocol,
|
||||
"cidr_block": *entry.CidrBlock,
|
||||
})
|
||||
|
||||
newEntry := map[string]interface{}{
|
||||
"from_port": *entry.PortRange.From,
|
||||
"to_port": *entry.PortRange.To,
|
||||
"action": *entry.RuleAction,
|
||||
"rule_no": *entry.RuleNumber,
|
||||
"protocol": *entry.Protocol,
|
||||
}
|
||||
|
||||
if entry.CidrBlock != nil {
|
||||
newEntry["cidr_block"] = *entry.CidrBlock
|
||||
}
|
||||
|
||||
if entry.Ipv6CidrBlock != nil {
|
||||
newEntry["ipv6_cidr_block"] = *entry.Ipv6CidrBlock
|
||||
}
|
||||
|
||||
entries = append(entries, newEntry)
|
||||
}
|
||||
|
||||
return entries
|
||||
|
@ -219,6 +219,8 @@ func Provider() terraform.ResourceProvider {
|
||||
"aws_api_gateway_model": resourceAwsApiGatewayModel(),
|
||||
"aws_api_gateway_resource": resourceAwsApiGatewayResource(),
|
||||
"aws_api_gateway_rest_api": resourceAwsApiGatewayRestApi(),
|
||||
"aws_api_gateway_usage_plan": resourceAwsApiGatewayUsagePlan(),
|
||||
"aws_api_gateway_usage_plan_key": resourceAwsApiGatewayUsagePlanKey(),
|
||||
"aws_app_cookie_stickiness_policy": resourceAwsAppCookieStickinessPolicy(),
|
||||
"aws_appautoscaling_target": resourceAwsAppautoscalingTarget(),
|
||||
"aws_appautoscaling_policy": resourceAwsAppautoscalingPolicy(),
|
||||
@ -298,6 +300,7 @@ func Provider() terraform.ResourceProvider {
|
||||
"aws_flow_log": resourceAwsFlowLog(),
|
||||
"aws_glacier_vault": resourceAwsGlacierVault(),
|
||||
"aws_iam_access_key": resourceAwsIamAccessKey(),
|
||||
"aws_iam_account_alias": resourceAwsIamAccountAlias(),
|
||||
"aws_iam_account_password_policy": resourceAwsIamAccountPasswordPolicy(),
|
||||
"aws_iam_group_policy": resourceAwsIamGroupPolicy(),
|
||||
"aws_iam_group": resourceAwsIamGroup(),
|
||||
|
@ -69,7 +69,6 @@ func resourceAwsAlb() *schema.Resource {
|
||||
"subnets": {
|
||||
Type: schema.TypeSet,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
Set: schema.HashString,
|
||||
},
|
||||
@ -312,6 +311,20 @@ func resourceAwsAlbUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
}
|
||||
|
||||
if d.HasChange("subnets") {
|
||||
subnets := expandStringList(d.Get("subnets").(*schema.Set).List())
|
||||
|
||||
params := &elbv2.SetSubnetsInput{
|
||||
LoadBalancerArn: aws.String(d.Id()),
|
||||
Subnets: subnets,
|
||||
}
|
||||
|
||||
_, err := elbconn.SetSubnets(params)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failure Setting ALB Subnets: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return resourceAwsAlbRead(d, meta)
|
||||
}
|
||||
|
||||
|
@ -179,6 +179,35 @@ func TestAccAWSALB_updatedSecurityGroups(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSALB_updatedSubnets(t *testing.T) {
|
||||
var pre, post elbv2.LoadBalancer
|
||||
albName := fmt.Sprintf("testaccawsalb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
IDRefreshName: "aws_alb.alb_test",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSALBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSALBConfig_basic(albName),
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
testAccCheckAWSALBExists("aws_alb.alb_test", &pre),
|
||||
resource.TestCheckResourceAttr("aws_alb.alb_test", "subnets.#", "2"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSALBConfig_updateSubnets(albName),
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
testAccCheckAWSALBExists("aws_alb.alb_test", &post),
|
||||
resource.TestCheckResourceAttr("aws_alb.alb_test", "subnets.#", "3"),
|
||||
testAccCheckAWSAlbARNs(&pre, &post),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// TestAccAWSALB_noSecurityGroup regression tests the issue in #8264,
|
||||
// where if an ALB is created without a security group, a default one
|
||||
// is assigned.
|
||||
@ -426,6 +455,73 @@ resource "aws_security_group" "alb_test" {
|
||||
}`, albName)
|
||||
}
|
||||
|
||||
func testAccAWSALBConfig_updateSubnets(albName string) string {
|
||||
return fmt.Sprintf(`resource "aws_alb" "alb_test" {
|
||||
name = "%s"
|
||||
internal = true
|
||||
security_groups = ["${aws_security_group.alb_test.id}"]
|
||||
subnets = ["${aws_subnet.alb_test.*.id}"]
|
||||
|
||||
idle_timeout = 30
|
||||
enable_deletion_protection = false
|
||||
|
||||
tags {
|
||||
TestName = "TestAccAWSALB_basic"
|
||||
}
|
||||
}
|
||||
|
||||
variable "subnets" {
|
||||
default = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
type = "list"
|
||||
}
|
||||
|
||||
data "aws_availability_zones" "available" {}
|
||||
|
||||
resource "aws_vpc" "alb_test" {
|
||||
cidr_block = "10.0.0.0/16"
|
||||
|
||||
tags {
|
||||
TestName = "TestAccAWSALB_basic"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_subnet" "alb_test" {
|
||||
count = 3
|
||||
vpc_id = "${aws_vpc.alb_test.id}"
|
||||
cidr_block = "${element(var.subnets, count.index)}"
|
||||
map_public_ip_on_launch = true
|
||||
availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}"
|
||||
|
||||
tags {
|
||||
TestName = "TestAccAWSALB_basic"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_security_group" "alb_test" {
|
||||
name = "allow_all_alb_test"
|
||||
description = "Used for ALB Testing"
|
||||
vpc_id = "${aws_vpc.alb_test.id}"
|
||||
|
||||
ingress {
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
}
|
||||
|
||||
egress {
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
}
|
||||
|
||||
tags {
|
||||
TestName = "TestAccAWSALB_basic"
|
||||
}
|
||||
}`, albName)
|
||||
}
|
||||
|
||||
func testAccAWSALBConfig_generatedName() string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_alb" "alb_test" {
|
||||
|
@ -13,9 +13,17 @@ import (
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/hashcode"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
const (
|
||||
AWSAMIRetryTimeout = 10 * time.Minute
|
||||
AWSAMIDeleteRetryTimeout = 20 * time.Minute
|
||||
AWSAMIRetryDelay = 5 * time.Second
|
||||
AWSAMIRetryMinTimeout = 3 * time.Second
|
||||
)
|
||||
|
||||
func resourceAwsAmi() *schema.Resource {
|
||||
// Our schema is shared also with aws_ami_copy and aws_ami_from_instance
|
||||
resourceSchema := resourceAwsAmiCommonSchema(false)
|
||||
@ -281,7 +289,56 @@ func resourceAwsAmiDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that the image is actually removed, if not we need to wait for it to be removed
|
||||
if err := resourceAwsAmiWaitForDestroy(d.Id(), client); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// No error, ami was deleted successfully
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
func AMIStateRefreshFunc(client *ec2.EC2, id string) resource.StateRefreshFunc {
|
||||
return func() (interface{}, string, error) {
|
||||
emptyResp := &ec2.DescribeImagesOutput{}
|
||||
|
||||
resp, err := client.DescribeImages(&ec2.DescribeImagesInput{ImageIds: []*string{aws.String(id)}})
|
||||
if err != nil {
|
||||
if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAMIID.NotFound" {
|
||||
return emptyResp, "destroyed", nil
|
||||
} else if resp != nil && len(resp.Images) == 0 {
|
||||
return emptyResp, "destroyed", nil
|
||||
} else {
|
||||
return emptyResp, "", fmt.Errorf("Error on refresh: %+v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if resp == nil || resp.Images == nil || len(resp.Images) == 0 {
|
||||
return emptyResp, "destroyed", nil
|
||||
}
|
||||
|
||||
// AMI is valid, so return it's state
|
||||
return resp.Images[0], *resp.Images[0].State, nil
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAwsAmiWaitForDestroy(id string, client *ec2.EC2) error {
|
||||
log.Printf("Waiting for AMI %s to be deleted...", id)
|
||||
|
||||
stateConf := &resource.StateChangeConf{
|
||||
Pending: []string{"available", "pending", "failed"},
|
||||
Target: []string{"destroyed"},
|
||||
Refresh: AMIStateRefreshFunc(client, id),
|
||||
Timeout: AWSAMIDeleteRetryTimeout,
|
||||
Delay: AWSAMIRetryDelay,
|
||||
MinTimeout: AWSAMIRetryTimeout,
|
||||
}
|
||||
|
||||
_, err := stateConf.WaitForState()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error waiting for AMI (%s) to be deleted: %v", id, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -289,51 +346,20 @@ func resourceAwsAmiDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
func resourceAwsAmiWaitForAvailable(id string, client *ec2.EC2) (*ec2.Image, error) {
|
||||
log.Printf("Waiting for AMI %s to become available...", id)
|
||||
|
||||
req := &ec2.DescribeImagesInput{
|
||||
ImageIds: []*string{aws.String(id)},
|
||||
stateConf := &resource.StateChangeConf{
|
||||
Pending: []string{"pending"},
|
||||
Target: []string{"available"},
|
||||
Refresh: AMIStateRefreshFunc(client, id),
|
||||
Timeout: AWSAMIRetryTimeout,
|
||||
Delay: AWSAMIRetryDelay,
|
||||
MinTimeout: AWSAMIRetryMinTimeout,
|
||||
}
|
||||
pollsWhereNotFound := 0
|
||||
for {
|
||||
res, err := client.DescribeImages(req)
|
||||
if err != nil {
|
||||
// When using RegisterImage (for aws_ami) the AMI sometimes isn't available at all
|
||||
// right after the API responds, so we need to tolerate a couple Not Found errors
|
||||
// before an available AMI shows up.
|
||||
if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidAMIID.NotFound" {
|
||||
pollsWhereNotFound++
|
||||
// We arbitrarily stop polling after getting a "not found" error five times,
|
||||
// assuming that the AMI has been deleted by something other than Terraform.
|
||||
if pollsWhereNotFound > 5 {
|
||||
return nil, fmt.Errorf("gave up waiting for AMI to be created: %s", err)
|
||||
}
|
||||
time.Sleep(4 * time.Second)
|
||||
continue
|
||||
}
|
||||
return nil, fmt.Errorf("error reading AMI: %s", err)
|
||||
}
|
||||
|
||||
if len(res.Images) != 1 {
|
||||
return nil, fmt.Errorf("new AMI vanished while pending")
|
||||
}
|
||||
|
||||
state := *res.Images[0].State
|
||||
|
||||
if state == "pending" {
|
||||
// Give it a few seconds before we poll again.
|
||||
time.Sleep(4 * time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
if state == "available" {
|
||||
// We're done!
|
||||
return res.Images[0], nil
|
||||
}
|
||||
|
||||
// If we're not pending or available then we're in one of the invalid/error
|
||||
// states, so stop polling and bail out.
|
||||
stateReason := *res.Images[0].StateReason
|
||||
return nil, fmt.Errorf("new AMI became %s while pending: %s", state, stateReason)
|
||||
info, err := stateConf.WaitForState()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error waiting for AMI (%s) to be ready: %v", id, err)
|
||||
}
|
||||
return info.(*ec2.Image), nil
|
||||
}
|
||||
|
||||
func resourceAwsAmiCommonSchema(computed bool) map[string]*schema.Schema {
|
||||
|
@ -9,6 +9,7 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
@ -16,13 +17,14 @@ import (
|
||||
func TestAccAWSAMIFromInstance(t *testing.T) {
|
||||
var amiId string
|
||||
snapshots := []string{}
|
||||
rInt := acctest.RandInt()
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAWSAMIFromInstanceConfig,
|
||||
{
|
||||
Config: testAccAWSAMIFromInstanceConfig(rInt),
|
||||
Check: func(state *terraform.State) error {
|
||||
rs, ok := state.RootModule().Resources["aws_ami_from_instance.test"]
|
||||
if !ok {
|
||||
@ -51,13 +53,13 @@ func TestAccAWSAMIFromInstance(t *testing.T) {
|
||||
|
||||
image := describe.Images[0]
|
||||
if expected := "available"; *image.State != expected {
|
||||
return fmt.Errorf("invalid image state; expected %v, got %v", expected, image.State)
|
||||
return fmt.Errorf("invalid image state; expected %v, got %v", expected, *image.State)
|
||||
}
|
||||
if expected := "machine"; *image.ImageType != expected {
|
||||
return fmt.Errorf("wrong image type; expected %v, got %v", expected, image.ImageType)
|
||||
return fmt.Errorf("wrong image type; expected %v, got %v", expected, *image.ImageType)
|
||||
}
|
||||
if expected := "terraform-acc-ami-from-instance"; *image.Name != expected {
|
||||
return fmt.Errorf("wrong name; expected %v, got %v", expected, image.Name)
|
||||
if expected := fmt.Sprintf("terraform-acc-ami-from-instance-%d", rInt); *image.Name != expected {
|
||||
return fmt.Errorf("wrong name; expected %v, got %v", expected, *image.Name)
|
||||
}
|
||||
|
||||
for _, bdm := range image.BlockDeviceMappings {
|
||||
@ -137,24 +139,25 @@ func TestAccAWSAMIFromInstance(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
var testAccAWSAMIFromInstanceConfig = `
|
||||
provider "aws" {
|
||||
region = "us-east-1"
|
||||
}
|
||||
func testAccAWSAMIFromInstanceConfig(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
provider "aws" {
|
||||
region = "us-east-1"
|
||||
}
|
||||
|
||||
resource "aws_instance" "test" {
|
||||
// This AMI has one block device mapping, so we expect to have
|
||||
// one snapshot in our created AMI.
|
||||
ami = "ami-408c7f28"
|
||||
instance_type = "t1.micro"
|
||||
tags {
|
||||
Name = "testAccAWSAMIFromInstanceConfig_TestAMI"
|
||||
}
|
||||
}
|
||||
resource "aws_instance" "test" {
|
||||
// This AMI has one block device mapping, so we expect to have
|
||||
// one snapshot in our created AMI.
|
||||
ami = "ami-408c7f28"
|
||||
instance_type = "t1.micro"
|
||||
tags {
|
||||
Name = "testAccAWSAMIFromInstanceConfig_TestAMI"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_ami_from_instance" "test" {
|
||||
name = "terraform-acc-ami-from-instance"
|
||||
description = "Testing Terraform aws_ami_from_instance resource"
|
||||
source_instance_id = "${aws_instance.test.id}"
|
||||
resource "aws_ami_from_instance" "test" {
|
||||
name = "terraform-acc-ami-from-instance-%d"
|
||||
description = "Testing Terraform aws_ami_from_instance resource"
|
||||
source_instance_id = "${aws_instance.test.id}"
|
||||
}`, rInt)
|
||||
}
|
||||
`
|
||||
|
@ -42,8 +42,9 @@ func resourceAwsApiGatewayApiKey() *schema.Resource {
|
||||
},
|
||||
|
||||
"stage_key": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Deprecated: "Since the API Gateway usage plans feature was launched on August 11, 2016, usage plans are now required to associate an API key with an API stage",
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"rest_api_id": {
|
||||
@ -68,6 +69,15 @@ func resourceAwsApiGatewayApiKey() *schema.Resource {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"value": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
Sensitive: true,
|
||||
ValidateFunc: validateApiGatewayApiKeyValue,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -80,6 +90,7 @@ func resourceAwsApiGatewayApiKeyCreate(d *schema.ResourceData, meta interface{})
|
||||
Name: aws.String(d.Get("name").(string)),
|
||||
Description: aws.String(d.Get("description").(string)),
|
||||
Enabled: aws.Bool(d.Get("enabled").(bool)),
|
||||
Value: aws.String(d.Get("value").(string)),
|
||||
StageKeys: expandApiGatewayStageKeys(d),
|
||||
})
|
||||
if err != nil {
|
||||
@ -96,7 +107,8 @@ func resourceAwsApiGatewayApiKeyRead(d *schema.ResourceData, meta interface{}) e
|
||||
log.Printf("[DEBUG] Reading API Gateway API Key: %s", d.Id())
|
||||
|
||||
apiKey, err := conn.GetApiKey(&apigateway.GetApiKeyInput{
|
||||
ApiKey: aws.String(d.Id()),
|
||||
ApiKey: aws.String(d.Id()),
|
||||
IncludeValue: aws.Bool(true),
|
||||
})
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
|
||||
@ -111,6 +123,7 @@ func resourceAwsApiGatewayApiKeyRead(d *schema.ResourceData, meta interface{}) e
|
||||
d.Set("description", apiKey.Description)
|
||||
d.Set("enabled", apiKey.Enabled)
|
||||
d.Set("stage_key", flattenApiGatewayStageKeys(apiKey.StageKeys))
|
||||
d.Set("value", apiKey.Value)
|
||||
|
||||
if err := d.Set("created_date", apiKey.CreatedDate.Format(time.RFC3339)); err != nil {
|
||||
log.Printf("[DEBUG] Error setting created_date: %s", err)
|
||||
|
@ -33,6 +33,8 @@ func TestAccAWSAPIGatewayApiKey_basic(t *testing.T) {
|
||||
"aws_api_gateway_api_key.test", "created_date"),
|
||||
resource.TestCheckResourceAttrSet(
|
||||
"aws_api_gateway_api_key.test", "last_updated_date"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_api_gateway_api_key.custom", "value", "MyCustomToken#@&\"'(§!ç)-_*$€¨^£%ù+=/:.;?,|"),
|
||||
),
|
||||
},
|
||||
},
|
||||
@ -176,4 +178,15 @@ resource "aws_api_gateway_api_key" "test" {
|
||||
stage_name = "${aws_api_gateway_deployment.test.stage_name}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_api_key" "custom" {
|
||||
name = "bar"
|
||||
enabled = true
|
||||
value = "MyCustomToken#@&\"'(§!ç)-_*$€¨^£%ù+=/:.;?,|"
|
||||
|
||||
stage_key {
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
stage_name = "${aws_api_gateway_deployment.test.stage_name}"
|
||||
}
|
||||
}
|
||||
`
|
||||
|
@ -21,27 +21,34 @@ func resourceAwsApiGatewayDomainName() *schema.Resource {
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
|
||||
//According to AWS Documentation, ACM will be the only way to add certificates
|
||||
//to ApiGateway DomainNames. When this happens, we will be deprecating all certificate methods
|
||||
//except certificate_arn. We are not quite sure when this will happen.
|
||||
"certificate_body": {
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Optional: true,
|
||||
ConflictsWith: []string{"certificate_arn"},
|
||||
},
|
||||
|
||||
"certificate_chain": {
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Optional: true,
|
||||
ConflictsWith: []string{"certificate_arn"},
|
||||
},
|
||||
|
||||
"certificate_name": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ConflictsWith: []string{"certificate_arn"},
|
||||
},
|
||||
|
||||
"certificate_private_key": {
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Optional: true,
|
||||
ConflictsWith: []string{"certificate_arn"},
|
||||
},
|
||||
|
||||
"domain_name": {
|
||||
@ -50,6 +57,12 @@ func resourceAwsApiGatewayDomainName() *schema.Resource {
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"certificate_arn": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ConflictsWith: []string{"certificate_body", "certificate_chain", "certificate_name", "certificate_private_key"},
|
||||
},
|
||||
|
||||
"cloudfront_domain_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
@ -72,13 +85,31 @@ func resourceAwsApiGatewayDomainNameCreate(d *schema.ResourceData, meta interfac
|
||||
conn := meta.(*AWSClient).apigateway
|
||||
log.Printf("[DEBUG] Creating API Gateway Domain Name")
|
||||
|
||||
domainName, err := conn.CreateDomainName(&apigateway.CreateDomainNameInput{
|
||||
CertificateBody: aws.String(d.Get("certificate_body").(string)),
|
||||
CertificateChain: aws.String(d.Get("certificate_chain").(string)),
|
||||
CertificateName: aws.String(d.Get("certificate_name").(string)),
|
||||
CertificatePrivateKey: aws.String(d.Get("certificate_private_key").(string)),
|
||||
DomainName: aws.String(d.Get("domain_name").(string)),
|
||||
})
|
||||
params := &apigateway.CreateDomainNameInput{
|
||||
DomainName: aws.String(d.Get("domain_name").(string)),
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("certificate_arn"); ok {
|
||||
params.CertificateArn = aws.String(v.(string))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("certificate_name"); ok {
|
||||
params.CertificateName = aws.String(v.(string))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("certificate_body"); ok {
|
||||
params.CertificateBody = aws.String(v.(string))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("certificate_chain"); ok {
|
||||
params.CertificateChain = aws.String(v.(string))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("certificate_private_key"); ok {
|
||||
params.CertificatePrivateKey = aws.String(v.(string))
|
||||
}
|
||||
|
||||
domainName, err := conn.CreateDomainName(params)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating API Gateway Domain Name: %s", err)
|
||||
}
|
||||
@ -113,6 +144,7 @@ func resourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interface{
|
||||
}
|
||||
d.Set("cloudfront_domain_name", domainName.DistributionDomainName)
|
||||
d.Set("domain_name", domainName.DomainName)
|
||||
d.Set("certificate_arn", domainName.CertificateArn)
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -128,6 +160,14 @@ func resourceAwsApiGatewayDomainNameUpdateOperations(d *schema.ResourceData) []*
|
||||
})
|
||||
}
|
||||
|
||||
if d.HasChange("certificate_arn") {
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("replace"),
|
||||
Path: aws.String("/certificateArn"),
|
||||
Value: aws.String(d.Get("certificate_arn").(string)),
|
||||
})
|
||||
}
|
||||
|
||||
return operations
|
||||
}
|
||||
|
||||
@ -139,6 +179,7 @@ func resourceAwsApiGatewayDomainNameUpdate(d *schema.ResourceData, meta interfac
|
||||
DomainName: aws.String(d.Id()),
|
||||
PatchOperations: resourceAwsApiGatewayDomainNameUpdateOperations(d),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -15,6 +15,7 @@ import (
|
||||
|
||||
func TestAccAWSAPIGatewayMethod_basic(t *testing.T) {
|
||||
var conf apigateway.Method
|
||||
rInt := acctest.RandInt()
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
@ -22,7 +23,7 @@ func TestAccAWSAPIGatewayMethod_basic(t *testing.T) {
|
||||
CheckDestroy: testAccCheckAWSAPIGatewayMethodDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSAPIGatewayMethodConfig,
|
||||
Config: testAccAWSAPIGatewayMethodConfig(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayMethodExists("aws_api_gateway_method.test", &conf),
|
||||
testAccCheckAWSAPIGatewayMethodAttributes(&conf),
|
||||
@ -36,7 +37,7 @@ func TestAccAWSAPIGatewayMethod_basic(t *testing.T) {
|
||||
},
|
||||
|
||||
{
|
||||
Config: testAccAWSAPIGatewayMethodConfigUpdate,
|
||||
Config: testAccAWSAPIGatewayMethodConfigUpdate(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayMethodExists("aws_api_gateway_method.test", &conf),
|
||||
testAccCheckAWSAPIGatewayMethodAttributesUpdate(&conf),
|
||||
@ -72,7 +73,7 @@ func TestAccAWSAPIGatewayMethod_customauthorizer(t *testing.T) {
|
||||
},
|
||||
|
||||
{
|
||||
Config: testAccAWSAPIGatewayMethodConfigUpdate,
|
||||
Config: testAccAWSAPIGatewayMethodConfigUpdate(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayMethodExists("aws_api_gateway_method.test", &conf),
|
||||
testAccCheckAWSAPIGatewayMethodAttributesUpdate(&conf),
|
||||
@ -199,7 +200,7 @@ func testAccCheckAWSAPIGatewayMethodDestroy(s *terraform.State) error {
|
||||
func testAccAWSAPIGatewayMethodConfigWithCustomAuthorizer(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_api_gateway_rest_api" "test" {
|
||||
name = "tf-acc-test-custom-auth"
|
||||
name = "tf-acc-test-custom-auth-%d"
|
||||
}
|
||||
|
||||
resource "aws_iam_role" "invocation_role" {
|
||||
@ -261,7 +262,7 @@ EOF
|
||||
resource "aws_lambda_function" "authorizer" {
|
||||
filename = "test-fixtures/lambdatest.zip"
|
||||
source_code_hash = "${base64sha256(file("test-fixtures/lambdatest.zip"))}"
|
||||
function_name = "tf_acc_api_gateway_authorizer"
|
||||
function_name = "tf_acc_api_gateway_authorizer_%d"
|
||||
role = "${aws_iam_role.iam_for_lambda.arn}"
|
||||
handler = "exports.example"
|
||||
runtime = "nodejs4.3"
|
||||
@ -295,12 +296,13 @@ resource "aws_api_gateway_method" "test" {
|
||||
"method.request.header.Content-Type" = false
|
||||
"method.request.querystring.page" = true
|
||||
}
|
||||
}`, rInt, rInt, rInt)
|
||||
}`, rInt, rInt, rInt, rInt, rInt)
|
||||
}
|
||||
|
||||
const testAccAWSAPIGatewayMethodConfig = `
|
||||
func testAccAWSAPIGatewayMethodConfig(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_api_gateway_rest_api" "test" {
|
||||
name = "test"
|
||||
name = "tf-acc-test-apig-method-%d"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_resource" "test" {
|
||||
@ -324,11 +326,13 @@ resource "aws_api_gateway_method" "test" {
|
||||
"method.request.querystring.page" = true
|
||||
}
|
||||
}
|
||||
`
|
||||
`, rInt)
|
||||
}
|
||||
|
||||
const testAccAWSAPIGatewayMethodConfigUpdate = `
|
||||
func testAccAWSAPIGatewayMethodConfigUpdate(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_api_gateway_rest_api" "test" {
|
||||
name = "test"
|
||||
name = "tf-acc-test-apig-method-%d"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_resource" "test" {
|
||||
@ -351,4 +355,5 @@ resource "aws_api_gateway_method" "test" {
|
||||
"method.request.querystring.page" = false
|
||||
}
|
||||
}
|
||||
`
|
||||
`, rInt)
|
||||
}
|
||||
|
499
builtin/providers/aws/resource_aws_api_gateway_usage_plan.go
Normal file
499
builtin/providers/aws/resource_aws_api_gateway_usage_plan.go
Normal file
@ -0,0 +1,499 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"errors"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/apigateway"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func resourceAwsApiGatewayUsagePlan() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAwsApiGatewayUsagePlanCreate,
|
||||
Read: resourceAwsApiGatewayUsagePlanRead,
|
||||
Update: resourceAwsApiGatewayUsagePlanUpdate,
|
||||
Delete: resourceAwsApiGatewayUsagePlanDelete,
|
||||
Importer: &schema.ResourceImporter{
|
||||
State: schema.ImportStatePassthrough,
|
||||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Required: true, // Required since not addable nor removable afterwards
|
||||
},
|
||||
|
||||
"description": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"api_stages": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"api_id": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"stage": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"quota_settings": {
|
||||
Type: schema.TypeSet,
|
||||
MaxItems: 1,
|
||||
Optional: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"limit": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true, // Required as not removable singularly
|
||||
},
|
||||
|
||||
"offset": {
|
||||
Type: schema.TypeInt,
|
||||
Default: 0,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"period": {
|
||||
Type: schema.TypeString,
|
||||
Required: true, // Required as not removable
|
||||
ValidateFunc: validateApiGatewayUsagePlanQuotaSettingsPeriod,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"throttle_settings": {
|
||||
Type: schema.TypeSet,
|
||||
MaxItems: 1,
|
||||
Optional: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"burst_limit": {
|
||||
Type: schema.TypeInt,
|
||||
Default: 0,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"rate_limit": {
|
||||
Type: schema.TypeInt,
|
||||
Default: 0,
|
||||
Optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"product_code": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAwsApiGatewayUsagePlanCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).apigateway
|
||||
log.Print("[DEBUG] Creating API Gateway Usage Plan")
|
||||
|
||||
params := &apigateway.CreateUsagePlanInput{
|
||||
Name: aws.String(d.Get("name").(string)),
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("description"); ok {
|
||||
params.Description = aws.String(v.(string))
|
||||
}
|
||||
|
||||
if s, ok := d.GetOk("api_stages"); ok {
|
||||
stages := s.([]interface{})
|
||||
as := make([]*apigateway.ApiStage, 0)
|
||||
|
||||
for _, v := range stages {
|
||||
sv := v.(map[string]interface{})
|
||||
stage := &apigateway.ApiStage{}
|
||||
|
||||
if v, ok := sv["api_id"].(string); ok && v != "" {
|
||||
stage.ApiId = aws.String(v)
|
||||
}
|
||||
|
||||
if v, ok := sv["stage"].(string); ok && v != "" {
|
||||
stage.Stage = aws.String(v)
|
||||
}
|
||||
|
||||
as = append(as, stage)
|
||||
}
|
||||
|
||||
if len(as) > 0 {
|
||||
params.ApiStages = as
|
||||
}
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("quota_settings"); ok {
|
||||
settings := v.(*schema.Set).List()
|
||||
q, ok := settings[0].(map[string]interface{})
|
||||
|
||||
if errors := validateApiGatewayUsagePlanQuotaSettings(q); len(errors) > 0 {
|
||||
return fmt.Errorf("Error validating the quota settings: %v", errors)
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return errors.New("At least one field is expected inside quota_settings")
|
||||
}
|
||||
|
||||
qs := &apigateway.QuotaSettings{}
|
||||
|
||||
if sv, ok := q["limit"].(int); ok {
|
||||
qs.Limit = aws.Int64(int64(sv))
|
||||
}
|
||||
|
||||
if sv, ok := q["offset"].(int); ok {
|
||||
qs.Offset = aws.Int64(int64(sv))
|
||||
}
|
||||
|
||||
if sv, ok := q["period"].(string); ok && sv != "" {
|
||||
qs.Period = aws.String(sv)
|
||||
}
|
||||
|
||||
params.Quota = qs
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("throttle_settings"); ok {
|
||||
settings := v.(*schema.Set).List()
|
||||
q, ok := settings[0].(map[string]interface{})
|
||||
|
||||
if !ok {
|
||||
return errors.New("At least one field is expected inside throttle_settings")
|
||||
}
|
||||
|
||||
ts := &apigateway.ThrottleSettings{}
|
||||
|
||||
if sv, ok := q["burst_limit"].(int); ok {
|
||||
ts.BurstLimit = aws.Int64(int64(sv))
|
||||
}
|
||||
|
||||
if sv, ok := q["rate_limit"].(float64); ok {
|
||||
ts.RateLimit = aws.Float64(float64(sv))
|
||||
}
|
||||
|
||||
params.Throttle = ts
|
||||
}
|
||||
|
||||
up, err := conn.CreateUsagePlan(params)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating API Gateway Usage Plan: %s", err)
|
||||
}
|
||||
|
||||
d.SetId(*up.Id)
|
||||
|
||||
// Handle case of adding the product code since not addable when
|
||||
// creating the Usage Plan initially.
|
||||
if v, ok := d.GetOk("product_code"); ok {
|
||||
updateParameters := &apigateway.UpdateUsagePlanInput{
|
||||
UsagePlanId: aws.String(d.Id()),
|
||||
PatchOperations: []*apigateway.PatchOperation{
|
||||
{
|
||||
Op: aws.String("add"),
|
||||
Path: aws.String("/productCode"),
|
||||
Value: aws.String(v.(string)),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
up, err = conn.UpdateUsagePlan(updateParameters)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating the API Gateway Usage Plan product code: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return resourceAwsApiGatewayUsagePlanRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAwsApiGatewayUsagePlanRead(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).apigateway
|
||||
log.Printf("[DEBUG] Reading API Gateway Usage Plan: %s", d.Id())
|
||||
|
||||
up, err := conn.GetUsagePlan(&apigateway.GetUsagePlanInput{
|
||||
UsagePlanId: aws.String(d.Id()),
|
||||
})
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
d.Set("name", up.Name)
|
||||
d.Set("description", up.Description)
|
||||
d.Set("product_code", up.ProductCode)
|
||||
|
||||
if up.ApiStages != nil {
|
||||
if err := d.Set("api_stages", flattenApiGatewayUsageApiStages(up.ApiStages)); err != nil {
|
||||
return fmt.Errorf("[DEBUG] Error setting api_stages error: %#v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if up.Throttle != nil {
|
||||
if err := d.Set("throttle_settings", flattenApiGatewayUsagePlanThrottling(up.Throttle)); err != nil {
|
||||
return fmt.Errorf("[DEBUG] Error setting throttle_settings error: %#v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if up.Quota != nil {
|
||||
if err := d.Set("quota_settings", flattenApiGatewayUsagePlanQuota(up.Quota)); err != nil {
|
||||
return fmt.Errorf("[DEBUG] Error setting quota_settings error: %#v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAwsApiGatewayUsagePlanUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).apigateway
|
||||
log.Print("[DEBUG] Updating API Gateway Usage Plan")
|
||||
|
||||
operations := make([]*apigateway.PatchOperation, 0)
|
||||
|
||||
if d.HasChange("name") {
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("replace"),
|
||||
Path: aws.String("/name"),
|
||||
Value: aws.String(d.Get("name").(string)),
|
||||
})
|
||||
}
|
||||
|
||||
if d.HasChange("description") {
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("replace"),
|
||||
Path: aws.String("/description"),
|
||||
Value: aws.String(d.Get("description").(string)),
|
||||
})
|
||||
}
|
||||
|
||||
if d.HasChange("product_code") {
|
||||
v, ok := d.GetOk("product_code")
|
||||
|
||||
if ok {
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("replace"),
|
||||
Path: aws.String("/productCode"),
|
||||
Value: aws.String(v.(string)),
|
||||
})
|
||||
} else {
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("remove"),
|
||||
Path: aws.String("/productCode"),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if d.HasChange("api_stages") {
|
||||
o, n := d.GetChange("api_stages")
|
||||
old := o.([]interface{})
|
||||
new := n.([]interface{})
|
||||
|
||||
// Remove every stages associated. Simpler to remove and add new ones,
|
||||
// since there are no replacings.
|
||||
for _, v := range old {
|
||||
m := v.(map[string]interface{})
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("remove"),
|
||||
Path: aws.String("/apiStages"),
|
||||
Value: aws.String(fmt.Sprintf("%s:%s", m["api_id"].(string), m["stage"].(string))),
|
||||
})
|
||||
}
|
||||
|
||||
// Handle additions
|
||||
if len(new) > 0 {
|
||||
for _, v := range new {
|
||||
m := v.(map[string]interface{})
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("add"),
|
||||
Path: aws.String("/apiStages"),
|
||||
Value: aws.String(fmt.Sprintf("%s:%s", m["api_id"].(string), m["stage"].(string))),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if d.HasChange("throttle_settings") {
|
||||
o, n := d.GetChange("throttle_settings")
|
||||
|
||||
os := o.(*schema.Set)
|
||||
ns := n.(*schema.Set)
|
||||
diff := ns.Difference(os).List()
|
||||
|
||||
// Handle Removal
|
||||
if len(diff) == 0 {
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("remove"),
|
||||
Path: aws.String("/throttle"),
|
||||
})
|
||||
}
|
||||
|
||||
if len(diff) > 0 {
|
||||
d := diff[0].(map[string]interface{})
|
||||
|
||||
// Handle Replaces
|
||||
if o != nil && n != nil {
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("replace"),
|
||||
Path: aws.String("/throttle/rateLimit"),
|
||||
Value: aws.String(strconv.Itoa(d["rate_limit"].(int))),
|
||||
})
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("replace"),
|
||||
Path: aws.String("/throttle/burstLimit"),
|
||||
Value: aws.String(strconv.Itoa(d["burst_limit"].(int))),
|
||||
})
|
||||
}
|
||||
|
||||
// Handle Additions
|
||||
if o == nil && n != nil {
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("add"),
|
||||
Path: aws.String("/throttle/rateLimit"),
|
||||
Value: aws.String(strconv.Itoa(d["rate_limit"].(int))),
|
||||
})
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("add"),
|
||||
Path: aws.String("/throttle/burstLimit"),
|
||||
Value: aws.String(strconv.Itoa(d["burst_limit"].(int))),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if d.HasChange("quota_settings") {
|
||||
o, n := d.GetChange("quota_settings")
|
||||
|
||||
os := o.(*schema.Set)
|
||||
ns := n.(*schema.Set)
|
||||
diff := ns.Difference(os).List()
|
||||
|
||||
// Handle Removal
|
||||
if len(diff) == 0 {
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("remove"),
|
||||
Path: aws.String("/quota"),
|
||||
})
|
||||
}
|
||||
|
||||
if len(diff) > 0 {
|
||||
d := diff[0].(map[string]interface{})
|
||||
|
||||
if errors := validateApiGatewayUsagePlanQuotaSettings(d); len(errors) > 0 {
|
||||
return fmt.Errorf("Error validating the quota settings: %v", errors)
|
||||
}
|
||||
|
||||
// Handle Replaces
|
||||
if o != nil && n != nil {
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("replace"),
|
||||
Path: aws.String("/quota/limit"),
|
||||
Value: aws.String(strconv.Itoa(d["limit"].(int))),
|
||||
})
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("replace"),
|
||||
Path: aws.String("/quota/offset"),
|
||||
Value: aws.String(strconv.Itoa(d["offset"].(int))),
|
||||
})
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("replace"),
|
||||
Path: aws.String("/quota/period"),
|
||||
Value: aws.String(d["period"].(string)),
|
||||
})
|
||||
}
|
||||
|
||||
// Handle Additions
|
||||
if o == nil && n != nil {
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("add"),
|
||||
Path: aws.String("/quota/limit"),
|
||||
Value: aws.String(strconv.Itoa(d["limit"].(int))),
|
||||
})
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("add"),
|
||||
Path: aws.String("/quota/offset"),
|
||||
Value: aws.String(strconv.Itoa(d["offset"].(int))),
|
||||
})
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("add"),
|
||||
Path: aws.String("/quota/period"),
|
||||
Value: aws.String(d["period"].(string)),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
params := &apigateway.UpdateUsagePlanInput{
|
||||
UsagePlanId: aws.String(d.Id()),
|
||||
PatchOperations: operations,
|
||||
}
|
||||
|
||||
_, err := conn.UpdateUsagePlan(params)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error updating API Gateway Usage Plan: %s", err)
|
||||
}
|
||||
|
||||
return resourceAwsApiGatewayUsagePlanRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAwsApiGatewayUsagePlanDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).apigateway
|
||||
|
||||
// Removing existing api stages associated
|
||||
if apistages, ok := d.GetOk("api_stages"); ok {
|
||||
log.Printf("[DEBUG] Deleting API Stages associated with Usage Plan: %s", d.Id())
|
||||
stages := apistages.([]interface{})
|
||||
operations := []*apigateway.PatchOperation{}
|
||||
|
||||
for _, v := range stages {
|
||||
sv := v.(map[string]interface{})
|
||||
|
||||
operations = append(operations, &apigateway.PatchOperation{
|
||||
Op: aws.String("remove"),
|
||||
Path: aws.String("/apiStages"),
|
||||
Value: aws.String(fmt.Sprintf("%s:%s", sv["api_id"].(string), sv["stage"].(string))),
|
||||
})
|
||||
}
|
||||
|
||||
_, err := conn.UpdateUsagePlan(&apigateway.UpdateUsagePlanInput{
|
||||
UsagePlanId: aws.String(d.Id()),
|
||||
PatchOperations: operations,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error removing API Stages associated with Usage Plan: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Deleting API Gateway Usage Plan: %s", d.Id())
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
_, err := conn.DeleteUsagePlan(&apigateway.DeleteUsagePlanInput{
|
||||
UsagePlanId: aws.String(d.Id()),
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return resource.NonRetryableError(err)
|
||||
})
|
||||
}
|
112
builtin/providers/aws/resource_aws_api_gateway_usage_plan_key.go
Normal file
112
builtin/providers/aws/resource_aws_api_gateway_usage_plan_key.go
Normal file
@ -0,0 +1,112 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/apigateway"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func resourceAwsApiGatewayUsagePlanKey() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAwsApiGatewayUsagePlanKeyCreate,
|
||||
Read: resourceAwsApiGatewayUsagePlanKeyRead,
|
||||
Delete: resourceAwsApiGatewayUsagePlanKeyDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"key_id": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"key_type": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"usage_plan_id": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"value": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAwsApiGatewayUsagePlanKeyCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).apigateway
|
||||
log.Print("[DEBUG] Creating API Gateway Usage Plan Key")
|
||||
|
||||
params := &apigateway.CreateUsagePlanKeyInput{
|
||||
KeyId: aws.String(d.Get("key_id").(string)),
|
||||
KeyType: aws.String(d.Get("key_type").(string)),
|
||||
UsagePlanId: aws.String(d.Get("usage_plan_id").(string)),
|
||||
}
|
||||
|
||||
up, err := conn.CreateUsagePlanKey(params)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating API Gateway Usage Plan Key: %s", err)
|
||||
}
|
||||
|
||||
d.SetId(*up.Id)
|
||||
|
||||
return resourceAwsApiGatewayUsagePlanKeyRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceAwsApiGatewayUsagePlanKeyRead(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).apigateway
|
||||
log.Printf("[DEBUG] Reading API Gateway Usage Plan Key: %s", d.Id())
|
||||
|
||||
up, err := conn.GetUsagePlanKey(&apigateway.GetUsagePlanKeyInput{
|
||||
UsagePlanId: aws.String(d.Get("usage_plan_id").(string)),
|
||||
KeyId: aws.String(d.Get("key_id").(string)),
|
||||
})
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundException" {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
d.Set("name", up.Name)
|
||||
d.Set("value", up.Value)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAwsApiGatewayUsagePlanKeyDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).apigateway
|
||||
|
||||
log.Printf("[DEBUG] Deleting API Gateway Usage Plan Key: %s", d.Id())
|
||||
|
||||
return resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
_, err := conn.DeleteUsagePlanKey(&apigateway.DeleteUsagePlanKeyInput{
|
||||
UsagePlanId: aws.String(d.Get("usage_plan_id").(string)),
|
||||
KeyId: aws.String(d.Get("key_id").(string)),
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return resource.NonRetryableError(err)
|
||||
})
|
||||
}
|
@ -0,0 +1,232 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/apigateway"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccAWSAPIGatewayUsagePlanKey_basic(t *testing.T) {
|
||||
var conf apigateway.UsagePlanKey
|
||||
name := acctest.RandString(10)
|
||||
updatedName := acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanKeyDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanKeyBasicConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanKeyExists("aws_api_gateway_usage_plan_key.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan_key.main", "key_type", "API_KEY"),
|
||||
resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "key_id"),
|
||||
resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "key_type"),
|
||||
resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "usage_plan_id"),
|
||||
resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "name"),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan_key.main", "value", ""),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanKeyBasicUpdatedConfig(updatedName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanKeyExists("aws_api_gateway_usage_plan_key.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan_key.main", "key_type", "API_KEY"),
|
||||
resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "key_id"),
|
||||
resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "key_type"),
|
||||
resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "usage_plan_id"),
|
||||
resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "name"),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan_key.main", "value", ""),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanKeyBasicConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanKeyExists("aws_api_gateway_usage_plan_key.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan_key.main", "key_type", "API_KEY"),
|
||||
resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "key_id"),
|
||||
resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "key_type"),
|
||||
resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "usage_plan_id"),
|
||||
resource.TestCheckResourceAttrSet("aws_api_gateway_usage_plan_key.main", "name"),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan_key.main", "value", ""),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckAWSAPIGatewayUsagePlanKeyExists(n string, res *apigateway.UsagePlanKey) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No API Gateway Usage Plan Key ID is set")
|
||||
}
|
||||
|
||||
conn := testAccProvider.Meta().(*AWSClient).apigateway
|
||||
|
||||
req := &apigateway.GetUsagePlanKeyInput{
|
||||
UsagePlanId: aws.String(rs.Primary.Attributes["usage_plan_id"]),
|
||||
KeyId: aws.String(rs.Primary.Attributes["key_id"]),
|
||||
}
|
||||
up, err := conn.GetUsagePlanKey(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Reading API Gateway Usage Plan Key: %#v", up)
|
||||
|
||||
if *up.Id != rs.Primary.ID {
|
||||
return fmt.Errorf("API Gateway Usage Plan Key not found")
|
||||
}
|
||||
|
||||
*res = *up
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckAWSAPIGatewayUsagePlanKeyDestroy(s *terraform.State) error {
|
||||
conn := testAccProvider.Meta().(*AWSClient).apigateway
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "aws_api_gateway_usage_plan_key" {
|
||||
continue
|
||||
}
|
||||
|
||||
req := &apigateway.GetUsagePlanKeyInput{
|
||||
UsagePlanId: aws.String(rs.Primary.ID),
|
||||
KeyId: aws.String(rs.Primary.Attributes["key_id"]),
|
||||
}
|
||||
describe, err := conn.GetUsagePlanKey(req)
|
||||
|
||||
if err == nil {
|
||||
if describe.Id != nil && *describe.Id == rs.Primary.ID {
|
||||
return fmt.Errorf("API Gateway Usage Plan Key still exists")
|
||||
}
|
||||
}
|
||||
|
||||
aws2err, ok := err.(awserr.Error)
|
||||
if !ok {
|
||||
return err
|
||||
}
|
||||
if aws2err.Code() != "NotFoundException" {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const testAccAWSAPIGatewayUsagePlanKeyConfig = `
|
||||
resource "aws_api_gateway_rest_api" "test" {
|
||||
name = "test"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_resource" "test" {
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}"
|
||||
path_part = "test"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_method" "test" {
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
resource_id = "${aws_api_gateway_resource.test.id}"
|
||||
http_method = "GET"
|
||||
authorization = "NONE"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_method_response" "error" {
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
resource_id = "${aws_api_gateway_resource.test.id}"
|
||||
http_method = "${aws_api_gateway_method.test.http_method}"
|
||||
status_code = "400"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_integration" "test" {
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
resource_id = "${aws_api_gateway_resource.test.id}"
|
||||
http_method = "${aws_api_gateway_method.test.http_method}"
|
||||
|
||||
type = "HTTP"
|
||||
uri = "https://www.google.de"
|
||||
integration_http_method = "GET"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_integration_response" "test" {
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
resource_id = "${aws_api_gateway_resource.test.id}"
|
||||
http_method = "${aws_api_gateway_integration.test.http_method}"
|
||||
status_code = "${aws_api_gateway_method_response.error.status_code}"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_deployment" "test" {
|
||||
depends_on = ["aws_api_gateway_integration.test"]
|
||||
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
stage_name = "test"
|
||||
description = "This is a test"
|
||||
|
||||
variables = {
|
||||
"a" = "2"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_deployment" "foo" {
|
||||
depends_on = ["aws_api_gateway_integration.test"]
|
||||
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
stage_name = "foo"
|
||||
description = "This is a prod stage"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_usage_plan" "main" {
|
||||
name = "%s"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_usage_plan" "secondary" {
|
||||
name = "secondary-%s"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_api_key" "mykey" {
|
||||
name = "demo-%s"
|
||||
|
||||
stage_key {
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
stage_name = "${aws_api_gateway_deployment.foo.stage_name}"
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
func testAccAWSApiGatewayUsagePlanKeyBasicConfig(rName string) string {
|
||||
return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanKeyConfig+`
|
||||
resource "aws_api_gateway_usage_plan_key" "main" {
|
||||
key_id = "${aws_api_gateway_api_key.mykey.id}"
|
||||
key_type = "API_KEY"
|
||||
usage_plan_id = "${aws_api_gateway_usage_plan.main.id}"
|
||||
}
|
||||
`, rName, rName, rName)
|
||||
}
|
||||
|
||||
func testAccAWSApiGatewayUsagePlanKeyBasicUpdatedConfig(rName string) string {
|
||||
return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanKeyConfig+`
|
||||
resource "aws_api_gateway_usage_plan_key" "main" {
|
||||
key_id = "${aws_api_gateway_api_key.mykey.id}"
|
||||
key_type = "API_KEY"
|
||||
usage_plan_id = "${aws_api_gateway_usage_plan.secondary.id}"
|
||||
}
|
||||
`, rName, rName, rName)
|
||||
}
|
@ -0,0 +1,557 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/apigateway"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccAWSAPIGatewayUsagePlan_basic(t *testing.T) {
|
||||
var conf apigateway.UsagePlan
|
||||
name := acctest.RandString(10)
|
||||
updatedName := acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanBasicConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanBasicUpdatedConfig(updatedName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", updatedName),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanBasicConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSAPIGatewayUsagePlan_description(t *testing.T) {
|
||||
var conf apigateway.UsagePlan
|
||||
name := acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanBasicConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanDescriptionConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", "This is a description"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanDescriptionUpdatedConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", "This is a new description"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanDescriptionConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", "This is a description"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanBasicConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSAPIGatewayUsagePlan_productCode(t *testing.T) {
|
||||
var conf apigateway.UsagePlan
|
||||
name := acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanBasicConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", ""),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanProductCodeConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", "MYCODE"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanProductCodeUpdatedConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", "MYCODE2"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanProductCodeConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", "MYCODE"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanBasicConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", ""),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSAPIGatewayUsagePlan_throttling(t *testing.T) {
|
||||
var conf apigateway.UsagePlan
|
||||
name := acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanBasicConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name),
|
||||
resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanThrottlingConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.4173790118.burst_limit", "2"),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.4173790118.rate_limit", "5"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanThrottlingModifiedConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.1779463053.burst_limit", "3"),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.1779463053.rate_limit", "6"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanBasicConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name),
|
||||
resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSAPIGatewayUsagePlan_quota(t *testing.T) {
|
||||
var conf apigateway.UsagePlan
|
||||
name := acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanBasicConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name),
|
||||
resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanQuotaConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.1956747625.limit", "100"),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.1956747625.offset", "6"),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.1956747625.period", "WEEK"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanQuotaModifiedConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.3909168194.limit", "200"),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.3909168194.offset", "20"),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.3909168194.period", "MONTH"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanBasicConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name),
|
||||
resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSAPIGatewayUsagePlan_apiStages(t *testing.T) {
|
||||
var conf apigateway.UsagePlan
|
||||
name := acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
// Create UsagePlan WITH Stages as the API calls are different
|
||||
// when creating or updating.
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanApiStagesConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "api_stages.0.stage", "test"),
|
||||
),
|
||||
},
|
||||
// Handle api stages removal
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanBasicConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name),
|
||||
resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "api_stages"),
|
||||
),
|
||||
},
|
||||
// Handle api stages additions
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanApiStagesConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "api_stages.0.stage", "test"),
|
||||
),
|
||||
},
|
||||
// Handle api stages updates
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanApiStagesModifiedConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "api_stages.0.stage", "foo"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSApiGatewayUsagePlanBasicConfig(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf),
|
||||
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name),
|
||||
resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "api_stages"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckAWSAPIGatewayUsagePlanExists(n string, res *apigateway.UsagePlan) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No API Gateway Usage Plan ID is set")
|
||||
}
|
||||
|
||||
conn := testAccProvider.Meta().(*AWSClient).apigateway
|
||||
|
||||
req := &apigateway.GetUsagePlanInput{
|
||||
UsagePlanId: aws.String(rs.Primary.ID),
|
||||
}
|
||||
up, err := conn.GetUsagePlan(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if *up.Id != rs.Primary.ID {
|
||||
return fmt.Errorf("APIGateway Usage Plan not found")
|
||||
}
|
||||
|
||||
*res = *up
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckAWSAPIGatewayUsagePlanDestroy(s *terraform.State) error {
|
||||
conn := testAccProvider.Meta().(*AWSClient).apigateway
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "aws_api_gateway_usage_plan" {
|
||||
continue
|
||||
}
|
||||
|
||||
req := &apigateway.GetUsagePlanInput{
|
||||
UsagePlanId: aws.String(s.RootModule().Resources["aws_api_gateway_rest_api.test"].Primary.ID),
|
||||
}
|
||||
describe, err := conn.GetUsagePlan(req)
|
||||
|
||||
if err == nil {
|
||||
if describe.Id != nil && *describe.Id == rs.Primary.ID {
|
||||
return fmt.Errorf("API Gateway Usage Plan still exists")
|
||||
}
|
||||
}
|
||||
|
||||
aws2err, ok := err.(awserr.Error)
|
||||
if !ok {
|
||||
return err
|
||||
}
|
||||
if aws2err.Code() != "NotFoundException" {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const testAccAWSAPIGatewayUsagePlanConfig = `
|
||||
resource "aws_api_gateway_rest_api" "test" {
|
||||
name = "test"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_resource" "test" {
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}"
|
||||
path_part = "test"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_method" "test" {
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
resource_id = "${aws_api_gateway_resource.test.id}"
|
||||
http_method = "GET"
|
||||
authorization = "NONE"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_method_response" "error" {
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
resource_id = "${aws_api_gateway_resource.test.id}"
|
||||
http_method = "${aws_api_gateway_method.test.http_method}"
|
||||
status_code = "400"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_integration" "test" {
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
resource_id = "${aws_api_gateway_resource.test.id}"
|
||||
http_method = "${aws_api_gateway_method.test.http_method}"
|
||||
|
||||
type = "HTTP"
|
||||
uri = "https://www.google.de"
|
||||
integration_http_method = "GET"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_integration_response" "test" {
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
resource_id = "${aws_api_gateway_resource.test.id}"
|
||||
http_method = "${aws_api_gateway_integration.test.http_method}"
|
||||
status_code = "${aws_api_gateway_method_response.error.status_code}"
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_deployment" "test" {
|
||||
depends_on = ["aws_api_gateway_integration.test"]
|
||||
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
stage_name = "test"
|
||||
description = "This is a test"
|
||||
|
||||
variables = {
|
||||
"a" = "2"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_api_gateway_deployment" "foo" {
|
||||
depends_on = ["aws_api_gateway_integration.test"]
|
||||
|
||||
rest_api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
stage_name = "foo"
|
||||
description = "This is a prod stage"
|
||||
}
|
||||
`
|
||||
|
||||
func testAccAWSApiGatewayUsagePlanBasicConfig(rName string) string {
|
||||
return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+`
|
||||
resource "aws_api_gateway_usage_plan" "main" {
|
||||
name = "%s"
|
||||
}
|
||||
`, rName)
|
||||
}
|
||||
|
||||
func testAccAWSApiGatewayUsagePlanDescriptionConfig(rName string) string {
|
||||
return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+`
|
||||
resource "aws_api_gateway_usage_plan" "main" {
|
||||
name = "%s"
|
||||
description = "This is a description"
|
||||
}
|
||||
`, rName)
|
||||
}
|
||||
|
||||
func testAccAWSApiGatewayUsagePlanDescriptionUpdatedConfig(rName string) string {
|
||||
return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+`
|
||||
resource "aws_api_gateway_usage_plan" "main" {
|
||||
name = "%s"
|
||||
description = "This is a new description"
|
||||
}
|
||||
`, rName)
|
||||
}
|
||||
|
||||
func testAccAWSApiGatewayUsagePlanProductCodeConfig(rName string) string {
|
||||
return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+`
|
||||
resource "aws_api_gateway_usage_plan" "main" {
|
||||
name = "%s"
|
||||
product_code = "MYCODE"
|
||||
}
|
||||
`, rName)
|
||||
}
|
||||
|
||||
func testAccAWSApiGatewayUsagePlanProductCodeUpdatedConfig(rName string) string {
|
||||
return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+`
|
||||
resource "aws_api_gateway_usage_plan" "main" {
|
||||
name = "%s"
|
||||
product_code = "MYCODE2"
|
||||
}
|
||||
`, rName)
|
||||
}
|
||||
|
||||
func testAccAWSApiGatewayUsagePlanBasicUpdatedConfig(rName string) string {
|
||||
return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+`
|
||||
resource "aws_api_gateway_usage_plan" "main" {
|
||||
name = "%s"
|
||||
}
|
||||
`, rName)
|
||||
}
|
||||
|
||||
func testAccAWSApiGatewayUsagePlanThrottlingConfig(rName string) string {
|
||||
return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+`
|
||||
resource "aws_api_gateway_usage_plan" "main" {
|
||||
name = "%s"
|
||||
|
||||
throttle_settings {
|
||||
burst_limit = 2
|
||||
rate_limit = 5
|
||||
}
|
||||
}
|
||||
`, rName)
|
||||
}
|
||||
|
||||
func testAccAWSApiGatewayUsagePlanThrottlingModifiedConfig(rName string) string {
|
||||
return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+`
|
||||
resource "aws_api_gateway_usage_plan" "main" {
|
||||
name = "%s"
|
||||
|
||||
throttle_settings {
|
||||
burst_limit = 3
|
||||
rate_limit = 6
|
||||
}
|
||||
}
|
||||
`, rName)
|
||||
}
|
||||
|
||||
func testAccAWSApiGatewayUsagePlanQuotaConfig(rName string) string {
|
||||
return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+`
|
||||
resource "aws_api_gateway_usage_plan" "main" {
|
||||
name = "%s"
|
||||
|
||||
quota_settings {
|
||||
limit = 100
|
||||
offset = 6
|
||||
period = "WEEK"
|
||||
}
|
||||
}
|
||||
`, rName)
|
||||
}
|
||||
|
||||
func testAccAWSApiGatewayUsagePlanQuotaModifiedConfig(rName string) string {
|
||||
return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+`
|
||||
resource "aws_api_gateway_usage_plan" "main" {
|
||||
name = "%s"
|
||||
|
||||
quota_settings {
|
||||
limit = 200
|
||||
offset = 20
|
||||
period = "MONTH"
|
||||
}
|
||||
}
|
||||
`, rName)
|
||||
}
|
||||
|
||||
func testAccAWSApiGatewayUsagePlanApiStagesConfig(rName string) string {
|
||||
return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+`
|
||||
resource "aws_api_gateway_usage_plan" "main" {
|
||||
name = "%s"
|
||||
|
||||
api_stages {
|
||||
api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
stage = "${aws_api_gateway_deployment.test.stage_name}"
|
||||
}
|
||||
}
|
||||
`, rName)
|
||||
}
|
||||
|
||||
func testAccAWSApiGatewayUsagePlanApiStagesModifiedConfig(rName string) string {
|
||||
return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+`
|
||||
resource "aws_api_gateway_usage_plan" "main" {
|
||||
name = "%s"
|
||||
|
||||
api_stages {
|
||||
api_id = "${aws_api_gateway_rest_api.test.id}"
|
||||
stage = "${aws_api_gateway_deployment.foo.stage_name}"
|
||||
}
|
||||
}
|
||||
`, rName)
|
||||
}
|
@ -18,16 +18,22 @@ func resourceAwsAutoscalingAttachment() *schema.Resource {
|
||||
Delete: resourceAwsAutoscalingAttachmentDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"autoscaling_group_name": &schema.Schema{
|
||||
"autoscaling_group_name": {
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"elb": &schema.Schema{
|
||||
"elb": {
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"alb_target_group_arn": {
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Optional: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -36,17 +42,31 @@ func resourceAwsAutoscalingAttachment() *schema.Resource {
|
||||
func resourceAwsAutoscalingAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
asgconn := meta.(*AWSClient).autoscalingconn
|
||||
asgName := d.Get("autoscaling_group_name").(string)
|
||||
elbName := d.Get("elb").(string)
|
||||
|
||||
attachElbInput := &autoscaling.AttachLoadBalancersInput{
|
||||
AutoScalingGroupName: aws.String(asgName),
|
||||
LoadBalancerNames: []*string{aws.String(elbName)},
|
||||
if v, ok := d.GetOk("elb"); ok {
|
||||
attachOpts := &autoscaling.AttachLoadBalancersInput{
|
||||
AutoScalingGroupName: aws.String(asgName),
|
||||
LoadBalancerNames: []*string{aws.String(v.(string))},
|
||||
}
|
||||
|
||||
log.Printf("[INFO] registering asg %s with ELBs %s", asgName, v.(string))
|
||||
|
||||
if _, err := asgconn.AttachLoadBalancers(attachOpts); err != nil {
|
||||
return errwrap.Wrapf(fmt.Sprintf("Failure attaching AutoScaling Group %s with Elastic Load Balancer: %s: {{err}}", asgName, v.(string)), err)
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("[INFO] registering asg %s with ELBs %s", asgName, elbName)
|
||||
if v, ok := d.GetOk("alb_target_group_arn"); ok {
|
||||
attachOpts := &autoscaling.AttachLoadBalancerTargetGroupsInput{
|
||||
AutoScalingGroupName: aws.String(asgName),
|
||||
TargetGroupARNs: []*string{aws.String(v.(string))},
|
||||
}
|
||||
|
||||
if _, err := asgconn.AttachLoadBalancers(attachElbInput); err != nil {
|
||||
return errwrap.Wrapf(fmt.Sprintf("Failure attaching AutoScaling Group %s with Elastic Load Balancer: %s: {{err}}", asgName, elbName), err)
|
||||
log.Printf("[INFO] registering asg %s with ALB Target Group %s", asgName, v.(string))
|
||||
|
||||
if _, err := asgconn.AttachLoadBalancerTargetGroups(attachOpts); err != nil {
|
||||
return errwrap.Wrapf(fmt.Sprintf("Failure attaching AutoScaling Group %s with ALB Target Group: %s: {{err}}", asgName, v.(string)), err)
|
||||
}
|
||||
}
|
||||
|
||||
d.SetId(resource.PrefixedUniqueId(fmt.Sprintf("%s-", asgName)))
|
||||
@ -57,7 +77,6 @@ func resourceAwsAutoscalingAttachmentCreate(d *schema.ResourceData, meta interfa
|
||||
func resourceAwsAutoscalingAttachmentRead(d *schema.ResourceData, meta interface{}) error {
|
||||
asgconn := meta.(*AWSClient).autoscalingconn
|
||||
asgName := d.Get("autoscaling_group_name").(string)
|
||||
elbName := d.Get("elb").(string)
|
||||
|
||||
// Retrieve the ASG properites to get list of associated ELBs
|
||||
asg, err := getAwsAutoscalingGroup(asgName, asgconn)
|
||||
@ -71,18 +90,36 @@ func resourceAwsAutoscalingAttachmentRead(d *schema.ResourceData, meta interface
|
||||
return nil
|
||||
}
|
||||
|
||||
found := false
|
||||
for _, i := range asg.LoadBalancerNames {
|
||||
if elbName == *i {
|
||||
d.Set("elb", elbName)
|
||||
found = true
|
||||
break
|
||||
if v, ok := d.GetOk("elb"); ok {
|
||||
found := false
|
||||
for _, i := range asg.LoadBalancerNames {
|
||||
if v.(string) == *i {
|
||||
d.Set("elb", v.(string))
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
log.Printf("[WARN] Association for %s was not found in ASG assocation", v.(string))
|
||||
d.SetId("")
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
log.Printf("[WARN] Association for %s was not found in ASG assocation", elbName)
|
||||
d.SetId("")
|
||||
if v, ok := d.GetOk("alb_target_group_arn"); ok {
|
||||
found := false
|
||||
for _, i := range asg.TargetGroupARNs {
|
||||
if v.(string) == *i {
|
||||
d.Set("alb_target_group_arn", v.(string))
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
log.Printf("[WARN] Association for %s was not found in ASG assocation", v.(string))
|
||||
d.SetId("")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -91,17 +128,29 @@ func resourceAwsAutoscalingAttachmentRead(d *schema.ResourceData, meta interface
|
||||
func resourceAwsAutoscalingAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
asgconn := meta.(*AWSClient).autoscalingconn
|
||||
asgName := d.Get("autoscaling_group_name").(string)
|
||||
elbName := d.Get("elb").(string)
|
||||
|
||||
log.Printf("[INFO] Deleting ELB %s association from: %s", elbName, asgName)
|
||||
if v, ok := d.GetOk("elb"); ok {
|
||||
detachOpts := &autoscaling.DetachLoadBalancersInput{
|
||||
AutoScalingGroupName: aws.String(asgName),
|
||||
LoadBalancerNames: []*string{aws.String(v.(string))},
|
||||
}
|
||||
|
||||
detachOpts := &autoscaling.DetachLoadBalancersInput{
|
||||
AutoScalingGroupName: aws.String(asgName),
|
||||
LoadBalancerNames: []*string{aws.String(elbName)},
|
||||
log.Printf("[INFO] Deleting ELB %s association from: %s", v.(string), asgName)
|
||||
if _, err := asgconn.DetachLoadBalancers(detachOpts); err != nil {
|
||||
return errwrap.Wrapf(fmt.Sprintf("Failure detaching AutoScaling Group %s with Elastic Load Balancer: %s: {{err}}", asgName, v.(string)), err)
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := asgconn.DetachLoadBalancers(detachOpts); err != nil {
|
||||
return errwrap.Wrapf(fmt.Sprintf("Failure detaching AutoScaling Group %s with Elastic Load Balancer: %s: {{err}}", asgName, elbName), err)
|
||||
if v, ok := d.GetOk("alb_target_group_arn"); ok {
|
||||
detachOpts := &autoscaling.DetachLoadBalancerTargetGroupsInput{
|
||||
AutoScalingGroupName: aws.String(asgName),
|
||||
TargetGroupARNs: []*string{aws.String(v.(string))},
|
||||
}
|
||||
|
||||
log.Printf("[INFO] Deleting ALB Target Group %s association from: %s", v.(string), asgName)
|
||||
if _, err := asgconn.DetachLoadBalancerTargetGroups(detachOpts); err != nil {
|
||||
return errwrap.Wrapf(fmt.Sprintf("Failure detaching AutoScaling Group %s with ALB Target Group: %s: {{err}}", asgName, v.(string)), err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -11,7 +11,7 @@ import (
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccAwsAutoscalingAttachment_basic(t *testing.T) {
|
||||
func TestAccAwsAutoscalingAttachment_elb(t *testing.T) {
|
||||
|
||||
rInt := acctest.RandInt()
|
||||
|
||||
@ -19,45 +19,109 @@ func TestAccAwsAutoscalingAttachment_basic(t *testing.T) {
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAWSAutoscalingAttachment_basic(rInt),
|
||||
{
|
||||
Config: testAccAWSAutoscalingAttachment_elb(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAutocalingAttachmentExists("aws_autoscaling_group.asg", 0),
|
||||
testAccCheckAWSAutocalingElbAttachmentExists("aws_autoscaling_group.asg", 0),
|
||||
),
|
||||
},
|
||||
// Add in one association
|
||||
resource.TestStep{
|
||||
Config: testAccAWSAutoscalingAttachment_associated(rInt),
|
||||
{
|
||||
Config: testAccAWSAutoscalingAttachment_elb_associated(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAutocalingAttachmentExists("aws_autoscaling_group.asg", 1),
|
||||
testAccCheckAWSAutocalingElbAttachmentExists("aws_autoscaling_group.asg", 1),
|
||||
),
|
||||
},
|
||||
// Test adding a 2nd
|
||||
resource.TestStep{
|
||||
Config: testAccAWSAutoscalingAttachment_double_associated(rInt),
|
||||
{
|
||||
Config: testAccAWSAutoscalingAttachment_elb_double_associated(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAutocalingAttachmentExists("aws_autoscaling_group.asg", 2),
|
||||
testAccCheckAWSAutocalingElbAttachmentExists("aws_autoscaling_group.asg", 2),
|
||||
),
|
||||
},
|
||||
// Now remove that newest one
|
||||
resource.TestStep{
|
||||
Config: testAccAWSAutoscalingAttachment_associated(rInt),
|
||||
{
|
||||
Config: testAccAWSAutoscalingAttachment_elb_associated(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAutocalingAttachmentExists("aws_autoscaling_group.asg", 1),
|
||||
testAccCheckAWSAutocalingElbAttachmentExists("aws_autoscaling_group.asg", 1),
|
||||
),
|
||||
},
|
||||
// Now remove them both
|
||||
resource.TestStep{
|
||||
Config: testAccAWSAutoscalingAttachment_basic(rInt),
|
||||
{
|
||||
Config: testAccAWSAutoscalingAttachment_elb(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAutocalingAttachmentExists("aws_autoscaling_group.asg", 0),
|
||||
testAccCheckAWSAutocalingElbAttachmentExists("aws_autoscaling_group.asg", 0),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckAWSAutocalingAttachmentExists(asgname string, loadBalancerCount int) resource.TestCheckFunc {
|
||||
func TestAccAwsAutoscalingAttachment_albTargetGroup(t *testing.T) {
|
||||
|
||||
rInt := acctest.RandInt()
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSAutoscalingAttachment_alb(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAutocalingAlbAttachmentExists("aws_autoscaling_group.asg", 0),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSAutoscalingAttachment_alb_associated(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAutocalingAlbAttachmentExists("aws_autoscaling_group.asg", 1),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSAutoscalingAttachment_alb_double_associated(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAutocalingAlbAttachmentExists("aws_autoscaling_group.asg", 2),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSAutoscalingAttachment_alb_associated(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAutocalingAlbAttachmentExists("aws_autoscaling_group.asg", 1),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccAWSAutoscalingAttachment_alb(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSAutocalingAlbAttachmentExists("aws_autoscaling_group.asg", 0),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckAWSAutocalingElbAttachmentExists(asgname string, loadBalancerCount int) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[asgname]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", asgname)
|
||||
}
|
||||
|
||||
conn := testAccProvider.Meta().(*AWSClient).autoscalingconn
|
||||
asg := rs.Primary.ID
|
||||
|
||||
actual, err := conn.DescribeAutoScalingGroups(&autoscaling.DescribeAutoScalingGroupsInput{
|
||||
AutoScalingGroupNames: []*string{aws.String(asg)},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Received an error when attempting to load %s: %s", asg, err)
|
||||
}
|
||||
|
||||
if loadBalancerCount != len(actual.AutoScalingGroups[0].LoadBalancerNames) {
|
||||
return fmt.Errorf("Error: ASG has the wrong number of load balacners associated. Expected [%d] but got [%d]", loadBalancerCount, len(actual.AutoScalingGroups[0].LoadBalancerNames))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckAWSAutocalingAlbAttachmentExists(asgname string, targetGroupCount int) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[asgname]
|
||||
if !ok {
|
||||
@ -75,15 +139,108 @@ func testAccCheckAWSAutocalingAttachmentExists(asgname string, loadBalancerCount
|
||||
return fmt.Errorf("Recieved an error when attempting to load %s: %s", asg, err)
|
||||
}
|
||||
|
||||
if loadBalancerCount != len(actual.AutoScalingGroups[0].LoadBalancerNames) {
|
||||
return fmt.Errorf("Error: ASG has the wrong number of load balacners associated. Expected [%d] but got [%d]", loadBalancerCount, len(actual.AutoScalingGroups[0].LoadBalancerNames))
|
||||
if targetGroupCount != len(actual.AutoScalingGroups[0].TargetGroupARNs) {
|
||||
return fmt.Errorf("Error: ASG has the wrong number of Target Groups associated. Expected [%d] but got [%d]", targetGroupCount, len(actual.AutoScalingGroups[0].TargetGroupARNs))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccAWSAutoscalingAttachment_basic(rInt int) string {
|
||||
func testAccAWSAutoscalingAttachment_alb(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_alb_target_group" "test" {
|
||||
name = "test-alb-%d"
|
||||
port = 443
|
||||
protocol = "HTTPS"
|
||||
vpc_id = "${aws_vpc.test.id}"
|
||||
|
||||
deregistration_delay = 200
|
||||
|
||||
stickiness {
|
||||
type = "lb_cookie"
|
||||
cookie_duration = 10000
|
||||
}
|
||||
|
||||
health_check {
|
||||
path = "/health"
|
||||
interval = 60
|
||||
port = 8081
|
||||
protocol = "HTTP"
|
||||
timeout = 3
|
||||
healthy_threshold = 3
|
||||
unhealthy_threshold = 3
|
||||
matcher = "200-299"
|
||||
}
|
||||
|
||||
tags {
|
||||
TestName = "TestAccAWSALBTargetGroup_basic"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_alb_target_group" "another_test" {
|
||||
name = "atest-alb-%d"
|
||||
port = 443
|
||||
protocol = "HTTPS"
|
||||
vpc_id = "${aws_vpc.test.id}"
|
||||
|
||||
deregistration_delay = 200
|
||||
|
||||
stickiness {
|
||||
type = "lb_cookie"
|
||||
cookie_duration = 10000
|
||||
}
|
||||
|
||||
health_check {
|
||||
path = "/health"
|
||||
interval = 60
|
||||
port = 8081
|
||||
protocol = "HTTP"
|
||||
timeout = 3
|
||||
healthy_threshold = 3
|
||||
unhealthy_threshold = 3
|
||||
matcher = "200-299"
|
||||
}
|
||||
|
||||
tags {
|
||||
TestName = "TestAccAWSALBTargetGroup_basic"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_autoscaling_group" "asg" {
|
||||
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
|
||||
name = "asg-lb-assoc-terraform-test_%d"
|
||||
max_size = 1
|
||||
min_size = 0
|
||||
desired_capacity = 0
|
||||
health_check_grace_period = 300
|
||||
force_delete = true
|
||||
launch_configuration = "${aws_launch_configuration.as_conf.name}"
|
||||
|
||||
tag {
|
||||
key = "Name"
|
||||
value = "terraform-asg-lg-assoc-test"
|
||||
propagate_at_launch = true
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_launch_configuration" "as_conf" {
|
||||
name = "test_config_%d"
|
||||
image_id = "ami-f34032c3"
|
||||
instance_type = "t1.micro"
|
||||
}
|
||||
|
||||
resource "aws_vpc" "test" {
|
||||
cidr_block = "10.0.0.0/16"
|
||||
|
||||
tags {
|
||||
TestName = "TestAccAWSALBTargetGroup_basic"
|
||||
}
|
||||
}
|
||||
`, rInt, rInt, rInt, rInt)
|
||||
}
|
||||
|
||||
func testAccAWSAutoscalingAttachment_elb(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_elb" "foo" {
|
||||
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
|
||||
@ -131,18 +288,34 @@ resource "aws_autoscaling_group" "asg" {
|
||||
}`, rInt, rInt)
|
||||
}
|
||||
|
||||
func testAccAWSAutoscalingAttachment_associated(rInt int) string {
|
||||
return testAccAWSAutoscalingAttachment_basic(rInt) + `
|
||||
func testAccAWSAutoscalingAttachment_elb_associated(rInt int) string {
|
||||
return testAccAWSAutoscalingAttachment_elb(rInt) + `
|
||||
resource "aws_autoscaling_attachment" "asg_attachment_foo" {
|
||||
autoscaling_group_name = "${aws_autoscaling_group.asg.id}"
|
||||
elb = "${aws_elb.foo.id}"
|
||||
}`
|
||||
}
|
||||
|
||||
func testAccAWSAutoscalingAttachment_double_associated(rInt int) string {
|
||||
return testAccAWSAutoscalingAttachment_associated(rInt) + `
|
||||
func testAccAWSAutoscalingAttachment_alb_associated(rInt int) string {
|
||||
return testAccAWSAutoscalingAttachment_alb(rInt) + `
|
||||
resource "aws_autoscaling_attachment" "asg_attachment_foo" {
|
||||
autoscaling_group_name = "${aws_autoscaling_group.asg.id}"
|
||||
alb_target_group_arn = "${aws_alb_target_group.test.arn}"
|
||||
}`
|
||||
}
|
||||
|
||||
func testAccAWSAutoscalingAttachment_elb_double_associated(rInt int) string {
|
||||
return testAccAWSAutoscalingAttachment_elb_associated(rInt) + `
|
||||
resource "aws_autoscaling_attachment" "asg_attachment_bar" {
|
||||
autoscaling_group_name = "${aws_autoscaling_group.asg.id}"
|
||||
elb = "${aws_elb.bar.id}"
|
||||
}`
|
||||
}
|
||||
|
||||
func testAccAWSAutoscalingAttachment_alb_double_associated(rInt int) string {
|
||||
return testAccAWSAutoscalingAttachment_alb_associated(rInt) + `
|
||||
resource "aws_autoscaling_attachment" "asg_attachment_bar" {
|
||||
autoscaling_group_name = "${aws_autoscaling_group.asg.id}"
|
||||
alb_target_group_arn = "${aws_alb_target_group.another_test.arn}"
|
||||
}`
|
||||
}
|
||||
|
@ -29,10 +29,11 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
ConflictsWith: []string{"name_prefix"},
|
||||
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||
// https://github.com/boto/botocore/blob/9f322b1/botocore/data/autoscaling/2011-01-01/service-2.json#L1862-L1873
|
||||
value := v.(string)
|
||||
@ -43,58 +44,71 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
|
||||
return
|
||||
},
|
||||
},
|
||||
"name_prefix": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(string)
|
||||
if len(value) > 229 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q cannot be longer than 229 characters, name is limited to 255", k))
|
||||
}
|
||||
return
|
||||
},
|
||||
},
|
||||
|
||||
"launch_configuration": &schema.Schema{
|
||||
"launch_configuration": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"desired_capacity": &schema.Schema{
|
||||
"desired_capacity": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"min_elb_capacity": &schema.Schema{
|
||||
"min_elb_capacity": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"min_size": &schema.Schema{
|
||||
"min_size": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"max_size": &schema.Schema{
|
||||
"max_size": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"default_cooldown": &schema.Schema{
|
||||
"default_cooldown": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"force_delete": &schema.Schema{
|
||||
"force_delete": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
},
|
||||
|
||||
"health_check_grace_period": &schema.Schema{
|
||||
"health_check_grace_period": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 300,
|
||||
},
|
||||
|
||||
"health_check_type": &schema.Schema{
|
||||
"health_check_type": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"availability_zones": &schema.Schema{
|
||||
"availability_zones": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
@ -102,12 +116,12 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"placement_group": &schema.Schema{
|
||||
"placement_group": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"load_balancers": &schema.Schema{
|
||||
"load_balancers": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
@ -115,7 +129,7 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"vpc_zone_identifier": &schema.Schema{
|
||||
"vpc_zone_identifier": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
@ -123,13 +137,13 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"termination_policies": &schema.Schema{
|
||||
"termination_policies": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
|
||||
"wait_for_capacity_timeout": &schema.Schema{
|
||||
"wait_for_capacity_timeout": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "10m",
|
||||
@ -148,12 +162,12 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
|
||||
},
|
||||
},
|
||||
|
||||
"wait_for_elb_capacity": &schema.Schema{
|
||||
"wait_for_elb_capacity": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"enabled_metrics": &schema.Schema{
|
||||
"enabled_metrics": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
@ -167,31 +181,32 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"metrics_granularity": &schema.Schema{
|
||||
"metrics_granularity": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "1Minute",
|
||||
},
|
||||
|
||||
"protect_from_scale_in": &schema.Schema{
|
||||
"protect_from_scale_in": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
},
|
||||
|
||||
"target_group_arns": &schema.Schema{
|
||||
"target_group_arns": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"arn": &schema.Schema{
|
||||
"arn": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"initial_lifecycle_hook": &schema.Schema{
|
||||
"initial_lifecycle_hook": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Resource{
|
||||
@ -282,7 +297,11 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{})
|
||||
if v, ok := d.GetOk("name"); ok {
|
||||
asgName = v.(string)
|
||||
} else {
|
||||
asgName = resource.PrefixedUniqueId("tf-asg-")
|
||||
if v, ok := d.GetOk("name_prefix"); ok {
|
||||
asgName = resource.PrefixedUniqueId(v.(string))
|
||||
} else {
|
||||
asgName = resource.PrefixedUniqueId("tf-asg-")
|
||||
}
|
||||
d.Set("name", asgName)
|
||||
}
|
||||
|
||||
@ -427,6 +446,8 @@ func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) e
|
||||
d.Set("health_check_type", g.HealthCheckType)
|
||||
d.Set("launch_configuration", g.LaunchConfigurationName)
|
||||
d.Set("load_balancers", flattenStringList(g.LoadBalancerNames))
|
||||
d.Set("target_group_arns", flattenStringList(g.TargetGroupARNs))
|
||||
|
||||
if err := d.Set("suspended_processes", flattenAsgSuspendedProcesses(g.SuspendedProcesses)); err != nil {
|
||||
log.Printf("[WARN] Error setting suspended_processes for %q: %s", d.Id(), err)
|
||||
}
|
||||
|
@ -84,6 +84,27 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSAutoScalingGroup_namePrefix(t *testing.T) {
|
||||
nameRegexp := regexp.MustCompile("^test-")
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAWSAutoScalingGroupConfig_namePrefix,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestMatchResourceAttr(
|
||||
"aws_autoscaling_group.test", "name", nameRegexp),
|
||||
resource.TestCheckResourceAttrSet(
|
||||
"aws_autoscaling_group.test", "arn"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSAutoScalingGroup_autoGeneratedName(t *testing.T) {
|
||||
asgNameRegexp := regexp.MustCompile("^tf-asg-")
|
||||
|
||||
@ -472,13 +493,15 @@ func TestAccAWSAutoScalingGroup_ALB_TargetGroups_ELBCapacity(t *testing.T) {
|
||||
var group autoscaling.Group
|
||||
var tg elbv2.TargetGroup
|
||||
|
||||
rInt := acctest.RandInt()
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity,
|
||||
Config: testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity(rInt),
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
|
||||
testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &tg),
|
||||
@ -746,6 +769,22 @@ resource "aws_autoscaling_group" "bar" {
|
||||
}
|
||||
`
|
||||
|
||||
const testAccAWSAutoScalingGroupConfig_namePrefix = `
|
||||
resource "aws_launch_configuration" "test" {
|
||||
image_id = "ami-21f78e11"
|
||||
instance_type = "t1.micro"
|
||||
}
|
||||
|
||||
resource "aws_autoscaling_group" "test" {
|
||||
availability_zones = ["us-west-2a"]
|
||||
desired_capacity = 0
|
||||
max_size = 0
|
||||
min_size = 0
|
||||
name_prefix = "test-"
|
||||
launch_configuration = "${aws_launch_configuration.test.name}"
|
||||
}
|
||||
`
|
||||
|
||||
const testAccAWSAutoScalingGroupConfig_terminationPoliciesEmpty = `
|
||||
resource "aws_launch_configuration" "foobar" {
|
||||
image_id = "ami-21f78e11"
|
||||
@ -1386,7 +1425,8 @@ resource "aws_autoscaling_group" "bar" {
|
||||
`, name)
|
||||
}
|
||||
|
||||
const testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity = `
|
||||
func testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
provider "aws" {
|
||||
region = "us-west-2"
|
||||
}
|
||||
@ -1420,7 +1460,7 @@ resource "aws_alb_listener" "test_listener" {
|
||||
}
|
||||
|
||||
resource "aws_alb_target_group" "test" {
|
||||
name = "tf-example-alb-tg"
|
||||
name = "tf-alb-test-%d"
|
||||
port = 80
|
||||
protocol = "HTTP"
|
||||
vpc_id = "${aws_vpc.default.id}"
|
||||
@ -1431,6 +1471,10 @@ resource "aws_alb_target_group" "test" {
|
||||
timeout = "2"
|
||||
interval = "5"
|
||||
}
|
||||
|
||||
tags {
|
||||
Name = "testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_ELBCapacity"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_subnet" "main" {
|
||||
@ -1522,8 +1566,8 @@ resource "aws_autoscaling_group" "bar" {
|
||||
force_delete = true
|
||||
termination_policies = ["OldestInstance"]
|
||||
launch_configuration = "${aws_launch_configuration.foobar.name}"
|
||||
}`, rInt)
|
||||
}
|
||||
`
|
||||
|
||||
func testAccAWSAutoScalingGroupConfigWithSuspendedProcesses(name string) string {
|
||||
return fmt.Sprintf(`
|
||||
|
@ -592,11 +592,11 @@ func resourceAwsCodeBuildProjectSourceAuthHash(v interface{}) int {
|
||||
var buf bytes.Buffer
|
||||
m := v.(map[string]interface{})
|
||||
|
||||
authType := m["type"].(string)
|
||||
authResource := m["resource"].(string)
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["type"].(string)))
|
||||
|
||||
buf.WriteString(fmt.Sprintf("%s-", authType))
|
||||
buf.WriteString(fmt.Sprintf("%s-", authResource))
|
||||
if m["resource"] != nil {
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["resource"].(string)))
|
||||
}
|
||||
|
||||
return hashcode.String(buf.String())
|
||||
}
|
||||
|
@ -839,6 +839,10 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
|
||||
}
|
||||
d.SetPartial("apply_immediately")
|
||||
|
||||
if !d.Get("apply_immediately").(bool) {
|
||||
log.Println("[INFO] Only settings updating, instance changes will be applied in next maintenance window")
|
||||
}
|
||||
|
||||
requestUpdate := false
|
||||
if d.HasChange("allocated_storage") || d.HasChange("iops") {
|
||||
d.SetPartial("allocated_storage")
|
||||
|
@ -622,6 +622,10 @@ resource "aws_db_instance" "bar" {
|
||||
backup_retention_period = 0
|
||||
|
||||
parameter_group_name = "default.mysql5.6"
|
||||
|
||||
timeouts {
|
||||
create = "30m"
|
||||
}
|
||||
}`
|
||||
|
||||
var testAccAWSDBInstanceConfigKmsKeyId = `
|
||||
|
@ -17,56 +17,66 @@ func resourceAwsDefaultRouteTable() *schema.Resource {
|
||||
Delete: resourceAwsDefaultRouteTableDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"default_route_table_id": &schema.Schema{
|
||||
"default_route_table_id": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"vpc_id": &schema.Schema{
|
||||
"vpc_id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"propagating_vgws": &schema.Schema{
|
||||
"propagating_vgws": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"route": &schema.Schema{
|
||||
"route": {
|
||||
Type: schema.TypeSet,
|
||||
Computed: true,
|
||||
Optional: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"cidr_block": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"gateway_id": &schema.Schema{
|
||||
"cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"instance_id": &schema.Schema{
|
||||
"ipv6_cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"nat_gateway_id": &schema.Schema{
|
||||
"egress_only_gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"vpc_peering_connection_id": &schema.Schema{
|
||||
"gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"network_interface_id": &schema.Schema{
|
||||
"instance_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"nat_gateway_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"vpc_peering_connection_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"network_interface_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
@ -193,16 +203,33 @@ func revokeAllRouteTableRules(defaultRouteTableId string, meta interface{}) erro
|
||||
// See aws_vpc_endpoint
|
||||
continue
|
||||
}
|
||||
log.Printf(
|
||||
"[INFO] Deleting route from %s: %s",
|
||||
defaultRouteTableId, *r.DestinationCidrBlock)
|
||||
_, err := conn.DeleteRoute(&ec2.DeleteRouteInput{
|
||||
RouteTableId: aws.String(defaultRouteTableId),
|
||||
DestinationCidrBlock: r.DestinationCidrBlock,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
if r.DestinationCidrBlock != nil {
|
||||
log.Printf(
|
||||
"[INFO] Deleting route from %s: %s",
|
||||
defaultRouteTableId, *r.DestinationCidrBlock)
|
||||
_, err := conn.DeleteRoute(&ec2.DeleteRouteInput{
|
||||
RouteTableId: aws.String(defaultRouteTableId),
|
||||
DestinationCidrBlock: r.DestinationCidrBlock,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if r.DestinationIpv6CidrBlock != nil {
|
||||
log.Printf(
|
||||
"[INFO] Deleting route from %s: %s",
|
||||
defaultRouteTableId, *r.DestinationIpv6CidrBlock)
|
||||
_, err := conn.DeleteRoute(&ec2.DeleteRouteInput{
|
||||
RouteTableId: aws.String(defaultRouteTableId),
|
||||
DestinationIpv6CidrBlock: r.DestinationIpv6CidrBlock,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -20,7 +20,7 @@ func TestAccAWSDefaultRouteTable_basic(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckDefaultRouteTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccDefaultRouteTableConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists(
|
||||
@ -40,7 +40,7 @@ func TestAccAWSDefaultRouteTable_swap(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckDefaultRouteTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccDefaultRouteTable_change,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists(
|
||||
@ -53,7 +53,7 @@ func TestAccAWSDefaultRouteTable_swap(t *testing.T) {
|
||||
// behavior that may happen, in which case a follow up plan will show (in
|
||||
// this case) a diff as the table now needs to be updated to match the
|
||||
// config
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccDefaultRouteTable_change_mod,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists(
|
||||
@ -74,7 +74,7 @@ func TestAccAWSDefaultRouteTable_vpc_endpoint(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckDefaultRouteTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccDefaultRouteTable_vpc_endpoint,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckRouteTableExists(
|
||||
|
@ -357,7 +357,13 @@ func flattenPlacementStrategy(pss []*ecs.PlacementStrategy) []map[string]interfa
|
||||
for _, ps := range pss {
|
||||
c := make(map[string]interface{})
|
||||
c["type"] = *ps.Type
|
||||
c["field"] = strings.ToLower(*ps.Field)
|
||||
c["field"] = *ps.Field
|
||||
|
||||
// for some fields the API requires lowercase for creation but will return uppercase on query
|
||||
if *ps.Field == "MEMORY" || *ps.Field == "CPU" {
|
||||
c["field"] = strings.ToLower(*ps.Field)
|
||||
}
|
||||
|
||||
results = append(results, c)
|
||||
}
|
||||
return results
|
||||
@ -467,7 +473,7 @@ func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error
|
||||
|
||||
// Wait until it's deleted
|
||||
wait := resource.StateChangeConf{
|
||||
Pending: []string{"DRAINING"},
|
||||
Pending: []string{"ACTIVE", "DRAINING"},
|
||||
Target: []string{"INACTIVE"},
|
||||
Timeout: 10 * time.Minute,
|
||||
MinTimeout: 1 * time.Second,
|
||||
|
@ -30,11 +30,18 @@ func resourceAwsElb() *schema.Resource {
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
ConflictsWith: []string{"name_prefix"},
|
||||
ValidateFunc: validateElbName,
|
||||
},
|
||||
"name_prefix": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateElbName,
|
||||
ValidateFunc: validateElbNamePrefix,
|
||||
},
|
||||
|
||||
"internal": &schema.Schema{
|
||||
@ -247,7 +254,11 @@ func resourceAwsElbCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
if v, ok := d.GetOk("name"); ok {
|
||||
elbName = v.(string)
|
||||
} else {
|
||||
elbName = resource.PrefixedUniqueId("tf-lb-")
|
||||
if v, ok := d.GetOk("name_prefix"); ok {
|
||||
elbName = resource.PrefixedUniqueId(v.(string))
|
||||
} else {
|
||||
elbName = resource.PrefixedUniqueId("tf-lb-")
|
||||
}
|
||||
d.Set("name", elbName)
|
||||
}
|
||||
|
||||
@ -388,7 +399,9 @@ func resourceAwsElbRead(d *schema.ResourceData, meta interface{}) error {
|
||||
}
|
||||
}
|
||||
d.Set("subnets", flattenStringList(lb.Subnets))
|
||||
d.Set("idle_timeout", lbAttrs.ConnectionSettings.IdleTimeout)
|
||||
if lbAttrs.ConnectionSettings != nil {
|
||||
d.Set("idle_timeout", lbAttrs.ConnectionSettings.IdleTimeout)
|
||||
}
|
||||
d.Set("connection_draining", lbAttrs.ConnectionDraining.Enabled)
|
||||
d.Set("connection_draining_timeout", lbAttrs.ConnectionDraining.Timeout)
|
||||
d.Set("cross_zone_load_balancing", lbAttrs.CrossZoneLoadBalancing.Enabled)
|
||||
|
@ -26,7 +26,7 @@ func TestAccAWSELB_basic(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
@ -70,7 +70,7 @@ func TestAccAWSELB_fullCharacterRange(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: fmt.Sprintf(testAccAWSELBFullRangeOfCharacters, lbName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
@ -93,14 +93,14 @@ func TestAccAWSELB_AccessLogs_enabled(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBAccessLogs,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBAccessLogsOn(rName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
@ -115,7 +115,7 @@ func TestAccAWSELB_AccessLogs_enabled(t *testing.T) {
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBAccessLogs,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
@ -138,14 +138,14 @@ func TestAccAWSELB_AccessLogs_disabled(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBAccessLogs,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBAccessLogsDisabled(rName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
@ -160,7 +160,7 @@ func TestAccAWSELB_AccessLogs_disabled(t *testing.T) {
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBAccessLogs,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
@ -172,6 +172,28 @@ func TestAccAWSELB_AccessLogs_disabled(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSELB_namePrefix(t *testing.T) {
|
||||
var conf elb.LoadBalancerDescription
|
||||
nameRegex := regexp.MustCompile("^test-")
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
IDRefreshName: "aws_elb.test",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAWSELB_namePrefix,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.test", &conf),
|
||||
resource.TestMatchResourceAttr(
|
||||
"aws_elb.test", "name", nameRegex),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSELB_generatedName(t *testing.T) {
|
||||
var conf elb.LoadBalancerDescription
|
||||
generatedNameRegexp := regexp.MustCompile("^tf-lb-")
|
||||
@ -182,7 +204,7 @@ func TestAccAWSELB_generatedName(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBGeneratedName,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.foo", &conf),
|
||||
@ -203,7 +225,7 @@ func TestAccAWSELB_availabilityZones(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
@ -218,7 +240,7 @@ func TestAccAWSELB_availabilityZones(t *testing.T) {
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig_AvailabilityZonesUpdate,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
@ -244,7 +266,7 @@ func TestAccAWSELB_tags(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
@ -254,7 +276,7 @@ func TestAccAWSELB_tags(t *testing.T) {
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig_TagUpdate,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
@ -285,7 +307,7 @@ func TestAccAWSELB_iam_server_cert(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccELBIAMServerCertConfig(
|
||||
fmt.Sprintf("tf-acctest-%s", acctest.RandString(10))),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
@ -306,7 +328,7 @@ func TestAccAWSELB_swap_subnets(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig_subnets,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.ourapp", &conf),
|
||||
@ -315,7 +337,7 @@ func TestAccAWSELB_swap_subnets(t *testing.T) {
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig_subnet_swap,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.ourapp", &conf),
|
||||
@ -363,7 +385,7 @@ func TestAccAWSELB_InstanceAttaching(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
@ -371,7 +393,7 @@ func TestAccAWSELB_InstanceAttaching(t *testing.T) {
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigNewInstance,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
@ -391,7 +413,7 @@ func TestAccAWSELBUpdate_Listener(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
@ -401,7 +423,7 @@ func TestAccAWSELBUpdate_Listener(t *testing.T) {
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigListener_update,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
@ -422,7 +444,7 @@ func TestAccAWSELB_HealthCheck(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigHealthCheck,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
@ -450,14 +472,14 @@ func TestAccAWSELBUpdate_HealthCheck(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigHealthCheck,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_elb.bar", "health_check.0.healthy_threshold", "5"),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigHealthCheck_update,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
@ -477,7 +499,7 @@ func TestAccAWSELB_Timeout(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigIdleTimeout,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSELBExists("aws_elb.bar", &conf),
|
||||
@ -497,7 +519,7 @@ func TestAccAWSELBUpdate_Timeout(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigIdleTimeout,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
@ -505,7 +527,7 @@ func TestAccAWSELBUpdate_Timeout(t *testing.T) {
|
||||
),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigIdleTimeout_update,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
@ -524,7 +546,7 @@ func TestAccAWSELB_ConnectionDraining(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigConnectionDraining,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
@ -546,7 +568,7 @@ func TestAccAWSELBUpdate_ConnectionDraining(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigConnectionDraining,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
@ -557,7 +579,7 @@ func TestAccAWSELBUpdate_ConnectionDraining(t *testing.T) {
|
||||
),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigConnectionDraining_update_timeout,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
@ -568,7 +590,7 @@ func TestAccAWSELBUpdate_ConnectionDraining(t *testing.T) {
|
||||
),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigConnectionDraining_update_disable,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
@ -587,7 +609,7 @@ func TestAccAWSELB_SecurityGroups(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSELBDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
// ELBs get a default security group
|
||||
@ -596,7 +618,7 @@ func TestAccAWSELB_SecurityGroups(t *testing.T) {
|
||||
),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSELBConfigSecurityGroups,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
// Count should still be one as we swap in a custom security group
|
||||
@ -1138,6 +1160,20 @@ resource "aws_elb" "foo" {
|
||||
`, r, r)
|
||||
}
|
||||
|
||||
const testAccAWSELB_namePrefix = `
|
||||
resource "aws_elb" "test" {
|
||||
name_prefix = "test-"
|
||||
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
|
||||
|
||||
listener {
|
||||
instance_port = 8000
|
||||
instance_protocol = "http"
|
||||
lb_port = 80
|
||||
lb_protocol = "http"
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
const testAccAWSELBGeneratedName = `
|
||||
resource "aws_elb" "foo" {
|
||||
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
|
||||
|
@ -157,6 +157,11 @@ func resourceAwsEMRCluster() *schema.Resource {
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
},
|
||||
"autoscaling_role": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Optional: true,
|
||||
},
|
||||
"visible_to_all_users": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
@ -259,6 +264,9 @@ func resourceAwsEMRClusterCreate(d *schema.ResourceData, meta interface{}) error
|
||||
if v, ok := d.GetOk("log_uri"); ok {
|
||||
params.LogUri = aws.String(v.(string))
|
||||
}
|
||||
if v, ok := d.GetOk("autoscaling_role"); ok {
|
||||
params.AutoScalingRole = aws.String(v.(string))
|
||||
}
|
||||
|
||||
if instanceProfile != "" {
|
||||
params.JobFlowRole = aws.String(instanceProfile)
|
||||
@ -353,6 +361,7 @@ func resourceAwsEMRClusterRead(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
d.Set("name", cluster.Name)
|
||||
d.Set("service_role", cluster.ServiceRole)
|
||||
d.Set("autoscaling_role", cluster.AutoScalingRole)
|
||||
d.Set("release_label", cluster.ReleaseLabel)
|
||||
d.Set("log_uri", cluster.LogUri)
|
||||
d.Set("master_public_dns", cluster.MasterPublicDnsName)
|
||||
|
@ -237,6 +237,7 @@ resource "aws_emr_cluster" "tf-test-cluster" {
|
||||
depends_on = ["aws_main_route_table_association.a"]
|
||||
|
||||
service_role = "${aws_iam_role.iam_emr_default_role.arn}"
|
||||
autoscaling_role = "${aws_iam_role.emr-autoscaling-role.arn}"
|
||||
}
|
||||
|
||||
resource "aws_security_group" "allow_all" {
|
||||
@ -474,6 +475,29 @@ resource "aws_iam_policy" "iam_emr_profile_policy" {
|
||||
}
|
||||
EOT
|
||||
}
|
||||
|
||||
# IAM Role for autoscaling
|
||||
resource "aws_iam_role" "emr-autoscaling-role" {
|
||||
name = "EMR_AutoScaling_DefaultRole"
|
||||
assume_role_policy = "${data.aws_iam_policy_document.emr-autoscaling-role-policy.json}"
|
||||
}
|
||||
|
||||
data "aws_iam_policy_document" "emr-autoscaling-role-policy" {
|
||||
statement {
|
||||
effect = "Allow"
|
||||
actions = ["sts:AssumeRole"]
|
||||
|
||||
principals = {
|
||||
type = "Service"
|
||||
identifiers = ["elasticmapreduce.amazonaws.com","application-autoscaling.amazonaws.com"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "emr-autoscaling-role" {
|
||||
role = "${aws_iam_role.emr-autoscaling-role.name}"
|
||||
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforAutoScalingRole"
|
||||
}
|
||||
`, r, r, r, r, r, r)
|
||||
}
|
||||
|
||||
@ -520,6 +544,7 @@ resource "aws_emr_cluster" "tf-test-cluster" {
|
||||
depends_on = ["aws_main_route_table_association.a"]
|
||||
|
||||
service_role = "${aws_iam_role.iam_emr_default_role.arn}"
|
||||
autoscaling_role = "${aws_iam_role.emr-autoscaling-role.arn}"
|
||||
}
|
||||
|
||||
resource "aws_security_group" "allow_all" {
|
||||
@ -757,6 +782,29 @@ resource "aws_iam_policy" "iam_emr_profile_policy" {
|
||||
}
|
||||
EOT
|
||||
}
|
||||
|
||||
# IAM Role for autoscaling
|
||||
resource "aws_iam_role" "emr-autoscaling-role" {
|
||||
name = "EMR_AutoScaling_DefaultRole"
|
||||
assume_role_policy = "${data.aws_iam_policy_document.emr-autoscaling-role-policy.json}"
|
||||
}
|
||||
|
||||
data "aws_iam_policy_document" "emr-autoscaling-role-policy" {
|
||||
statement {
|
||||
effect = "Allow"
|
||||
actions = ["sts:AssumeRole"]
|
||||
|
||||
principals = {
|
||||
type = "Service"
|
||||
identifiers = ["elasticmapreduce.amazonaws.com","application-autoscaling.amazonaws.com"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "emr-autoscaling-role" {
|
||||
role = "${aws_iam_role.emr-autoscaling-role.name}"
|
||||
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforAutoScalingRole"
|
||||
}
|
||||
`, r, r, r, r, r, r)
|
||||
}
|
||||
|
||||
@ -803,6 +851,7 @@ resource "aws_emr_cluster" "tf-test-cluster" {
|
||||
depends_on = ["aws_main_route_table_association.a"]
|
||||
|
||||
service_role = "${aws_iam_role.iam_emr_default_role.arn}"
|
||||
autoscaling_role = "${aws_iam_role.emr-autoscaling-role.arn}"
|
||||
}
|
||||
|
||||
resource "aws_security_group" "allow_all" {
|
||||
@ -1040,6 +1089,29 @@ resource "aws_iam_policy" "iam_emr_profile_policy" {
|
||||
}
|
||||
EOT
|
||||
}
|
||||
|
||||
# IAM Role for autoscaling
|
||||
resource "aws_iam_role" "emr-autoscaling-role" {
|
||||
name = "EMR_AutoScaling_DefaultRole"
|
||||
assume_role_policy = "${data.aws_iam_policy_document.emr-autoscaling-role-policy.json}"
|
||||
}
|
||||
|
||||
data "aws_iam_policy_document" "emr-autoscaling-role-policy" {
|
||||
statement {
|
||||
effect = "Allow"
|
||||
actions = ["sts:AssumeRole"]
|
||||
|
||||
principals = {
|
||||
type = "Service"
|
||||
identifiers = ["elasticmapreduce.amazonaws.com","application-autoscaling.amazonaws.com"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "emr-autoscaling-role" {
|
||||
role = "${aws_iam_role.emr-autoscaling-role.name}"
|
||||
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforAutoScalingRole"
|
||||
}
|
||||
`, r, r, r, r, r, r)
|
||||
}
|
||||
|
||||
@ -1085,6 +1157,7 @@ resource "aws_emr_cluster" "tf-test-cluster" {
|
||||
depends_on = ["aws_main_route_table_association.a"]
|
||||
|
||||
service_role = "${aws_iam_role.iam_emr_default_role.arn}"
|
||||
autoscaling_role = "${aws_iam_role.emr-autoscaling-role.arn}"
|
||||
}
|
||||
|
||||
resource "aws_security_group" "allow_all" {
|
||||
@ -1322,5 +1395,28 @@ resource "aws_iam_policy" "iam_emr_profile_policy" {
|
||||
}
|
||||
EOT
|
||||
}
|
||||
|
||||
# IAM Role for autoscaling
|
||||
resource "aws_iam_role" "emr-autoscaling-role" {
|
||||
name = "EMR_AutoScaling_DefaultRole"
|
||||
assume_role_policy = "${data.aws_iam_policy_document.emr-autoscaling-role-policy.json}"
|
||||
}
|
||||
|
||||
data "aws_iam_policy_document" "emr-autoscaling-role-policy" {
|
||||
statement {
|
||||
effect = "Allow"
|
||||
actions = ["sts:AssumeRole"]
|
||||
|
||||
principals = {
|
||||
type = "Service"
|
||||
identifiers = ["elasticmapreduce.amazonaws.com","application-autoscaling.amazonaws.com"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "emr-autoscaling-role" {
|
||||
role = "${aws_iam_role.emr-autoscaling-role.name}"
|
||||
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforAutoScalingRole"
|
||||
}
|
||||
`, r, r, r, r, r, r)
|
||||
}
|
||||
|
94
builtin/providers/aws/resource_aws_iam_account_alias.go
Normal file
94
builtin/providers/aws/resource_aws_iam_account_alias.go
Normal file
@ -0,0 +1,94 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
func resourceAwsIamAccountAlias() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceAwsIamAccountAliasCreate,
|
||||
Read: resourceAwsIamAccountAliasRead,
|
||||
Delete: resourceAwsIamAccountAliasDelete,
|
||||
|
||||
Importer: &schema.ResourceImporter{
|
||||
State: schema.ImportStatePassthrough,
|
||||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"account_alias": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateAccountAlias,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceAwsIamAccountAliasCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).iamconn
|
||||
|
||||
account_alias := d.Get("account_alias").(string)
|
||||
|
||||
params := &iam.CreateAccountAliasInput{
|
||||
AccountAlias: aws.String(account_alias),
|
||||
}
|
||||
|
||||
_, err := conn.CreateAccountAlias(params)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating account alias with name %s", account_alias)
|
||||
}
|
||||
|
||||
d.SetId(account_alias)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAwsIamAccountAliasRead(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).iamconn
|
||||
|
||||
params := &iam.ListAccountAliasesInput{}
|
||||
|
||||
resp, err := conn.ListAccountAliases(params)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp == nil || len(resp.AccountAliases) == 0 {
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
account_alias := aws.StringValue(resp.AccountAliases[0])
|
||||
|
||||
d.SetId(account_alias)
|
||||
d.Set("account_alias", account_alias)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceAwsIamAccountAliasDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
conn := meta.(*AWSClient).iamconn
|
||||
|
||||
account_alias := d.Get("account_alias").(string)
|
||||
|
||||
params := &iam.DeleteAccountAliasInput{
|
||||
AccountAlias: aws.String(account_alias),
|
||||
}
|
||||
|
||||
_, err := conn.DeleteAccountAlias(params)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error deleting account alias with name %s", account_alias)
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
|
||||
return nil
|
||||
}
|
91
builtin/providers/aws/resource_aws_iam_account_alias_test.go
Normal file
91
builtin/providers/aws/resource_aws_iam_account_alias_test.go
Normal file
@ -0,0 +1,91 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccAWSIAMAccountAlias_basic(t *testing.T) {
|
||||
var account_alias string
|
||||
|
||||
rstring := acctest.RandString(5)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSIAMAccountAliasDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSIAMAccountAliasConfig(rstring),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSIAMAccountAliasExists("aws_iam_account_alias.test", &account_alias),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckAWSIAMAccountAliasDestroy(s *terraform.State) error {
|
||||
conn := testAccProvider.Meta().(*AWSClient).iamconn
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "aws_iam_account_alias" {
|
||||
continue
|
||||
}
|
||||
|
||||
params := &iam.ListAccountAliasesInput{}
|
||||
|
||||
resp, err := conn.ListAccountAliases(params)
|
||||
|
||||
if err != nil || resp == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(resp.AccountAliases) > 0 {
|
||||
return fmt.Errorf("Bad: Account alias still exists: %q", rs.Primary.ID)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func testAccCheckAWSIAMAccountAliasExists(n string, a *string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
conn := testAccProvider.Meta().(*AWSClient).iamconn
|
||||
params := &iam.ListAccountAliasesInput{}
|
||||
|
||||
resp, err := conn.ListAccountAliases(params)
|
||||
|
||||
if err != nil || resp == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(resp.AccountAliases) == 0 {
|
||||
return fmt.Errorf("Bad: Account alias %q does not exist", rs.Primary.ID)
|
||||
}
|
||||
|
||||
*a = aws.StringValue(resp.AccountAliases[0])
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccAWSIAMAccountAliasConfig(rstring string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_account_alias" "test" {
|
||||
account_alias = "terraform-%s-alias"
|
||||
}
|
||||
`, rstring)
|
||||
}
|
@ -9,6 +9,7 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
@ -27,8 +28,15 @@ func resourceAwsIamGroupPolicy() *schema.Resource {
|
||||
Required: true,
|
||||
},
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
ConflictsWith: []string{"name_prefix"},
|
||||
},
|
||||
"name_prefix": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"group": &schema.Schema{
|
||||
@ -45,10 +53,19 @@ func resourceAwsIamGroupPolicyPut(d *schema.ResourceData, meta interface{}) erro
|
||||
|
||||
request := &iam.PutGroupPolicyInput{
|
||||
GroupName: aws.String(d.Get("group").(string)),
|
||||
PolicyName: aws.String(d.Get("name").(string)),
|
||||
PolicyDocument: aws.String(d.Get("policy").(string)),
|
||||
}
|
||||
|
||||
var policyName string
|
||||
if v, ok := d.GetOk("name"); ok {
|
||||
policyName = v.(string)
|
||||
} else if v, ok := d.GetOk("name_prefix"); ok {
|
||||
policyName = resource.PrefixedUniqueId(v.(string))
|
||||
} else {
|
||||
policyName = resource.UniqueId()
|
||||
}
|
||||
request.PolicyName = aws.String(policyName)
|
||||
|
||||
if _, err := iamconn.PutGroupPolicy(request); err != nil {
|
||||
return fmt.Errorf("Error putting IAM group policy %s: %s", *request.PolicyName, err)
|
||||
}
|
||||
|
@ -7,18 +7,20 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccAWSIAMGroupPolicy_basic(t *testing.T) {
|
||||
rInt := acctest.RandInt()
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckIAMGroupPolicyDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccIAMGroupPolicyConfig,
|
||||
{
|
||||
Config: testAccIAMGroupPolicyConfig(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckIAMGroupPolicy(
|
||||
"aws_iam_group.group",
|
||||
@ -26,8 +28,8 @@ func TestAccAWSIAMGroupPolicy_basic(t *testing.T) {
|
||||
),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccIAMGroupPolicyConfigUpdate,
|
||||
{
|
||||
Config: testAccIAMGroupPolicyConfigUpdate(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckIAMGroupPolicy(
|
||||
"aws_iam_group.group",
|
||||
@ -39,6 +41,48 @@ func TestAccAWSIAMGroupPolicy_basic(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSIAMGroupPolicy_namePrefix(t *testing.T) {
|
||||
rInt := acctest.RandInt()
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
IDRefreshName: "aws_iam_group_policy.test",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckIAMGroupPolicyDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccIAMGroupPolicyConfig_namePrefix(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckIAMGroupPolicy(
|
||||
"aws_iam_group.test",
|
||||
"aws_iam_group_policy.test",
|
||||
),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSIAMGroupPolicy_generatedName(t *testing.T) {
|
||||
rInt := acctest.RandInt()
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
IDRefreshName: "aws_iam_group_policy.test",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckIAMGroupPolicyDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccIAMGroupPolicyConfig_generatedName(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckIAMGroupPolicy(
|
||||
"aws_iam_group.test",
|
||||
"aws_iam_group_policy.test",
|
||||
),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckIAMGroupPolicyDestroy(s *terraform.State) error {
|
||||
conn := testAccProvider.Meta().(*AWSClient).iamconn
|
||||
|
||||
@ -102,43 +146,90 @@ func testAccCheckIAMGroupPolicy(
|
||||
}
|
||||
}
|
||||
|
||||
const testAccIAMGroupPolicyConfig = `
|
||||
resource "aws_iam_group" "group" {
|
||||
name = "test_group"
|
||||
path = "/"
|
||||
}
|
||||
func testAccIAMGroupPolicyConfig(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_group" "group" {
|
||||
name = "test_group_%d"
|
||||
path = "/"
|
||||
}
|
||||
|
||||
resource "aws_iam_group_policy" "foo" {
|
||||
name = "foo_policy"
|
||||
group = "${aws_iam_group.group.name}"
|
||||
policy = <<EOF
|
||||
resource "aws_iam_group_policy" "foo" {
|
||||
name = "foo_policy_%d"
|
||||
group = "${aws_iam_group.group.name}"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": {
|
||||
"Effect": "Allow",
|
||||
"Action": "*",
|
||||
"Resource": "*"
|
||||
}
|
||||
"Version": "2012-10-17",
|
||||
"Statement": {
|
||||
"Effect": "Allow",
|
||||
"Action": "*",
|
||||
"Resource": "*"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
}
|
||||
`
|
||||
|
||||
const testAccIAMGroupPolicyConfigUpdate = `
|
||||
resource "aws_iam_group" "group" {
|
||||
name = "test_group"
|
||||
path = "/"
|
||||
}`, rInt, rInt)
|
||||
}
|
||||
|
||||
resource "aws_iam_group_policy" "foo" {
|
||||
name = "foo_policy"
|
||||
group = "${aws_iam_group.group.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
func testAccIAMGroupPolicyConfig_namePrefix(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_group" "test" {
|
||||
name = "test_group_%d"
|
||||
path = "/"
|
||||
}
|
||||
|
||||
resource "aws_iam_group_policy" "test" {
|
||||
name_prefix = "test-%d"
|
||||
group = "${aws_iam_group.test.name}"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": {
|
||||
"Effect": "Allow",
|
||||
"Action": "*",
|
||||
"Resource": "*"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
}`, rInt, rInt)
|
||||
}
|
||||
|
||||
resource "aws_iam_group_policy" "bar" {
|
||||
name = "bar_policy"
|
||||
group = "${aws_iam_group.group.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
func testAccIAMGroupPolicyConfig_generatedName(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_group" "test" {
|
||||
name = "test_group_%d"
|
||||
path = "/"
|
||||
}
|
||||
|
||||
resource "aws_iam_group_policy" "test" {
|
||||
group = "${aws_iam_group.test.name}"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": {
|
||||
"Effect": "Allow",
|
||||
"Action": "*",
|
||||
"Resource": "*"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
}`, rInt)
|
||||
}
|
||||
|
||||
func testAccIAMGroupPolicyConfigUpdate(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_group" "group" {
|
||||
name = "test_group_%d"
|
||||
path = "/"
|
||||
}
|
||||
|
||||
resource "aws_iam_group_policy" "foo" {
|
||||
name = "foo_policy_%d"
|
||||
group = "${aws_iam_group.group.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
}
|
||||
|
||||
resource "aws_iam_group_policy" "bar" {
|
||||
name = "bar_policy_%d"
|
||||
group = "${aws_iam_group.group.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
}`, rInt, rInt, rInt)
|
||||
}
|
||||
`
|
||||
|
@ -3,13 +3,13 @@ package aws
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
@ -31,22 +31,18 @@ func resourceAwsIamRolePolicy() *schema.Resource {
|
||||
Required: true,
|
||||
},
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||
// https://github.com/boto/botocore/blob/2485f5c/botocore/data/iam/2010-05-08/service-2.json#L8291-L8296
|
||||
value := v.(string)
|
||||
if len(value) > 128 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q cannot be longer than 128 characters", k))
|
||||
}
|
||||
if !regexp.MustCompile("^[\\w+=,.@-]+$").MatchString(value) {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q must match [\\w+=,.@-]", k))
|
||||
}
|
||||
return
|
||||
},
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
ConflictsWith: []string{"name_prefix"},
|
||||
ValidateFunc: validateIamRolePolicyName,
|
||||
},
|
||||
"name_prefix": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateIamRolePolicyNamePrefix,
|
||||
},
|
||||
"role": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
@ -62,10 +58,19 @@ func resourceAwsIamRolePolicyPut(d *schema.ResourceData, meta interface{}) error
|
||||
|
||||
request := &iam.PutRolePolicyInput{
|
||||
RoleName: aws.String(d.Get("role").(string)),
|
||||
PolicyName: aws.String(d.Get("name").(string)),
|
||||
PolicyDocument: aws.String(d.Get("policy").(string)),
|
||||
}
|
||||
|
||||
var policyName string
|
||||
if v, ok := d.GetOk("name"); ok {
|
||||
policyName = v.(string)
|
||||
} else if v, ok := d.GetOk("name_prefix"); ok {
|
||||
policyName = resource.PrefixedUniqueId(v.(string))
|
||||
} else {
|
||||
policyName = resource.UniqueId()
|
||||
}
|
||||
request.PolicyName = aws.String(policyName)
|
||||
|
||||
if _, err := iamconn.PutRolePolicy(request); err != nil {
|
||||
return fmt.Errorf("Error putting IAM role policy %s: %s", *request.PolicyName, err)
|
||||
}
|
||||
|
@ -7,30 +7,35 @@ import (
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccAWSRolePolicyAttachment_basic(t *testing.T) {
|
||||
var out iam.ListAttachedRolePoliciesOutput
|
||||
rInt := acctest.RandInt()
|
||||
testPolicy := fmt.Sprintf("tf-acctest-%d", rInt)
|
||||
testPolicy2 := fmt.Sprintf("tf-acctest2-%d", rInt)
|
||||
testPolicy3 := fmt.Sprintf("tf-acctest3-%d", rInt)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSRolePolicyAttachmentDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAWSRolePolicyAttachConfig,
|
||||
{
|
||||
Config: testAccAWSRolePolicyAttachConfig(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSRolePolicyAttachmentExists("aws_iam_role_policy_attachment.test-attach", 1, &out),
|
||||
testAccCheckAWSRolePolicyAttachmentAttributes([]string{"test-policy"}, &out),
|
||||
testAccCheckAWSRolePolicyAttachmentAttributes([]string{testPolicy}, &out),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccAWSRolePolicyAttachConfigUpdate,
|
||||
{
|
||||
Config: testAccAWSRolePolicyAttachConfigUpdate(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSRolePolicyAttachmentExists("aws_iam_role_policy_attachment.test-attach", 2, &out),
|
||||
testAccCheckAWSRolePolicyAttachmentAttributes([]string{"test-policy2", "test-policy3"}, &out),
|
||||
testAccCheckAWSRolePolicyAttachmentAttributes([]string{testPolicy2, testPolicy3}, &out),
|
||||
),
|
||||
},
|
||||
},
|
||||
@ -88,135 +93,137 @@ func testAccCheckAWSRolePolicyAttachmentAttributes(policies []string, out *iam.L
|
||||
}
|
||||
}
|
||||
|
||||
const testAccAWSRolePolicyAttachConfig = `
|
||||
resource "aws_iam_role" "role" {
|
||||
name = "test-role"
|
||||
assume_role_policy = <<EOF
|
||||
func testAccAWSRolePolicyAttachConfig(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_role" "role" {
|
||||
name = "test-role-%d"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_iam_policy" "policy" {
|
||||
name = "test-policy"
|
||||
description = "A test policy"
|
||||
policy = <<EOF
|
||||
resource "aws_iam_policy" "policy" {
|
||||
name = "tf-acctest-%d"
|
||||
description = "A test policy"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"iam:ChangePassword"
|
||||
],
|
||||
"Resource": "*",
|
||||
"Effect": "Allow"
|
||||
}
|
||||
]
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"iam:ChangePassword"
|
||||
],
|
||||
"Resource": "*",
|
||||
"Effect": "Allow"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "test-attach" {
|
||||
role = "${aws_iam_role.role.name}"
|
||||
policy_arn = "${aws_iam_policy.policy.arn}"
|
||||
}`, rInt, rInt)
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "test-attach" {
|
||||
role = "${aws_iam_role.role.name}"
|
||||
policy_arn = "${aws_iam_policy.policy.arn}"
|
||||
}
|
||||
`
|
||||
|
||||
const testAccAWSRolePolicyAttachConfigUpdate = `
|
||||
resource "aws_iam_role" "role" {
|
||||
name = "test-role"
|
||||
assume_role_policy = <<EOF
|
||||
func testAccAWSRolePolicyAttachConfigUpdate(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_role" "role" {
|
||||
name = "test-role-%d"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_iam_policy" "policy" {
|
||||
name = "test-policy"
|
||||
description = "A test policy"
|
||||
policy = <<EOF
|
||||
resource "aws_iam_policy" "policy" {
|
||||
name = "tf-acctest-%d"
|
||||
description = "A test policy"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"iam:ChangePassword"
|
||||
],
|
||||
"Resource": "*",
|
||||
"Effect": "Allow"
|
||||
}
|
||||
]
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"iam:ChangePassword"
|
||||
],
|
||||
"Resource": "*",
|
||||
"Effect": "Allow"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_iam_policy" "policy2" {
|
||||
name = "test-policy2"
|
||||
description = "A test policy"
|
||||
policy = <<EOF
|
||||
resource "aws_iam_policy" "policy2" {
|
||||
name = "tf-acctest2-%d"
|
||||
description = "A test policy"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"iam:ChangePassword"
|
||||
],
|
||||
"Resource": "*",
|
||||
"Effect": "Allow"
|
||||
}
|
||||
]
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"iam:ChangePassword"
|
||||
],
|
||||
"Resource": "*",
|
||||
"Effect": "Allow"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_iam_policy" "policy3" {
|
||||
name = "test-policy3"
|
||||
description = "A test policy"
|
||||
policy = <<EOF
|
||||
resource "aws_iam_policy" "policy3" {
|
||||
name = "tf-acctest3-%d"
|
||||
description = "A test policy"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"iam:ChangePassword"
|
||||
],
|
||||
"Resource": "*",
|
||||
"Effect": "Allow"
|
||||
}
|
||||
]
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"iam:ChangePassword"
|
||||
],
|
||||
"Resource": "*",
|
||||
"Effect": "Allow"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "test-attach" {
|
||||
role = "${aws_iam_role.role.name}"
|
||||
policy_arn = "${aws_iam_policy.policy2.arn}"
|
||||
}
|
||||
resource "aws_iam_role_policy_attachment" "test-attach" {
|
||||
role = "${aws_iam_role.role.name}"
|
||||
policy_arn = "${aws_iam_policy.policy2.arn}"
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "test-attach2" {
|
||||
role = "${aws_iam_role.role.name}"
|
||||
policy_arn = "${aws_iam_policy.policy3.arn}"
|
||||
resource "aws_iam_role_policy_attachment" "test-attach2" {
|
||||
role = "${aws_iam_role.role.name}"
|
||||
policy_arn = "${aws_iam_policy.policy3.arn}"
|
||||
}`, rInt, rInt, rInt, rInt)
|
||||
}
|
||||
`
|
||||
|
@ -44,6 +44,50 @@ func TestAccAWSIAMRolePolicy_basic(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSIAMRolePolicy_namePrefix(t *testing.T) {
|
||||
role := acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
IDRefreshName: "aws_iam_role_policy.test",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckIAMRolePolicyDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccIAMRolePolicyConfig_namePrefix(role),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckIAMRolePolicy(
|
||||
"aws_iam_role.test",
|
||||
"aws_iam_role_policy.test",
|
||||
),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSIAMRolePolicy_generatedName(t *testing.T) {
|
||||
role := acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
IDRefreshName: "aws_iam_role_policy.test",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckIAMRolePolicyDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccIAMRolePolicyConfig_generatedName(role),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckIAMRolePolicy(
|
||||
"aws_iam_role.test",
|
||||
"aws_iam_role_policy.test",
|
||||
),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckIAMRolePolicyDestroy(s *terraform.State) error {
|
||||
iamconn := testAccProvider.Meta().(*AWSClient).iamconn
|
||||
|
||||
@ -154,6 +198,83 @@ EOF
|
||||
`, role, policy1)
|
||||
}
|
||||
|
||||
func testAccIAMRolePolicyConfig_namePrefix(role string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_role" "test" {
|
||||
name = "tf_test_role_%s"
|
||||
path = "/"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy" "test" {
|
||||
name_prefix = "tf_test_policy_"
|
||||
role = "${aws_iam_role.test.name}"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": {
|
||||
"Effect": "Allow",
|
||||
"Action": "*",
|
||||
"Resource": "*"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
}
|
||||
`, role)
|
||||
}
|
||||
|
||||
func testAccIAMRolePolicyConfig_generatedName(role string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_role" "test" {
|
||||
name = "tf_test_role_%s"
|
||||
path = "/"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy" "test" {
|
||||
role = "${aws_iam_role.test.name}"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": {
|
||||
"Effect": "Allow",
|
||||
"Action": "*",
|
||||
"Resource": "*"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
}
|
||||
`, role)
|
||||
}
|
||||
|
||||
func testAccIAMRolePolicyConfigUpdate(role, policy1, policy2 string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_role" "role" {
|
||||
|
@ -2,10 +2,12 @@ package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
@ -70,6 +72,11 @@ func resourceAwsIamSamlProviderRead(d *schema.ResourceData, meta interface{}) er
|
||||
}
|
||||
out, err := iamconn.GetSAMLProvider(input)
|
||||
if err != nil {
|
||||
if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
|
||||
log.Printf("[WARN] IAM SAML Provider %q not found.", d.Id())
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -9,6 +9,7 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
@ -27,8 +28,15 @@ func resourceAwsIamUserPolicy() *schema.Resource {
|
||||
Required: true,
|
||||
},
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
ConflictsWith: []string{"name_prefix"},
|
||||
},
|
||||
"name_prefix": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"user": &schema.Schema{
|
||||
@ -45,10 +53,19 @@ func resourceAwsIamUserPolicyPut(d *schema.ResourceData, meta interface{}) error
|
||||
|
||||
request := &iam.PutUserPolicyInput{
|
||||
UserName: aws.String(d.Get("user").(string)),
|
||||
PolicyName: aws.String(d.Get("name").(string)),
|
||||
PolicyDocument: aws.String(d.Get("policy").(string)),
|
||||
}
|
||||
|
||||
var policyName string
|
||||
if v, ok := d.GetOk("name"); ok {
|
||||
policyName = v.(string)
|
||||
} else if v, ok := d.GetOk("name_prefix"); ok {
|
||||
policyName = resource.PrefixedUniqueId(v.(string))
|
||||
} else {
|
||||
policyName = resource.UniqueId()
|
||||
}
|
||||
request.PolicyName = aws.String(policyName)
|
||||
|
||||
if _, err := iamconn.PutUserPolicy(request); err != nil {
|
||||
return fmt.Errorf("Error putting IAM user policy %s: %s", *request.PolicyName, err)
|
||||
}
|
||||
|
@ -7,18 +7,21 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccAWSIAMUserPolicy_basic(t *testing.T) {
|
||||
rInt := acctest.RandInt()
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckIAMUserPolicyDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccIAMUserPolicyConfig,
|
||||
{
|
||||
Config: testAccIAMUserPolicyConfig(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckIAMUserPolicy(
|
||||
"aws_iam_user.user",
|
||||
@ -26,8 +29,8 @@ func TestAccAWSIAMUserPolicy_basic(t *testing.T) {
|
||||
),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccIAMUserPolicyConfigUpdate,
|
||||
{
|
||||
Config: testAccIAMUserPolicyConfigUpdate(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckIAMUserPolicy(
|
||||
"aws_iam_user.user",
|
||||
@ -39,6 +42,50 @@ func TestAccAWSIAMUserPolicy_basic(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSIAMUserPolicy_namePrefix(t *testing.T) {
|
||||
rInt := acctest.RandInt()
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
IDRefreshName: "aws_iam_user_policy.test",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckIAMUserPolicyDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccIAMUserPolicyConfig_namePrefix(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckIAMUserPolicy(
|
||||
"aws_iam_user.test",
|
||||
"aws_iam_user_policy.test",
|
||||
),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSIAMUserPolicy_generatedName(t *testing.T) {
|
||||
rInt := acctest.RandInt()
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
IDRefreshName: "aws_iam_user_policy.test",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckIAMUserPolicyDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccIAMUserPolicyConfig_generatedName(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckIAMUserPolicy(
|
||||
"aws_iam_user.test",
|
||||
"aws_iam_user_policy.test",
|
||||
),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckIAMUserPolicyDestroy(s *terraform.State) error {
|
||||
iamconn := testAccProvider.Meta().(*AWSClient).iamconn
|
||||
|
||||
@ -105,34 +152,63 @@ func testAccCheckIAMUserPolicy(
|
||||
}
|
||||
}
|
||||
|
||||
const testAccIAMUserPolicyConfig = `
|
||||
resource "aws_iam_user" "user" {
|
||||
name = "test_user"
|
||||
path = "/"
|
||||
func testAccIAMUserPolicyConfig(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_user" "user" {
|
||||
name = "test_user_%d"
|
||||
path = "/"
|
||||
}
|
||||
|
||||
resource "aws_iam_user_policy" "foo" {
|
||||
name = "foo_policy_%d"
|
||||
user = "${aws_iam_user.user.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
}`, rInt, rInt)
|
||||
}
|
||||
|
||||
resource "aws_iam_user_policy" "foo" {
|
||||
name = "foo_policy"
|
||||
user = "${aws_iam_user.user.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
}
|
||||
`
|
||||
func testAccIAMUserPolicyConfig_namePrefix(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_user" "test" {
|
||||
name = "test_user_%d"
|
||||
path = "/"
|
||||
}
|
||||
|
||||
const testAccIAMUserPolicyConfigUpdate = `
|
||||
resource "aws_iam_user" "user" {
|
||||
name = "test_user"
|
||||
path = "/"
|
||||
resource "aws_iam_user_policy" "test" {
|
||||
name_prefix = "test-%d"
|
||||
user = "${aws_iam_user.test.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
}`, rInt, rInt)
|
||||
}
|
||||
|
||||
resource "aws_iam_user_policy" "foo" {
|
||||
name = "foo_policy"
|
||||
user = "${aws_iam_user.user.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
func testAccIAMUserPolicyConfig_generatedName(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_user" "test" {
|
||||
name = "test_user_%d"
|
||||
path = "/"
|
||||
}
|
||||
|
||||
resource "aws_iam_user_policy" "test" {
|
||||
user = "${aws_iam_user.test.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
}`, rInt)
|
||||
}
|
||||
|
||||
resource "aws_iam_user_policy" "bar" {
|
||||
name = "bar_policy"
|
||||
user = "${aws_iam_user.user.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
func testAccIAMUserPolicyConfigUpdate(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_user" "user" {
|
||||
name = "test_user_%d"
|
||||
path = "/"
|
||||
}
|
||||
|
||||
resource "aws_iam_user_policy" "foo" {
|
||||
name = "foo_policy_%d"
|
||||
user = "${aws_iam_user.user.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
}
|
||||
|
||||
resource "aws_iam_user_policy" "bar" {
|
||||
name = "bar_policy_%d"
|
||||
user = "${aws_iam_user.user.name}"
|
||||
policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}"
|
||||
}`, rInt, rInt, rInt)
|
||||
}
|
||||
`
|
||||
|
@ -7,24 +7,27 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/inspector"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccAWSInspectorTemplate_basic(t *testing.T) {
|
||||
rInt := acctest.RandInt()
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSInspectorTemplateDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAWSInspectorTemplateAssessment,
|
||||
Config: testAccAWSInspectorTemplateAssessment(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSInspectorTemplateExists("aws_inspector_assessment_template.foo"),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccCheckAWSInspectorTemplatetModified,
|
||||
Config: testAccCheckAWSInspectorTemplatetModified(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSInspectorTargetExists("aws_inspector_assessment_template.foo"),
|
||||
),
|
||||
@ -74,20 +77,21 @@ func testAccCheckAWSInspectorTemplateExists(name string) resource.TestCheckFunc
|
||||
}
|
||||
}
|
||||
|
||||
var testAccAWSInspectorTemplateAssessment = `
|
||||
func testAccAWSInspectorTemplateAssessment(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_inspector_resource_group" "foo" {
|
||||
tags {
|
||||
Name = "bar"
|
||||
Name = "tf-acc-test-%d"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_inspector_assessment_target" "foo" {
|
||||
name = "foo"
|
||||
name = "tf-acc-test-basic-%d"
|
||||
resource_group_arn = "${aws_inspector_resource_group.foo.arn}"
|
||||
}
|
||||
|
||||
resource "aws_inspector_assessment_template" "foo" {
|
||||
name = "foo template"
|
||||
name = "tf-acc-test-basic-tpl-%d"
|
||||
target_arn = "${aws_inspector_assessment_target.foo.arn}"
|
||||
duration = 3600
|
||||
|
||||
@ -97,22 +101,24 @@ resource "aws_inspector_assessment_template" "foo" {
|
||||
"arn:aws:inspector:us-west-2:758058086616:rulespackage/0-JJOtZiqQ",
|
||||
"arn:aws:inspector:us-west-2:758058086616:rulespackage/0-vg5GGHSD",
|
||||
]
|
||||
}`
|
||||
}`, rInt, rInt, rInt)
|
||||
}
|
||||
|
||||
var testAccCheckAWSInspectorTemplatetModified = `
|
||||
func testAccCheckAWSInspectorTemplatetModified(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_inspector_resource_group" "foo" {
|
||||
tags {
|
||||
Name = "bar"
|
||||
Name = "tf-acc-test-%d"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_inspector_assessment_target" "foo" {
|
||||
name = "foo"
|
||||
name = "tf-acc-test-basic-%d"
|
||||
resource_group_arn = "${aws_inspector_resource_group.foo.arn}"
|
||||
}
|
||||
|
||||
resource "aws_inspector_assessment_template" "foo" {
|
||||
name = "bar template"
|
||||
name = "tf-acc-test-basic-tpl-%d"
|
||||
target_arn = "${aws_inspector_assessment_target.foo.arn}"
|
||||
duration = 3600
|
||||
|
||||
@ -122,4 +128,5 @@ resource "aws_inspector_assessment_template" "foo" {
|
||||
"arn:aws:inspector:us-west-2:758058086616:rulespackage/0-JJOtZiqQ",
|
||||
"arn:aws:inspector:us-west-2:758058086616:rulespackage/0-vg5GGHSD",
|
||||
]
|
||||
}`
|
||||
}`, rInt, rInt, rInt)
|
||||
}
|
||||
|
@ -611,7 +611,7 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
d.SetPartial("tags")
|
||||
}
|
||||
|
||||
if d.HasChange("iam_instance_profile") {
|
||||
if d.HasChange("iam_instance_profile") && !d.IsNewResource() {
|
||||
request := &ec2.DescribeIamInstanceProfileAssociationsInput{
|
||||
Filters: []*ec2.Filter{
|
||||
&ec2.Filter{
|
||||
|
@ -656,7 +656,38 @@ func TestAccAWSInstance_instanceProfileChange(t *testing.T) {
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccInstanceConfigAttachInstanceProfile(rName),
|
||||
Config: testAccInstanceConfigWithInstanceProfile(rName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckInstanceExists("aws_instance.foo", &v),
|
||||
testCheckInstanceProfile(),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSInstance_withIamInstanceProfile(t *testing.T) {
|
||||
var v ec2.Instance
|
||||
rName := acctest.RandString(5)
|
||||
|
||||
testCheckInstanceProfile := func() resource.TestCheckFunc {
|
||||
return func(*terraform.State) error {
|
||||
if v.IamInstanceProfile == nil {
|
||||
return fmt.Errorf("Instance Profile is nil - we expected an InstanceProfile associated with the Instance")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
IDRefreshName: "aws_instance.foo",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckInstanceDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccInstanceConfigWithInstanceProfile(rName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckInstanceExists("aws_instance.foo", &v),
|
||||
testCheckInstanceProfile(),
|
||||
@ -1281,7 +1312,7 @@ resource "aws_instance" "foo" {
|
||||
}`, rName, rName)
|
||||
}
|
||||
|
||||
func testAccInstanceConfigAttachInstanceProfile(rName string) string {
|
||||
func testAccInstanceConfigWithInstanceProfile(rName string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_role" "test" {
|
||||
name = "test-%s"
|
||||
|
@ -80,6 +80,7 @@ func resourceAwsKmsKey() *schema.Resource {
|
||||
return
|
||||
},
|
||||
},
|
||||
"tags": tagsSchema(),
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -98,6 +99,9 @@ func resourceAwsKmsKeyCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
if v, exists := d.GetOk("policy"); exists {
|
||||
req.Policy = aws.String(v.(string))
|
||||
}
|
||||
if v, exists := d.GetOk("tags"); exists {
|
||||
req.Tags = tagsFromMapKMS(v.(map[string]interface{}))
|
||||
}
|
||||
|
||||
var resp *kms.CreateKeyOutput
|
||||
// AWS requires any principal in the policy to exist before the key is created.
|
||||
@ -170,6 +174,14 @@ func resourceAwsKmsKeyRead(d *schema.ResourceData, meta interface{}) error {
|
||||
}
|
||||
d.Set("enable_key_rotation", krs.KeyRotationEnabled)
|
||||
|
||||
tagList, err := conn.ListResourceTags(&kms.ListResourceTagsInput{
|
||||
KeyId: metadata.KeyId,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to get KMS key tags (key: %s): %s", d.Get("key_id").(string), err)
|
||||
}
|
||||
d.Set("tags", tagsToMapKMS(tagList.Tags))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -215,6 +227,10 @@ func _resourceAwsKmsKeyUpdate(d *schema.ResourceData, meta interface{}, isFresh
|
||||
}
|
||||
}
|
||||
|
||||
if err := setTagsKMS(conn, d, d.Id()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resourceAwsKmsKeyRead(d, meta)
|
||||
}
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
// make testacc TEST=./builtin/providers/aws/ TESTARGS='-run=TestAccAWSKmsKey_'
|
||||
package aws
|
||||
|
||||
import (
|
||||
@ -95,6 +96,25 @@ func TestAccAWSKmsKey_isEnabled(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSKmsKey_tags(t *testing.T) {
|
||||
var keyBefore kms.KeyMetadata
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSKmsKeyDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSKmsKey_tags,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSKmsKeyExists("aws_kms_key.foo", &keyBefore),
|
||||
resource.TestCheckResourceAttr("aws_kms_key.foo", "tags.%", "2"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckAWSKmsKeyHasPolicy(name string, expectedPolicyText string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[name]
|
||||
@ -244,3 +264,12 @@ resource "aws_kms_key" "bar" {
|
||||
enable_key_rotation = true
|
||||
is_enabled = true
|
||||
}`, kmsTimestamp)
|
||||
|
||||
var testAccAWSKmsKey_tags = fmt.Sprintf(`
|
||||
resource "aws_kms_key" "foo" {
|
||||
description = "Terraform acc test %s"
|
||||
tags {
|
||||
Key1 = "Value One"
|
||||
Description = "Very interesting"
|
||||
}
|
||||
}`, kmsTimestamp)
|
||||
|
@ -7,12 +7,14 @@ import (
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/lambda"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccAWSLambdaAlias_basic(t *testing.T) {
|
||||
var conf lambda.AliasConfiguration
|
||||
rInt := acctest.RandInt()
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
@ -20,7 +22,7 @@ func TestAccAWSLambdaAlias_basic(t *testing.T) {
|
||||
CheckDestroy: testAccCheckAwsLambdaAliasDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAwsLambdaAliasConfig,
|
||||
Config: testAccAwsLambdaAliasConfig(rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAwsLambdaAliasExists("aws_lambda_alias.lambda_alias_test", &conf),
|
||||
testAccCheckAwsLambdaAttributes(&conf),
|
||||
@ -95,9 +97,10 @@ func testAccCheckAwsLambdaAttributes(mapping *lambda.AliasConfiguration) resourc
|
||||
}
|
||||
}
|
||||
|
||||
const testAccAwsLambdaAliasConfig = `
|
||||
func testAccAwsLambdaAliasConfig(rInt int) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "aws_iam_role" "iam_for_lambda" {
|
||||
name = "iam_for_lambda"
|
||||
name = "iam_for_lambda_%d"
|
||||
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
@ -117,7 +120,7 @@ EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_policy" "policy_for_role" {
|
||||
name = "policy_for_role"
|
||||
name = "policy_for_role_%d"
|
||||
path = "/"
|
||||
description = "IAM policy for for Lamda alias testing"
|
||||
|
||||
@ -138,7 +141,7 @@ EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_policy_attachment" "policy_attachment_for_role" {
|
||||
name = "policy_attachment_for_role"
|
||||
name = "policy_attachment_for_role_%d"
|
||||
roles = ["${aws_iam_role.iam_for_lambda.name}"]
|
||||
policy_arn = "${aws_iam_policy.policy_for_role.arn}"
|
||||
}
|
||||
@ -156,5 +159,5 @@ resource "aws_lambda_alias" "lambda_alias_test" {
|
||||
description = "a sample description"
|
||||
function_name = "${aws_lambda_function.lambda_function_test_create.arn}"
|
||||
function_version = "$LATEST"
|
||||
}`, rInt, rInt, rInt)
|
||||
}
|
||||
`
|
||||
|
@ -389,9 +389,9 @@ func resourceAwsLambdaFunctionRead(d *schema.ResourceData, meta interface{}) err
|
||||
last := p.Versions[len(p.Versions)-1]
|
||||
lastVersion = *last.Version
|
||||
lastQualifiedArn = *last.FunctionArn
|
||||
return true
|
||||
return false
|
||||
}
|
||||
return false
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@ -416,6 +416,7 @@ func listVersionsByFunctionPages(c *lambda.Lambda, input *lambda.ListVersionsByF
|
||||
if !shouldContinue || lastPage {
|
||||
break
|
||||
}
|
||||
input.Marker = page.NextMarker
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -292,6 +292,8 @@ func TestAccAWSLambdaFunction_localUpdate(t *testing.T) {
|
||||
}
|
||||
defer os.Remove(path)
|
||||
|
||||
rInt := acctest.RandInt()
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
@ -301,7 +303,7 @@ func TestAccAWSLambdaFunction_localUpdate(t *testing.T) {
|
||||
PreConfig: func() {
|
||||
testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func.js": "lambda.js"}, zipFile)
|
||||
},
|
||||
Config: genAWSLambdaFunctionConfig_local(path),
|
||||
Config: genAWSLambdaFunctionConfig_local(path, rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_local", "tf_acc_lambda_name_local", &conf),
|
||||
testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_local"),
|
||||
@ -313,7 +315,7 @@ func TestAccAWSLambdaFunction_localUpdate(t *testing.T) {
|
||||
PreConfig: func() {
|
||||
testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func_modified.js": "lambda.js"}, zipFile)
|
||||
},
|
||||
Config: genAWSLambdaFunctionConfig_local(path),
|
||||
Config: genAWSLambdaFunctionConfig_local(path, rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_local", "tf_acc_lambda_name_local", &conf),
|
||||
testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_local"),
|
||||
@ -387,6 +389,8 @@ func TestAccAWSLambdaFunction_s3Update(t *testing.T) {
|
||||
bucketName := fmt.Sprintf("tf-acc-lambda-s3-deployments-%d", randomInteger)
|
||||
key := "lambda-func.zip"
|
||||
|
||||
rInt := acctest.RandInt()
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
@ -397,7 +401,7 @@ func TestAccAWSLambdaFunction_s3Update(t *testing.T) {
|
||||
// Upload 1st version
|
||||
testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func.js": "lambda.js"}, zipFile)
|
||||
},
|
||||
Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path),
|
||||
Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", "tf_acc_lambda_name_s3", &conf),
|
||||
testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_s3"),
|
||||
@ -411,12 +415,12 @@ func TestAccAWSLambdaFunction_s3Update(t *testing.T) {
|
||||
// Upload 2nd version
|
||||
testAccCreateZipFromFiles(map[string]string{"test-fixtures/lambda_func_modified.js": "lambda.js"}, zipFile)
|
||||
},
|
||||
Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path),
|
||||
Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt),
|
||||
},
|
||||
// Extra step because of missing ComputedWhen
|
||||
// See https://github.com/hashicorp/terraform/pull/4846 & https://github.com/hashicorp/terraform/pull/5330
|
||||
{
|
||||
Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path),
|
||||
Config: genAWSLambdaFunctionConfig_s3(bucketName, key, path, rInt),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAwsLambdaFunctionExists("aws_lambda_function.lambda_function_s3", "tf_acc_lambda_name_s3", &conf),
|
||||
testAccCheckAwsLambdaFunctionName(&conf, "tf_acc_lambda_name_s3"),
|
||||
@ -1101,7 +1105,7 @@ resource "aws_lambda_function" "lambda_function_test" {
|
||||
|
||||
const testAccAWSLambdaFunctionConfig_local_tpl = `
|
||||
resource "aws_iam_role" "iam_for_lambda" {
|
||||
name = "iam_for_lambda"
|
||||
name = "iam_for_lambda_%d"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
@ -1128,8 +1132,8 @@ resource "aws_lambda_function" "lambda_function_local" {
|
||||
}
|
||||
`
|
||||
|
||||
func genAWSLambdaFunctionConfig_local(filePath string) string {
|
||||
return fmt.Sprintf(testAccAWSLambdaFunctionConfig_local_tpl,
|
||||
func genAWSLambdaFunctionConfig_local(filePath string, rInt int) string {
|
||||
return fmt.Sprintf(testAccAWSLambdaFunctionConfig_local_tpl, rInt,
|
||||
filePath, filePath)
|
||||
}
|
||||
|
||||
@ -1182,7 +1186,7 @@ resource "aws_s3_bucket_object" "o" {
|
||||
etag = "${md5(file("%s"))}"
|
||||
}
|
||||
resource "aws_iam_role" "iam_for_lambda" {
|
||||
name = "iam_for_lambda"
|
||||
name = "iam_for_lambda_%d"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
@ -1210,9 +1214,9 @@ resource "aws_lambda_function" "lambda_function_s3" {
|
||||
}
|
||||
`
|
||||
|
||||
func genAWSLambdaFunctionConfig_s3(bucket, key, path string) string {
|
||||
func genAWSLambdaFunctionConfig_s3(bucket, key, path string, rInt int) string {
|
||||
return fmt.Sprintf(testAccAWSLambdaFunctionConfig_s3_tpl,
|
||||
bucket, key, path, path)
|
||||
bucket, key, path, path, rInt)
|
||||
}
|
||||
|
||||
func testAccAWSLambdaFunctionConfig_s3_unversioned_tpl(rName, bucketName, key, path string) string {
|
||||
|
@ -28,20 +28,20 @@ func resourceAwsNetworkAcl() *schema.Resource {
|
||||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"vpc_id": &schema.Schema{
|
||||
"vpc_id": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
Computed: false,
|
||||
},
|
||||
"subnet_id": &schema.Schema{
|
||||
"subnet_id": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Computed: false,
|
||||
Deprecated: "Attribute subnet_id is deprecated on network_acl resources. Use subnet_ids instead",
|
||||
},
|
||||
"subnet_ids": &schema.Schema{
|
||||
"subnet_ids": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
@ -49,42 +49,46 @@ func resourceAwsNetworkAcl() *schema.Resource {
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
"ingress": &schema.Schema{
|
||||
"ingress": {
|
||||
Type: schema.TypeSet,
|
||||
Required: false,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"from_port": &schema.Schema{
|
||||
"from_port": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"to_port": &schema.Schema{
|
||||
"to_port": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"rule_no": &schema.Schema{
|
||||
"rule_no": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"action": &schema.Schema{
|
||||
"action": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"protocol": &schema.Schema{
|
||||
"protocol": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"cidr_block": &schema.Schema{
|
||||
"cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"icmp_type": &schema.Schema{
|
||||
"ipv6_cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"icmp_type": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"icmp_code": &schema.Schema{
|
||||
"icmp_code": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
@ -92,42 +96,46 @@ func resourceAwsNetworkAcl() *schema.Resource {
|
||||
},
|
||||
Set: resourceAwsNetworkAclEntryHash,
|
||||
},
|
||||
"egress": &schema.Schema{
|
||||
"egress": {
|
||||
Type: schema.TypeSet,
|
||||
Required: false,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"from_port": &schema.Schema{
|
||||
"from_port": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"to_port": &schema.Schema{
|
||||
"to_port": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"rule_no": &schema.Schema{
|
||||
"rule_no": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
},
|
||||
"action": &schema.Schema{
|
||||
"action": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"protocol": &schema.Schema{
|
||||
"protocol": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"cidr_block": &schema.Schema{
|
||||
"cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"icmp_type": &schema.Schema{
|
||||
"ipv6_cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"icmp_type": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
"icmp_code": &schema.Schema{
|
||||
"icmp_code": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
},
|
||||
@ -389,25 +397,36 @@ func updateNetworkAclEntries(d *schema.ResourceData, entryType string, conn *ec2
|
||||
}
|
||||
}
|
||||
|
||||
// AWS mutates the CIDR block into a network implied by the IP and
|
||||
// mask provided. This results in hashing inconsistencies between
|
||||
// the local config file and the state returned by the API. Error
|
||||
// if the user provides a CIDR block with an inappropriate mask
|
||||
if err := validateCIDRBlock(*add.CidrBlock); err != nil {
|
||||
return err
|
||||
if add.CidrBlock != nil && *add.CidrBlock != "" {
|
||||
// AWS mutates the CIDR block into a network implied by the IP and
|
||||
// mask provided. This results in hashing inconsistencies between
|
||||
// the local config file and the state returned by the API. Error
|
||||
// if the user provides a CIDR block with an inappropriate mask
|
||||
if err := validateCIDRBlock(*add.CidrBlock); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Add new Acl entry
|
||||
_, connErr := conn.CreateNetworkAclEntry(&ec2.CreateNetworkAclEntryInput{
|
||||
createOpts := &ec2.CreateNetworkAclEntryInput{
|
||||
NetworkAclId: aws.String(d.Id()),
|
||||
CidrBlock: add.CidrBlock,
|
||||
Egress: add.Egress,
|
||||
PortRange: add.PortRange,
|
||||
Protocol: add.Protocol,
|
||||
RuleAction: add.RuleAction,
|
||||
RuleNumber: add.RuleNumber,
|
||||
IcmpTypeCode: add.IcmpTypeCode,
|
||||
})
|
||||
}
|
||||
|
||||
if add.CidrBlock != nil && *add.CidrBlock != "" {
|
||||
createOpts.CidrBlock = add.CidrBlock
|
||||
}
|
||||
|
||||
if add.Ipv6CidrBlock != nil && *add.Ipv6CidrBlock != "" {
|
||||
createOpts.Ipv6CidrBlock = add.Ipv6CidrBlock
|
||||
}
|
||||
|
||||
// Add new Acl entry
|
||||
_, connErr := conn.CreateNetworkAclEntry(createOpts)
|
||||
if connErr != nil {
|
||||
return fmt.Errorf("Error creating %s entry: %s", entryType, connErr)
|
||||
}
|
||||
@ -520,7 +539,13 @@ func resourceAwsNetworkAclEntryHash(v interface{}) int {
|
||||
buf.WriteString(fmt.Sprintf("%s-", protocol))
|
||||
}
|
||||
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["cidr_block"].(string)))
|
||||
if v, ok := m["cidr_block"]; ok {
|
||||
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
|
||||
}
|
||||
|
||||
if v, ok := m["ipv6_cidr_block"]; ok {
|
||||
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
|
||||
}
|
||||
|
||||
if v, ok := m["ssl_certificate_id"]; ok {
|
||||
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
|
||||
@ -539,11 +564,11 @@ func resourceAwsNetworkAclEntryHash(v interface{}) int {
|
||||
func getDefaultNetworkAcl(vpc_id string, conn *ec2.EC2) (defaultAcl *ec2.NetworkAcl, err error) {
|
||||
resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{
|
||||
Filters: []*ec2.Filter{
|
||||
&ec2.Filter{
|
||||
{
|
||||
Name: aws.String("default"),
|
||||
Values: []*string{aws.String("true")},
|
||||
},
|
||||
&ec2.Filter{
|
||||
{
|
||||
Name: aws.String("vpc-id"),
|
||||
Values: []*string{aws.String(vpc_id)},
|
||||
},
|
||||
@ -559,7 +584,7 @@ func getDefaultNetworkAcl(vpc_id string, conn *ec2.EC2) (defaultAcl *ec2.Network
|
||||
func findNetworkAclAssociation(subnetId string, conn *ec2.EC2) (networkAclAssociation *ec2.NetworkAclAssociation, err error) {
|
||||
resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{
|
||||
Filters: []*ec2.Filter{
|
||||
&ec2.Filter{
|
||||
{
|
||||
Name: aws.String("association.subnet-id"),
|
||||
Values: []*string{aws.String(subnetId)},
|
||||
},
|
||||
@ -587,8 +612,12 @@ func networkAclEntriesToMapList(networkAcls []*ec2.NetworkAclEntry) []map[string
|
||||
acl := make(map[string]interface{})
|
||||
acl["rule_no"] = *entry.RuleNumber
|
||||
acl["action"] = *entry.RuleAction
|
||||
acl["cidr_block"] = *entry.CidrBlock
|
||||
|
||||
if entry.CidrBlock != nil {
|
||||
acl["cidr_block"] = *entry.CidrBlock
|
||||
}
|
||||
if entry.Ipv6CidrBlock != nil {
|
||||
acl["ipv6_cidr_block"] = *entry.Ipv6CidrBlock
|
||||
}
|
||||
// The AWS network ACL API only speaks protocol numbers, and
|
||||
// that's all we record.
|
||||
if _, err := strconv.Atoi(*entry.Protocol); err != nil {
|
||||
|
@ -21,54 +21,59 @@ func resourceAwsNetworkAclRule() *schema.Resource {
|
||||
Delete: resourceAwsNetworkAclRuleDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"network_acl_id": &schema.Schema{
|
||||
"network_acl_id": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"rule_number": &schema.Schema{
|
||||
"rule_number": {
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"egress": &schema.Schema{
|
||||
"egress": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Default: false,
|
||||
},
|
||||
"protocol": &schema.Schema{
|
||||
"protocol": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"rule_action": &schema.Schema{
|
||||
"rule_action": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"cidr_block": &schema.Schema{
|
||||
"cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"from_port": &schema.Schema{
|
||||
"ipv6_cidr_block": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"from_port": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"to_port": &schema.Schema{
|
||||
"to_port": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"icmp_type": &schema.Schema{
|
||||
"icmp_type": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: validateICMPArgumentValue,
|
||||
},
|
||||
"icmp_code": &schema.Schema{
|
||||
"icmp_code": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
@ -97,7 +102,6 @@ func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) e
|
||||
Egress: aws.Bool(d.Get("egress").(bool)),
|
||||
RuleNumber: aws.Int64(int64(d.Get("rule_number").(int))),
|
||||
Protocol: aws.String(strconv.Itoa(p)),
|
||||
CidrBlock: aws.String(d.Get("cidr_block").(string)),
|
||||
RuleAction: aws.String(d.Get("rule_action").(string)),
|
||||
PortRange: &ec2.PortRange{
|
||||
From: aws.Int64(int64(d.Get("from_port").(int))),
|
||||
@ -105,6 +109,21 @@ func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) e
|
||||
},
|
||||
}
|
||||
|
||||
cidr, hasCidr := d.GetOk("cidr_block")
|
||||
ipv6Cidr, hasIpv6Cidr := d.GetOk("ipv6_cidr_block")
|
||||
|
||||
if hasCidr == false && hasIpv6Cidr == false {
|
||||
return fmt.Errorf("Either `cidr_block` or `ipv6_cidr_block` must be defined")
|
||||
}
|
||||
|
||||
if hasCidr {
|
||||
params.CidrBlock = aws.String(cidr.(string))
|
||||
}
|
||||
|
||||
if hasIpv6Cidr {
|
||||
params.Ipv6CidrBlock = aws.String(ipv6Cidr.(string))
|
||||
}
|
||||
|
||||
// Specify additional required fields for ICMP. For the list
|
||||
// of ICMP codes and types, see: http://www.nthelp.com/icmp.html
|
||||
if p == 1 {
|
||||
@ -160,6 +179,7 @@ func resourceAwsNetworkAclRuleRead(d *schema.ResourceData, meta interface{}) err
|
||||
|
||||
d.Set("rule_number", resp.RuleNumber)
|
||||
d.Set("cidr_block", resp.CidrBlock)
|
||||
d.Set("ipv6_cidr_block", resp.Ipv6CidrBlock)
|
||||
d.Set("egress", resp.Egress)
|
||||
if resp.IcmpTypeCode != nil {
|
||||
d.Set("icmp_code", resp.IcmpTypeCode.Code)
|
||||
|
@ -2,6 +2,7 @@ package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
@ -20,7 +21,7 @@ func TestAccAWSNetworkAclRule_basic(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSNetworkAclRuleDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSNetworkAclRuleBasicConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.baz", &networkAcl),
|
||||
@ -32,6 +33,39 @@ func TestAccAWSNetworkAclRule_basic(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSNetworkAclRule_missingParam(t *testing.T) {
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSNetworkAclRuleDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSNetworkAclRuleMissingParam,
|
||||
ExpectError: regexp.MustCompile("Either `cidr_block` or `ipv6_cidr_block` must be defined"),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSNetworkAclRule_ipv6(t *testing.T) {
|
||||
var networkAcl ec2.NetworkAcl
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSNetworkAclRuleDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSNetworkAclRuleIpv6Config,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.baz", &networkAcl),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestResourceAWSNetworkAclRule_validateICMPArgumentValue(t *testing.T) {
|
||||
type testCases struct {
|
||||
Value string
|
||||
@ -195,3 +229,44 @@ resource "aws_network_acl_rule" "wibble" {
|
||||
icmp_code = -1
|
||||
}
|
||||
`
|
||||
|
||||
const testAccAWSNetworkAclRuleMissingParam = `
|
||||
provider "aws" {
|
||||
region = "us-east-1"
|
||||
}
|
||||
resource "aws_vpc" "foo" {
|
||||
cidr_block = "10.3.0.0/16"
|
||||
}
|
||||
resource "aws_network_acl" "bar" {
|
||||
vpc_id = "${aws_vpc.foo.id}"
|
||||
}
|
||||
resource "aws_network_acl_rule" "baz" {
|
||||
network_acl_id = "${aws_network_acl.bar.id}"
|
||||
rule_number = 200
|
||||
egress = false
|
||||
protocol = "tcp"
|
||||
rule_action = "allow"
|
||||
from_port = 22
|
||||
to_port = 22
|
||||
}
|
||||
`
|
||||
|
||||
const testAccAWSNetworkAclRuleIpv6Config = `
|
||||
resource "aws_vpc" "foo" {
|
||||
cidr_block = "10.3.0.0/16"
|
||||
}
|
||||
resource "aws_network_acl" "bar" {
|
||||
vpc_id = "${aws_vpc.foo.id}"
|
||||
}
|
||||
resource "aws_network_acl_rule" "baz" {
|
||||
network_acl_id = "${aws_network_acl.bar.id}"
|
||||
rule_number = 150
|
||||
egress = false
|
||||
protocol = "tcp"
|
||||
rule_action = "allow"
|
||||
ipv6_cidr_block = "::/0"
|
||||
from_port = 22
|
||||
to_port = 22
|
||||
}
|
||||
|
||||
`
|
||||
|
@ -20,34 +20,34 @@ func TestAccAWSNetworkAcl_EgressAndIngressRules(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSNetworkAclEgressNIngressConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
testAccCheckAWSNetworkAclExists("aws_network_acl.bar", &networkAcl),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.bar", "ingress.109047673.protocol", "6"),
|
||||
"aws_network_acl.bar", "ingress.1871939009.protocol", "6"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.bar", "ingress.109047673.rule_no", "1"),
|
||||
"aws_network_acl.bar", "ingress.1871939009.rule_no", "1"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.bar", "ingress.109047673.from_port", "80"),
|
||||
"aws_network_acl.bar", "ingress.1871939009.from_port", "80"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.bar", "ingress.109047673.to_port", "80"),
|
||||
"aws_network_acl.bar", "ingress.1871939009.to_port", "80"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.bar", "ingress.109047673.action", "allow"),
|
||||
"aws_network_acl.bar", "ingress.1871939009.action", "allow"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.bar", "ingress.109047673.cidr_block", "10.3.0.0/18"),
|
||||
"aws_network_acl.bar", "ingress.1871939009.cidr_block", "10.3.0.0/18"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.bar", "egress.868403673.protocol", "6"),
|
||||
"aws_network_acl.bar", "egress.3111164687.protocol", "6"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.bar", "egress.868403673.rule_no", "2"),
|
||||
"aws_network_acl.bar", "egress.3111164687.rule_no", "2"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.bar", "egress.868403673.from_port", "443"),
|
||||
"aws_network_acl.bar", "egress.3111164687.from_port", "443"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.bar", "egress.868403673.to_port", "443"),
|
||||
"aws_network_acl.bar", "egress.3111164687.to_port", "443"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.bar", "egress.868403673.cidr_block", "10.3.0.0/18"),
|
||||
"aws_network_acl.bar", "egress.3111164687.cidr_block", "10.3.0.0/18"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.bar", "egress.868403673.action", "allow"),
|
||||
"aws_network_acl.bar", "egress.3111164687.action", "allow"),
|
||||
),
|
||||
},
|
||||
},
|
||||
@ -63,23 +63,22 @@ func TestAccAWSNetworkAcl_OnlyIngressRules_basic(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSNetworkAclIngressConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl),
|
||||
// testAccCheckSubnetAssociation("aws_network_acl.foos", "aws_subnet.blob"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.1451312565.protocol", "6"),
|
||||
"aws_network_acl.foos", "ingress.4245812720.protocol", "6"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.1451312565.rule_no", "2"),
|
||||
"aws_network_acl.foos", "ingress.4245812720.rule_no", "2"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.1451312565.from_port", "443"),
|
||||
"aws_network_acl.foos", "ingress.4245812720.from_port", "443"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.1451312565.to_port", "443"),
|
||||
"aws_network_acl.foos", "ingress.4245812720.to_port", "443"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.1451312565.action", "deny"),
|
||||
"aws_network_acl.foos", "ingress.4245812720.action", "deny"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.1451312565.cidr_block", "10.2.0.0/18"),
|
||||
"aws_network_acl.foos", "ingress.4245812720.cidr_block", "10.2.0.0/18"),
|
||||
),
|
||||
},
|
||||
},
|
||||
@ -95,46 +94,46 @@ func TestAccAWSNetworkAcl_OnlyIngressRules_update(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSNetworkAclIngressConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl),
|
||||
testIngressRuleLength(&networkAcl, 2),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.2048097841.protocol", "6"),
|
||||
"aws_network_acl.foos", "ingress.401088754.protocol", "6"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.2048097841.rule_no", "1"),
|
||||
"aws_network_acl.foos", "ingress.401088754.rule_no", "1"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.2048097841.from_port", "0"),
|
||||
"aws_network_acl.foos", "ingress.401088754.from_port", "0"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.2048097841.to_port", "22"),
|
||||
"aws_network_acl.foos", "ingress.401088754.to_port", "22"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.2048097841.action", "deny"),
|
||||
"aws_network_acl.foos", "ingress.401088754.action", "deny"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.1451312565.cidr_block", "10.2.0.0/18"),
|
||||
"aws_network_acl.foos", "ingress.4245812720.cidr_block", "10.2.0.0/18"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.1451312565.from_port", "443"),
|
||||
"aws_network_acl.foos", "ingress.4245812720.from_port", "443"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.1451312565.rule_no", "2"),
|
||||
"aws_network_acl.foos", "ingress.4245812720.rule_no", "2"),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSNetworkAclIngressConfigChange,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
Check: resource.ComposeAggregateTestCheckFunc(
|
||||
testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl),
|
||||
testIngressRuleLength(&networkAcl, 1),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.2048097841.protocol", "6"),
|
||||
"aws_network_acl.foos", "ingress.401088754.protocol", "6"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.2048097841.rule_no", "1"),
|
||||
"aws_network_acl.foos", "ingress.401088754.rule_no", "1"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.2048097841.from_port", "0"),
|
||||
"aws_network_acl.foos", "ingress.401088754.from_port", "0"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.2048097841.to_port", "22"),
|
||||
"aws_network_acl.foos", "ingress.401088754.to_port", "22"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.2048097841.action", "deny"),
|
||||
"aws_network_acl.foos", "ingress.401088754.action", "deny"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.2048097841.cidr_block", "10.2.0.0/18"),
|
||||
"aws_network_acl.foos", "ingress.401088754.cidr_block", "10.2.0.0/18"),
|
||||
),
|
||||
},
|
||||
},
|
||||
@ -150,7 +149,7 @@ func TestAccAWSNetworkAcl_OnlyEgressRules(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSNetworkAclEgressConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSNetworkAclExists("aws_network_acl.bond", &networkAcl),
|
||||
@ -169,13 +168,13 @@ func TestAccAWSNetworkAcl_SubnetChange(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSNetworkAclSubnetConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckSubnetIsAssociatedWithAcl("aws_network_acl.bar", "aws_subnet.old"),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSNetworkAclSubnetConfigChange,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckSubnetIsNotAssociatedWithAcl("aws_network_acl.bar", "aws_subnet.old"),
|
||||
@ -206,7 +205,7 @@ func TestAccAWSNetworkAcl_Subnets(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSNetworkAclSubnet_SubnetIds,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSNetworkAclExists("aws_network_acl.bar", &networkAcl),
|
||||
@ -216,7 +215,7 @@ func TestAccAWSNetworkAcl_Subnets(t *testing.T) {
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSNetworkAclSubnet_SubnetIdsUpdate,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSNetworkAclExists("aws_network_acl.bar", &networkAcl),
|
||||
@ -230,6 +229,37 @@ func TestAccAWSNetworkAcl_Subnets(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSNetworkAcl_ipv6Rules(t *testing.T) {
|
||||
var networkAcl ec2.NetworkAcl
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
IDRefreshName: "aws_network_acl.foos",
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSNetworkAclIpv6Config,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSNetworkAclExists("aws_network_acl.foos", &networkAcl),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.1976110835.protocol", "6"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.1976110835.rule_no", "1"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.1976110835.from_port", "0"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.1976110835.to_port", "22"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.1976110835.action", "allow"),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_network_acl.foos", "ingress.1976110835.ipv6_cidr_block", "::/0"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccAWSNetworkAcl_espProtocol(t *testing.T) {
|
||||
var networkAcl ec2.NetworkAcl
|
||||
|
||||
@ -239,7 +269,7 @@ func TestAccAWSNetworkAcl_espProtocol(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAWSNetworkAclDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAWSNetworkAclEsp,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSNetworkAclExists("aws_network_acl.testesp", &networkAcl),
|
||||
@ -336,7 +366,7 @@ func testAccCheckSubnetIsAssociatedWithAcl(acl string, sub string) resource.Test
|
||||
resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{
|
||||
NetworkAclIds: []*string{aws.String(networkAcl.Primary.ID)},
|
||||
Filters: []*ec2.Filter{
|
||||
&ec2.Filter{
|
||||
{
|
||||
Name: aws.String("association.subnet-id"),
|
||||
Values: []*string{aws.String(subnet.Primary.ID)},
|
||||
},
|
||||
@ -362,7 +392,7 @@ func testAccCheckSubnetIsNotAssociatedWithAcl(acl string, subnet string) resourc
|
||||
resp, err := conn.DescribeNetworkAcls(&ec2.DescribeNetworkAclsInput{
|
||||
NetworkAclIds: []*string{aws.String(networkAcl.Primary.ID)},
|
||||
Filters: []*ec2.Filter{
|
||||
&ec2.Filter{
|
||||
{
|
||||
Name: aws.String("association.subnet-id"),
|
||||
Values: []*string{aws.String(subnet.Primary.ID)},
|
||||
},
|
||||
@ -379,6 +409,33 @@ func testAccCheckSubnetIsNotAssociatedWithAcl(acl string, subnet string) resourc
|
||||
}
|
||||
}
|
||||
|
||||
const testAccAWSNetworkAclIpv6Config = `
|
||||
resource "aws_vpc" "foo" {
|
||||
cidr_block = "10.1.0.0/16"
|
||||
tags {
|
||||
Name = "TestAccAWSNetworkAcl_ipv6Rules"
|
||||
}
|
||||
}
|
||||
resource "aws_subnet" "blob" {
|
||||
cidr_block = "10.1.1.0/24"
|
||||
vpc_id = "${aws_vpc.foo.id}"
|
||||
map_public_ip_on_launch = true
|
||||
}
|
||||
resource "aws_network_acl" "foos" {
|
||||
vpc_id = "${aws_vpc.foo.id}"
|
||||
ingress = {
|
||||
protocol = "tcp"
|
||||
rule_no = 1
|
||||
action = "allow"
|
||||
ipv6_cidr_block = "::/0"
|
||||
from_port = 0
|
||||
to_port = 22
|
||||
}
|
||||
|
||||
subnet_ids = ["${aws_subnet.blob.id}"]
|
||||
}
|
||||
`
|
||||
|
||||
const testAccAWSNetworkAclIngressConfig = `
|
||||
resource "aws_vpc" "foo" {
|
||||
cidr_block = "10.1.0.0/16"
|
||||
|
@ -21,21 +21,21 @@ func resourceAwsOpsworksApplication() *schema.Resource {
|
||||
Update: resourceAwsOpsworksApplicationUpdate,
|
||||
Delete: resourceAwsOpsworksApplicationDelete,
|
||||
Schema: map[string]*schema.Schema{
|
||||
"id": &schema.Schema{
|
||||
"id": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
"name": &schema.Schema{
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"short_name": &schema.Schema{
|
||||
"short_name": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
Optional: true,
|
||||
},
|
||||
// aws-flow-ruby | java | rails | php | nodejs | static | other
|
||||
"type": &schema.Schema{
|
||||
"type": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||
@ -56,62 +56,62 @@ func resourceAwsOpsworksApplication() *schema.Resource {
|
||||
return
|
||||
},
|
||||
},
|
||||
"stack_id": &schema.Schema{
|
||||
"stack_id": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
// TODO: the following 4 vals are really part of the Attributes array. We should validate that only ones relevant to the chosen type are set, perhaps. (what is the default type? how do they map?)
|
||||
"document_root": &schema.Schema{
|
||||
"document_root": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
//Default: "public",
|
||||
},
|
||||
"rails_env": &schema.Schema{
|
||||
"rails_env": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
//Default: "production",
|
||||
},
|
||||
"auto_bundle_on_deploy": &schema.Schema{
|
||||
"auto_bundle_on_deploy": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
//Default: true,
|
||||
},
|
||||
"aws_flow_ruby_settings": &schema.Schema{
|
||||
"aws_flow_ruby_settings": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"app_source": &schema.Schema{
|
||||
"app_source": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"type": &schema.Schema{
|
||||
"type": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"url": &schema.Schema{
|
||||
"url": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"username": &schema.Schema{
|
||||
"username": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"password": &schema.Schema{
|
||||
"password": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"revision": &schema.Schema{
|
||||
"revision": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"ssh_key": &schema.Schema{
|
||||
"ssh_key": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
@ -121,41 +121,41 @@ func resourceAwsOpsworksApplication() *schema.Resource {
|
||||
// AutoSelectOpsworksMysqlInstance, OpsworksMysqlInstance, or RdsDbInstance.
|
||||
// anything beside auto select will lead into failure in case the instance doesn't exist
|
||||
// XXX: validation?
|
||||
"data_source_type": &schema.Schema{
|
||||
"data_source_type": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"data_source_database_name": &schema.Schema{
|
||||
"data_source_database_name": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"data_source_arn": &schema.Schema{
|
||||
"data_source_arn": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"description": &schema.Schema{
|
||||
"description": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"domains": &schema.Schema{
|
||||
"domains": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
"environment": &schema.Schema{
|
||||
"environment": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"key": &schema.Schema{
|
||||
"key": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"value": &schema.Schema{
|
||||
"value": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"secure": &schema.Schema{
|
||||
"secure": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: true,
|
||||
@ -163,18 +163,18 @@ func resourceAwsOpsworksApplication() *schema.Resource {
|
||||
},
|
||||
},
|
||||
},
|
||||
"enable_ssl": &schema.Schema{
|
||||
"enable_ssl": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
},
|
||||
"ssl_configuration": &schema.Schema{
|
||||
"ssl_configuration": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
//Computed: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"certificate": &schema.Schema{
|
||||
"certificate": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
StateFunc: func(v interface{}) string {
|
||||
@ -186,7 +186,7 @@ func resourceAwsOpsworksApplication() *schema.Resource {
|
||||
}
|
||||
},
|
||||
},
|
||||
"private_key": &schema.Schema{
|
||||
"private_key": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
StateFunc: func(v interface{}) string {
|
||||
@ -198,7 +198,7 @@ func resourceAwsOpsworksApplication() *schema.Resource {
|
||||
}
|
||||
},
|
||||
},
|
||||
"chain": &schema.Schema{
|
||||
"chain": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
StateFunc: func(v interface{}) string {
|
||||
|
@ -8,25 +8,30 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/opsworks"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccAWSOpsworksApplication(t *testing.T) {
|
||||
var opsapp opsworks.App
|
||||
|
||||
rInt := acctest.RandInt()
|
||||
name := fmt.Sprintf("tf-ops-acc-application-%d", rInt)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAwsOpsworksApplicationDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAwsOpsworksApplicationCreate,
|
||||
{
|
||||
Config: testAccAwsOpsworksApplicationCreate(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSOpsworksApplicationExists(
|
||||
"aws_opsworks_application.tf-acc-app", &opsapp),
|
||||
testAccCheckAWSOpsworksCreateAppAttributes(&opsapp),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "name", "tf-ops-acc-application",
|
||||
"aws_opsworks_application.tf-acc-app", "name", name,
|
||||
),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "type", "other",
|
||||
@ -34,14 +39,14 @@ func TestAccAWSOpsworksApplication(t *testing.T) {
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "enable_ssl", "false",
|
||||
),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "ssl_configuration", "",
|
||||
resource.TestCheckNoResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "ssl_configuration",
|
||||
),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "domains", "",
|
||||
resource.TestCheckNoResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "domains",
|
||||
),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "app_source", "",
|
||||
resource.TestCheckNoResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "app_source",
|
||||
),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "environment.3077298702.key", "key1",
|
||||
@ -49,22 +54,22 @@ func TestAccAWSOpsworksApplication(t *testing.T) {
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "environment.3077298702.value", "value1",
|
||||
),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "environment.3077298702.secret", "",
|
||||
resource.TestCheckNoResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "environment.3077298702.secret",
|
||||
),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "document_root", "foo",
|
||||
),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccAwsOpsworksApplicationUpdate,
|
||||
{
|
||||
Config: testAccAwsOpsworksApplicationUpdate(name),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSOpsworksApplicationExists(
|
||||
"aws_opsworks_application.tf-acc-app", &opsapp),
|
||||
testAccCheckAWSOpsworksUpdateAppAttributes(&opsapp),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "name", "tf-ops-acc-application",
|
||||
"aws_opsworks_application.tf-acc-app", "name", name,
|
||||
),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "type", "rails",
|
||||
@ -117,8 +122,8 @@ func TestAccAWSOpsworksApplication(t *testing.T) {
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "environment.3077298702.value", "value1",
|
||||
),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "environment.3077298702.secret", "",
|
||||
resource.TestCheckNoResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "environment.3077298702.secret",
|
||||
),
|
||||
resource.TestCheckResourceAttr(
|
||||
"aws_opsworks_application.tf-acc-app", "document_root", "root",
|
||||
@ -188,7 +193,7 @@ func testAccCheckAWSOpsworksCreateAppAttributes(
|
||||
}
|
||||
|
||||
expectedEnv := []*opsworks.EnvironmentVariable{
|
||||
&opsworks.EnvironmentVariable{
|
||||
{
|
||||
Key: aws.String("key1"),
|
||||
Value: aws.String("value1"),
|
||||
Secure: aws.Bool(false),
|
||||
@ -248,12 +253,12 @@ func testAccCheckAWSOpsworksUpdateAppAttributes(
|
||||
}
|
||||
|
||||
expectedEnv := []*opsworks.EnvironmentVariable{
|
||||
&opsworks.EnvironmentVariable{
|
||||
{
|
||||
Key: aws.String("key2"),
|
||||
Value: aws.String("*****FILTERED*****"),
|
||||
Secure: aws.Bool(true),
|
||||
},
|
||||
&opsworks.EnvironmentVariable{
|
||||
{
|
||||
Key: aws.String("key1"),
|
||||
Value: aws.String("value1"),
|
||||
Secure: aws.Bool(false),
|
||||
@ -308,10 +313,12 @@ func testAccCheckAwsOpsworksApplicationDestroy(s *terraform.State) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var testAccAwsOpsworksApplicationCreate = testAccAwsOpsworksStackConfigVpcCreate("tf-ops-acc-application") + `
|
||||
func testAccAwsOpsworksApplicationCreate(name string) string {
|
||||
return testAccAwsOpsworksStackConfigVpcCreate(name) +
|
||||
fmt.Sprintf(`
|
||||
resource "aws_opsworks_application" "tf-acc-app" {
|
||||
stack_id = "${aws_opsworks_stack.tf-acc.id}"
|
||||
name = "tf-ops-acc-application"
|
||||
name = "%s"
|
||||
type = "other"
|
||||
enable_ssl = false
|
||||
app_source ={
|
||||
@ -320,12 +327,15 @@ resource "aws_opsworks_application" "tf-acc-app" {
|
||||
environment = { key = "key1" value = "value1" secure = false}
|
||||
document_root = "foo"
|
||||
}
|
||||
`
|
||||
`, name)
|
||||
}
|
||||
|
||||
var testAccAwsOpsworksApplicationUpdate = testAccAwsOpsworksStackConfigVpcCreate("tf-ops-acc-application") + `
|
||||
func testAccAwsOpsworksApplicationUpdate(name string) string {
|
||||
return testAccAwsOpsworksStackConfigVpcCreate(name) +
|
||||
fmt.Sprintf(`
|
||||
resource "aws_opsworks_application" "tf-acc-app" {
|
||||
stack_id = "${aws_opsworks_stack.tf-acc.id}"
|
||||
name = "tf-ops-acc-application"
|
||||
name = "%s"
|
||||
type = "rails"
|
||||
domains = ["example.com", "sub.example.com"]
|
||||
enable_ssl = true
|
||||
@ -372,4 +382,5 @@ EOS
|
||||
auto_bundle_on_deploy = "true"
|
||||
rails_env = "staging"
|
||||
}
|
||||
`
|
||||
`, name)
|
||||
}
|
||||
|
@ -24,7 +24,7 @@ func TestAccAWSOpsworksCustomLayer(t *testing.T) {
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckAwsOpsworksCustomLayerDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAwsOpsworksCustomLayerConfigNoVpcCreate(stackName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckAWSOpsworksCustomLayerExists(
|
||||
@ -74,7 +74,7 @@ func TestAccAWSOpsworksCustomLayer(t *testing.T) {
|
||||
),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
{
|
||||
Config: testAccAwsOpsworksCustomLayerConfigUpdate(stackName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
resource.TestCheckResourceAttr(
|
||||
@ -219,7 +219,7 @@ func testAccCheckAWSOpsworksCreateLayerAttributes(
|
||||
}
|
||||
|
||||
expectedEbsVolumes := []*opsworks.VolumeConfiguration{
|
||||
&opsworks.VolumeConfiguration{
|
||||
{
|
||||
VolumeType: aws.String("gp2"),
|
||||
NumberOfDisks: aws.Int64(2),
|
||||
MountPoint: aws.String("/home"),
|
||||
@ -287,10 +287,6 @@ resource "aws_security_group" "tf-ops-acc-layer2" {
|
||||
|
||||
func testAccAwsOpsworksCustomLayerConfigNoVpcCreate(name string) string {
|
||||
return fmt.Sprintf(`
|
||||
provider "aws" {
|
||||
region = "us-east-1"
|
||||
}
|
||||
|
||||
resource "aws_opsworks_custom_layer" "tf-acc" {
|
||||
stack_id = "${aws_opsworks_stack.tf-acc.id}"
|
||||
name = "%s"
|
||||
@ -361,10 +357,6 @@ resource "aws_opsworks_custom_layer" "tf-acc" {
|
||||
|
||||
func testAccAwsOpsworksCustomLayerConfigUpdate(name string) string {
|
||||
return fmt.Sprintf(`
|
||||
provider "aws" {
|
||||
region = "us-east-1"
|
||||
}
|
||||
|
||||
resource "aws_security_group" "tf-ops-acc-layer3" {
|
||||
name = "tf-ops-acc-layer3"
|
||||
ingress {
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user