Merge branch 'master' of github.com:hashicorp/terraform

This commit is contained in:
Max Englander 2016-01-24 05:43:37 +00:00
commit 2d231d71a4
731 changed files with 38147 additions and 4396 deletions

1
.gitignore vendored
View File

@ -9,6 +9,7 @@ modules-dev/
pkg/ pkg/
vendor/ vendor/
website/.vagrant website/.vagrant
website/.bundle
website/build website/build
website/node_modules website/node_modules
.vagrant/ .vagrant/

View File

@ -4,7 +4,6 @@ language: go
go: go:
- 1.5 - 1.5
- tip
install: make updatedeps install: make updatedeps

View File

@ -1,9 +1,184 @@
## 0.6.8 (Unreleased) ## 0.6.10 (Unreleased)
BACKWARDS INCOMPATIBILITIES:
* The `-module-depth` flag available on `plan`, `apply`, `show`, and `graph` now defaults to `-1`, causing
resources within modules to be expanded in command output. This is only a cosmetic change; it does not affect
any behavior.
* This release includes a bugfix for `$${}` interpolation escaping. These strings are now properly converted to `${}`
during interpolation. This may cause diffs on existing configurations in certain cases.
FEATURES:
* **New resource: `azurerm_cdn_endpoint`** [GH-4759]
* **New resource: `azurerm_cdn_profile`** [GH-4740]
* **New resource: `azurerm_network_security_rule`** [GH-4586]
* **New resource: `azurerm_subnet`** [GH-4595]
* **New resource: `azurerm_network_interface`** [GH-4598]
* **New resource: `azurerm_route_table`** [GH-4602]
* **New resource: `azurerm_route`** [GH-4604]
* **New resource: `azurerm_storage_account`** [GH-4698]
* **New resource: `aws_lambda_alias`** [GH-4664]
* **New resource: `aws_redshift_cluster`** [GH-3862]
* **New resource: `aws_redshift_security_group`** [GH-3862]
* **New resource: `aws_redshift_parameter_group`** [GH-3862]
* **New resource: `aws_redshift_subnet_group`** [GH-3862]
* **New resource: `docker_network`** [GH-4483]
* **New resource: `docker_volume`** [GH-4483]
* **New resource: `google_sql_user`** [GH-4669]
IMPROVEMENTS:
* core: Add `sha256()` interpolation function [GH-4704]
* core: Validate lifecycle keys to show helpful error messages when they are mistyped [GH-4745]
* core: Default `module-depth` parameter to `-1`, which expands resources within modules in command output [GH-4763]
* provider/aws: Add new parameters `az_mode` and `availability_zone(s)` in ElastiCache [GH-4631]
* provider/aws: Allow ap-northeast-2 (Seoul) as valid region [GH-4637]
* provider/aws: Limit SNS Topic Subscription protocols [GH-4639]
* provider/aws: Add support for configuring logging on `aws_s3_bucket` resources [GH-4482]
* provider/aws: Add AWS Classiclink for AWS VPC resource [GH-3994]
* provider/aws: Supporting New AWS Route53 HealthCheck additions [GH-4564]
* provider/aws: Store instance state [GH-3261]
* provider/aws: Add support for updating ELB availability zones and subnets [GH-4597]
* provider/aws: Enable specifying aws s3 redirect protocol [GH-4098]
* provider/aws: Added support for `encrypted` on `ebs_block_devices` in Launch Configurations [GH-4481]
* provider/aws: Add support for creating Managed Microsoft Active Directory
and Directory Connectors [GH-4388]
* provider/aws: Mark some `aws_db_instance` fields as optional [GH-3138]
* provider/digitalocean: Add support for reassigning `digitalocean_floating_ip` resources [GH-4476]
* provider/dme: Add support for Global Traffic Director locations on `dme_record` resources [GH-4305]
* provider/docker: Add support for adding host entries on `docker_container` resources [GH-3463]
* provider/docker: Add support for mounting named volumes on `docker_container` resources [GH-4480]
* provider/google: Add content field to bucket object [GH-3893]
* provider/google: Add support for `named_port` blocks on `google_compute_instance_group_manager` resources [GH-4605]
* provider/openstack: Add "personality" support to instance resource [GH-4623]
* provider/packet: Handle external state changes for Packet resources gracefully [GH-4676]
* provider/tls: `tls_private_key` now exports attributes with public key in both PEM and OpenSSH format [GH-4606]
* state/remote: Allow KMS Key Encryption to be used with S3 backend [GH-2903]
BUG FIXES:
* core: Fix handling of literals with escaped interpolations `$${var}` [GH-4747]
* core: Fix diff mismatch when RequiresNew field and list both change [GH-4749]
* core: Respect module target path argument on `terraform init` [GH-4753]
* core: Write planfile even on empty plans [GH-4766]
* core: Add validation error when output is missing value field [GH-4762]
* core: Fix improper handling of orphan resources when targeting [GH-4574]
* config: Detect a specific JSON edge case and show a helpful workaround [GH-4746]
* provider/openstack: Ensure valid Security Group Rule attribute combination [GH-4466]
* provider/openstack: Don't put fixed_ip in port creation request if not defined [GH-4617]
* provider/google: Clarify SQL Database Instance recent name restriction [GH-4577]
* provider/google: Split Instance network interface into two fields [GH-4265]
* provider/aws: Error with empty list item on security group [GH-4140]
* provider/aws: Trap Instance error from mismatched SG IDs and Names [GH-4240]
* provider/aws: EBS optimised to force new resource in AWS Instance [GH-4627]
* provider/aws: Wait for NACL rule to be visible [GH-4734]
* provider/aws: `default_result` on `aws_autoscaling_lifecycle_hook` resources is now computed [GH-4695]
* provider/mailgun: Handle the fact that the domain destroy API is eventually consistent [GH-4777]
* provider/template: Fix race causing sporadic crashes in template_file with count > 1 [GH-4694]
* provider/template: Add support for updating `template_cloudinit_config` resources [GH-4757]
## 0.6.9 (January 8, 2016)
FEATURES:
* **New provider: `vcd` - VMware vCloud Director** [GH-3785]
* **New provider: `postgresql` - Create PostgreSQL databases and roles** [GH-3653]
* **New provider: `chef` - Create chef environments, roles, etc** [GH-3084]
* **New provider: `azurerm` - Preliminary support for Azure Resource Manager** [GH-4226]
* **New provider: `mysql` - Create MySQL databases** [GH-3122]
* **New resource: `aws_autoscaling_schedule`** [GH-4256]
* **New resource: `aws_nat_gateway`** [GH-4381]
* **New resource: `aws_network_acl_rule`** [GH-4286]
* **New resources: `aws_ecr_repository` and `aws_ecr_repository_policy`** [GH-4415]
* **New resource: `google_pubsub_topic`** [GH-3671]
* **New resource: `google_pubsub_subscription`** [GH-3671]
* **New resource: `template_cloudinit_config`** [GH-4095]
* **New resource: `tls_locally_signed_cert`** [GH-3930]
* **New remote state backend: `artifactory`** [GH-3684]
IMPROVEMENTS:
* core: Change set internals for performance improvements [GH-3992]
* core: Support HTTP basic auth in consul remote state [GH-4166]
* core: Improve error message on resource arity mismatch [GH-4244]
* core: Add support for unary operators + and - to the interpolation syntax [GH-3621]
* core: Add SSH agent support for Windows [GH-4323]
* core: Add `sha1()` interpolation function [GH-4450]
* provider/aws: Add `placement_group` as an option for `aws_autoscaling_group` [GH-3704]
* provider/aws: Add support for DynamoDB Table StreamSpecifications [GH-4208]
* provider/aws: Add `name_prefix` to Security Groups [GH-4167]
* provider/aws: Add support for removing nodes to `aws_elasticache_cluster` [GH-3809]
* provider/aws: Add support for `skip_final_snapshot` to `aws_db_instance` [GH-3853]
* provider/aws: Adding support for Tags to DB SecurityGroup [GH-4260]
* provider/aws: Adding Tag support for DB Param Groups [GH-4259]
* provider/aws: Fix issue with updated route ids for VPC Endpoints [GH-4264]
* provider/aws: Added measure_latency option to Route 53 Health Check resource [GH-3688]
* provider/aws: Validate IOPs for EBS Volumes [GH-4146]
* provider/aws: DB Subnet group arn output [GH-4261]
* provider/aws: Get full Kinesis streams view with pagination [GH-4368]
* provider/aws: Allow changing private IPs for ENIs [GH-4307]
* provider/aws: Retry MalformedPolicy errors due to newly created principals in S3 Buckets [GH-4315]
* provider/aws: Validate `name` on `db_subnet_group` against AWS requirements [GH-4340]
* provider/aws: wait for ASG capacity on update [GH-3947]
* provider/aws: Add validation for ECR repository name [GH-4431]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting the restart policy on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting memory, swap and CPU shares on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting labels on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting log driver and options on `docker_container` resources [GH-3761]
* provider/docker: Add support for setting network mode on `docker_container` resources [GH-4475]
* provider/heroku: Improve handling of Applications within an Organization [GH-4495]
* provider/vsphere: Add support for custom vm params on `vsphere_virtual_machine` [GH-3867]
* provider/vsphere: Rename vcenter_server config parameter to something clearer [GH-3718]
* provider/vsphere: Make allow_unverified_ssl configurable on the provider [GH-3933]
* provider/vsphere: Add folder handling for folder-qualified vm names [GH-3939]
* provider/vsphere: Change ip_address parameter for ipv6 support [GH-4035]
* provider/openstack: Increase instance timeout from 10 to 30 minutes [GH-4223]
* provider/google: Add `restart_policy` attribute to `google_managed_instance_group` [GH-3892]
BUG FIXES:
* core: skip provider input for deprecated fields [GH-4193]
* core: Fix issue which could cause fields that become empty to retain old values in the state [GH-3257]
* provider/docker: Fix an issue running with Docker Swarm by looking up containers by ID instead of name [GH-4148]
* provider/openstack: Better handling of load balancing resource state changes [GH-3926]
* provider/aws: Treat `INACTIVE` ECS cluster as deleted [GH-4364]
* provider/aws: Skip `source_security_group_id` determination logic for Classic ELBs [GH-4075]
* provider/aws: Fix issue destroy Route 53 zone/record if it no longer exists [GH-4198]
* provider/aws: Fix issue force destroying a versioned S3 bucket [GH-4168]
* provider/aws: Update DB Replica to honor storage type [GH-4155]
* provider/aws: Fix issue creating AWS RDS replicas across regions [GH-4215]
* provider/aws: Fix issue with Route53 and zero weighted records [GH-4427]
* provider/aws: Fix issue with iam_profile in aws_instance when a path is specified [GH-3663]
* provider/aws: Refactor AWS Authentication chain to fix issue with authentication and IAM [GH-4254]
* provider/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region [GH-4236]
* provider/aws: Fix missing AMI issue with Launch Configurations [GH-4242]
* provider/aws: Opsworks stack SSH key is write-only [GH-4241]
* provider/aws: Update VPC Endpoint to correctly set route table ids [GH-4392]
* provider/aws: Fix issue with ElasticSearch Domain `access_policies` always appear changed [GH-4245]
* provider/aws: Fix issue with nil parameter group value causing panic in `aws_db_parameter_group` [GH-4318]
* provider/aws: Fix issue with Elastic IPs not recognizing when they have been unassigned manually [GH-4387]
* provider/aws: Use body or URL for all CloudFormation stack updates [GH-4370]
* provider/aws: Fix template_url/template_body conflict [GH-4540]
* provider/aws: Fix bug w/ changing ECS svc/ELB association [GH-4366]
* provider/azure: Update for [breaking change to upstream client library](https://github.com/Azure/azure-sdk-for-go/commit/68d50cb53a73edfeb7f17f5e86cdc8eb359a9528). [GH-4300]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/google: Fix project metadata sshKeys from showing up and causing unnecessary diffs [GH-4512]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
* provider/rundeck: Tolerate Rundeck server not returning project name when reading a job [GH-4301]
* provider/vsphere: Create and attach additional disks before bootup [GH-4196]
* provider/openstack: Convert block_device from a Set to a List [GH-4288]
* provider/google: Terraform identifies deleted resources and handles them appropriately on Read [GH-3913]
## 0.6.8 (December 2, 2015)
FEATURES: FEATURES:
* **New resource: `digitalocean_floating_ip`** [GH-3748]
* **New provider: `statuscake`** [GH-3340] * **New provider: `statuscake`** [GH-3340]
* **New resource: `digitalocean_floating_ip`** [GH-3748]
* **New resource: `aws_lambda_event_source_mapping`** [GH-4093]
IMPROVEMENTS: IMPROVEMENTS:
@ -16,8 +191,11 @@ IMPROVEMENTS:
BUG FIXES: BUG FIXES:
* core: Fix a bug which prevented HEREDOC syntax being used in lists [GH-4078] * core: Fix a bug which prevented HEREDOC syntax being used in lists [GH-4078]
* core: Fix a bug which prevented HEREDOC syntax where the anchor ends in a number [GH-4128]
* core: Fix a bug which prevented HEREDOC syntax being used with Windows line endings [GH-4069]
* provider/aws: Fix a bug which could result in a panic when reading EC2 metadata [GH-4024] * provider/aws: Fix a bug which could result in a panic when reading EC2 metadata [GH-4024]
* provider/aws: Fix issue recreating security group rule if it has been destroyed [GH-4050] * provider/aws: Fix issue recreating security group rule if it has been destroyed [GH-4050]
* provider/aws: Fix issue with some attributes in Spot Instance Requests returning as nil [GH-4132]
* provider/aws: Fix issue where SPF records in Route 53 could show differences with no modification to the configuration [GH-4108] * provider/aws: Fix issue where SPF records in Route 53 could show differences with no modification to the configuration [GH-4108]
* provisioner/chef: Fix issue with path separators breaking the Chef provisioner on Windows [GH-4041] * provisioner/chef: Fix issue with path separators breaking the Chef provisioner on Windows [GH-4041]

View File

@ -11,30 +11,98 @@ best way to contribute to the project, read on. This document will cover
what we're looking for. By addressing all the points we're looking for, what we're looking for. By addressing all the points we're looking for,
it raises the chances we can quickly merge or address your contributions. it raises the chances we can quickly merge or address your contributions.
Specifically, we have provided checklists below for each type of issue and pull
request that can happen on the project. These checklists represent everything
we need to be able to review and respond quickly.
## HashiCorp vs. Community Providers
We separate providers out into what we call "HashiCorp Providers" and
"Community Providers".
HashiCorp providers are providers that we'll dedicate full time resources to
improving, supporting the latest features, and fixing bugs. These are providers
we understand deeply and are confident we have the resources to manage
ourselves.
Community providers are providers where we depend on the community to
contribute fixes and enhancements to improve. HashiCorp will run automated
tests and ensure these providers continue to work, but will not dedicate full
time resources to add new features to these providers. These providers are
available in official Terraform releases, but the functionality is primarily
contributed.
The current list of HashiCorp Providers is as follows:
* `aws`
* `azurerm`
* `google`
Our testing standards are the same for both HashiCorp and Community providers,
and HashiCorp runs full acceptance test suites for every provider nightly to
ensure Terraform remains stable.
We make the distinction between these two types of providers to help
highlight the vast amounts of community effort that goes in to making Terraform
great, and to help contributors better understand the role HashiCorp employees
play in the various areas of the code base.
## Issues ## Issues
### Reporting an Issue ### Issue Reporting Checklists
* Make sure you test against the latest released version. It is possible We welcome issues of all kinds including feature requests, bug reports, and
we already fixed the bug you're experiencing. general questions. Below you'll find checklists with guidelines for well-formed
issues of each type.
* Provide steps to reproduce the issue, along with your `.tf` files, #### Bug Reports
with secrets removed, so we can try to reproduce it. Without this,
it makes it much harder to fix the issue.
* If you experienced a panic, please create a [gist](https://gist.github.com) - [ ] __Test against latest release__: Make sure you test against the latest
of the *entire* generated crash log for us to look at. Double check released version. It is possible we already fixed the bug you're experiencing.
no sensitive items were in the log.
* Respond as promptly as possible to any questions made by the Terraform - [ ] __Search for possible duplicate reports__: It's helpful to keep bug
team to your issue. Stale issues will be closed. reports consolidated to one thread, so do a quick search on existing bug
reports to check if anybody else has reported the same thing. You can scope
searches by the label "bug" to help narrow things down.
- [ ] __Include steps to reproduce__: Provide steps to reproduce the issue,
along with your `.tf` files, with secrets removed, so we can try to
reproduce it. Without this, it makes it much harder to fix the issue.
- [ ] __For panics, include `crash.log`__: If you experienced a panic, please
create a [gist](https://gist.github.com) of the *entire* generated crash log
for us to look at. Double check no sensitive items were in the log.
#### Feature Requests
- [ ] __Search for possible duplicate requests__: It's helpful to keep requests
consolidated to one thread, so do a quick search on existing requests to
check if anybody else has reported the same thing. You can scope searches by
the label "enhancement" to help narrow things down.
- [ ] __Include a use case description__: In addition to describing the
behavior of the feature you'd like to see added, it's helpful to also lay
out the reason why the feature would be important and how it would benefit
Terraform users.
#### Questions
- [ ] __Search for answers in Terraform documentation__: We're happy to answer
questions in GitHub Issues, but it helps reduce issue churn and maintainer
workload if you work to find answers to common questions in the
documentation. Often times Question issues result in documentation updates
to help future users, so if you don't find an answer, you can give us
pointers for where you'd expect to see it in the docs.
### Issue Lifecycle ### Issue Lifecycle
1. The issue is reported. 1. The issue is reported.
2. The issue is verified and categorized by a Terraform collaborator. 2. The issue is verified and categorized by a Terraform collaborator.
Categorization is done via tags. For example, bugs are marked as "bugs". Categorization is done via GitHub labels. We generally use a two-label
system of (1) issue/PR type, and (2) section of the codebase. Type is
usually "bug", "enhancement", "documentation", or "question", and section
can be any of the providers or provisioners or "core".
3. Unless it is critical, the issue is left for a period of time (sometimes 3. Unless it is critical, the issue is left for a period of time (sometimes
many weeks), giving outside contributors a chance to address the issue. many weeks), giving outside contributors a chance to address the issue.
@ -47,27 +115,401 @@ it raises the chances we can quickly merge or address your contributions.
the issue tracker clean. The issue is still indexed and available for the issue tracker clean. The issue is still indexed and available for
future viewers, or can be re-opened if necessary. future viewers, or can be re-opened if necessary.
## Setting up Go to work on Terraform ## Pull Requests
If you have never worked with Go before, you will have to complete the Thank you for contributing! Here you'll find information on what to include in
following steps in order to be able to compile and test Terraform (or your Pull Request to ensure it is accepted quickly.
use the Vagrantfile in this repo to stand up a dev VM).
1. Install Go. Make sure the Go version is at least Go 1.4. Terraform will not work with anything less than * For pull requests that follow the guidelines, we expect to be able to review
Go 1.4. On a Mac, you can `brew install go` to install Go 1.4. and merge very quickly.
* Pull requests that don't follow the guidelines will be annotated with what
they're missing. A community or core team member may be able to swing around
and help finish up the work, but these PRs will generally hang out much
longer until they can be completed and merged.
2. Set and export the `GOPATH` environment variable and update your `PATH`. ### Pull Request Lifecycle
For example, you can add to your `.bash_profile`.
``` 1. You are welcome to submit your pull request for commentary or review before
export GOPATH=$HOME/Documents/golang it is fully completed. Please prefix the title of your pull request with
export PATH=$PATH:$GOPATH/bin "[WIP]" to indicate this. It's also a good idea to include specific
questions or items you'd like feedback on.
2. Once you believe your pull request is ready to be merged, you can remove any
"[WIP]" prefix from the title and a core team member will review. Follow
[the checklists below](#checklists-for-contribution) to help ensure that
your contribution will be merged quickly.
3. One of Terraform's core team members will look over your contribution and
either provide comments letting you know if there is anything left to do. We
do our best to provide feedback in a timely manner, but it may take some
time for us to respond.
4. Once all outstanding comments and checklist items have been addressed, your
contribution will be merged! Merged PRs will be included in the next
Terraform release. The core team takes care of updating the CHANGELOG as
they merge.
5. In rare cases, we might decide that a PR should be closed. We'll make sure
to provide clear reasoning when this happens.
### Checklists for Contribution
There are several different kinds of contribution, each of which has its own
standards for a speedy review. The following sections describe guidelines for
each type of contribution.
#### Documentation Update
Because [Terraform's website][website] is in the same repo as the code, it's
easy for anybody to help us improve our docs.
- [ ] __Reasoning for docs update__: Including a quick explanation for why the
update needed is helpful for reviewers.
- [ ] __Relevant Terraform version__: Is this update worth deploying to the
site immediately, or is it referencing an upcoming version of Terraform and
should get pushed out with the next release?
#### Enhancement/Bugfix to a Resource
Working on existing resources is a great way to get started as a Terraform
contributor because you can work within existing code and tests to get a feel
for what to do.
- [ ] __Acceptance test coverage of new behavior__: Existing resources each
have a set of [acceptance tests][acctests] covering their functionality.
These tests should exercise all the behavior of the resource. Whether you are
adding something or fixing a bug, the idea is to have an acceptance test that
fails if your code were to be removed. Sometimes it is sufficient to
"enhance" an existing test by adding an assertion or tweaking the config
that is used, but often a new test is better to add. You can copy/paste an
existing test and follow the conventions you see there, modifying the test
to exercise the behavior of your code.
- [ ] __Documentation updates__: If your code makes any changes that need to
be documented, you should include those doc updates in the same PR. The
[Terraform website][website] source is in this repo and includes
instructions for getting a local copy of the site up and running if you'd
like to preview your changes.
- [ ] __Well-formed Code__: Do your best to follow existing conventions you
see in the codebase, and ensure your code is formatted with `go fmt`. (The
Travis CI build will fail if `go fmt` has not been run on incoming code.)
The PR reviewers can help out on this front, and may provide comments with
suggestions on how to improve the code.
#### New Resource
Implementing a new resource is a good way to learn more about how Terraform
interacts with upstream APIs. There are plenty of examples to draw from in the
existing resources, but you still get to implement something completely new.
- [ ] __Acceptance tests__: New resources should include acceptance tests
covering their behavior. See [Writing Acceptance
Tests](#writing-acceptance-tests) below for a detailed guide on how to
approach these.
- [ ] __Documentation__: Each resource gets a page in the Terraform
documentation. The [Terraform website][website] source is in this
repo and includes instructions for getting a local copy of the site up and
running if you'd like to preview your changes. For a resource, you'll want
to add a new file in the appropriate place and add a link to the sidebar for
that page.
- [ ] __Well-formed Code__: Do your best to follow existing conventions you
see in the codebase, and ensure your code is formatted with `go fmt`. (The
Travis CI build will fail if `go fmt` has not been run on incoming code.)
The PR reviewers can help out on this front, and may provide comments with
suggestions on how to improve the code.
#### New Provider
Implementing a new provider gives Terraform the ability to manage resources in
a whole new API. It's a larger undertaking, but brings major new functionality
into Terraform.
- [ ] __Acceptance tests__: Each provider should include an acceptance test
suite, with tests for each resource covering its behavior. See [Writing
Acceptance Tests](#writing-acceptance-tests) below
for a detailed guide on how to approach these.
- [ ] __Documentation__: Each provider has a section in the Terraform
documentation. The [Terraform website][website] source is in this repo and
includes instructions for getting a local copy of the site up and running if
you'd like to preview your changes. For a provider, you'll want to add a new
index file and individual pages for each resource.
- [ ] __Well-formed Code__: Do your best to follow existing conventions you
see in the codebase, and ensure your code is formatted with `go fmt`. (The
Travis CI build will fail if `go fmt` has not been run on incoming code.)
The PR reviewers can help out on this front, and may provide comments with
suggestions on how to improve the code.
#### Core Bugfix/Enhancement
We are always happy when any developer is interested in diving into Terraform's
core to help out! Here's what we look for in smaller Core PRs.
- [ ] __Unit tests__: Terraform's core is covered by hundreds of unit tests at
several different layers of abstraction. Generally the best place to start
is with a "Context Test". These are higher level test that interact
end-to-end with most of Terraform's core. They are divided into test files
for each major action (plan, apply, etc.). Getting a failing test is a great
way to prove out a bug report or a new enhancement. With a context test in
place, you can work on implementation and lower level unit tests. Lower
level tests are largely context dependent, but the Context Tests are almost
always part of core work.
- [ ] __Documentation updates__: If the core change involves anything that
needs to be reflected in our documentation, you can make those changes in
the same PR. The [Terraform website][website] source is in this repo and
includes instructions for getting a local copy of the site up and running if
you'd like to preview your changes.
- [ ] __Well-formed Code__: Do your best to follow existing conventions you
see in the codebase, and ensure your code is formatted with `go fmt`. (The
Travis CI build will fail if `go fmt` has not been run on incoming code.)
The PR reviewers can help out on this front, and may provide comments with
suggestions on how to improve the code.
#### Core Feature
If you're interested in taking on a larger core feature, it's a good idea to
get feedback early and often on the effort.
- [ ] __Early validation of idea and implementation plan__: Terraform's core
is complicated enough that there are often several ways to implement
something, each of which has different implications and tradeoffs. Working
through a plan of attack with the team before you dive into implementation
will help ensure that you're working in the right direction.
- [ ] __Unit tests__: Terraform's core is covered by hundreds of unit tests at
several different layers of abstraction. Generally the best place to start
is with a "Context Test". These are higher level test that interact
end-to-end with most of Terraform's core. They are divided into test files
for each major action (plan, apply, etc.). Getting a failing test is a great
way to prove out a bug report or a new enhancement. With a context test in
place, you can work on implementation and lower level unit tests. Lower
level tests are largely context dependent, but the Context Tests are almost
always part of core work.
- [ ] __Documentation updates__: If the core change involves anything that
needs to be reflected in our documentation, you can make those changes in
the same PR. The [Terraform website][website] source is in this repo and
includes instructions for getting a local copy of the site up and running if
you'd like to preview your changes.
- [ ] __Well-formed Code__: Do your best to follow existing conventions you
see in the codebase, and ensure your code is formatted with `go fmt`. (The
Travis CI build will fail if `go fmt` has not been run on incoming code.)
The PR reviewers can help out on this front, and may provide comments with
suggestions on how to improve the code.
### Writing Acceptance Tests
Terraform includes an acceptance test harness that does most of the repetitive
work involved in testing a resource.
#### Acceptance Tests Often Cost Money to Run
Because acceptance tests create real resources, they often cost money to run.
Because the resources only exist for a short period of time, the total amount
of money required is usually relatively small. Nevertheless, we don't want
financial limitations to be a barrier to contribution, so if you are unable to
pay to run acceptance tests for your contribution, simply mention this in your
pull request. We will happily accept "best effort" implementations of
acceptance tests and run them for you on our side. This might mean that your PR
takes a bit longer to merge, but it most definitely is not a blocker for
contributions.
#### Running an Acceptance Test
Acceptance tests can be run using the `testacc` target in the Terraform
`Makefile`. The individual tests to run can be controlled using a regular
expression. Prior to running the tests provider configuration details such as
access keys must be made available as environment variables.
For example, to run an acceptance test against the Azure Resource Manager
provider, the following environment variables must be set:
```sh
export ARM_SUBSCRIPTION_ID=...
export ARM_CLIENT_ID=...
export ARM_CLIENT_SECRET=...
export ARM_TENANT_ID=...
```
Tests can then be run by specifying the target provider and a regular
expression defining the tests to run:
```sh
$ make testacc TEST=./builtin/providers/azurerm TESTARGS='-run=TestAccAzureRMPublicIpStatic_update'
==> Checking that code complies with gofmt requirements...
go generate ./...
TF_ACC=1 go test ./builtin/providers/azurerm -v -run=TestAccAzureRMPublicIpStatic_update -timeout 120m
=== RUN TestAccAzureRMPublicIpStatic_update
--- PASS: TestAccAzureRMPublicIpStatic_update (177.48s)
PASS
ok github.com/hashicorp/terraform/builtin/providers/azurerm 177.504s
```
Entire resource test suites can be targeted by using the naming convention to
write the regular expression. For example, to run all tests of the
`azurerm_public_ip` resource rather than just the update test, you can start
testing like this:
```sh
$ make testacc TEST=./builtin/providers/azurerm TESTARGS='-run=TestAccAzureRMPublicIpStatic'
==> Checking that code complies with gofmt requirements...
go generate ./...
TF_ACC=1 go test ./builtin/providers/azurerm -v -run=TestAccAzureRMPublicIpStatic -timeout 120m
=== RUN TestAccAzureRMPublicIpStatic_basic
--- PASS: TestAccAzureRMPublicIpStatic_basic (137.74s)
=== RUN TestAccAzureRMPublicIpStatic_update
--- PASS: TestAccAzureRMPublicIpStatic_update (180.63s)
PASS
ok github.com/hashicorp/terraform/builtin/providers/azurerm 318.392s
```
#### Writing an Acceptance Test
Terraform has a framework for writing acceptance tests which minimises the
amount of boilerplate code necessary to use common testing patterns. The entry
point to the framework is the `resource.Test()` function.
Tests are divided into `TestStep`s. Each `TestStep` proceeds by applying some
Terraform configuration using the provider under test, and then verifying that
results are as expected by making assertions using the provider API. It is
common for a single test function to exercise both the creation of and updates
to a single resource. Most tests follow a similar structure.
1. Pre-flight checks are made to ensure that sufficient provider configuration
is available to be able to proceed - for example in an acceptance test
targeting AWS, `AWS_ACCESS_KEY_ID` and `AWS_SECRET_KEY` must be set prior
to running acceptance tests. This is common to all tests exercising a single
provider.
Each `TestStep` is defined in the call to `resource.Test()`. Most assertion
functions are defined out of band with the tests. This keeps the tests
readable, and allows reuse of assertion functions across different tests of the
same type of resource. The definition of a complete test looks like this:
```go
func TestAccAzureRMPublicIpStatic_update(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMPublicIpDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAzureRMVPublicIpStatic_basic,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMPublicIpExists("azurerm_public_ip.test"),
),
},
},
})
}
```
When executing the test, the following steps are taken for each `TestStep`:
1. The Terraform configuration required for the test is applied. This is
responsible for configuring the resource under test, and any dependencies it
may have. For example, to test the `azurerm_public_ip` resource, an
`azurerm_resource_group` is required. This results in configuration which
looks like this:
```hcl
resource "azurerm_resource_group" "test" {
name = "acceptanceTestResourceGroup1"
location = "West US"
}
resource "azurerm_public_ip" "test" {
name = "acceptanceTestPublicIp1"
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
public_ip_address_allocation = "static"
}
``` ```
3. [Follow the development guide](https://github.com/hashicorp/terraform#developing-terraform) 1. Assertions are run using the provider API. These use the provider API
directly rather than asserting against the resource state. For example, to
verify that the `azurerm_public_ip` described above was created
successfully, a test function like this is used:
5. Make your changes to the Terraform source, being sure to run the basic ```go
func testCheckAzureRMPublicIpExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
// Ensure we have enough information in state to look up in API
rs, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("Not found: %s", name)
}
publicIPName := rs.Primary.Attributes["name"]
resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"]
if !hasResourceGroup {
return fmt.Errorf("Bad: no resource group found in state for public ip: %s", publicIPName)
}
conn := testAccProvider.Meta().(*ArmClient).publicIPClient
resp, err := conn.Get(resourceGroup, publicIPName, "")
if err != nil {
return fmt.Errorf("Bad: Get on publicIPClient: %s", err)
}
if resp.StatusCode == http.StatusNotFound {
return fmt.Errorf("Bad: Public IP %q (resource group: %q) does not exist", name, resourceGroup)
}
return nil
}
}
```
Notice that the only information used from the Terraform state is the ID of
the resource - though in this case it is necessary to split the ID into
constituent parts in order to use the provider API. For computed properties,
we instead assert that the value saved in the Terraform state was the
expected value if possible. The testing framework provides helper functions
for several common types of check - for example:
```go
resource.TestCheckResourceAttr("azurerm_public_ip.test", "domain_name_label", "mylabel01"),
```
1. The resources created by the test are destroyed. This step happens
automatically, and is the equivalent of calling `terraform destroy`.
1. Assertions are made against the provider API to verify that the resources
have indeed been removed. If these checks fail, the test fails and reports
"dangling resources". The code to ensure that the `azurerm_public_ip` shown
above looks like this:
```go
func testCheckAzureRMPublicIpDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*ArmClient).publicIPClient
for _, rs := range s.RootModule().Resources {
if rs.Type != "azurerm_public_ip" {
continue
}
name := rs.Primary.Attributes["name"]
resourceGroup := rs.Primary.Attributes["resource_group_name"]
resp, err := conn.Get(resourceGroup, name, "")
if err != nil {
return nil
}
if resp.StatusCode != http.StatusNotFound {
return fmt.Errorf("Public IP still exists:\n%#v", resp.Properties)
}
}
return nil
}
```
These functions usually test only for the resource directly under test: we
skip the check that the `azurerm_resource_group` has been destroyed when
testing `azurerm_public_ip`, under the assumption that
`azurerm_resource_group` is tested independently in its own acceptance
tests. tests.
7. If everything works well and the tests pass, run `go fmt` on your code [website]: https://github.com/hashicorp/terraform/tree/master/website
before submitting a pull request. [acctests]: https://github.com/hashicorp/terraform#acceptance-tests
[ml]: https://groups.google.com/group/terraform-tool

View File

@ -4,12 +4,12 @@ VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf
default: test default: test
# bin generates the releaseable binaries for Terraform # bin generates the releaseable binaries for Terraform
bin: generate bin: fmtcheck generate
@sh -c "'$(CURDIR)/scripts/build.sh'" @sh -c "'$(CURDIR)/scripts/build.sh'"
# dev creates binaries for testing Terraform locally. These are put # dev creates binaries for testing Terraform locally. These are put
# into ./bin/ as well as $GOPATH/bin # into ./bin/ as well as $GOPATH/bin
dev: generate dev: fmtcheck generate
@TF_DEV=1 sh -c "'$(CURDIR)/scripts/build.sh'" @TF_DEV=1 sh -c "'$(CURDIR)/scripts/build.sh'"
quickdev: generate quickdev: generate
@ -18,35 +18,35 @@ quickdev: generate
# Shorthand for quickly building the core of Terraform. Note that some # Shorthand for quickly building the core of Terraform. Note that some
# changes will require a rebuild of everything, in which case the dev # changes will require a rebuild of everything, in which case the dev
# target should be used. # target should be used.
core-dev: generate core-dev: fmtcheck generate
go install github.com/hashicorp/terraform go install github.com/hashicorp/terraform
# Shorthand for quickly testing the core of Terraform (i.e. "not providers")
core-test: generate
@echo "Testing core packages..." && go test $(shell go list ./... | grep -v builtin)
# Shorthand for building and installing just one plugin for local testing. # Shorthand for building and installing just one plugin for local testing.
# Run as (for example): make plugin-dev PLUGIN=provider-aws # Run as (for example): make plugin-dev PLUGIN=provider-aws
plugin-dev: generate plugin-dev: fmtcheck generate
go install github.com/hashicorp/terraform/builtin/bins/$(PLUGIN) go install github.com/hashicorp/terraform/builtin/bins/$(PLUGIN)
mv $(GOPATH)/bin/$(PLUGIN) $(GOPATH)/bin/terraform-$(PLUGIN) mv $(GOPATH)/bin/$(PLUGIN) $(GOPATH)/bin/terraform-$(PLUGIN)
release: updatedeps
gox -build-toolchain
@$(MAKE) bin
# test runs the unit tests and vets the code # test runs the unit tests and vets the code
test: generate test: fmtcheck generate
TF_ACC= go test $(TEST) $(TESTARGS) -timeout=30s -parallel=4 TF_ACC= go test $(TEST) $(TESTARGS) -timeout=30s -parallel=4
@$(MAKE) vet @$(MAKE) vet
# testacc runs acceptance tests # testacc runs acceptance tests
testacc: generate testacc: fmtcheck generate
@if [ "$(TEST)" = "./..." ]; then \ @if [ "$(TEST)" = "./..." ]; then \
echo "ERROR: Set TEST to a specific package. For example,"; \ echo "ERROR: Set TEST to a specific package. For example,"; \
echo " make testacc TEST=./builtin/providers/aws"; \ echo " make testacc TEST=./builtin/providers/aws"; \
exit 1; \ exit 1; \
fi fi
TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 90m TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 120m
# testrace runs the race checker # testrace runs the race checker
testrace: generate testrace: fmtcheck generate
TF_ACC= go test -race $(TEST) $(TESTARGS) TF_ACC= go test -race $(TEST) $(TESTARGS)
# updatedeps installs all the dependencies that Terraform needs to run # updatedeps installs all the dependencies that Terraform needs to run
@ -88,4 +88,10 @@ vet:
generate: generate:
go generate ./... go generate ./...
.PHONY: bin default generate test updatedeps vet fmt:
gofmt -w .
fmtcheck:
@sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'"
.PHONY: bin default generate test updatedeps vet fmt fmtcheck

View File

@ -61,6 +61,18 @@ $ make test TEST=./terraform
... ...
``` ```
If you're working on a specific provider and only wish to rebuild that provider, you can use the `plugin-dev` target. For example, to build only the Azure provider:
```sh
$ make plugin-dev PLUGIN=provider-azure
```
If you're working on the core of Terraform, and only wish to rebuild that without rebuilding providers, you can use the `core-dev` target. It is important to note that some types of changes may require both core and providers to be rebuilt - for example work on the RPC interface. To build just the core of Terraform:
```sh
$ make core-dev
```
### Acceptance Tests ### Acceptance Tests
Terraform also has a comprehensive [acceptance test](http://en.wikipedia.org/wiki/Acceptance_testing) suite covering most of the major features of the built-in providers. Terraform also has a comprehensive [acceptance test](http://en.wikipedia.org/wiki/Acceptance_testing) suite covering most of the major features of the built-in providers.
@ -85,3 +97,41 @@ TF_ACC=1 go test ./builtin/providers/aws -v -run=Vpc -timeout 90m
The `TEST` variable is required, and you should specify the folder where the provider is. The `TESTARGS` variable is recommended to filter down to a specific resource to test, since testing all of them at once can take a very long time. The `TEST` variable is required, and you should specify the folder where the provider is. The `TESTARGS` variable is recommended to filter down to a specific resource to test, since testing all of them at once can take a very long time.
Acceptance tests typically require other environment variables to be set for things such as access keys. The provider itself should error early and tell you what to set, so it is not documented here. Acceptance tests typically require other environment variables to be set for things such as access keys. The provider itself should error early and tell you what to set, so it is not documented here.
### Cross Compilation and Building for Distribution
If you wish to cross-compile Terraform for another architecture, you can set the `XC_OS` and `XC_ARCH` environment variables to values representing the target operating system and architecture before calling `make`. The output is placed in the `pkg` subdirectory tree both expanded in a directory representing the OS/architecture combination and as a ZIP archive.
For example, to compile 64-bit Linux binaries on Mac OS X Linux, you can run:
```sh
$ XC_OS=linux XC_ARCH=amd64 make bin
...
$ file pkg/linux_amd64/terraform
terraform: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), statically linked, not stripped
```
`XC_OS` and `XC_ARCH` can be space separated lists representing different combinations of operating system and architecture. For example, to compile for both Linux and Mac OS X, targeting both 32- and 64-bit architectures, you can run:
```sh
$ XC_OS="linux darwin" XC_ARCH="386 amd64" make bin
...
$ tree ./pkg/ -P "terraform|*.zip"
./pkg/
├── darwin_386
│   └── terraform
├── darwin_386.zip
├── darwin_amd64
│   └── terraform
├── darwin_amd64.zip
├── linux_386
│   └── terraform
├── linux_386.zip
├── linux_amd64
│   └── terraform
└── linux_amd64.zip
4 directories, 8 files
```
_Note: Cross-compilation uses [gox](https://github.com/mitchellh/gox), which requires toolchains to be built with versions of Go prior to 1.5. In order to successfully cross-compile with older versions of Go, you will need to run `gox -build-toolchain` before running the commands detailed above._

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/azurerm"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
ProviderFunc: azurerm.Provider,
})
}

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/chef"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
ProviderFunc: chef.Provider,
})
}

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/mysql"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
ProviderFunc: mysql.Provider,
})
}

View File

@ -0,0 +1 @@
package main

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/postgresql"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
ProviderFunc: postgresql.Provider,
})
}

View File

@ -0,0 +1 @@
package main

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/vcd"
"github.com/hashicorp/terraform/plugin"
)
func main() {
plugin.Serve(&plugin.ServeOpts{
ProviderFunc: vcd.Provider,
})
}

View File

@ -3,14 +3,19 @@ package aws
import ( import (
"fmt" "fmt"
"log" "log"
"net/http"
"os"
"strings" "strings"
"time"
"github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-multierror" "github.com/hashicorp/go-multierror"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials" awsCredentials "github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/cloudformation"
@ -22,6 +27,7 @@ import (
"github.com/aws/aws-sdk-go/service/directoryservice" "github.com/aws/aws-sdk-go/service/directoryservice"
"github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/aws/aws-sdk-go/service/ecs" "github.com/aws/aws-sdk-go/service/ecs"
"github.com/aws/aws-sdk-go/service/efs" "github.com/aws/aws-sdk-go/service/efs"
"github.com/aws/aws-sdk-go/service/elasticache" "github.com/aws/aws-sdk-go/service/elasticache"
@ -34,6 +40,7 @@ import (
"github.com/aws/aws-sdk-go/service/lambda" "github.com/aws/aws-sdk-go/service/lambda"
"github.com/aws/aws-sdk-go/service/opsworks" "github.com/aws/aws-sdk-go/service/opsworks"
"github.com/aws/aws-sdk-go/service/rds" "github.com/aws/aws-sdk-go/service/rds"
"github.com/aws/aws-sdk-go/service/redshift"
"github.com/aws/aws-sdk-go/service/route53" "github.com/aws/aws-sdk-go/service/route53"
"github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/sns" "github.com/aws/aws-sdk-go/service/sns"
@ -41,11 +48,13 @@ import (
) )
type Config struct { type Config struct {
AccessKey string AccessKey string
SecretKey string SecretKey string
Token string CredsFilename string
Region string Profile string
MaxRetries int Token string
Region string
MaxRetries int
AllowedAccountIds []interface{} AllowedAccountIds []interface{}
ForbiddenAccountIds []interface{} ForbiddenAccountIds []interface{}
@ -62,6 +71,7 @@ type AWSClient struct {
dsconn *directoryservice.DirectoryService dsconn *directoryservice.DirectoryService
dynamodbconn *dynamodb.DynamoDB dynamodbconn *dynamodb.DynamoDB
ec2conn *ec2.EC2 ec2conn *ec2.EC2
ecrconn *ecr.ECR
ecsconn *ecs.ECS ecsconn *ecs.ECS
efsconn *efs.EFS efsconn *efs.EFS
elbconn *elb.ELB elbconn *elb.ELB
@ -70,6 +80,7 @@ type AWSClient struct {
s3conn *s3.S3 s3conn *s3.S3
sqsconn *sqs.SQS sqsconn *sqs.SQS
snsconn *sns.SNS snsconn *sns.SNS
redshiftconn *redshift.Redshift
r53conn *route53.Route53 r53conn *route53.Route53
region string region string
rdsconn *rds.RDS rdsconn *rds.RDS
@ -104,9 +115,14 @@ func (c *Config) Client() (interface{}, error) {
client.region = c.Region client.region = c.Region
log.Println("[INFO] Building AWS auth structure") log.Println("[INFO] Building AWS auth structure")
// We fetched all credential sources in Provider. If they are creds := getCreds(c.AccessKey, c.SecretKey, c.Token, c.Profile, c.CredsFilename)
// available, they'll already be in c. See Provider definition. // Call Get to check for credential provider. If nothing found, we'll get an
creds := credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token) // error, and we can present it nicely to the user
_, err = creds.Get()
if err != nil {
errs = append(errs, fmt.Errorf("Error loading credentials for AWS Provider: %s", err))
return nil, &multierror.Error{Errors: errs}
}
awsConfig := &aws.Config{ awsConfig := &aws.Config{
Credentials: creds, Credentials: creds,
Region: aws.String(c.Region), Region: aws.String(c.Region),
@ -118,7 +134,7 @@ func (c *Config) Client() (interface{}, error) {
sess := session.New(awsConfig) sess := session.New(awsConfig)
client.iamconn = iam.New(sess) client.iamconn = iam.New(sess)
err := c.ValidateCredentials(client.iamconn) err = c.ValidateCredentials(client.iamconn)
if err != nil { if err != nil {
errs = append(errs, err) errs = append(errs, err)
} }
@ -179,6 +195,9 @@ func (c *Config) Client() (interface{}, error) {
log.Println("[INFO] Initializing EC2 Connection") log.Println("[INFO] Initializing EC2 Connection")
client.ec2conn = ec2.New(sess) client.ec2conn = ec2.New(sess)
log.Println("[INFO] Initializing ECR Connection")
client.ecrconn = ecr.New(sess)
log.Println("[INFO] Initializing ECS Connection") log.Println("[INFO] Initializing ECS Connection")
client.ecsconn = ecs.New(sess) client.ecsconn = ecs.New(sess)
@ -223,6 +242,10 @@ func (c *Config) Client() (interface{}, error) {
log.Println("[INFO] Initializing CodeCommit SDK connection") log.Println("[INFO] Initializing CodeCommit SDK connection")
client.codecommitconn = codecommit.New(usEast1Sess) client.codecommitconn = codecommit.New(usEast1Sess)
log.Println("[INFO] Initializing Redshift SDK connection")
client.redshiftconn = redshift.New(sess)
} }
if len(errs) > 0 { if len(errs) > 0 {
@ -235,9 +258,9 @@ func (c *Config) Client() (interface{}, error) {
// ValidateRegion returns an error if the configured region is not a // ValidateRegion returns an error if the configured region is not a
// valid aws region and nil otherwise. // valid aws region and nil otherwise.
func (c *Config) ValidateRegion() error { func (c *Config) ValidateRegion() error {
var regions = [11]string{"us-east-1", "us-west-2", "us-west-1", "eu-west-1", var regions = [12]string{"us-east-1", "us-west-2", "us-west-1", "eu-west-1",
"eu-central-1", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "eu-central-1", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1",
"sa-east-1", "cn-north-1", "us-gov-west-1"} "ap-northeast-2", "sa-east-1", "cn-north-1", "us-gov-west-1"}
for _, valid := range regions { for _, valid := range regions {
if c.Region == valid { if c.Region == valid {
@ -316,3 +339,56 @@ func (c *Config) ValidateAccountId(iamconn *iam.IAM) error {
return nil return nil
} }
// This function is responsible for reading credentials from the
// environment in the case that they're not explicitly specified
// in the Terraform configuration.
func getCreds(key, secret, token, profile, credsfile string) *awsCredentials.Credentials {
// build a chain provider, lazy-evaulated by aws-sdk
providers := []awsCredentials.Provider{
&awsCredentials.StaticProvider{Value: awsCredentials.Value{
AccessKeyID: key,
SecretAccessKey: secret,
SessionToken: token,
}},
&awsCredentials.EnvProvider{},
&awsCredentials.SharedCredentialsProvider{
Filename: credsfile,
Profile: profile,
},
}
// We only look in the EC2 metadata API if we can connect
// to the metadata service within a reasonable amount of time
metadataURL := os.Getenv("AWS_METADATA_URL")
if metadataURL == "" {
metadataURL = "http://169.254.169.254:80/latest"
}
c := http.Client{
Timeout: 100 * time.Millisecond,
}
r, err := c.Get(metadataURL)
// Flag to determine if we should add the EC2Meta data provider. Default false
var useIAM bool
if err == nil {
// AWS will add a "Server: EC2ws" header value for the metadata request. We
// check the headers for this value to ensure something else didn't just
// happent to be listening on that IP:Port
if r.Header["Server"] != nil && strings.Contains(r.Header["Server"][0], "EC2") {
useIAM = true
}
}
if useIAM {
log.Printf("[DEBUG] EC2 Metadata service found, adding EC2 Role Credential Provider")
providers = append(providers, &ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.New(session.New(&aws.Config{
Endpoint: aws.String(metadataURL),
})),
})
} else {
log.Printf("[DEBUG] EC2 Metadata service not found, not adding EC2 Role Credential Provider")
}
return awsCredentials.NewChainCredentials(providers)
}

View File

@ -0,0 +1,376 @@
package aws
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"testing"
"github.com/aws/aws-sdk-go/aws/awserr"
)
func TestAWSConfig_shouldError(t *testing.T) {
resetEnv := unsetEnv(t)
defer resetEnv()
cfg := Config{}
c := getCreds(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename)
_, err := c.Get()
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() != "NoCredentialProviders" {
t.Fatalf("Expected NoCredentialProviders error")
}
}
if err == nil {
t.Fatalf("Expected an error with empty env, keys, and IAM in AWS Config")
}
}
func TestAWSConfig_shouldBeStatic(t *testing.T) {
simple := []struct {
Key, Secret, Token string
}{
{
Key: "test",
Secret: "secret",
}, {
Key: "test",
Secret: "test",
Token: "test",
},
}
for _, c := range simple {
cfg := Config{
AccessKey: c.Key,
SecretKey: c.Secret,
Token: c.Token,
}
creds := getCreds(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename)
if creds == nil {
t.Fatalf("Expected a static creds provider to be returned")
}
v, err := creds.Get()
if err != nil {
t.Fatalf("Error gettings creds: %s", err)
}
if v.AccessKeyID != c.Key {
t.Fatalf("AccessKeyID mismatch, expected: (%s), got (%s)", c.Key, v.AccessKeyID)
}
if v.SecretAccessKey != c.Secret {
t.Fatalf("SecretAccessKey mismatch, expected: (%s), got (%s)", c.Secret, v.SecretAccessKey)
}
if v.SessionToken != c.Token {
t.Fatalf("SessionToken mismatch, expected: (%s), got (%s)", c.Token, v.SessionToken)
}
}
}
// TestAWSConfig_shouldIAM is designed to test the scenario of running Terraform
// from an EC2 instance, without environment variables or manually supplied
// credentials.
func TestAWSConfig_shouldIAM(t *testing.T) {
// clear AWS_* environment variables
resetEnv := unsetEnv(t)
defer resetEnv()
// capture the test server's close method, to call after the test returns
ts := awsEnv(t)
defer ts()
// An empty config, no key supplied
cfg := Config{}
creds := getCreds(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename)
if creds == nil {
t.Fatalf("Expected a static creds provider to be returned")
}
v, err := creds.Get()
if err != nil {
t.Fatalf("Error gettings creds: %s", err)
}
if v.AccessKeyID != "somekey" {
t.Fatalf("AccessKeyID mismatch, expected: (somekey), got (%s)", v.AccessKeyID)
}
if v.SecretAccessKey != "somesecret" {
t.Fatalf("SecretAccessKey mismatch, expected: (somesecret), got (%s)", v.SecretAccessKey)
}
if v.SessionToken != "sometoken" {
t.Fatalf("SessionToken mismatch, expected: (sometoken), got (%s)", v.SessionToken)
}
}
// TestAWSConfig_shouldIAM is designed to test the scenario of running Terraform
// from an EC2 instance, without environment variables or manually supplied
// credentials.
func TestAWSConfig_shouldIgnoreIAM(t *testing.T) {
resetEnv := unsetEnv(t)
defer resetEnv()
// capture the test server's close method, to call after the test returns
ts := awsEnv(t)
defer ts()
simple := []struct {
Key, Secret, Token string
}{
{
Key: "test",
Secret: "secret",
}, {
Key: "test",
Secret: "test",
Token: "test",
},
}
for _, c := range simple {
cfg := Config{
AccessKey: c.Key,
SecretKey: c.Secret,
Token: c.Token,
}
creds := getCreds(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename)
if creds == nil {
t.Fatalf("Expected a static creds provider to be returned")
}
v, err := creds.Get()
if err != nil {
t.Fatalf("Error gettings creds: %s", err)
}
if v.AccessKeyID != c.Key {
t.Fatalf("AccessKeyID mismatch, expected: (%s), got (%s)", c.Key, v.AccessKeyID)
}
if v.SecretAccessKey != c.Secret {
t.Fatalf("SecretAccessKey mismatch, expected: (%s), got (%s)", c.Secret, v.SecretAccessKey)
}
if v.SessionToken != c.Token {
t.Fatalf("SessionToken mismatch, expected: (%s), got (%s)", c.Token, v.SessionToken)
}
}
}
var credentialsFileContents = `[myprofile]
aws_access_key_id = accesskey
aws_secret_access_key = secretkey
`
func TestAWSConfig_shouldBeShared(t *testing.T) {
file, err := ioutil.TempFile(os.TempDir(), "terraform_aws_cred")
if err != nil {
t.Fatalf("Error writing temporary credentials file: %s", err)
}
_, err = file.WriteString(credentialsFileContents)
if err != nil {
t.Fatalf("Error writing temporary credentials to file: %s", err)
}
err = file.Close()
if err != nil {
t.Fatalf("Error closing temporary credentials file: %s", err)
}
defer os.Remove(file.Name())
resetEnv := unsetEnv(t)
defer resetEnv()
if err := os.Setenv("AWS_PROFILE", "myprofile"); err != nil {
t.Fatalf("Error resetting env var AWS_PROFILE: %s", err)
}
if err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", file.Name()); err != nil {
t.Fatalf("Error resetting env var AWS_SHARED_CREDENTIALS_FILE: %s", err)
}
creds := getCreds("", "", "", "myprofile", file.Name())
if creds == nil {
t.Fatalf("Expected a provider chain to be returned")
}
v, err := creds.Get()
if err != nil {
t.Fatalf("Error gettings creds: %s", err)
}
if v.AccessKeyID != "accesskey" {
t.Fatalf("AccessKeyID mismatch, expected (%s), got (%s)", "accesskey", v.AccessKeyID)
}
if v.SecretAccessKey != "secretkey" {
t.Fatalf("SecretAccessKey mismatch, expected (%s), got (%s)", "accesskey", v.AccessKeyID)
}
}
func TestAWSConfig_shouldBeENV(t *testing.T) {
// need to set the environment variables to a dummy string, as we don't know
// what they may be at runtime without hardcoding here
s := "some_env"
resetEnv := setEnv(s, t)
defer resetEnv()
cfg := Config{}
creds := getCreds(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename)
if creds == nil {
t.Fatalf("Expected a static creds provider to be returned")
}
v, err := creds.Get()
if err != nil {
t.Fatalf("Error gettings creds: %s", err)
}
if v.AccessKeyID != s {
t.Fatalf("AccessKeyID mismatch, expected: (%s), got (%s)", s, v.AccessKeyID)
}
if v.SecretAccessKey != s {
t.Fatalf("SecretAccessKey mismatch, expected: (%s), got (%s)", s, v.SecretAccessKey)
}
if v.SessionToken != s {
t.Fatalf("SessionToken mismatch, expected: (%s), got (%s)", s, v.SessionToken)
}
}
// unsetEnv unsets enviornment variables for testing a "clean slate" with no
// credentials in the environment
func unsetEnv(t *testing.T) func() {
// Grab any existing AWS keys and preserve. In some tests we'll unset these, so
// we need to have them and restore them after
e := getEnv()
if err := os.Unsetenv("AWS_ACCESS_KEY_ID"); err != nil {
t.Fatalf("Error unsetting env var AWS_ACCESS_KEY_ID: %s", err)
}
if err := os.Unsetenv("AWS_SECRET_ACCESS_KEY"); err != nil {
t.Fatalf("Error unsetting env var AWS_SECRET_ACCESS_KEY: %s", err)
}
if err := os.Unsetenv("AWS_SESSION_TOKEN"); err != nil {
t.Fatalf("Error unsetting env var AWS_SESSION_TOKEN: %s", err)
}
if err := os.Unsetenv("AWS_PROFILE"); err != nil {
t.Fatalf("Error unsetting env var AWS_TOKEN: %s", err)
}
if err := os.Unsetenv("AWS_SHARED_CREDENTIALS_FILE"); err != nil {
t.Fatalf("Error unsetting env var AWS_SHARED_CREDENTIALS_FILE: %s", err)
}
return func() {
// re-set all the envs we unset above
if err := os.Setenv("AWS_ACCESS_KEY_ID", e.Key); err != nil {
t.Fatalf("Error resetting env var AWS_ACCESS_KEY_ID: %s", err)
}
if err := os.Setenv("AWS_SECRET_ACCESS_KEY", e.Secret); err != nil {
t.Fatalf("Error resetting env var AWS_SECRET_ACCESS_KEY: %s", err)
}
if err := os.Setenv("AWS_SESSION_TOKEN", e.Token); err != nil {
t.Fatalf("Error resetting env var AWS_SESSION_TOKEN: %s", err)
}
if err := os.Setenv("AWS_PROFILE", e.Profile); err != nil {
t.Fatalf("Error resetting env var AWS_PROFILE: %s", err)
}
if err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", e.CredsFilename); err != nil {
t.Fatalf("Error resetting env var AWS_SHARED_CREDENTIALS_FILE: %s", err)
}
}
}
// setEnv overwrites all AWS-related environment variables with the dummy
// value s and returns a restore function that puts the previously captured
// values back. Callers should defer the returned function so later tests
// see the original environment.
//
// Fixes: the restore closure previously reset AWS_SHARED_CREDENTIALS_FILE
// to the dummy value s instead of the saved e.CredsFilename, leaving the
// environment polluted after restore.
func setEnv(s string, t *testing.T) func() {
	e := getEnv()
	// Set all the envs to a dummy value
	if err := os.Setenv("AWS_ACCESS_KEY_ID", s); err != nil {
		t.Fatalf("Error setting env var AWS_ACCESS_KEY_ID: %s", err)
	}
	if err := os.Setenv("AWS_SECRET_ACCESS_KEY", s); err != nil {
		t.Fatalf("Error setting env var AWS_SECRET_ACCESS_KEY: %s", err)
	}
	if err := os.Setenv("AWS_SESSION_TOKEN", s); err != nil {
		t.Fatalf("Error setting env var AWS_SESSION_TOKEN: %s", err)
	}
	if err := os.Setenv("AWS_PROFILE", s); err != nil {
		t.Fatalf("Error setting env var AWS_PROFILE: %s", err)
	}
	if err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", s); err != nil {
		t.Fatalf("Error setting env var AWS_SHARED_CREDENTIALS_FILE: %s", err)
	}

	return func() {
		// Restore the original values captured above
		if err := os.Setenv("AWS_ACCESS_KEY_ID", e.Key); err != nil {
			t.Fatalf("Error resetting env var AWS_ACCESS_KEY_ID: %s", err)
		}
		if err := os.Setenv("AWS_SECRET_ACCESS_KEY", e.Secret); err != nil {
			t.Fatalf("Error resetting env var AWS_SECRET_ACCESS_KEY: %s", err)
		}
		if err := os.Setenv("AWS_SESSION_TOKEN", e.Token); err != nil {
			t.Fatalf("Error resetting env var AWS_SESSION_TOKEN: %s", err)
		}
		if err := os.Setenv("AWS_PROFILE", e.Profile); err != nil {
			t.Fatalf("Error resetting env var AWS_PROFILE: %s", err)
		}
		// Was previously reset to the dummy value s — restore the saved path.
		if err := os.Setenv("AWS_SHARED_CREDENTIALS_FILE", e.CredsFilename); err != nil {
			t.Fatalf("Error resetting env var AWS_SHARED_CREDENTIALS_FILE: %s", err)
		}
	}
}
// awsEnv establishes a httptest server to mock out the internal AWS Metadata
// service. IAM Credentials are retrieved by the EC2RoleProvider, which makes
// API calls to this internal URL. By replacing the server with a test server,
// we can simulate an AWS environment
func awsEnv(t *testing.T) func() {
routes := routes{}
if err := json.Unmarshal([]byte(aws_routes), &routes); err != nil {
t.Fatalf("Failed to unmarshal JSON in AWS ENV test: %s", err)
}
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain")
w.Header().Add("Server", "MockEC2")
for _, e := range routes.Endpoints {
if r.RequestURI == e.Uri {
fmt.Fprintln(w, e.Body)
}
}
}))
os.Setenv("AWS_METADATA_URL", ts.URL+"/latest")
return ts.Close
}
// getEnv captures the current values of the AWS-related environment
// variables so tests that unset or overwrite them can restore the
// original state afterwards.
//
// Fixes: Profile was previously read from "AWS_TOKEN" (a nonexistent
// variable), so the real AWS_PROFILE value was never preserved and could
// not be restored by the cleanup closures that use this snapshot.
func getEnv() *currentEnv {
	// Grab any existing AWS keys and preserve. In some tests we'll unset these, so
	// we need to have them and restore them after
	return &currentEnv{
		Key:           os.Getenv("AWS_ACCESS_KEY_ID"),
		Secret:        os.Getenv("AWS_SECRET_ACCESS_KEY"),
		Token:         os.Getenv("AWS_SESSION_TOKEN"),
		Profile:       os.Getenv("AWS_PROFILE"),
		CredsFilename: os.Getenv("AWS_SHARED_CREDENTIALS_FILE"),
	}
}
// currentEnv preserves a snapshot of the AWS-related environment
// variables (captured by getEnv) so tests can restore them after
// mutating the environment.
type currentEnv struct {
	Key, Secret, Token, Profile, CredsFilename string
}
// routes is the top-level shape of the mock metadata-service route table
// unmarshaled from the aws_routes JSON constant.
type routes struct {
	Endpoints []*endpoint `json:"endpoints"`
}
// endpoint pairs a request URI with the canned body the mock metadata
// server returns for it.
type endpoint struct {
	Uri  string `json:"uri"`
	Body string `json:"body"`
}
// aws_routes is the JSON route table served by the httptest server in
// awsEnv: the security-credentials listing plus the credentials document
// for the single role "test_role".
const aws_routes = `
{
  "endpoints": [
    {
      "uri": "/latest/meta-data/iam/security-credentials",
      "body": "test_role"
    },
    {
      "uri": "/latest/meta-data/iam/security-credentials/test_role",
      "body": "{\"Code\":\"Success\",\"LastUpdated\":\"2015-12-11T17:17:25Z\",\"Type\":\"AWS-HMAC\",\"AccessKeyId\":\"somekey\",\"SecretAccessKey\":\"somesecret\",\"Token\":\"sometoken\"}"
    }
  ]
}
`

View File

@ -8,10 +8,11 @@ var hostedZoneIDsMap = map[string]string{
"us-west-2": "Z3BJ6K6RIION7M", "us-west-2": "Z3BJ6K6RIION7M",
"us-west-1": "Z2F56UZL2M1ACD", "us-west-1": "Z2F56UZL2M1ACD",
"eu-west-1": "Z1BKCTXD74EZPE", "eu-west-1": "Z1BKCTXD74EZPE",
"central-1": "Z21DNDUVLTQW6Q", "eu-central-1": "Z21DNDUVLTQW6Q",
"ap-southeast-1": "Z3O0J2DXBE1FTB", "ap-southeast-1": "Z3O0J2DXBE1FTB",
"ap-southeast-2": "Z1WCIGYICN2BYD", "ap-southeast-2": "Z1WCIGYICN2BYD",
"ap-northeast-1": "Z2M4EHUR26P7ZW", "ap-northeast-1": "Z2M4EHUR26P7ZW",
"ap-northeast-2": "Z3W03O7B5YMIYP",
"sa-east-1": "Z7KQH4QJS55SO", "sa-east-1": "Z7KQH4QJS55SO",
"us-gov-west-1": "Z31GFT0UA1I2HV", "us-gov-west-1": "Z31GFT0UA1I2HV",
} }

View File

@ -69,6 +69,15 @@ func flattenNetworkAclEntries(list []*ec2.NetworkAclEntry) []map[string]interfac
} }
func protocolStrings(protocolIntegers map[string]int) map[int]string {
protocolStrings := make(map[int]string, len(protocolIntegers))
for k, v := range protocolIntegers {
protocolStrings[v] = k
}
return protocolStrings
}
func protocolIntegers() map[string]int { func protocolIntegers() map[string]int {
var protocolIntegers = make(map[string]int) var protocolIntegers = make(map[string]int)
protocolIntegers = map[string]int{ protocolIntegers = map[string]int{

View File

@ -1,19 +1,10 @@
package aws package aws
import ( import (
"net"
"sync"
"time"
"github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/mutexkv" "github.com/hashicorp/terraform/helper/mutexkv"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
) )
// Provider returns a terraform.ResourceProvider. // Provider returns a terraform.ResourceProvider.
@ -21,95 +12,41 @@ func Provider() terraform.ResourceProvider {
// TODO: Move the validation to this, requires conditional schemas // TODO: Move the validation to this, requires conditional schemas
// TODO: Move the configuration to this, requires validation // TODO: Move the configuration to this, requires validation
// These variables are closed within the `getCreds` function below.
// This function is responsible for reading credentials from the
// environment in the case that they're not explicitly specified
// in the Terraform configuration.
//
// By using the getCreds function here instead of making the default
// empty, we avoid asking for input on credentials if they're available
// in the environment.
var credVal credentials.Value
var credErr error
var once sync.Once
getCreds := func() {
// Build the list of providers to look for creds in
providers := []credentials.Provider{
&credentials.EnvProvider{},
&credentials.SharedCredentialsProvider{},
}
// We only look in the EC2 metadata API if we can connect
// to the metadata service within a reasonable amount of time
conn, err := net.DialTimeout("tcp", "169.254.169.254:80", 100*time.Millisecond)
if err == nil {
conn.Close()
providers = append(providers, &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())})
}
credVal, credErr = credentials.NewChainCredentials(providers).Get()
// If we didn't successfully find any credentials, just
// set the error to nil.
if credErr == credentials.ErrNoValidProvidersFoundInChain {
credErr = nil
}
}
// getCredDefault is a function used by DefaultFunc below to
// get the default value for various parts of the credentials.
// This function properly handles loading the credentials, checking
// for errors, etc.
getCredDefault := func(def interface{}, f func() string) (interface{}, error) {
once.Do(getCreds)
// If there was an error, that is always first
if credErr != nil {
return nil, credErr
}
// If the value is empty string, return nil (not set)
val := f()
if val == "" {
return def, nil
}
return val, nil
}
// The actual provider // The actual provider
return &schema.Provider{ return &schema.Provider{
Schema: map[string]*schema.Schema{ Schema: map[string]*schema.Schema{
"access_key": &schema.Schema{ "access_key": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Optional: true,
DefaultFunc: func() (interface{}, error) { Default: "",
return getCredDefault(nil, func() string {
return credVal.AccessKeyID
})
},
Description: descriptions["access_key"], Description: descriptions["access_key"],
}, },
"secret_key": &schema.Schema{ "secret_key": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Optional: true,
DefaultFunc: func() (interface{}, error) { Default: "",
return getCredDefault(nil, func() string {
return credVal.SecretAccessKey
})
},
Description: descriptions["secret_key"], Description: descriptions["secret_key"],
}, },
"profile": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["profile"],
},
"shared_credentials_file": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "",
Description: descriptions["shared_credentials_file"],
},
"token": &schema.Schema{ "token": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
Optional: true, Optional: true,
DefaultFunc: func() (interface{}, error) { Default: "",
return getCredDefault("", func() string {
return credVal.SessionToken
})
},
Description: descriptions["token"], Description: descriptions["token"],
}, },
@ -174,6 +111,7 @@ func Provider() terraform.ResourceProvider {
"aws_autoscaling_group": resourceAwsAutoscalingGroup(), "aws_autoscaling_group": resourceAwsAutoscalingGroup(),
"aws_autoscaling_notification": resourceAwsAutoscalingNotification(), "aws_autoscaling_notification": resourceAwsAutoscalingNotification(),
"aws_autoscaling_policy": resourceAwsAutoscalingPolicy(), "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(),
"aws_autoscaling_schedule": resourceAwsAutoscalingSchedule(),
"aws_cloudformation_stack": resourceAwsCloudFormationStack(), "aws_cloudformation_stack": resourceAwsCloudFormationStack(),
"aws_cloudtrail": resourceAwsCloudTrail(), "aws_cloudtrail": resourceAwsCloudTrail(),
"aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(), "aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(),
@ -190,6 +128,8 @@ func Provider() terraform.ResourceProvider {
"aws_directory_service_directory": resourceAwsDirectoryServiceDirectory(), "aws_directory_service_directory": resourceAwsDirectoryServiceDirectory(),
"aws_dynamodb_table": resourceAwsDynamoDbTable(), "aws_dynamodb_table": resourceAwsDynamoDbTable(),
"aws_ebs_volume": resourceAwsEbsVolume(), "aws_ebs_volume": resourceAwsEbsVolume(),
"aws_ecr_repository": resourceAwsEcrRepository(),
"aws_ecr_repository_policy": resourceAwsEcrRepositoryPolicy(),
"aws_ecs_cluster": resourceAwsEcsCluster(), "aws_ecs_cluster": resourceAwsEcsCluster(),
"aws_ecs_service": resourceAwsEcsService(), "aws_ecs_service": resourceAwsEcsService(),
"aws_ecs_task_definition": resourceAwsEcsTaskDefinition(), "aws_ecs_task_definition": resourceAwsEcsTaskDefinition(),
@ -223,10 +163,14 @@ func Provider() terraform.ResourceProvider {
"aws_kinesis_firehose_delivery_stream": resourceAwsKinesisFirehoseDeliveryStream(), "aws_kinesis_firehose_delivery_stream": resourceAwsKinesisFirehoseDeliveryStream(),
"aws_kinesis_stream": resourceAwsKinesisStream(), "aws_kinesis_stream": resourceAwsKinesisStream(),
"aws_lambda_function": resourceAwsLambdaFunction(), "aws_lambda_function": resourceAwsLambdaFunction(),
"aws_lambda_event_source_mapping": resourceAwsLambdaEventSourceMapping(),
"aws_lambda_alias": resourceAwsLambdaAlias(),
"aws_launch_configuration": resourceAwsLaunchConfiguration(), "aws_launch_configuration": resourceAwsLaunchConfiguration(),
"aws_lb_cookie_stickiness_policy": resourceAwsLBCookieStickinessPolicy(), "aws_lb_cookie_stickiness_policy": resourceAwsLBCookieStickinessPolicy(),
"aws_main_route_table_association": resourceAwsMainRouteTableAssociation(), "aws_main_route_table_association": resourceAwsMainRouteTableAssociation(),
"aws_nat_gateway": resourceAwsNatGateway(),
"aws_network_acl": resourceAwsNetworkAcl(), "aws_network_acl": resourceAwsNetworkAcl(),
"aws_network_acl_rule": resourceAwsNetworkAclRule(),
"aws_network_interface": resourceAwsNetworkInterface(), "aws_network_interface": resourceAwsNetworkInterface(),
"aws_opsworks_stack": resourceAwsOpsworksStack(), "aws_opsworks_stack": resourceAwsOpsworksStack(),
"aws_opsworks_java_app_layer": resourceAwsOpsworksJavaAppLayer(), "aws_opsworks_java_app_layer": resourceAwsOpsworksJavaAppLayer(),
@ -243,6 +187,10 @@ func Provider() terraform.ResourceProvider {
"aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(), "aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(),
"aws_rds_cluster": resourceAwsRDSCluster(), "aws_rds_cluster": resourceAwsRDSCluster(),
"aws_rds_cluster_instance": resourceAwsRDSClusterInstance(), "aws_rds_cluster_instance": resourceAwsRDSClusterInstance(),
"aws_redshift_cluster": resourceAwsRedshiftCluster(),
"aws_redshift_security_group": resourceAwsRedshiftSecurityGroup(),
"aws_redshift_parameter_group": resourceAwsRedshiftParameterGroup(),
"aws_redshift_subnet_group": resourceAwsRedshiftSubnetGroup(),
"aws_route53_delegation_set": resourceAwsRoute53DelegationSet(), "aws_route53_delegation_set": resourceAwsRoute53DelegationSet(),
"aws_route53_record": resourceAwsRoute53Record(), "aws_route53_record": resourceAwsRoute53Record(),
"aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(), "aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(),
@ -288,6 +236,12 @@ func init() {
"secret_key": "The secret key for API operations. You can retrieve this\n" + "secret_key": "The secret key for API operations. You can retrieve this\n" +
"from the 'Security & Credentials' section of the AWS console.", "from the 'Security & Credentials' section of the AWS console.",
"profile": "The profile for API operations. If not set, the default profile\n" +
"created with `aws configure` will be used.",
"shared_credentials_file": "The path to the shared credentials file. If not set\n" +
"this defaults to ~/.aws/credentials.",
"token": "session token. A session token is only required if you are\n" + "token": "session token. A session token is only required if you are\n" +
"using temporary security credentials.", "using temporary security credentials.",
@ -307,6 +261,8 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
config := Config{ config := Config{
AccessKey: d.Get("access_key").(string), AccessKey: d.Get("access_key").(string),
SecretKey: d.Get("secret_key").(string), SecretKey: d.Get("secret_key").(string),
Profile: d.Get("profile").(string),
CredsFilename: d.Get("shared_credentials_file").(string),
Token: d.Get("token").(string), Token: d.Get("token").(string),
Region: d.Get("region").(string), Region: d.Get("region").(string),
MaxRetries: d.Get("max_retries").(int), MaxRetries: d.Get("max_retries").(int),

View File

@ -169,9 +169,9 @@ resource "aws_subnet" "foo" {
resource "aws_instance" "test" { resource "aws_instance" "test" {
// This AMI has one block device mapping, so we expect to have // This AMI has one block device mapping, so we expect to have
// one snapshot in our created AMI. // one snapshot in our created AMI.
// This is an Amazon Linux HVM AMI. A public HVM AMI is required // This is an Ubuntu Linux HVM AMI. A public HVM AMI is required
// because paravirtual images cannot be copied between accounts. // because paravirtual images cannot be copied between accounts.
ami = "ami-5449393e" ami = "ami-0f8bce65"
instance_type = "t2.micro" instance_type = "t2.micro"
tags { tags {
Name = "terraform-acc-ami-copy-victim" Name = "terraform-acc-ami-copy-victim"

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/elb"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
@ -40,10 +41,31 @@ func TestAccAWSAppCookieStickinessPolicy_basic(t *testing.T) {
} }
func testAccCheckAppCookieStickinessPolicyDestroy(s *terraform.State) error { func testAccCheckAppCookieStickinessPolicyDestroy(s *terraform.State) error {
if len(s.RootModule().Resources) > 0 { conn := testAccProvider.Meta().(*AWSClient).elbconn
return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
}
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_app_cookie_stickiness_policy" {
continue
}
lbName, _, policyName := resourceAwsAppCookieStickinessPolicyParseId(
rs.Primary.ID)
out, err := conn.DescribeLoadBalancerPolicies(
&elb.DescribeLoadBalancerPoliciesInput{
LoadBalancerName: aws.String(lbName),
PolicyNames: []*string{aws.String(policyName)},
})
if err != nil {
if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "PolicyNotFound" || ec2err.Code() == "LoadBalancerNotFound") {
continue
}
return err
}
if len(out.PolicyDescriptions) > 0 {
return fmt.Errorf("Policy still exists")
}
}
return nil return nil
} }

View File

@ -51,8 +51,9 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
}, },
"min_elb_capacity": &schema.Schema{ "min_elb_capacity": &schema.Schema{
Type: schema.TypeInt, Type: schema.TypeInt,
Optional: true, Optional: true,
Deprecated: "Please use 'wait_for_elb_capacity' instead.",
}, },
"min_size": &schema.Schema{ "min_size": &schema.Schema{
@ -96,6 +97,11 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
Set: schema.HashString, Set: schema.HashString,
}, },
"placement_group": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"load_balancers": &schema.Schema{ "load_balancers": &schema.Schema{
Type: schema.TypeSet, Type: schema.TypeSet,
Optional: true, Optional: true,
@ -136,6 +142,11 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
}, },
}, },
"wait_for_elb_capacity": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
},
"tag": autoscalingTagsSchema(), "tag": autoscalingTagsSchema(),
}, },
} }
@ -185,6 +196,10 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{})
autoScalingGroupOpts.HealthCheckGracePeriod = aws.Int64(int64(v.(int))) autoScalingGroupOpts.HealthCheckGracePeriod = aws.Int64(int64(v.(int)))
} }
if v, ok := d.GetOk("placement_group"); ok {
autoScalingGroupOpts.PlacementGroup = aws.String(v.(string))
}
if v, ok := d.GetOk("load_balancers"); ok && v.(*schema.Set).Len() > 0 { if v, ok := d.GetOk("load_balancers"); ok && v.(*schema.Set).Len() > 0 {
autoScalingGroupOpts.LoadBalancerNames = expandStringList( autoScalingGroupOpts.LoadBalancerNames = expandStringList(
v.(*schema.Set).List()) v.(*schema.Set).List())
@ -232,6 +247,7 @@ func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) e
d.Set("load_balancers", g.LoadBalancerNames) d.Set("load_balancers", g.LoadBalancerNames)
d.Set("min_size", g.MinSize) d.Set("min_size", g.MinSize)
d.Set("max_size", g.MaxSize) d.Set("max_size", g.MaxSize)
d.Set("placement_group", g.PlacementGroup)
d.Set("name", g.AutoScalingGroupName) d.Set("name", g.AutoScalingGroupName)
d.Set("tag", g.Tags) d.Set("tag", g.Tags)
d.Set("vpc_zone_identifier", strings.Split(*g.VPCZoneIdentifier, ",")) d.Set("vpc_zone_identifier", strings.Split(*g.VPCZoneIdentifier, ","))
@ -242,6 +258,7 @@ func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) e
func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) error { func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).autoscalingconn conn := meta.(*AWSClient).autoscalingconn
shouldWaitForCapacity := false
opts := autoscaling.UpdateAutoScalingGroupInput{ opts := autoscaling.UpdateAutoScalingGroupInput{
AutoScalingGroupName: aws.String(d.Id()), AutoScalingGroupName: aws.String(d.Id()),
@ -253,6 +270,7 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{})
if d.HasChange("desired_capacity") { if d.HasChange("desired_capacity") {
opts.DesiredCapacity = aws.Int64(int64(d.Get("desired_capacity").(int))) opts.DesiredCapacity = aws.Int64(int64(d.Get("desired_capacity").(int)))
shouldWaitForCapacity = true
} }
if d.HasChange("launch_configuration") { if d.HasChange("launch_configuration") {
@ -261,6 +279,7 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{})
if d.HasChange("min_size") { if d.HasChange("min_size") {
opts.MinSize = aws.Int64(int64(d.Get("min_size").(int))) opts.MinSize = aws.Int64(int64(d.Get("min_size").(int)))
shouldWaitForCapacity = true
} }
if d.HasChange("max_size") { if d.HasChange("max_size") {
@ -286,6 +305,10 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{})
} }
} }
if d.HasChange("placement_group") {
opts.PlacementGroup = aws.String(d.Get("placement_group").(string))
}
if d.HasChange("termination_policies") { if d.HasChange("termination_policies") {
// If the termination policy is set to null, we need to explicitly set // If the termination policy is set to null, we need to explicitly set
// it back to "Default", or the API won't reset it for us. // it back to "Default", or the API won't reset it for us.
@ -353,6 +376,10 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{})
} }
} }
if shouldWaitForCapacity {
waitForASGCapacity(d, meta)
}
return resourceAwsAutoscalingGroupRead(d, meta) return resourceAwsAutoscalingGroupRead(d, meta)
} }
@ -490,7 +517,7 @@ func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{})
// ASG before continuing. Waits up to `waitForASGCapacityTimeout` for // ASG before continuing. Waits up to `waitForASGCapacityTimeout` for
// "desired_capacity", or "min_size" if desired capacity is not specified. // "desired_capacity", or "min_size" if desired capacity is not specified.
// //
// If "min_elb_capacity" is specified, will also wait for that number of // If "wait_for_elb_capacity" is specified, will also wait for that number of
// instances to show up InService in all attached ELBs. See "Waiting for // instances to show up InService in all attached ELBs. See "Waiting for
// Capacity" in docs for more discussion of the feature. // Capacity" in docs for more discussion of the feature.
func waitForASGCapacity(d *schema.ResourceData, meta interface{}) error { func waitForASGCapacity(d *schema.ResourceData, meta interface{}) error {
@ -498,7 +525,10 @@ func waitForASGCapacity(d *schema.ResourceData, meta interface{}) error {
if v := d.Get("desired_capacity").(int); v > 0 { if v := d.Get("desired_capacity").(int); v > 0 {
wantASG = v wantASG = v
} }
wantELB := d.Get("min_elb_capacity").(int) wantELB := d.Get("wait_for_elb_capacity").(int)
// Covers deprecated field support
wantELB += d.Get("min_elb_capacity").(int)
wait, err := time.ParseDuration(d.Get("wait_for_capacity_timeout").(string)) wait, err := time.ParseDuration(d.Get("wait_for_capacity_timeout").(string))
if err != nil { if err != nil {
@ -561,11 +591,13 @@ func waitForASGCapacity(d *schema.ResourceData, meta interface{}) error {
log.Printf("[DEBUG] %q Capacity: %d/%d ASG, %d/%d ELB", log.Printf("[DEBUG] %q Capacity: %d/%d ASG, %d/%d ELB",
d.Id(), haveASG, wantASG, haveELB, wantELB) d.Id(), haveASG, wantASG, haveELB, wantELB)
if haveASG >= wantASG && haveELB >= wantELB { if haveASG == wantASG && haveELB == wantELB {
return nil return nil
} }
return fmt.Errorf("Still need to wait for more healthy instances. This could mean instances failed to launch. See Scaling History for more information.") return fmt.Errorf(
"Still waiting for %q instances. Current/Desired: %d/%d ASG, %d/%d ELB",
d.Id(), haveASG, wantASG, haveELB, wantELB)
}) })
} }

View File

@ -161,7 +161,7 @@ func TestAccAWSAutoScalingGroup_WithLoadBalancer(t *testing.T) {
CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
Steps: []resource.TestStep{ Steps: []resource.TestStep{
resource.TestStep{ resource.TestStep{
Config: testAccAWSAutoScalingGroupConfigWithLoadBalancer, Config: fmt.Sprintf(testAccAWSAutoScalingGroupConfigWithLoadBalancer),
Check: resource.ComposeTestCheckFunc( Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
testAccCheckAWSAutoScalingGroupAttributesLoadBalancer(&group), testAccCheckAWSAutoScalingGroupAttributesLoadBalancer(&group),
@ -171,6 +171,26 @@ func TestAccAWSAutoScalingGroup_WithLoadBalancer(t *testing.T) {
}) })
} }
func TestAccAWSAutoScalingGroup_withPlacementGroup(t *testing.T) {
var group autoscaling.Group
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSAutoScalingGroupConfig_withPlacementGroup,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
resource.TestCheckResourceAttr(
"aws_autoscaling_group.bar", "placement_group", "test"),
),
},
},
})
}
func testAccCheckAWSAutoScalingGroupDestroy(s *terraform.State) error { func testAccCheckAWSAutoScalingGroupDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).autoscalingconn conn := testAccProvider.Meta().(*AWSClient).autoscalingconn
@ -260,8 +280,8 @@ func testAccCheckAWSAutoScalingGroupAttributes(group *autoscaling.Group) resourc
func testAccCheckAWSAutoScalingGroupAttributesLoadBalancer(group *autoscaling.Group) resource.TestCheckFunc { func testAccCheckAWSAutoScalingGroupAttributesLoadBalancer(group *autoscaling.Group) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {
if *group.LoadBalancerNames[0] != "foobar-terraform-test" { if len(group.LoadBalancerNames) != 1 {
return fmt.Errorf("Bad load_balancers: %#v", group.LoadBalancerNames[0]) return fmt.Errorf("Bad load_balancers: %v", group.LoadBalancerNames)
} }
return nil return nil
@ -401,6 +421,11 @@ resource "aws_launch_configuration" "foobar" {
instance_type = "t1.micro" instance_type = "t1.micro"
} }
resource "aws_placement_group" "test" {
name = "test"
strategy = "cluster"
}
resource "aws_autoscaling_group" "bar" { resource "aws_autoscaling_group" "bar" {
availability_zones = ["us-west-2a"] availability_zones = ["us-west-2a"]
name = "foobar3-terraform-test" name = "foobar3-terraform-test"
@ -488,7 +513,6 @@ resource "aws_security_group" "foo" {
} }
resource "aws_elb" "bar" { resource "aws_elb" "bar" {
name = "foobar-terraform-test"
subnets = ["${aws_subnet.foo.id}"] subnets = ["${aws_subnet.foo.id}"]
security_groups = ["${aws_security_group.foo.id}"] security_groups = ["${aws_security_group.foo.id}"]
@ -526,7 +550,7 @@ resource "aws_autoscaling_group" "bar" {
min_size = 2 min_size = 2
health_check_grace_period = 300 health_check_grace_period = 300
health_check_type = "ELB" health_check_type = "ELB"
min_elb_capacity = 2 wait_for_elb_capacity = 2
force_delete = true force_delete = true
launch_configuration = "${aws_launch_configuration.foobar.name}" launch_configuration = "${aws_launch_configuration.foobar.name}"
@ -628,3 +652,36 @@ resource "aws_autoscaling_group" "bar" {
launch_configuration = "${aws_launch_configuration.foobar.name}" launch_configuration = "${aws_launch_configuration.foobar.name}"
} }
` `
const testAccAWSAutoScalingGroupConfig_withPlacementGroup = `
resource "aws_launch_configuration" "foobar" {
image_id = "ami-21f78e11"
instance_type = "c3.large"
}
resource "aws_placement_group" "test" {
name = "test"
strategy = "cluster"
}
resource "aws_autoscaling_group" "bar" {
availability_zones = ["us-west-2a"]
name = "foobar3-terraform-test"
max_size = 1
min_size = 1
health_check_grace_period = 300
health_check_type = "ELB"
desired_capacity = 1
force_delete = true
termination_policies = ["OldestInstance","ClosestToNextInstanceHour"]
placement_group = "${aws_placement_group.test.name}"
launch_configuration = "${aws_launch_configuration.foobar.name}"
tag {
key = "Foo"
value = "foo-bar"
propagate_at_launch = true
}
}
`

View File

@ -33,6 +33,7 @@ func resourceAwsAutoscalingLifecycleHook() *schema.Resource {
"default_result": &schema.Schema{ "default_result": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
Optional: true, Optional: true,
Computed: true,
}, },
"heartbeat_timeout": &schema.Schema{ "heartbeat_timeout": &schema.Schema{
Type: schema.TypeInt, Type: schema.TypeInt,

View File

@ -32,11 +32,29 @@ func TestAccAWSAutoscalingLifecycleHook_basic(t *testing.T) {
}) })
} }
func TestAccAWSAutoscalingLifecycleHook_omitDefaultResult(t *testing.T) {
var hook autoscaling.LifecycleHook
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSAutoscalingLifecycleHookDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSAutoscalingLifecycleHookConfig_omitDefaultResult,
Check: resource.ComposeTestCheckFunc(
testAccCheckLifecycleHookExists("aws_autoscaling_lifecycle_hook.foobar", &hook),
resource.TestCheckResourceAttr("aws_autoscaling_lifecycle_hook.foobar", "default_result", "ABANDON"),
),
},
},
})
}
func testAccCheckLifecycleHookExists(n string, hook *autoscaling.LifecycleHook) resource.TestCheckFunc { func testAccCheckLifecycleHookExists(n string, hook *autoscaling.LifecycleHook) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n] rs, ok := s.RootModule().Resources[n]
if !ok { if !ok {
rs = rs
return fmt.Errorf("Not found: %s", n) return fmt.Errorf("Not found: %s", n)
} }
@ -166,3 +184,86 @@ EOF
role_arn = "${aws_iam_role.foobar.arn}" role_arn = "${aws_iam_role.foobar.arn}"
} }
`) `)
var testAccAWSAutoscalingLifecycleHookConfig_omitDefaultResult = fmt.Sprintf(`
resource "aws_launch_configuration" "foobar" {
name = "terraform-test-foobar5"
image_id = "ami-21f78e11"
instance_type = "t1.micro"
}
resource "aws_sqs_queue" "foobar" {
name = "foobar"
delay_seconds = 90
max_message_size = 2048
message_retention_seconds = 86400
receive_wait_time_seconds = 10
}
resource "aws_iam_role" "foobar" {
name = "foobar"
assume_role_policy = <<EOF
{
"Version" : "2012-10-17",
"Statement": [ {
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": [ "sts:AssumeRole" ]
} ]
}
EOF
}
resource "aws_iam_role_policy" "foobar" {
name = "foobar"
role = "${aws_iam_role.foobar.id}"
policy = <<EOF
{
"Version" : "2012-10-17",
"Statement": [ {
"Effect": "Allow",
"Action": [
"sqs:SendMessage",
"sqs:GetQueueUrl",
"sns:Publish"
],
"Resource": [
"${aws_sqs_queue.foobar.arn}"
]
} ]
}
EOF
}
resource "aws_autoscaling_group" "foobar" {
availability_zones = ["us-west-2a"]
name = "terraform-test-foobar5"
max_size = 5
min_size = 2
health_check_grace_period = 300
health_check_type = "ELB"
force_delete = true
termination_policies = ["OldestInstance"]
launch_configuration = "${aws_launch_configuration.foobar.name}"
tag {
key = "Foo"
value = "foo-bar"
propagate_at_launch = true
}
}
resource "aws_autoscaling_lifecycle_hook" "foobar" {
name = "foobar"
autoscaling_group_name = "${aws_autoscaling_group.foobar.name}"
heartbeat_timeout = 2000
lifecycle_transition = "autoscaling:EC2_INSTANCE_LAUNCHING"
notification_metadata = <<EOF
{
"foo": "bar"
}
EOF
notification_target_arn = "${aws_sqs_queue.foobar.arn}"
role_arn = "${aws_iam_role.foobar.arn}"
}
`)

View File

@ -144,7 +144,7 @@ func testAccCheckASGNDestroy(s *terraform.State) error {
} }
if len(resp.NotificationConfigurations) != 0 { if len(resp.NotificationConfigurations) != 0 {
fmt.Errorf("Error finding notification descriptions") return fmt.Errorf("Error finding notification descriptions")
} }
} }

View File

@ -34,7 +34,6 @@ func testAccCheckScalingPolicyExists(n string, policy *autoscaling.ScalingPolicy
return func(s *terraform.State) error { return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n] rs, ok := s.RootModule().Resources[n]
if !ok { if !ok {
rs = rs
return fmt.Errorf("Not found: %s", n) return fmt.Errorf("Not found: %s", n)
} }

View File

@ -0,0 +1,184 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/hashicorp/terraform/helper/schema"
)
// awsAutoscalingScheduleTimeLayout is the reference layout used to parse
// the start_time and end_time attributes (ISO 8601, UTC "Z" suffix).
const awsAutoscalingScheduleTimeLayout = "2006-01-02T15:04:05Z"
// resourceAwsAutoscalingSchedule returns the schema and CRUD handlers for
// the aws_autoscaling_schedule resource (a scheduled scaling action on an
// Auto Scaling group).
func resourceAwsAutoscalingSchedule() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsAutoscalingScheduleCreate,
		Read:   resourceAwsAutoscalingScheduleRead,
		// Update reuses Create: the underlying API call is an upsert.
		Update: resourceAwsAutoscalingScheduleCreate,
		Delete: resourceAwsAutoscalingScheduleDelete,

		Schema: map[string]*schema.Schema{
			// ARN of the scheduled action; assigned by AWS.
			"arn": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			// Renaming the action or changing its group forces replacement.
			"scheduled_action_name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"autoscaling_group_name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// start_time/end_time must match awsAutoscalingScheduleTimeLayout.
			"start_time": &schema.Schema{
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateASGScheduleTimestamp,
			},
			"end_time": &schema.Schema{
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateASGScheduleTimestamp,
			},
			"recurrence": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"min_size": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},
			"max_size": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},
			"desired_capacity": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},
		},
	}
}
// resourceAwsAutoscalingScheduleCreate creates (or updates, since the AWS
// API call overwrites an existing action of the same name) a scheduled
// action on the configured autoscaling group, then stores the action name
// as the resource ID and re-reads state.
func resourceAwsAutoscalingScheduleCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).autoscalingconn

	actionName := d.Get("scheduled_action_name").(string)
	input := &autoscaling.PutScheduledUpdateGroupActionInput{
		AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
		ScheduledActionName:  aws.String(actionName),
	}

	// Timestamps must parse against the shared schedule layout before being
	// forwarded to the API.
	if v, ok := d.GetOk("start_time"); ok {
		parsed, err := time.Parse(awsAutoscalingScheduleTimeLayout, v.(string))
		if err != nil {
			return fmt.Errorf("Error Parsing AWS Autoscaling Group Schedule Start Time: %s", err.Error())
		}
		input.StartTime = aws.Time(parsed)
	}
	if v, ok := d.GetOk("end_time"); ok {
		parsed, err := time.Parse(awsAutoscalingScheduleTimeLayout, v.(string))
		if err != nil {
			return fmt.Errorf("Error Parsing AWS Autoscaling Group Schedule End Time: %s", err.Error())
		}
		input.EndTime = aws.Time(parsed)
	}

	if v, ok := d.GetOk("recurrence"); ok {
		input.Recurrence = aws.String(v.(string))
	}
	if v, ok := d.GetOk("min_size"); ok {
		input.MinSize = aws.Int64(int64(v.(int)))
	}
	if v, ok := d.GetOk("max_size"); ok {
		input.MaxSize = aws.Int64(int64(v.(int)))
	}
	if v, ok := d.GetOk("desired_capacity"); ok {
		input.DesiredCapacity = aws.Int64(int64(v.(int)))
	}

	log.Printf("[INFO] Creating Autoscaling Scheduled Action: %s", actionName)
	if _, err := conn.PutScheduledUpdateGroupAction(input); err != nil {
		return fmt.Errorf("Error Creating Autoscaling Scheduled Action: %s", err.Error())
	}

	d.SetId(actionName)
	return resourceAwsAutoscalingScheduleRead(d, meta)
}
// resourceAwsAutoscalingScheduleRead refreshes state from the scheduled
// action fetched out of AWS. Start/end times are only written to state when
// the API returned them, formatted with the shared schedule layout.
func resourceAwsAutoscalingScheduleRead(d *schema.ResourceData, meta interface{}) error {
	action, err := resourceAwsASGScheduledActionRetrieve(d, meta)
	if err != nil {
		return err
	}

	d.Set("autoscaling_group_name", action.AutoScalingGroupName)
	d.Set("arn", action.ScheduledActionARN)
	d.Set("desired_capacity", action.DesiredCapacity)
	d.Set("min_size", action.MinSize)
	d.Set("max_size", action.MaxSize)
	d.Set("recurrence", action.Recurrence)

	if t := action.StartTime; t != nil {
		d.Set("start_time", t.Format(awsAutoscalingScheduleTimeLayout))
	}
	if t := action.EndTime; t != nil {
		d.Set("end_time", t.Format(awsAutoscalingScheduleTimeLayout))
	}

	return nil
}
// resourceAwsAutoscalingScheduleDelete removes the scheduled action named by
// the resource ID from its autoscaling group.
func resourceAwsAutoscalingScheduleDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).autoscalingconn

	log.Printf("[INFO] Deleting Autoscaling Scheduled Action: %s", d.Id())
	input := &autoscaling.DeleteScheduledActionInput{
		AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
		ScheduledActionName:  aws.String(d.Id()),
	}
	if _, err := conn.DeleteScheduledAction(input); err != nil {
		return fmt.Errorf("Error deleting Autoscaling Scheduled Action: %s", err.Error())
	}

	return nil
}
// resourceAwsASGScheduledActionRetrieve looks up the scheduled action whose
// name matches the resource ID on the configured autoscaling group. It
// requires exactly one match whose name equals the ID; anything else is
// treated as "not found".
func resourceAwsASGScheduledActionRetrieve(d *schema.ResourceData, meta interface{}) (*autoscaling.ScheduledUpdateGroupAction, error) {
	conn := meta.(*AWSClient).autoscalingconn

	input := &autoscaling.DescribeScheduledActionsInput{
		AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
		ScheduledActionNames: []*string{aws.String(d.Id())},
	}

	log.Printf("[INFO] Describing Autoscaling Scheduled Action: %+v", input)
	resp, err := conn.DescribeScheduledActions(input)
	if err != nil {
		return nil, fmt.Errorf("Error retrieving Autoscaling Scheduled Actions: %s", err)
	}

	found := resp.ScheduledUpdateGroupActions
	if len(found) != 1 || *found[0].ScheduledActionName != d.Id() {
		return nil, fmt.Errorf("Unable to find Autoscaling Scheduled Action: %#v", found)
	}

	return found[0], nil
}

View File

@ -0,0 +1,170 @@
package aws
import (
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccAWSAutoscalingSchedule_basic creates a scheduled action with explicit
// start/end times and verifies it exists in AWS.
func TestAccAWSAutoscalingSchedule_basic(t *testing.T) {
	var schedule autoscaling.ScheduledUpdateGroupAction

	step := resource.TestStep{
		Config: testAccAWSAutoscalingScheduleConfig,
		Check: resource.ComposeTestCheckFunc(
			testAccCheckScalingScheduleExists("aws_autoscaling_schedule.foobar", &schedule),
		),
	}

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSAutoscalingScheduleDestroy,
		Steps:        []resource.TestStep{step},
	})
}
// TestAccAWSAutoscalingSchedule_recurrence creates a cron-style recurring
// scheduled action and checks that the recurrence attribute round-trips.
func TestAccAWSAutoscalingSchedule_recurrence(t *testing.T) {
	var schedule autoscaling.ScheduledUpdateGroupAction

	step := resource.TestStep{
		Config: testAccAWSAutoscalingScheduleConfig_recurrence,
		Check: resource.ComposeTestCheckFunc(
			testAccCheckScalingScheduleExists("aws_autoscaling_schedule.foobar", &schedule),
			resource.TestCheckResourceAttr("aws_autoscaling_schedule.foobar", "recurrence", "0 8 * * *"),
		),
	}

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSAutoscalingScheduleDestroy,
		Steps:        []resource.TestStep{step},
	})
}
// testAccCheckScalingScheduleExists verifies that the scheduled action named
// by the state resource n still exists in AWS, and copies the retrieved
// action into policy so later checks can inspect it.
//
// Fix: the original never assigned through the policy out-parameter, so the
// caller's ScheduledUpdateGroupAction was always left zero-valued and the
// captured variable in the acceptance tests was useless.
func testAccCheckScalingScheduleExists(n string, policy *autoscaling.ScheduledUpdateGroupAction) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		autoScalingGroup, _ := rs.Primary.Attributes["autoscaling_group_name"]
		conn := testAccProvider.Meta().(*AWSClient).autoscalingconn
		params := &autoscaling.DescribeScheduledActionsInput{
			AutoScalingGroupName: aws.String(autoScalingGroup),
			ScheduledActionNames: []*string{aws.String(rs.Primary.ID)},
		}

		resp, err := conn.DescribeScheduledActions(params)
		if err != nil {
			return err
		}
		if len(resp.ScheduledUpdateGroupActions) == 0 {
			return fmt.Errorf("Scaling Schedule not found")
		}

		// Populate the out parameter with the action we found.
		*policy = *resp.ScheduledUpdateGroupActions[0]

		return nil
	}
}
// testAccCheckAWSAutoscalingScheduleDestroy ensures that no scheduled action
// tracked in state still exists in AWS after the resources are destroyed.
// Describe errors are ignored, matching the original best-effort behavior.
func testAccCheckAWSAutoscalingScheduleDestroy(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).autoscalingconn

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_autoscaling_schedule" {
			continue
		}

		asgName, _ := rs.Primary.Attributes["autoscaling_group_name"]
		resp, err := conn.DescribeScheduledActions(&autoscaling.DescribeScheduledActionsInput{
			AutoScalingGroupName: aws.String(asgName),
			ScheduledActionNames: []*string{aws.String(rs.Primary.ID)},
		})
		if err != nil {
			continue
		}

		if len(resp.ScheduledUpdateGroupActions) != 0 &&
			*resp.ScheduledUpdateGroupActions[0].ScheduledActionName == rs.Primary.ID {
			return fmt.Errorf("Scaling Schedule Still Exists: %s", rs.Primary.ID)
		}
	}

	return nil
}
// testAccAWSAutoscalingScheduleConfig is the acceptance-test configuration
// for a schedule with explicit start/end times. The original wrapped this
// literal in fmt.Sprintf with no format verbs and no arguments — a no-op
// that `go vet` flags; a plain raw string literal yields the same value.
var testAccAWSAutoscalingScheduleConfig = `
resource "aws_launch_configuration" "foobar" {
  name = "terraform-test-foobar5"
  image_id = "ami-21f78e11"
  instance_type = "t1.micro"
}

resource "aws_autoscaling_group" "foobar" {
  availability_zones = ["us-west-2a"]
  name = "terraform-test-foobar5"
  max_size = 1
  min_size = 1
  health_check_grace_period = 300
  health_check_type = "ELB"
  force_delete = true
  termination_policies = ["OldestInstance"]
  launch_configuration = "${aws_launch_configuration.foobar.name}"
  tag {
    key = "Foo"
    value = "foo-bar"
    propagate_at_launch = true
  }
}

resource "aws_autoscaling_schedule" "foobar" {
  scheduled_action_name = "foobar"
  min_size = 0
  max_size = 1
  desired_capacity = 0
  start_time = "2016-12-11T18:00:00Z"
  end_time = "2016-12-12T06:00:00Z"
  autoscaling_group_name = "${aws_autoscaling_group.foobar.name}"
}
`
// testAccAWSAutoscalingScheduleConfig_recurrence is the acceptance-test
// configuration for a cron-style recurring schedule. As with the basic
// config, the original fmt.Sprintf wrapper had no format verbs or
// arguments and has been dropped; the string value is unchanged.
var testAccAWSAutoscalingScheduleConfig_recurrence = `
resource "aws_launch_configuration" "foobar" {
  name = "terraform-test-foobar5"
  image_id = "ami-21f78e11"
  instance_type = "t1.micro"
}

resource "aws_autoscaling_group" "foobar" {
  availability_zones = ["us-west-2a"]
  name = "terraform-test-foobar5"
  max_size = 1
  min_size = 1
  health_check_grace_period = 300
  health_check_type = "ELB"
  force_delete = true
  termination_policies = ["OldestInstance"]
  launch_configuration = "${aws_launch_configuration.foobar.name}"
  tag {
    key = "Foo"
    value = "foo-bar"
    propagate_at_launch = true
  }
}

resource "aws_autoscaling_schedule" "foobar" {
  scheduled_action_name = "foobar"
  min_size = 0
  max_size = 1
  desired_capacity = 0
  recurrence = "0 8 * * *"
  autoscaling_group_name = "${aws_autoscaling_group.foobar.name}"
}
`

View File

@ -142,7 +142,7 @@ func resourceAwsCloudFormationStackCreate(d *schema.ResourceData, meta interface
wait := resource.StateChangeConf{ wait := resource.StateChangeConf{
Pending: []string{"CREATE_IN_PROGRESS", "ROLLBACK_IN_PROGRESS", "ROLLBACK_COMPLETE"}, Pending: []string{"CREATE_IN_PROGRESS", "ROLLBACK_IN_PROGRESS", "ROLLBACK_COMPLETE"},
Target: "CREATE_COMPLETE", Target: []string{"CREATE_COMPLETE"},
Timeout: 30 * time.Minute, Timeout: 30 * time.Minute,
MinTimeout: 5 * time.Second, MinTimeout: 5 * time.Second,
Refresh: func() (interface{}, string, error) { Refresh: func() (interface{}, string, error) {
@ -190,8 +190,18 @@ func resourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{}
stacks := resp.Stacks stacks := resp.Stacks
if len(stacks) < 1 { if len(stacks) < 1 {
log.Printf("[DEBUG] Removing CloudFormation stack %s as it's already gone", d.Id())
d.SetId("")
return nil return nil
} }
for _, s := range stacks {
if *s.StackId == d.Id() && *s.StackStatus == "DELETE_COMPLETE" {
log.Printf("[DEBUG] Removing CloudFormation stack %s"+
" as it has been already deleted", d.Id())
d.SetId("")
return nil
}
}
tInput := cloudformation.GetTemplateInput{ tInput := cloudformation.GetTemplateInput{
StackName: aws.String(stackName), StackName: aws.String(stackName),
@ -258,12 +268,14 @@ func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface
StackName: aws.String(d.Get("name").(string)), StackName: aws.String(d.Get("name").(string)),
} }
if d.HasChange("template_body") { // Either TemplateBody or TemplateURL are required for each change
input.TemplateBody = aws.String(normalizeJson(d.Get("template_body").(string))) if v, ok := d.GetOk("template_url"); ok {
input.TemplateURL = aws.String(v.(string))
} }
if d.HasChange("template_url") { if v, ok := d.GetOk("template_body"); ok && input.TemplateURL == nil {
input.TemplateURL = aws.String(d.Get("template_url").(string)) input.TemplateBody = aws.String(normalizeJson(v.(string)))
} }
if d.HasChange("capabilities") { if d.HasChange("capabilities") {
input.Capabilities = expandStringList(d.Get("capabilities").(*schema.Set).List()) input.Capabilities = expandStringList(d.Get("capabilities").(*schema.Set).List())
} }
@ -299,7 +311,7 @@ func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface
"UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
"UPDATE_ROLLBACK_COMPLETE", "UPDATE_ROLLBACK_COMPLETE",
}, },
Target: "UPDATE_COMPLETE", Target: []string{"UPDATE_COMPLETE"},
Timeout: 15 * time.Minute, Timeout: 15 * time.Minute,
MinTimeout: 5 * time.Second, MinTimeout: 5 * time.Second,
Refresh: func() (interface{}, string, error) { Refresh: func() (interface{}, string, error) {
@ -358,7 +370,7 @@ func resourceAwsCloudFormationStackDelete(d *schema.ResourceData, meta interface
wait := resource.StateChangeConf{ wait := resource.StateChangeConf{
Pending: []string{"DELETE_IN_PROGRESS", "ROLLBACK_IN_PROGRESS"}, Pending: []string{"DELETE_IN_PROGRESS", "ROLLBACK_IN_PROGRESS"},
Target: "DELETE_COMPLETE", Target: []string{"DELETE_COMPLETE"},
Timeout: 30 * time.Minute, Timeout: 30 * time.Minute,
MinTimeout: 5 * time.Second, MinTimeout: 5 * time.Second,
Refresh: func() (interface{}, string, error) { Refresh: func() (interface{}, string, error) {

View File

@ -2,7 +2,9 @@ package aws
import ( import (
"fmt" "fmt"
"math/rand"
"testing" "testing"
"time"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/cloudformation"
@ -64,11 +66,60 @@ func TestAccAWSCloudFormation_allAttributes(t *testing.T) {
}) })
} }
// Regression for https://github.com/hashicorp/terraform/issues/4332:
// apply a stack with an inline template body, then update its parameters.
func TestAccAWSCloudFormation_withParams(t *testing.T) {
	var stack cloudformation.Stack

	checkExists := resource.ComposeTestCheckFunc(
		testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with_params", &stack),
	)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSCloudFormationDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSCloudFormationConfig_withParams,
				Check:  checkExists,
			},
			resource.TestStep{
				Config: testAccAWSCloudFormationConfig_withParams_modified,
				Check:  checkExists,
			},
		},
	})
}
// Regression for https://github.com/hashicorp/terraform/issues/4534:
// apply a stack from a template URL, then update its parameters.
func TestAccAWSCloudFormation_withUrl_withParams(t *testing.T) {
	var stack cloudformation.Stack

	checkExists := resource.ComposeTestCheckFunc(
		testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params", &stack),
	)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSCloudFormationDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSCloudFormationConfig_templateUrl_withParams,
				Check:  checkExists,
			},
			resource.TestStep{
				Config: testAccAWSCloudFormationConfig_templateUrl_withParams_modified,
				Check:  checkExists,
			},
		},
	})
}
func testAccCheckCloudFormationStackExists(n string, stack *cloudformation.Stack) resource.TestCheckFunc { func testAccCheckCloudFormationStackExists(n string, stack *cloudformation.Stack) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n] rs, ok := s.RootModule().Resources[n]
if !ok { if !ok {
rs = rs
return fmt.Errorf("Not found: %s", n) return fmt.Errorf("Not found: %s", n)
} }
@ -102,9 +153,12 @@ func testAccCheckAWSCloudFormationDestroy(s *terraform.State) error {
resp, err := conn.DescribeStacks(&params) resp, err := conn.DescribeStacks(&params)
if err == nil { if err != nil {
if len(resp.Stacks) != 0 && return err
*resp.Stacks[0].StackId == rs.Primary.ID { }
for _, s := range resp.Stacks {
if *s.StackId == rs.Primary.ID && *s.StackStatus != "DELETE_COMPLETE" {
return fmt.Errorf("CloudFormation stack still exists: %q", rs.Primary.ID) return fmt.Errorf("CloudFormation stack still exists: %q", rs.Primary.ID)
} }
} }
@ -226,3 +280,97 @@ resource "aws_sns_topic" "cf-updates" {
name = "tf-cf-notifications" name = "tf-cf-notifications"
} }
` `
var tpl_testAccAWSCloudFormationConfig_withParams = `
resource "aws_cloudformation_stack" "with_params" {
name = "tf-stack-with-params"
parameters {
VpcCIDR = "%s"
}
template_body = <<STACK
{
"Parameters" : {
"VpcCIDR" : {
"Description" : "CIDR to be used for the VPC",
"Type" : "String"
}
},
"Resources" : {
"MyVPC": {
"Type" : "AWS::EC2::VPC",
"Properties" : {
"CidrBlock" : {"Ref": "VpcCIDR"},
"Tags" : [
{"Key": "Name", "Value": "Primary_CF_VPC"}
]
}
}
}
}
STACK
on_failure = "DELETE"
timeout_in_minutes = 1
}
`
var testAccAWSCloudFormationConfig_withParams = fmt.Sprintf(
tpl_testAccAWSCloudFormationConfig_withParams,
"10.0.0.0/16")
var testAccAWSCloudFormationConfig_withParams_modified = fmt.Sprintf(
tpl_testAccAWSCloudFormationConfig_withParams,
"12.0.0.0/16")
var tpl_testAccAWSCloudFormationConfig_templateUrl_withParams = `
resource "aws_s3_bucket" "b" {
bucket = "%s"
acl = "public-read"
policy = <<POLICY
{
"Version":"2008-10-17",
"Statement": [
{
"Sid":"AllowPublicRead",
"Effect":"Allow",
"Principal": {
"AWS": "*"
},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::%s/*"
}
]
}
POLICY
website {
index_document = "index.html"
error_document = "error.html"
}
}
resource "aws_s3_bucket_object" "object" {
bucket = "${aws_s3_bucket.b.id}"
key = "tf-cf-stack.json"
source = "test-fixtures/cloudformation-template.json"
}
resource "aws_cloudformation_stack" "with-url-and-params" {
name = "tf-stack-template-url-with-params"
parameters {
VpcCIDR = "%s"
}
template_url = "https://${aws_s3_bucket.b.id}.s3-us-west-2.amazonaws.com/${aws_s3_bucket_object.object.key}"
on_failure = "DELETE"
timeout_in_minutes = 1
}
`
var cfRandInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
var cfBucketName = "tf-stack-with-url-and-params-" + fmt.Sprintf("%d", cfRandInt)
var testAccAWSCloudFormationConfig_templateUrl_withParams = fmt.Sprintf(
tpl_testAccAWSCloudFormationConfig_templateUrl_withParams,
cfBucketName, cfBucketName, "11.0.0.0/16")
var testAccAWSCloudFormationConfig_templateUrl_withParams_modified = fmt.Sprintf(
tpl_testAccAWSCloudFormationConfig_templateUrl_withParams,
cfBucketName, cfBucketName, "13.0.0.0/16")

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/codecommit" "github.com/aws/aws-sdk-go/service/codecommit"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -86,15 +87,33 @@ func testAccCheckCodeCommitRepositoryExists(name string) resource.TestCheckFunc
} }
func testAccCheckCodeCommitRepositoryDestroy(s *terraform.State) error { func testAccCheckCodeCommitRepositoryDestroy(s *terraform.State) error {
if len(s.RootModule().Resources) > 0 { conn := testAccProvider.Meta().(*AWSClient).codecommitconn
return fmt.Errorf("Expected all resources to be gone, but found: %#v",
s.RootModule().Resources) for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_codecommit_repository" {
continue
}
_, err := conn.GetRepository(&codecommit.GetRepositoryInput{
RepositoryName: aws.String(rs.Primary.ID),
})
if ae, ok := err.(awserr.Error); ok && ae.Code() == "RepositoryDoesNotExistException" {
continue
}
if err == nil {
return fmt.Errorf("Repository still exists: %s", rs.Primary.ID)
}
return err
} }
return nil return nil
} }
const testAccCodeCommitRepository_basic = ` const testAccCodeCommitRepository_basic = `
provider "aws" {
region = "us-east-1"
}
resource "aws_codecommit_repository" "test" { resource "aws_codecommit_repository" "test" {
repository_name = "my_test_repository" repository_name = "my_test_repository"
description = "This is a test description" description = "This is a test description"
@ -102,6 +121,9 @@ resource "aws_codecommit_repository" "test" {
` `
const testAccCodeCommitRepository_withChanges = ` const testAccCodeCommitRepository_withChanges = `
provider "aws" {
region = "us-east-1"
}
resource "aws_codecommit_repository" "test" { resource "aws_codecommit_repository" "test" {
repository_name = "my_test_repository" repository_name = "my_test_repository"
description = "This is a test description - with changes" description = "This is a test description - with changes"

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/codedeploy" "github.com/aws/aws-sdk-go/service/codedeploy"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -40,17 +41,19 @@ func testAccCheckAWSCodeDeployAppDestroy(s *terraform.State) error {
continue continue
} }
resp, err := conn.GetApplication(&codedeploy.GetApplicationInput{ _, err := conn.GetApplication(&codedeploy.GetApplicationInput{
ApplicationName: aws.String(rs.Primary.ID), ApplicationName: aws.String(rs.Primary.Attributes["name"]),
}) })
if err == nil { if err != nil {
if resp.Application != nil { // Verify the error is what we want
return fmt.Errorf("CodeDeploy app still exists:\n%#v", *resp.Application.ApplicationId) if ae, ok := err.(awserr.Error); ok && ae.Code() == "ApplicationDoesNotExistException" {
continue
} }
return err
} }
return err return fmt.Errorf("still exists")
} }
return nil return nil

View File

@ -344,17 +344,6 @@ func onPremisesTagFiltersToMap(list []*codedeploy.TagFilter) []map[string]string
return result return result
} }
// validateTagFilters confirms the "value" component of a tag filter is one of
// AWS's three allowed types.
func validateTagFilters(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if value != "KEY_ONLY" && value != "VALUE_ONLY" && value != "KEY_AND_VALUE" {
errors = append(errors, fmt.Errorf(
"%q must be one of \"KEY_ONLY\", \"VALUE_ONLY\", or \"KEY_AND_VALUE\"", k))
}
return
}
func resourceAwsCodeDeployTagFilterHash(v interface{}) int { func resourceAwsCodeDeployTagFilterHash(v interface{}) int {
var buf bytes.Buffer var buf bytes.Buffer
m := v.(map[string]interface{}) m := v.(map[string]interface{})

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/codedeploy" "github.com/aws/aws-sdk-go/service/codedeploy"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -45,6 +46,10 @@ func testAccCheckAWSCodeDeployDeploymentGroupDestroy(s *terraform.State) error {
DeploymentGroupName: aws.String(rs.Primary.Attributes["deployment_group_name"]), DeploymentGroupName: aws.String(rs.Primary.Attributes["deployment_group_name"]),
}) })
if ae, ok := err.(awserr.Error); ok && ae.Code() == "ApplicationDoesNotExistException" {
continue
}
if err == nil { if err == nil {
if resp.DeploymentGroupInfo.DeploymentGroupName != nil { if resp.DeploymentGroupInfo.DeploymentGroupName != nil {
return fmt.Errorf("CodeDeploy deployment group still exists:\n%#v", *resp.DeploymentGroupInfo.DeploymentGroupName) return fmt.Errorf("CodeDeploy deployment group still exists:\n%#v", *resp.DeploymentGroupInfo.DeploymentGroupName)

View File

@ -68,7 +68,7 @@ func resourceAwsCustomerGatewayCreate(d *schema.ResourceData, meta interface{})
// Wait for the CustomerGateway to be available. // Wait for the CustomerGateway to be available.
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"pending"}, Pending: []string{"pending"},
Target: "available", Target: []string{"available"},
Refresh: customerGatewayRefreshFunc(conn, *customerGateway.CustomerGatewayId), Refresh: customerGatewayRefreshFunc(conn, *customerGateway.CustomerGatewayId),
Timeout: 10 * time.Minute, Timeout: 10 * time.Minute,
Delay: 10 * time.Second, Delay: 10 * time.Second,

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
@ -46,8 +47,33 @@ func TestAccAWSCustomerGateway_basic(t *testing.T) {
} }
func testAccCheckCustomerGatewayDestroy(s *terraform.State) error { func testAccCheckCustomerGatewayDestroy(s *terraform.State) error {
if len(s.RootModule().Resources) > 0 { conn := testAccProvider.Meta().(*AWSClient).ec2conn
return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_customer_gatewah" {
continue
}
gatewayFilter := &ec2.Filter{
Name: aws.String("customer-gateway-id"),
Values: []*string{aws.String(rs.Primary.ID)},
}
resp, err := conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{
Filters: []*ec2.Filter{gatewayFilter},
})
if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidCustomerGatewayID.NotFound" {
continue
}
if err == nil {
if len(resp.CustomerGateways) > 0 {
return fmt.Errorf("Customer gateway still exists: %v", resp.CustomerGateways)
}
}
return err
} }
return nil return nil

View File

@ -31,20 +31,27 @@ func resourceAwsDbInstance() *schema.Resource {
ForceNew: true, ForceNew: true,
}, },
"arn": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"username": &schema.Schema{ "username": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Optional: true,
Computed: true,
ForceNew: true, ForceNew: true,
}, },
"password": &schema.Schema{ "password": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Optional: true,
}, },
"engine": &schema.Schema{ "engine": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Optional: true,
Computed: true,
ForceNew: true, ForceNew: true,
StateFunc: func(v interface{}) string { StateFunc: func(v interface{}) string {
value := v.(string) value := v.(string)
@ -66,7 +73,8 @@ func resourceAwsDbInstance() *schema.Resource {
"allocated_storage": &schema.Schema{ "allocated_storage": &schema.Schema{
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Optional: true,
Computed: true,
}, },
"storage_type": &schema.Schema{ "storage_type": &schema.Schema{
@ -183,6 +191,12 @@ func resourceAwsDbInstance() *schema.Resource {
}, },
}, },
"skip_final_snapshot": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: true,
},
"copy_tags_to_snapshot": &schema.Schema{ "copy_tags_to_snapshot": &schema.Schema{
Type: schema.TypeBool, Type: schema.TypeBool,
Optional: true, Optional: true,
@ -285,9 +299,19 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
opts.AvailabilityZone = aws.String(attr.(string)) opts.AvailabilityZone = aws.String(attr.(string))
} }
if attr, ok := d.GetOk("storage_type"); ok {
opts.StorageType = aws.String(attr.(string))
}
if attr, ok := d.GetOk("publicly_accessible"); ok { if attr, ok := d.GetOk("publicly_accessible"); ok {
opts.PubliclyAccessible = aws.Bool(attr.(bool)) opts.PubliclyAccessible = aws.Bool(attr.(bool))
} }
if attr, ok := d.GetOk("db_subnet_group_name"); ok {
opts.DBSubnetGroupName = aws.String(attr.(string))
}
log.Printf("[DEBUG] DB Instance Replica create configuration: %#v", opts)
_, err := conn.CreateDBInstanceReadReplica(&opts) _, err := conn.CreateDBInstanceReadReplica(&opts)
if err != nil { if err != nil {
return fmt.Errorf("Error creating DB Instance: %s", err) return fmt.Errorf("Error creating DB Instance: %s", err)
@ -362,8 +386,9 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
"[INFO] Waiting for DB Instance to be available") "[INFO] Waiting for DB Instance to be available")
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"creating", "backing-up", "modifying"}, Pending: []string{"creating", "backing-up", "modifying", "resetting-master-credentials",
Target: "available", "maintenance", "renaming", "rebooting", "upgrading"},
Target: []string{"available"},
Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta),
Timeout: 40 * time.Minute, Timeout: 40 * time.Minute,
MinTimeout: 10 * time.Second, MinTimeout: 10 * time.Second,
@ -383,6 +408,18 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
} }
} else { } else {
if _, ok := d.GetOk("allocated_storage"); !ok {
return fmt.Errorf(`provider.aws: aws_db_instance: %s: "allocated_storage": required field is not set`, d.Get("name").(string))
}
if _, ok := d.GetOk("engine"); !ok {
return fmt.Errorf(`provider.aws: aws_db_instance: %s: "engine": required field is not set`, d.Get("name").(string))
}
if _, ok := d.GetOk("password"); !ok {
return fmt.Errorf(`provider.aws: aws_db_instance: %s: "password": required field is not set`, d.Get("name").(string))
}
if _, ok := d.GetOk("username"); !ok {
return fmt.Errorf(`provider.aws: aws_db_instance: %s: "username": required field is not set`, d.Get("name").(string))
}
opts := rds.CreateDBInstanceInput{ opts := rds.CreateDBInstanceInput{
AllocatedStorage: aws.Int64(int64(d.Get("allocated_storage").(int))), AllocatedStorage: aws.Int64(int64(d.Get("allocated_storage").(int))),
DBName: aws.String(d.Get("name").(string)), DBName: aws.String(d.Get("name").(string)),
@ -473,8 +510,9 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
"[INFO] Waiting for DB Instance to be available") "[INFO] Waiting for DB Instance to be available")
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"creating", "backing-up", "modifying"}, Pending: []string{"creating", "backing-up", "modifying", "resetting-master-credentials",
Target: "available", "maintenance", "renaming", "rebooting", "upgrading"},
Target: []string{"available"},
Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta),
Timeout: 40 * time.Minute, Timeout: 40 * time.Minute,
MinTimeout: 10 * time.Second, MinTimeout: 10 * time.Second,
@ -548,6 +586,7 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error {
} }
log.Printf("[DEBUG] Error building ARN for DB Instance, not setting Tags for DB %s", name) log.Printf("[DEBUG] Error building ARN for DB Instance, not setting Tags for DB %s", name)
} else { } else {
d.Set("arn", arn)
resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
ResourceName: aws.String(arn), ResourceName: aws.String(arn),
}) })
@ -603,11 +642,15 @@ func resourceAwsDbInstanceDelete(d *schema.ResourceData, meta interface{}) error
opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())} opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())}
finalSnapshot := d.Get("final_snapshot_identifier").(string) skipFinalSnapshot := d.Get("skip_final_snapshot").(bool)
if finalSnapshot == "" { opts.SkipFinalSnapshot = aws.Bool(skipFinalSnapshot)
opts.SkipFinalSnapshot = aws.Bool(true)
} else { if !skipFinalSnapshot {
opts.FinalDBSnapshotIdentifier = aws.String(finalSnapshot) if name, present := d.GetOk("final_snapshot_identifier"); present {
opts.FinalDBSnapshotIdentifier = aws.String(name.(string))
} else {
return fmt.Errorf("DB Instance FinalSnapshotIdentifier is required when a final snapshot is required")
}
} }
log.Printf("[DEBUG] DB Instance destroy configuration: %v", opts) log.Printf("[DEBUG] DB Instance destroy configuration: %v", opts)
@ -620,7 +663,7 @@ func resourceAwsDbInstanceDelete(d *schema.ResourceData, meta interface{}) error
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"creating", "backing-up", Pending: []string{"creating", "backing-up",
"modifying", "deleting", "available"}, "modifying", "deleting", "available"},
Target: "", Target: []string{},
Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta),
Timeout: 40 * time.Minute, Timeout: 40 * time.Minute,
MinTimeout: 10 * time.Second, MinTimeout: 10 * time.Second,

View File

@ -2,6 +2,8 @@ package aws
import ( import (
"fmt" "fmt"
"log"
"math/rand" "math/rand"
"testing" "testing"
"time" "time"
@ -67,6 +69,42 @@ func TestAccAWSDBInstanceReplica(t *testing.T) {
}) })
} }
func TestAccAWSDBInstanceSnapshot(t *testing.T) {
var snap rds.DBInstance
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSDBInstanceSnapshot,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccSnapshotInstanceConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSDBInstanceExists("aws_db_instance.snapshot", &snap),
),
},
},
})
}
func TestAccAWSDBInstanceNoSnapshot(t *testing.T) {
var nosnap rds.DBInstance
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSDBInstanceNoSnapshot,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccNoSnapshotInstanceConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSDBInstanceExists("aws_db_instance.no_snapshot", &nosnap),
),
},
},
})
}
func testAccCheckAWSDBInstanceDestroy(s *terraform.State) error { func testAccCheckAWSDBInstanceDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).rdsconn conn := testAccProvider.Meta().(*AWSClient).rdsconn
@ -82,6 +120,10 @@ func testAccCheckAWSDBInstanceDestroy(s *terraform.State) error {
DBInstanceIdentifier: aws.String(rs.Primary.ID), DBInstanceIdentifier: aws.String(rs.Primary.ID),
}) })
if ae, ok := err.(awserr.Error); ok && ae.Code() == "DBInstanceNotFound" {
continue
}
if err == nil { if err == nil {
if len(resp.DBInstances) != 0 && if len(resp.DBInstances) != 0 &&
*resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID { *resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID {
@ -132,6 +174,104 @@ func testAccCheckAWSDBInstanceReplicaAttributes(source, replica *rds.DBInstance)
} }
} }
func testAccCheckAWSDBInstanceSnapshot(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).rdsconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_db_instance" {
continue
}
var err error
resp, err := conn.DescribeDBInstances(
&rds.DescribeDBInstancesInput{
DBInstanceIdentifier: aws.String(rs.Primary.ID),
})
if err != nil {
newerr, _ := err.(awserr.Error)
if newerr.Code() != "DBInstanceNotFound" {
return err
}
} else {
if len(resp.DBInstances) != 0 &&
*resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID {
return fmt.Errorf("DB Instance still exists")
}
}
log.Printf("[INFO] Trying to locate the DBInstance Final Snapshot")
snapshot_identifier := "foobarbaz-test-terraform-final-snapshot-1"
_, snapErr := conn.DescribeDBSnapshots(
&rds.DescribeDBSnapshotsInput{
DBSnapshotIdentifier: aws.String(snapshot_identifier),
})
if snapErr != nil {
newerr, _ := snapErr.(awserr.Error)
if newerr.Code() == "DBSnapshotNotFound" {
return fmt.Errorf("Snapshot %s not found", snapshot_identifier)
}
} else {
log.Printf("[INFO] Deleting the Snapshot %s", snapshot_identifier)
_, snapDeleteErr := conn.DeleteDBSnapshot(
&rds.DeleteDBSnapshotInput{
DBSnapshotIdentifier: aws.String(snapshot_identifier),
})
if snapDeleteErr != nil {
return err
}
}
}
return nil
}
func testAccCheckAWSDBInstanceNoSnapshot(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).rdsconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_db_instance" {
continue
}
var err error
resp, err := conn.DescribeDBInstances(
&rds.DescribeDBInstancesInput{
DBInstanceIdentifier: aws.String(rs.Primary.ID),
})
if err != nil {
newerr, _ := err.(awserr.Error)
if newerr.Code() != "DBInstanceNotFound" {
return err
}
} else {
if len(resp.DBInstances) != 0 &&
*resp.DBInstances[0].DBInstanceIdentifier == rs.Primary.ID {
return fmt.Errorf("DB Instance still exists")
}
}
snapshot_identifier := "foobarbaz-test-terraform-final-snapshot-2"
_, snapErr := conn.DescribeDBSnapshots(
&rds.DescribeDBSnapshotsInput{
DBSnapshotIdentifier: aws.String(snapshot_identifier),
})
if snapErr != nil {
newerr, _ := snapErr.(awserr.Error)
if newerr.Code() != "DBSnapshotNotFound" {
return fmt.Errorf("Snapshot %s found and it shouldn't have been", snapshot_identifier)
}
}
}
return nil
}
func testAccCheckAWSDBInstanceExists(n string, v *rds.DBInstance) resource.TestCheckFunc { func testAccCheckAWSDBInstanceExists(n string, v *rds.DBInstance) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n] rs, ok := s.RootModule().Resources[n]
@ -226,3 +366,51 @@ func testAccReplicaInstanceConfig(val int) string {
} }
`, val, val) `, val, val)
} }
var testAccSnapshotInstanceConfig = `
provider "aws" {
region = "us-east-1"
}
resource "aws_db_instance" "snapshot" {
identifier = "foobarbaz-test-terraform-snapshot-1"
allocated_storage = 5
engine = "mysql"
engine_version = "5.6.21"
instance_class = "db.t1.micro"
name = "baz"
password = "barbarbarbar"
username = "foo"
security_group_names = ["default"]
backup_retention_period = 1
parameter_group_name = "default.mysql5.6"
skip_final_snapshot = false
final_snapshot_identifier = "foobarbaz-test-terraform-final-snapshot-1"
}
`
var testAccNoSnapshotInstanceConfig = `
provider "aws" {
region = "us-east-1"
}
resource "aws_db_instance" "no_snapshot" {
identifier = "foobarbaz-test-terraform-snapshot-2"
allocated_storage = 5
engine = "mysql"
engine_version = "5.6.21"
instance_class = "db.t1.micro"
name = "baz"
password = "barbarbarbar"
username = "foo"
security_group_names = ["default"]
backup_retention_period = 1
parameter_group_name = "default.mysql5.6"
skip_final_snapshot = true
final_snapshot_identifier = "foobarbaz-test-terraform-final-snapshot-2"
}
`

View File

@ -4,7 +4,6 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"log" "log"
"regexp"
"strings" "strings"
"time" "time"
@ -14,6 +13,7 @@ import (
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/rds" "github.com/aws/aws-sdk-go/service/rds"
) )
@ -24,6 +24,10 @@ func resourceAwsDbParameterGroup() *schema.Resource {
Update: resourceAwsDbParameterGroupUpdate, Update: resourceAwsDbParameterGroupUpdate,
Delete: resourceAwsDbParameterGroupDelete, Delete: resourceAwsDbParameterGroupDelete,
Schema: map[string]*schema.Schema{ Schema: map[string]*schema.Schema{
"arn": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"name": &schema.Schema{ "name": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
ForceNew: true, ForceNew: true,
@ -71,17 +75,21 @@ func resourceAwsDbParameterGroup() *schema.Resource {
}, },
Set: resourceAwsDbParameterHash, Set: resourceAwsDbParameterHash,
}, },
"tags": tagsSchema(),
}, },
} }
} }
func resourceAwsDbParameterGroupCreate(d *schema.ResourceData, meta interface{}) error { func resourceAwsDbParameterGroupCreate(d *schema.ResourceData, meta interface{}) error {
rdsconn := meta.(*AWSClient).rdsconn rdsconn := meta.(*AWSClient).rdsconn
tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
createOpts := rds.CreateDBParameterGroupInput{ createOpts := rds.CreateDBParameterGroupInput{
DBParameterGroupName: aws.String(d.Get("name").(string)), DBParameterGroupName: aws.String(d.Get("name").(string)),
DBParameterGroupFamily: aws.String(d.Get("family").(string)), DBParameterGroupFamily: aws.String(d.Get("family").(string)),
Description: aws.String(d.Get("description").(string)), Description: aws.String(d.Get("description").(string)),
Tags: tags,
} }
log.Printf("[DEBUG] Create DB Parameter Group: %#v", createOpts) log.Printf("[DEBUG] Create DB Parameter Group: %#v", createOpts)
@ -136,6 +144,31 @@ func resourceAwsDbParameterGroupRead(d *schema.ResourceData, meta interface{}) e
d.Set("parameter", flattenParameters(describeParametersResp.Parameters)) d.Set("parameter", flattenParameters(describeParametersResp.Parameters))
paramGroup := describeResp.DBParameterGroups[0]
arn, err := buildRDSPGARN(d, meta)
if err != nil {
name := "<empty>"
if paramGroup.DBParameterGroupName != nil && *paramGroup.DBParameterGroupName != "" {
name = *paramGroup.DBParameterGroupName
}
log.Printf("[DEBUG] Error building ARN for DB Parameter Group, not setting Tags for Param Group %s", name)
} else {
d.Set("arn", arn)
resp, err := rdsconn.ListTagsForResource(&rds.ListTagsForResourceInput{
ResourceName: aws.String(arn),
})
if err != nil {
log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
}
var dt []*rds.Tag
if len(resp.TagList) > 0 {
dt = resp.TagList
}
d.Set("tags", tagsToMapRDS(dt))
}
return nil return nil
} }
@ -177,6 +210,14 @@ func resourceAwsDbParameterGroupUpdate(d *schema.ResourceData, meta interface{})
d.SetPartial("parameter") d.SetPartial("parameter")
} }
if arn, err := buildRDSPGARN(d, meta); err == nil {
if err := setTagsRDS(rdsconn, d, arn); err != nil {
return err
} else {
d.SetPartial("tags")
}
}
d.Partial(false) d.Partial(false)
return resourceAwsDbParameterGroupRead(d, meta) return resourceAwsDbParameterGroupRead(d, meta)
@ -185,7 +226,7 @@ func resourceAwsDbParameterGroupUpdate(d *schema.ResourceData, meta interface{})
func resourceAwsDbParameterGroupDelete(d *schema.ResourceData, meta interface{}) error { func resourceAwsDbParameterGroupDelete(d *schema.ResourceData, meta interface{}) error {
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"pending"}, Pending: []string{"pending"},
Target: "destroyed", Target: []string{"destroyed"},
Refresh: resourceAwsDbParameterGroupDeleteRefreshFunc(d, meta), Refresh: resourceAwsDbParameterGroupDeleteRefreshFunc(d, meta),
Timeout: 3 * time.Minute, Timeout: 3 * time.Minute,
MinTimeout: 1 * time.Second, MinTimeout: 1 * time.Second,
@ -230,28 +271,16 @@ func resourceAwsDbParameterHash(v interface{}) int {
return hashcode.String(buf.String()) return hashcode.String(buf.String())
} }
func validateDbParamGroupName(v interface{}, k string) (ws []string, errors []error) { func buildRDSPGARN(d *schema.ResourceData, meta interface{}) (string, error) {
value := v.(string) iamconn := meta.(*AWSClient).iamconn
if !regexp.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { region := meta.(*AWSClient).region
errors = append(errors, fmt.Errorf( // An zero value GetUserInput{} defers to the currently logged in user
"only lowercase alphanumeric characters and hyphens allowed in %q", k)) resp, err := iamconn.GetUser(&iam.GetUserInput{})
if err != nil {
return "", err
} }
if !regexp.MustCompile(`^[a-z]`).MatchString(value) { userARN := *resp.User.Arn
errors = append(errors, fmt.Errorf( accountID := strings.Split(userARN, ":")[4]
"first character of %q must be a letter", k)) arn := fmt.Sprintf("arn:aws:rds:%s:%s:pg:%s", region, accountID, d.Id())
} return arn, nil
if regexp.MustCompile(`--`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q cannot contain two consecutive hyphens", k))
}
if regexp.MustCompile(`-$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q cannot end with a hyphen", k))
}
if len(value) > 255 {
errors = append(errors, fmt.Errorf(
"%q cannot be greater than 255 characters", k))
}
return
} }

View File

@ -44,6 +44,8 @@ func TestAccAWSDBParameterGroup_basic(t *testing.T) {
"aws_db_parameter_group.bar", "parameter.2478663599.name", "character_set_client"), "aws_db_parameter_group.bar", "parameter.2478663599.name", "character_set_client"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_db_parameter_group.bar", "parameter.2478663599.value", "utf8"), "aws_db_parameter_group.bar", "parameter.2478663599.value", "utf8"),
resource.TestCheckResourceAttr(
"aws_db_parameter_group.bar", "tags.#", "1"),
), ),
}, },
resource.TestStep{ resource.TestStep{
@ -77,6 +79,8 @@ func TestAccAWSDBParameterGroup_basic(t *testing.T) {
"aws_db_parameter_group.bar", "parameter.2478663599.name", "character_set_client"), "aws_db_parameter_group.bar", "parameter.2478663599.name", "character_set_client"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_db_parameter_group.bar", "parameter.2478663599.value", "utf8"), "aws_db_parameter_group.bar", "parameter.2478663599.value", "utf8"),
resource.TestCheckResourceAttr(
"aws_db_parameter_group.bar", "tags.#", "2"),
), ),
}, },
}, },
@ -174,7 +178,7 @@ func testAccCheckAWSDBParameterGroupDestroy(s *terraform.State) error {
if !ok { if !ok {
return err return err
} }
if newerr.Code() != "InvalidDBParameterGroup.NotFound" { if newerr.Code() != "DBParameterGroupNotFound" {
return err return err
} }
} }
@ -262,6 +266,9 @@ resource "aws_db_parameter_group" "bar" {
name = "character_set_results" name = "character_set_results"
value = "utf8" value = "utf8"
} }
tags {
foo = "bar"
}
} }
` `
@ -290,6 +297,10 @@ resource "aws_db_parameter_group" "bar" {
name = "collation_connection" name = "collation_connection"
value = "utf8_unicode_ci" value = "utf8_unicode_ci"
} }
tags {
foo = "bar"
baz = "foo"
}
} }
` `

View File

@ -4,10 +4,12 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"log" "log"
"strings"
"time" "time"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/rds" "github.com/aws/aws-sdk-go/service/rds"
"github.com/hashicorp/go-multierror" "github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/hashcode"
@ -19,9 +21,15 @@ func resourceAwsDbSecurityGroup() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
Create: resourceAwsDbSecurityGroupCreate, Create: resourceAwsDbSecurityGroupCreate,
Read: resourceAwsDbSecurityGroupRead, Read: resourceAwsDbSecurityGroupRead,
Update: resourceAwsDbSecurityGroupUpdate,
Delete: resourceAwsDbSecurityGroupDelete, Delete: resourceAwsDbSecurityGroupDelete,
Schema: map[string]*schema.Schema{ Schema: map[string]*schema.Schema{
"arn": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"name": &schema.Schema{ "name": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Required: true,
@ -66,12 +74,15 @@ func resourceAwsDbSecurityGroup() *schema.Resource {
}, },
Set: resourceAwsDbSecurityGroupIngressHash, Set: resourceAwsDbSecurityGroupIngressHash,
}, },
"tags": tagsSchema(),
}, },
} }
} }
func resourceAwsDbSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error { func resourceAwsDbSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).rdsconn conn := meta.(*AWSClient).rdsconn
tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
var err error var err error
var errs []error var errs []error
@ -79,6 +90,7 @@ func resourceAwsDbSecurityGroupCreate(d *schema.ResourceData, meta interface{})
opts := rds.CreateDBSecurityGroupInput{ opts := rds.CreateDBSecurityGroupInput{
DBSecurityGroupName: aws.String(d.Get("name").(string)), DBSecurityGroupName: aws.String(d.Get("name").(string)),
DBSecurityGroupDescription: aws.String(d.Get("description").(string)), DBSecurityGroupDescription: aws.String(d.Get("description").(string)),
Tags: tags,
} }
log.Printf("[DEBUG] DB Security Group create configuration: %#v", opts) log.Printf("[DEBUG] DB Security Group create configuration: %#v", opts)
@ -113,7 +125,7 @@ func resourceAwsDbSecurityGroupCreate(d *schema.ResourceData, meta interface{})
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"authorizing"}, Pending: []string{"authorizing"},
Target: "authorized", Target: []string{"authorized"},
Refresh: resourceAwsDbSecurityGroupStateRefreshFunc(d, meta), Refresh: resourceAwsDbSecurityGroupStateRefreshFunc(d, meta),
Timeout: 10 * time.Minute, Timeout: 10 * time.Minute,
} }
@ -157,9 +169,50 @@ func resourceAwsDbSecurityGroupRead(d *schema.ResourceData, meta interface{}) er
d.Set("ingress", rules) d.Set("ingress", rules)
conn := meta.(*AWSClient).rdsconn
arn, err := buildRDSSecurityGroupARN(d, meta)
if err != nil {
name := "<empty>"
if sg.DBSecurityGroupName != nil && *sg.DBSecurityGroupName != "" {
name = *sg.DBSecurityGroupName
}
log.Printf("[DEBUG] Error building ARN for DB Security Group, not setting Tags for DB Security Group %s", name)
} else {
d.Set("arn", arn)
resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
ResourceName: aws.String(arn),
})
if err != nil {
log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
}
var dt []*rds.Tag
if len(resp.TagList) > 0 {
dt = resp.TagList
}
d.Set("tags", tagsToMapRDS(dt))
}
return nil return nil
} }
func resourceAwsDbSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).rdsconn
d.Partial(true)
if arn, err := buildRDSSecurityGroupARN(d, meta); err == nil {
if err := setTagsRDS(conn, d, arn); err != nil {
return err
} else {
d.SetPartial("tags")
}
}
d.Partial(false)
return resourceAwsDbSecurityGroupRead(d, meta)
}
func resourceAwsDbSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { func resourceAwsDbSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).rdsconn conn := meta.(*AWSClient).rdsconn
@ -290,3 +343,17 @@ func resourceAwsDbSecurityGroupStateRefreshFunc(
return v, "authorized", nil return v, "authorized", nil
} }
} }
func buildRDSSecurityGroupARN(d *schema.ResourceData, meta interface{}) (string, error) {
iamconn := meta.(*AWSClient).iamconn
region := meta.(*AWSClient).region
// An zero value GetUserInput{} defers to the currently logged in user
resp, err := iamconn.GetUser(&iam.GetUserInput{})
if err != nil {
return "", err
}
userARN := *resp.User.Arn
accountID := strings.Split(userARN, ":")[4]
arn := fmt.Sprintf("arn:aws:rds:%s:%s:secgrp:%s", region, accountID, d.Id())
return arn, nil
}

View File

@ -32,6 +32,8 @@ func TestAccAWSDBSecurityGroup_basic(t *testing.T) {
"aws_db_security_group.bar", "ingress.3363517775.cidr", "10.0.0.1/24"), "aws_db_security_group.bar", "ingress.3363517775.cidr", "10.0.0.1/24"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_db_security_group.bar", "ingress.#", "1"), "aws_db_security_group.bar", "ingress.#", "1"),
resource.TestCheckResourceAttr(
"aws_db_security_group.bar", "tags.#", "1"),
), ),
}, },
}, },
@ -64,7 +66,7 @@ func testAccCheckAWSDBSecurityGroupDestroy(s *terraform.State) error {
if !ok { if !ok {
return err return err
} }
if newerr.Code() != "InvalidDBSecurityGroup.NotFound" { if newerr.Code() != "DBSecurityGroupNotFound" {
return err return err
} }
} }
@ -149,5 +151,9 @@ resource "aws_db_security_group" "bar" {
ingress { ingress {
cidr = "10.0.0.1/24" cidr = "10.0.0.1/24"
} }
tags {
foo = "bar"
}
} }
` `

View File

@ -23,26 +23,16 @@ func resourceAwsDbSubnetGroup() *schema.Resource {
Delete: resourceAwsDbSubnetGroupDelete, Delete: resourceAwsDbSubnetGroupDelete,
Schema: map[string]*schema.Schema{ Schema: map[string]*schema.Schema{
"name": &schema.Schema{ "arn": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
ForceNew: true, Computed: true,
Required: true, },
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string) "name": &schema.Schema{
if !regexp.MustCompile(`^[ .0-9A-Za-z-_]+$`).MatchString(value) { Type: schema.TypeString,
errors = append(errors, fmt.Errorf( ForceNew: true,
"only alphanumeric characters, hyphens, underscores, periods, and spaces allowed in %q", k)) Required: true,
} ValidateFunc: validateSubnetGroupName,
if len(value) > 255 {
errors = append(errors, fmt.Errorf(
"%q cannot be longer than 255 characters", k))
}
if regexp.MustCompile(`(?i)^default$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q is not allowed as %q", "Default", k))
}
return
},
}, },
"description": &schema.Schema{ "description": &schema.Schema{
@ -126,8 +116,8 @@ func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) erro
return fmt.Errorf("Unable to find DB Subnet Group: %#v", describeResp.DBSubnetGroups) return fmt.Errorf("Unable to find DB Subnet Group: %#v", describeResp.DBSubnetGroups)
} }
d.Set("name", d.Id()) d.Set("name", subnetGroup.DBSubnetGroupName)
d.Set("description", *subnetGroup.DBSubnetGroupDescription) d.Set("description", subnetGroup.DBSubnetGroupDescription)
subnets := make([]string, 0, len(subnetGroup.Subnets)) subnets := make([]string, 0, len(subnetGroup.Subnets))
for _, s := range subnetGroup.Subnets { for _, s := range subnetGroup.Subnets {
@ -142,6 +132,7 @@ func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) erro
if err != nil { if err != nil {
log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", *subnetGroup.DBSubnetGroupName) log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", *subnetGroup.DBSubnetGroupName)
} else { } else {
d.Set("arn", arn)
resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{ resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
ResourceName: aws.String(arn), ResourceName: aws.String(arn),
}) })
@ -198,7 +189,7 @@ func resourceAwsDbSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) er
func resourceAwsDbSubnetGroupDelete(d *schema.ResourceData, meta interface{}) error { func resourceAwsDbSubnetGroupDelete(d *schema.ResourceData, meta interface{}) error {
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"pending"}, Pending: []string{"pending"},
Target: "destroyed", Target: []string{"destroyed"},
Refresh: resourceAwsDbSubnetGroupDeleteRefreshFunc(d, meta), Refresh: resourceAwsDbSubnetGroupDeleteRefreshFunc(d, meta),
Timeout: 3 * time.Minute, Timeout: 3 * time.Minute,
MinTimeout: 1 * time.Second, MinTimeout: 1 * time.Second,
@ -246,3 +237,20 @@ func buildRDSsubgrpARN(d *schema.ResourceData, meta interface{}) (string, error)
arn := fmt.Sprintf("arn:aws:rds:%s:%s:subgrp:%s", region, accountID, d.Id()) arn := fmt.Sprintf("arn:aws:rds:%s:%s:subgrp:%s", region, accountID, d.Id())
return arn, nil return arn, nil
} }
func validateSubnetGroupName(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[ .0-9a-z-_]+$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"only alphanumeric characters, hyphens, underscores, periods, and spaces allowed in %q", k))
}
if len(value) > 255 {
errors = append(errors, fmt.Errorf(
"%q cannot be longer than 255 characters", k))
}
if regexp.MustCompile(`(?i)^default$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q is not allowed as %q", "Default", k))
}
return
}

View File

@ -66,6 +66,38 @@ func TestAccAWSDBSubnetGroup_withUndocumentedCharacters(t *testing.T) {
}) })
} }
func TestResourceAWSDBSubnetGroupNameValidation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
}{
{
Value: "tEsting",
ErrCount: 1,
},
{
Value: "testing?",
ErrCount: 1,
},
{
Value: "default",
ErrCount: 1,
},
{
Value: randomString(300),
ErrCount: 1,
},
}
for _, tc := range cases {
_, errors := validateSubnetGroupName(tc.Value, "aws_db_subnet_group")
if len(errors) != tc.ErrCount {
t.Fatalf("Expected the DB Subnet Group name to trigger a validation error")
}
}
}
func testAccCheckDBSubnetGroupDestroy(s *terraform.State) error { func testAccCheckDBSubnetGroupDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).rdsconn conn := testAccProvider.Meta().(*AWSClient).rdsconn
@ -149,7 +181,7 @@ resource "aws_subnet" "bar" {
} }
resource "aws_db_subnet_group" "foo" { resource "aws_db_subnet_group" "foo" {
name = "FOO" name = "foo"
description = "foo description" description = "foo description"
subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
tags { tags {

View File

@ -8,10 +8,17 @@ import (
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/directoryservice" "github.com/aws/aws-sdk-go/service/directoryservice"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
) )
var directoryCreationFuncs = map[string]func(*directoryservice.DirectoryService, *schema.ResourceData) (string, error){
"SimpleAD": createSimpleDirectoryService,
"MicrosoftAD": createActiveDirectoryService,
"ADConnector": createDirectoryConnector,
}
func resourceAwsDirectoryServiceDirectory() *schema.Resource { func resourceAwsDirectoryServiceDirectory() *schema.Resource {
return &schema.Resource{ return &schema.Resource{
Create: resourceAwsDirectoryServiceDirectoryCreate, Create: resourceAwsDirectoryServiceDirectoryCreate,
@ -32,7 +39,7 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource {
}, },
"size": &schema.Schema{ "size": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Optional: true,
ForceNew: true, ForceNew: true,
}, },
"alias": &schema.Schema{ "alias": &schema.Schema{
@ -54,7 +61,8 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource {
}, },
"vpc_settings": &schema.Schema{ "vpc_settings": &schema.Schema{
Type: schema.TypeList, Type: schema.TypeList,
Required: true, Optional: true,
ForceNew: true,
Elem: &schema.Resource{ Elem: &schema.Resource{
Schema: map[string]*schema.Schema{ Schema: map[string]*schema.Schema{
"subnet_ids": &schema.Schema{ "subnet_ids": &schema.Schema{
@ -72,6 +80,39 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource {
}, },
}, },
}, },
"connect_settings": &schema.Schema{
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"customer_username": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"customer_dns_ips": &schema.Schema{
Type: schema.TypeSet,
Required: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"subnet_ids": &schema.Schema{
Type: schema.TypeSet,
Required: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"vpc_id": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
},
},
},
"enable_sso": &schema.Schema{ "enable_sso": &schema.Schema{
Type: schema.TypeBool, Type: schema.TypeBool,
Optional: true, Optional: true,
@ -89,14 +130,120 @@ func resourceAwsDirectoryServiceDirectory() *schema.Resource {
}, },
"type": &schema.Schema{ "type": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Optional: true,
Default: "SimpleAD",
ForceNew: true,
ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
validTypes := []string{"SimpleAD", "MicrosoftAD"}
value := v.(string)
for validType, _ := range directoryCreationFuncs {
if validType == value {
return
}
}
es = append(es, fmt.Errorf("%q must be one of %q", k, validTypes))
return
},
}, },
}, },
} }
} }
func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta interface{}) error { func buildVpcSettings(d *schema.ResourceData) (vpcSettings *directoryservice.DirectoryVpcSettings, err error) {
dsconn := meta.(*AWSClient).dsconn if v, ok := d.GetOk("vpc_settings"); !ok {
return nil, fmt.Errorf("vpc_settings is required for type = SimpleAD or MicrosoftAD")
} else {
settings := v.([]interface{})
if len(settings) > 1 {
return nil, fmt.Errorf("Only a single vpc_settings block is expected")
} else if len(settings) == 1 {
s := settings[0].(map[string]interface{})
var subnetIds []*string
for _, id := range s["subnet_ids"].(*schema.Set).List() {
subnetIds = append(subnetIds, aws.String(id.(string)))
}
vpcSettings = &directoryservice.DirectoryVpcSettings{
SubnetIds: subnetIds,
VpcId: aws.String(s["vpc_id"].(string)),
}
}
}
return vpcSettings, nil
}
func buildConnectSettings(d *schema.ResourceData) (connectSettings *directoryservice.DirectoryConnectSettings, err error) {
if v, ok := d.GetOk("connect_settings"); !ok {
return nil, fmt.Errorf("connect_settings is required for type = ADConnector")
} else {
settings := v.([]interface{})
if len(settings) > 1 {
return nil, fmt.Errorf("Only a single connect_settings block is expected")
} else if len(settings) == 1 {
s := settings[0].(map[string]interface{})
var subnetIds []*string
for _, id := range s["subnet_ids"].(*schema.Set).List() {
subnetIds = append(subnetIds, aws.String(id.(string)))
}
var customerDnsIps []*string
for _, id := range s["customer_dns_ips"].(*schema.Set).List() {
customerDnsIps = append(customerDnsIps, aws.String(id.(string)))
}
connectSettings = &directoryservice.DirectoryConnectSettings{
CustomerDnsIps: customerDnsIps,
CustomerUserName: aws.String(s["customer_username"].(string)),
SubnetIds: subnetIds,
VpcId: aws.String(s["vpc_id"].(string)),
}
}
}
return connectSettings, nil
}
func createDirectoryConnector(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) (directoryId string, err error) {
if _, ok := d.GetOk("size"); !ok {
return "", fmt.Errorf("size is required for type = ADConnector")
}
input := directoryservice.ConnectDirectoryInput{
Name: aws.String(d.Get("name").(string)),
Password: aws.String(d.Get("password").(string)),
Size: aws.String(d.Get("size").(string)),
}
if v, ok := d.GetOk("description"); ok {
input.Description = aws.String(v.(string))
}
if v, ok := d.GetOk("short_name"); ok {
input.ShortName = aws.String(v.(string))
}
input.ConnectSettings, err = buildConnectSettings(d)
if err != nil {
return "", err
}
log.Printf("[DEBUG] Creating Directory Connector: %s", input)
out, err := dsconn.ConnectDirectory(&input)
if err != nil {
return "", err
}
log.Printf("[DEBUG] Directory Connector created: %s", out)
return *out.DirectoryId, nil
}
func createSimpleDirectoryService(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) (directoryId string, err error) {
if _, ok := d.GetOk("size"); !ok {
return "", fmt.Errorf("size is required for type = SimpleAD")
}
input := directoryservice.CreateDirectoryInput{ input := directoryservice.CreateDirectoryInput{
Name: aws.String(d.Get("name").(string)), Name: aws.String(d.Get("name").(string)),
@ -111,39 +258,70 @@ func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta int
input.ShortName = aws.String(v.(string)) input.ShortName = aws.String(v.(string))
} }
if v, ok := d.GetOk("vpc_settings"); ok { input.VpcSettings, err = buildVpcSettings(d)
settings := v.([]interface{}) if err != nil {
return "", err
if len(settings) > 1 {
return fmt.Errorf("Only a single vpc_settings block is expected")
} else if len(settings) == 1 {
s := settings[0].(map[string]interface{})
var subnetIds []*string
for _, id := range s["subnet_ids"].(*schema.Set).List() {
subnetIds = append(subnetIds, aws.String(id.(string)))
}
vpcSettings := directoryservice.DirectoryVpcSettings{
SubnetIds: subnetIds,
VpcId: aws.String(s["vpc_id"].(string)),
}
input.VpcSettings = &vpcSettings
}
} }
log.Printf("[DEBUG] Creating Directory Service: %s", input) log.Printf("[DEBUG] Creating Simple Directory Service: %s", input)
out, err := dsconn.CreateDirectory(&input) out, err := dsconn.CreateDirectory(&input)
if err != nil {
return "", err
}
log.Printf("[DEBUG] Simple Directory Service created: %s", out)
return *out.DirectoryId, nil
}
func createActiveDirectoryService(dsconn *directoryservice.DirectoryService, d *schema.ResourceData) (directoryId string, err error) {
input := directoryservice.CreateMicrosoftADInput{
Name: aws.String(d.Get("name").(string)),
Password: aws.String(d.Get("password").(string)),
}
if v, ok := d.GetOk("description"); ok {
input.Description = aws.String(v.(string))
}
if v, ok := d.GetOk("short_name"); ok {
input.ShortName = aws.String(v.(string))
}
input.VpcSettings, err = buildVpcSettings(d)
if err != nil {
return "", err
}
log.Printf("[DEBUG] Creating Microsoft AD Directory Service: %s", input)
out, err := dsconn.CreateMicrosoftAD(&input)
if err != nil {
return "", err
}
log.Printf("[DEBUG] Microsoft AD Directory Service created: %s", out)
return *out.DirectoryId, nil
}
func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta interface{}) error {
dsconn := meta.(*AWSClient).dsconn
creationFunc, ok := directoryCreationFuncs[d.Get("type").(string)]
if !ok {
// Shouldn't happen as this is validated above
return fmt.Errorf("Unsupported directory type: %s", d.Get("type"))
}
directoryId, err := creationFunc(dsconn, d)
if err != nil { if err != nil {
return err return err
} }
log.Printf("[DEBUG] Directory Service created: %s", out)
d.SetId(*out.DirectoryId) d.SetId(directoryId)
// Wait for creation // Wait for creation
log.Printf("[DEBUG] Waiting for DS (%q) to become available", d.Id()) log.Printf("[DEBUG] Waiting for DS (%q) to become available", d.Id())
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"Requested", "Creating", "Created"}, Pending: []string{"Requested", "Creating", "Created"},
Target: "Active", Target: []string{"Active"},
Refresh: func() (interface{}, string, error) { Refresh: func() (interface{}, string, error) {
resp, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{ resp, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{
DirectoryIds: []*string{aws.String(d.Id())}, DirectoryIds: []*string{aws.String(d.Id())},
@ -158,7 +336,7 @@ func resourceAwsDirectoryServiceDirectoryCreate(d *schema.ResourceData, meta int
d.Id(), *ds.Stage) d.Id(), *ds.Stage)
return ds, *ds.Stage, nil return ds, *ds.Stage, nil
}, },
Timeout: 10 * time.Minute, Timeout: 30 * time.Minute,
} }
if _, err := stateConf.WaitForState(); err != nil { if _, err := stateConf.WaitForState(); err != nil {
return fmt.Errorf( return fmt.Errorf(
@ -233,14 +411,22 @@ func resourceAwsDirectoryServiceDirectoryRead(d *schema.ResourceData, meta inter
if dir.Description != nil { if dir.Description != nil {
d.Set("description", *dir.Description) d.Set("description", *dir.Description)
} }
d.Set("dns_ip_addresses", schema.NewSet(schema.HashString, flattenStringList(dir.DnsIpAddrs)))
if *dir.Type == "ADConnector" {
d.Set("dns_ip_addresses", schema.NewSet(schema.HashString, flattenStringList(dir.ConnectSettings.ConnectIps)))
} else {
d.Set("dns_ip_addresses", schema.NewSet(schema.HashString, flattenStringList(dir.DnsIpAddrs)))
}
d.Set("name", *dir.Name) d.Set("name", *dir.Name)
if dir.ShortName != nil { if dir.ShortName != nil {
d.Set("short_name", *dir.ShortName) d.Set("short_name", *dir.ShortName)
} }
d.Set("size", *dir.Size) if dir.Size != nil {
d.Set("size", *dir.Size)
}
d.Set("type", *dir.Type) d.Set("type", *dir.Type)
d.Set("vpc_settings", flattenDSVpcSettings(dir.VpcSettings)) d.Set("vpc_settings", flattenDSVpcSettings(dir.VpcSettings))
d.Set("connect_settings", flattenDSConnectSettings(dir.DnsIpAddrs, dir.ConnectSettings))
d.Set("enable_sso", *dir.SsoEnabled) d.Set("enable_sso", *dir.SsoEnabled)
return nil return nil
@ -252,6 +438,8 @@ func resourceAwsDirectoryServiceDirectoryDelete(d *schema.ResourceData, meta int
input := directoryservice.DeleteDirectoryInput{ input := directoryservice.DeleteDirectoryInput{
DirectoryId: aws.String(d.Id()), DirectoryId: aws.String(d.Id()),
} }
log.Printf("[DEBUG] Delete Directory input: %s", input)
_, err := dsconn.DeleteDirectory(&input) _, err := dsconn.DeleteDirectory(&input)
if err != nil { if err != nil {
return err return err
@ -261,17 +449,20 @@ func resourceAwsDirectoryServiceDirectoryDelete(d *schema.ResourceData, meta int
log.Printf("[DEBUG] Waiting for DS (%q) to be deleted", d.Id()) log.Printf("[DEBUG] Waiting for DS (%q) to be deleted", d.Id())
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"Deleting"}, Pending: []string{"Deleting"},
Target: "", Target: []string{"Deleted"},
Refresh: func() (interface{}, string, error) { Refresh: func() (interface{}, string, error) {
resp, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{ resp, err := dsconn.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{
DirectoryIds: []*string{aws.String(d.Id())}, DirectoryIds: []*string{aws.String(d.Id())},
}) })
if err != nil { if err != nil {
return nil, "", err if dserr, ok := err.(awserr.Error); ok && dserr.Code() == "EntityDoesNotExistException" {
return 42, "Deleted", nil
}
return nil, "error", err
} }
if len(resp.DirectoryDescriptions) == 0 { if len(resp.DirectoryDescriptions) == 0 {
return nil, "", nil return 42, "Deleted", nil
} }
ds := resp.DirectoryDescriptions[0] ds := resp.DirectoryDescriptions[0]
@ -279,7 +470,7 @@ func resourceAwsDirectoryServiceDirectoryDelete(d *schema.ResourceData, meta int
d.Id(), *ds.Stage) d.Id(), *ds.Stage)
return ds, *ds.Stage, nil return ds, *ds.Stage, nil
}, },
Timeout: 10 * time.Minute, Timeout: 30 * time.Minute,
} }
if _, err := stateConf.WaitForState(); err != nil { if _, err := stateConf.WaitForState(); err != nil {
return fmt.Errorf( return fmt.Errorf(

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/directoryservice" "github.com/aws/aws-sdk-go/service/directoryservice"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
@ -27,6 +28,38 @@ func TestAccAWSDirectoryServiceDirectory_basic(t *testing.T) {
}) })
} }
// Acceptance test: provision a MicrosoftAD-type directory and confirm it is
// recorded in state.
func TestAccAWSDirectoryServiceDirectory_microsoft(t *testing.T) {
	step := resource.TestStep{
		Config: testAccDirectoryServiceDirectoryConfig_microsoft,
		Check:  testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar"),
	}
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckDirectoryServiceDirectoryDestroy,
		Steps:        []resource.TestStep{step},
	})
}
// Acceptance test: provision an ADConnector directory (backed by a SimpleAD
// directory from the same config) and confirm it is recorded in state.
func TestAccAWSDirectoryServiceDirectory_connector(t *testing.T) {
	step := resource.TestStep{
		Config: testAccDirectoryServiceDirectoryConfig_connector,
		Check:  testAccCheckServiceDirectoryExists("aws_directory_service_directory.connector"),
	}
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckDirectoryServiceDirectoryDestroy,
		Steps:        []resource.TestStep{step},
	})
}
func TestAccAWSDirectoryServiceDirectory_withAliasAndSso(t *testing.T) { func TestAccAWSDirectoryServiceDirectory_withAliasAndSso(t *testing.T) {
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, PreCheck: func() { testAccPreCheck(t) },
@ -65,12 +98,33 @@ func TestAccAWSDirectoryServiceDirectory_withAliasAndSso(t *testing.T) {
} }
func testAccCheckDirectoryServiceDirectoryDestroy(s *terraform.State) error { func testAccCheckDirectoryServiceDirectoryDestroy(s *terraform.State) error {
if len(s.RootModule().Resources) > 0 { dsconn := testAccProvider.Meta().(*AWSClient).dsconn
return fmt.Errorf("Expected all resources to be gone, but found: %#v",
s.RootModule().Resources) for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_directory_service_directory" {
continue
}
input := directoryservice.DescribeDirectoriesInput{
DirectoryIds: []*string{aws.String(rs.Primary.ID)},
}
out, err := dsconn.DescribeDirectories(&input)
if err != nil {
// EntityDoesNotExistException means it's gone, this is good
if dserr, ok := err.(awserr.Error); ok && dserr.Code() == "EntityDoesNotExistException" {
return nil
}
return err
}
if out != nil && len(out.DirectoryDescriptions) > 0 {
return fmt.Errorf("Expected AWS Directory Service Directory to be gone, but was still found")
}
return nil
} }
return nil return fmt.Errorf("Default error in Service Directory Test")
} }
func testAccCheckServiceDirectoryExists(name string) resource.TestCheckFunc { func testAccCheckServiceDirectoryExists(name string) resource.TestCheckFunc {
@ -192,6 +246,76 @@ resource "aws_subnet" "bar" {
} }
` `
// Config for TestAccAWSDirectoryServiceDirectory_connector: a SimpleAD
// directory plus an ADConnector pointing at it, sharing one VPC with subnets
// in two availability zones (Directory Service requires two subnets).
const testAccDirectoryServiceDirectoryConfig_connector = `
resource "aws_directory_service_directory" "bar" {
  name = "corp.notexample.com"
  password = "SuperSecretPassw0rd"
  size = "Small"
  vpc_settings {
    vpc_id = "${aws_vpc.main.id}"
    subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
  }
}
resource "aws_directory_service_directory" "connector" {
  name = "corp.notexample.com"
  password = "SuperSecretPassw0rd"
  size = "Small"
  type = "ADConnector"
  connect_settings {
    customer_dns_ips = ["${aws_directory_service_directory.bar.dns_ip_addresses}"]
    customer_username = "Administrator"
    vpc_id = "${aws_vpc.main.id}"
    subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
  }
}
resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"
}
resource "aws_subnet" "foo" {
  vpc_id = "${aws_vpc.main.id}"
  availability_zone = "us-west-2a"
  cidr_block = "10.0.1.0/24"
}
resource "aws_subnet" "bar" {
  vpc_id = "${aws_vpc.main.id}"
  availability_zone = "us-west-2b"
  cidr_block = "10.0.2.0/24"
}
`
// Config for TestAccAWSDirectoryServiceDirectory_microsoft: a MicrosoftAD
// directory with VPC settings spanning two availability zones.
const testAccDirectoryServiceDirectoryConfig_microsoft = `
resource "aws_directory_service_directory" "bar" {
  name = "corp.notexample.com"
  password = "SuperSecretPassw0rd"
  type = "MicrosoftAD"
  vpc_settings {
    vpc_id = "${aws_vpc.main.id}"
    subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
  }
}
resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"
}
resource "aws_subnet" "foo" {
  vpc_id = "${aws_vpc.main.id}"
  availability_zone = "us-west-2a"
  cidr_block = "10.0.1.0/24"
}
resource "aws_subnet" "bar" {
  vpc_id = "${aws_vpc.main.id}"
  availability_zone = "us-west-2b"
  cidr_block = "10.0.2.0/24"
}
`
var randomInteger = genRandInt() var randomInteger = genRandInt()
var testAccDirectoryServiceDirectoryConfig_withAlias = fmt.Sprintf(` var testAccDirectoryServiceDirectoryConfig_withAlias = fmt.Sprintf(`
resource "aws_directory_service_directory" "bar_a" { resource "aws_directory_service_directory" "bar_a" {

View File

@ -4,8 +4,10 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"log" "log"
"strings"
"time" "time"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
@ -158,6 +160,21 @@ func resourceAwsDynamoDbTable() *schema.Resource {
return hashcode.String(buf.String()) return hashcode.String(buf.String())
}, },
}, },
"stream_enabled": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Computed: true,
},
"stream_view_type": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
StateFunc: func(v interface{}) string {
value := v.(string)
return strings.ToUpper(value)
},
ValidateFunc: validateStreamViewType,
},
}, },
} }
} }
@ -263,6 +280,16 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er
req.GlobalSecondaryIndexes = globalSecondaryIndexes req.GlobalSecondaryIndexes = globalSecondaryIndexes
} }
if _, ok := d.GetOk("stream_enabled"); ok {
req.StreamSpecification = &dynamodb.StreamSpecification{
StreamEnabled: aws.Bool(d.Get("stream_enabled").(bool)),
StreamViewType: aws.String(d.Get("stream_view_type").(string)),
}
fmt.Printf("[DEBUG] Adding StreamSpecifications to the table")
}
attemptCount := 1 attemptCount := 1
for attemptCount <= DYNAMODB_MAX_THROTTLE_RETRIES { for attemptCount <= DYNAMODB_MAX_THROTTLE_RETRIES {
output, err := dynamodbconn.CreateTable(req) output, err := dynamodbconn.CreateTable(req)
@ -340,6 +367,25 @@ func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) er
waitForTableToBeActive(d.Id(), meta) waitForTableToBeActive(d.Id(), meta)
} }
if d.HasChange("stream_enabled") || d.HasChange("stream_view_type") {
req := &dynamodb.UpdateTableInput{
TableName: aws.String(d.Id()),
}
req.StreamSpecification = &dynamodb.StreamSpecification{
StreamEnabled: aws.Bool(d.Get("stream_enabled").(bool)),
StreamViewType: aws.String(d.Get("stream_view_type").(string)),
}
_, err := dynamodbconn.UpdateTable(req)
if err != nil {
return err
}
waitForTableToBeActive(d.Id(), meta)
}
if d.HasChange("global_secondary_index") { if d.HasChange("global_secondary_index") {
log.Printf("[DEBUG] Changed GSI data") log.Printf("[DEBUG] Changed GSI data")
req := &dynamodb.UpdateTableInput{ req := &dynamodb.UpdateTableInput{
@ -587,6 +633,11 @@ func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) erro
log.Printf("[DEBUG] Added GSI: %s - Read: %d / Write: %d", gsi["name"], gsi["read_capacity"], gsi["write_capacity"]) log.Printf("[DEBUG] Added GSI: %s - Read: %d / Write: %d", gsi["name"], gsi["read_capacity"], gsi["write_capacity"])
} }
if table.StreamSpecification != nil {
d.Set("stream_view_type", table.StreamSpecification.StreamViewType)
d.Set("stream_enabled", table.StreamSpecification.StreamEnabled)
}
err = d.Set("global_secondary_index", gsiList) err = d.Set("global_secondary_index", gsiList)
if err != nil { if err != nil {
return err return err
@ -610,6 +661,37 @@ func resourceAwsDynamoDbTableDelete(d *schema.ResourceData, meta interface{}) er
if err != nil { if err != nil {
return err return err
} }
params := &dynamodb.DescribeTableInput{
TableName: aws.String(d.Id()),
}
err = resource.Retry(10*time.Minute, func() error {
t, err := dynamodbconn.DescribeTable(params)
if err != nil {
if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" {
return nil
}
// Didn't recognize the error, so shouldn't retry.
return resource.RetryError{Err: err}
}
if t != nil {
if t.Table.TableStatus != nil && strings.ToLower(*t.Table.TableStatus) == "deleting" {
log.Printf("[DEBUG] AWS Dynamo DB table (%s) is still deleting", d.Id())
return fmt.Errorf("still deleting")
}
}
// we should be not found or deleting, so error here
return resource.RetryError{Err: fmt.Errorf("[ERR] Error deleting Dynamo DB table, unexpected state: %s", t)}
})
// check error from retry
if err != nil {
return err
}
return nil return nil
} }

View File

@ -2,6 +2,7 @@ package aws
import ( import (
"fmt" "fmt"
"log"
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
@ -11,7 +12,7 @@ import (
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
) )
func TestAccAWSDynamoDbTable(t *testing.T) { func TestAccAWSDynamoDbTable_basic(t *testing.T) {
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders, Providers: testAccProviders,
@ -33,6 +34,66 @@ func TestAccAWSDynamoDbTable(t *testing.T) {
}) })
} }
// Acceptance test: create a table with DynamoDB Streams enabled and verify
// the stream attributes land in state exactly as configured.
func TestAccAWSDynamoDbTable_streamSpecification(t *testing.T) {
	checks := resource.ComposeTestCheckFunc(
		testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.basic-dynamodb-table"),
		resource.TestCheckResourceAttr(
			"aws_dynamodb_table.basic-dynamodb-table", "stream_enabled", "true"),
		resource.TestCheckResourceAttr(
			"aws_dynamodb_table.basic-dynamodb-table", "stream_view_type", "KEYS_ONLY"),
	)
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSDynamoDbTableDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSDynamoDbConfigStreamSpecification,
				Check:  checks,
			},
		},
	})
}
// TestResourceAWSDynamoDbTableStreamViewType_validation exercises
// validateStreamViewType: unknown or hyphenated values must yield exactly one
// error; the four documented stream view types must yield none.
func TestResourceAWSDynamoDbTableStreamViewType_validation(t *testing.T) {
	cases := []struct {
		Value    string
		ErrCount int
	}{
		{Value: "KEYS-ONLY", ErrCount: 1},
		{Value: "RANDOM-STRING", ErrCount: 1},
		{Value: "KEYS_ONLY", ErrCount: 0},
		{Value: "NEW_AND_OLD_IMAGES", ErrCount: 0},
		{Value: "NEW_IMAGE", ErrCount: 0},
		{Value: "OLD_IMAGE", ErrCount: 0},
	}

	for _, tc := range cases {
		_, errors := validateStreamViewType(tc.Value, "aws_dynamodb_table_stream_view_type")
		if len(errors) != tc.ErrCount {
			// Fix: the original message claimed a validation error was
			// expected even for the valid (ErrCount == 0) cases and did not
			// say which input failed; report input and both counts instead.
			t.Fatalf("validateStreamViewType(%q): expected %d validation error(s), got %d",
				tc.Value, tc.ErrCount, len(errors))
		}
	}
}
func testAccCheckAWSDynamoDbTableDestroy(s *terraform.State) error { func testAccCheckAWSDynamoDbTableDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).dynamodbconn conn := testAccProvider.Meta().(*AWSClient).dynamodbconn
@ -41,21 +102,23 @@ func testAccCheckAWSDynamoDbTableDestroy(s *terraform.State) error {
continue continue
} }
fmt.Printf("[DEBUG] Checking if DynamoDB table %s exists", rs.Primary.ID) log.Printf("[DEBUG] Checking if DynamoDB table %s exists", rs.Primary.ID)
// Check if queue exists by checking for its attributes // Check if queue exists by checking for its attributes
params := &dynamodb.DescribeTableInput{ params := &dynamodb.DescribeTableInput{
TableName: aws.String(rs.Primary.ID), TableName: aws.String(rs.Primary.ID),
} }
_, err := conn.DescribeTable(params) _, err := conn.DescribeTable(params)
if err == nil { if err == nil {
return fmt.Errorf("DynamoDB table %s still exists. Failing!", rs.Primary.ID) return fmt.Errorf("DynamoDB table %s still exists. Failing!", rs.Primary.ID)
} }
// Verify the error is what we want // Verify the error is what we want
_, ok := err.(awserr.Error) if dbErr, ok := err.(awserr.Error); ok && dbErr.Code() == "ResourceNotFoundException" {
if !ok { return nil
return err
} }
return err
} }
return nil return nil
@ -295,3 +358,44 @@ resource "aws_dynamodb_table" "basic-dynamodb-table" {
} }
} }
` `
// Config for TestAccAWSDynamoDbTable_streamSpecification: a table with LSI
// and GSI plus a KEYS_ONLY stream enabled, so the test can assert the
// stream_enabled / stream_view_type attributes round-trip through AWS.
const testAccAWSDynamoDbConfigStreamSpecification = `
resource "aws_dynamodb_table" "basic-dynamodb-table" {
  name = "TerraformTestStreamTable"
  read_capacity = 10
  write_capacity = 20
  hash_key = "TestTableHashKey"
  range_key = "TestTableRangeKey"
  attribute {
    name = "TestTableHashKey"
    type = "S"
  }
  attribute {
    name = "TestTableRangeKey"
    type = "S"
  }
  attribute {
    name = "TestLSIRangeKey"
    type = "N"
  }
  attribute {
    name = "TestGSIRangeKey"
    type = "S"
  }
  local_secondary_index {
    name = "TestTableLSI"
    range_key = "TestLSIRangeKey"
    projection_type = "ALL"
  }
  global_secondary_index {
    name = "InitialTestTableGSI"
    hash_key = "TestTableHashKey"
    range_key = "TestGSIRangeKey"
    write_capacity = 10
    read_capacity = 10
    projection_type = "KEYS_ONLY"
  }
  stream_enabled = true
  stream_view_type = "KEYS_ONLY"
}
`

View File

@ -76,9 +76,6 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error
if value, ok := d.GetOk("encrypted"); ok { if value, ok := d.GetOk("encrypted"); ok {
request.Encrypted = aws.Bool(value.(bool)) request.Encrypted = aws.Bool(value.(bool))
} }
if value, ok := d.GetOk("iops"); ok {
request.Iops = aws.Int64(int64(value.(int)))
}
if value, ok := d.GetOk("kms_key_id"); ok { if value, ok := d.GetOk("kms_key_id"); ok {
request.KmsKeyId = aws.String(value.(string)) request.KmsKeyId = aws.String(value.(string))
} }
@ -88,22 +85,39 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error
if value, ok := d.GetOk("snapshot_id"); ok { if value, ok := d.GetOk("snapshot_id"); ok {
request.SnapshotId = aws.String(value.(string)) request.SnapshotId = aws.String(value.(string))
} }
// IOPs are only valid, and required for, storage type io1. The current minimu
// is 100. Instead of a hard validation we we only apply the IOPs to the
// request if the type is io1, and log a warning otherwise. This allows users
// to "disable" iops. See https://github.com/hashicorp/terraform/pull/4146
var t string
if value, ok := d.GetOk("type"); ok { if value, ok := d.GetOk("type"); ok {
request.VolumeType = aws.String(value.(string)) t = value.(string)
request.VolumeType = aws.String(t)
} }
iops := d.Get("iops").(int)
if t != "io1" && iops > 0 {
log.Printf("[WARN] IOPs is only valid for storate type io1 for EBS Volumes")
} else if t == "io1" {
// We add the iops value without validating it's size, to allow AWS to
// enforce a size requirement (currently 100)
request.Iops = aws.Int64(int64(iops))
}
log.Printf(
"[DEBUG] EBS Volume create opts: %s", request)
result, err := conn.CreateVolume(request) result, err := conn.CreateVolume(request)
if err != nil { if err != nil {
return fmt.Errorf("Error creating EC2 volume: %s", err) return fmt.Errorf("Error creating EC2 volume: %s", err)
} }
log.Printf( log.Println(
"[DEBUG] Waiting for Volume (%s) to become available", "[DEBUG] Waiting for Volume to become available")
d.Id())
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"creating"}, Pending: []string{"creating"},
Target: "available", Target: []string{"available"},
Refresh: volumeStateRefreshFunc(conn, *result.VolumeId), Refresh: volumeStateRefreshFunc(conn, *result.VolumeId),
Timeout: 5 * time.Minute, Timeout: 5 * time.Minute,
Delay: 10 * time.Second, Delay: 10 * time.Second,
@ -199,9 +213,6 @@ func readVolume(d *schema.ResourceData, volume *ec2.Volume) error {
if volume.Encrypted != nil { if volume.Encrypted != nil {
d.Set("encrypted", *volume.Encrypted) d.Set("encrypted", *volume.Encrypted)
} }
if volume.Iops != nil {
d.Set("iops", *volume.Iops)
}
if volume.KmsKeyId != nil { if volume.KmsKeyId != nil {
d.Set("kms_key_id", *volume.KmsKeyId) d.Set("kms_key_id", *volume.KmsKeyId)
} }
@ -214,6 +225,17 @@ func readVolume(d *schema.ResourceData, volume *ec2.Volume) error {
if volume.VolumeType != nil { if volume.VolumeType != nil {
d.Set("type", *volume.VolumeType) d.Set("type", *volume.VolumeType)
} }
if volume.VolumeType != nil && *volume.VolumeType == "io1" {
// Only set the iops attribute if the volume type is io1. Setting otherwise
// can trigger a refresh/plan loop based on the computed value that is given
// from AWS, and prevent us from specifying 0 as a valid iops.
// See https://github.com/hashicorp/terraform/pull/4146
if volume.Iops != nil {
d.Set("iops", *volume.Iops)
}
}
if volume.Tags != nil { if volume.Tags != nil {
d.Set("tags", tagsToMap(volume.Tags)) d.Set("tags", tagsToMap(volume.Tags))
} }

View File

@ -26,6 +26,22 @@ func TestAccAWSEBSVolume_basic(t *testing.T) {
}) })
} }
// Acceptance test: a gp2 volume that sets iops = 0 must create cleanly —
// the create path only sends IOPs to AWS for io1 volumes.
func TestAccAWSEBSVolume_NoIops(t *testing.T) {
	var v ec2.Volume
	step := resource.TestStep{
		Config: testAccAwsEbsVolumeConfigWithNoIops,
		Check:  testAccCheckVolumeExists("aws_ebs_volume.iops_test", &v),
	}
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps:     []resource.TestStep{step},
	})
}
func TestAccAWSEBSVolume_withTags(t *testing.T) { func TestAccAWSEBSVolume_withTags(t *testing.T) {
var v ec2.Volume var v ec2.Volume
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
@ -86,3 +102,15 @@ resource "aws_ebs_volume" "tags_test" {
} }
} }
` `
// Config for TestAccAWSEBSVolume_NoIops: a gp2 volume with an explicit
// iops = 0, exercising the "iops ignored unless type is io1" code path.
const testAccAwsEbsVolumeConfigWithNoIops = `
resource "aws_ebs_volume" "iops_test" {
  availability_zone = "us-west-2a"
  size = 10
  type = "gp2"
  iops = 0
  tags {
    Name = "TerraformTest"
  }
}
`

View File

@ -0,0 +1,106 @@
package aws
import (
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsEcrRepository defines the aws_ecr_repository resource: its CRUD
// entry points and attribute schema. Only "name" is user-supplied (changing
// it forces re-creation); "arn" and "registry_id" are computed by AWS.
func resourceAwsEcrRepository() *schema.Resource {
	attributes := map[string]*schema.Schema{
		"name": &schema.Schema{
			Type:     schema.TypeString,
			Required: true,
			ForceNew: true,
		},
		"arn": &schema.Schema{
			Type:     schema.TypeString,
			Computed: true,
		},
		"registry_id": &schema.Schema{
			Type:     schema.TypeString,
			Computed: true,
		},
	}

	return &schema.Resource{
		Create: resourceAwsEcrRepositoryCreate,
		Read:   resourceAwsEcrRepositoryRead,
		Delete: resourceAwsEcrRepositoryDelete,
		Schema: attributes,
	}
}
// resourceAwsEcrRepositoryCreate creates a new ECR repository named after the
// "name" attribute, seeds state with the values AWS returns, then delegates
// to the Read function for a full refresh.
func resourceAwsEcrRepositoryCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ecrconn

	input := ecr.CreateRepositoryInput{
		RepositoryName: aws.String(d.Get("name").(string)),
	}

	// Fix: corrected "resository" -> "repository" in the debug message.
	log.Printf("[DEBUG] Creating ECR repository: %s", input)
	out, err := conn.CreateRepository(&input)
	if err != nil {
		return err
	}

	repository := *out.Repository

	log.Printf("[DEBUG] ECR repository created: %q", *repository.RepositoryArn)

	d.SetId(*repository.RepositoryName)
	d.Set("arn", *repository.RepositoryArn)
	d.Set("registry_id", *repository.RegistryId)

	return resourceAwsEcrRepositoryRead(d, meta)
}
// resourceAwsEcrRepositoryRead refreshes state from the ECR API. A
// RepositoryNotFoundException clears the ID so Terraform plans a re-create.
func resourceAwsEcrRepositoryRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ecrconn
	log.Printf("[DEBUG] Reading repository %s", d.Id())

	describeInput := &ecr.DescribeRepositoriesInput{
		RegistryId:      aws.String(d.Get("registry_id").(string)),
		RepositoryNames: []*string{aws.String(d.Id())},
	}
	out, err := conn.DescribeRepositories(describeInput)
	if err != nil {
		// Missing repository is not an error; just drop it from state.
		if ecrerr, ok := err.(awserr.Error); ok && ecrerr.Code() == "RepositoryNotFoundException" {
			d.SetId("")
			return nil
		}
		return err
	}

	log.Printf("[DEBUG] Received repository %s", out)
	repository := out.Repositories[0]

	d.SetId(*repository.RepositoryName)
	d.Set("arn", *repository.RepositoryArn)
	d.Set("registry_id", *repository.RegistryId)

	return nil
}
// resourceAwsEcrRepositoryDelete force-deletes the repository (including any
// images still in it). A repository that is already gone counts as success.
func resourceAwsEcrRepositoryDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ecrconn

	deleteInput := &ecr.DeleteRepositoryInput{
		RepositoryName: aws.String(d.Id()),
		RegistryId:     aws.String(d.Get("registry_id").(string)),
		Force:          aws.Bool(true),
	}
	if _, err := conn.DeleteRepository(deleteInput); err != nil {
		if ecrerr, ok := err.(awserr.Error); ok && ecrerr.Code() == "RepositoryNotFoundException" {
			d.SetId("")
			return nil
		}
		return err
	}

	log.Printf("[DEBUG] repository %q deleted.", d.Get("arn").(string))

	return nil
}

View File

@ -0,0 +1,141 @@
package aws
import (
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsEcrRepositoryPolicy defines the aws_ecr_repository_policy
// resource. "repository" (ForceNew) and "policy" are user-supplied;
// "registry_id" is computed by AWS.
func resourceAwsEcrRepositoryPolicy() *schema.Resource {
	attributes := map[string]*schema.Schema{
		"repository": &schema.Schema{
			Type:     schema.TypeString,
			Required: true,
			ForceNew: true,
		},
		"policy": &schema.Schema{
			Type:     schema.TypeString,
			Required: true,
		},
		"registry_id": &schema.Schema{
			Type:     schema.TypeString,
			Computed: true,
		},
	}

	return &schema.Resource{
		Create: resourceAwsEcrRepositoryPolicyCreate,
		Read:   resourceAwsEcrRepositoryPolicyRead,
		Update: resourceAwsEcrRepositoryPolicyUpdate,
		Delete: resourceAwsEcrRepositoryPolicyDelete,
		Schema: attributes,
	}
}
// resourceAwsEcrRepositoryPolicyCreate attaches the configured policy
// document to its repository, records the IDs AWS returns, then delegates to
// the Read function for a full refresh.
func resourceAwsEcrRepositoryPolicyCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ecrconn

	input := ecr.SetRepositoryPolicyInput{
		RepositoryName: aws.String(d.Get("repository").(string)),
		PolicyText:     aws.String(d.Get("policy").(string)),
	}

	// Fix: corrected "resository" -> "repository" in the debug message.
	log.Printf("[DEBUG] Creating ECR repository policy: %s", input)
	out, err := conn.SetRepositoryPolicy(&input)
	if err != nil {
		return err
	}

	repositoryPolicy := *out

	log.Printf("[DEBUG] ECR repository policy created: %s", *repositoryPolicy.RepositoryName)

	d.SetId(*repositoryPolicy.RepositoryName)
	d.Set("registry_id", *repositoryPolicy.RegistryId)

	return resourceAwsEcrRepositoryPolicyRead(d, meta)
}
// resourceAwsEcrRepositoryPolicyRead refreshes state from the ECR API. Both
// "repository gone" and "policy gone" clear the ID so Terraform re-creates.
func resourceAwsEcrRepositoryPolicyRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ecrconn

	log.Printf("[DEBUG] Reading repository policy %s", d.Id())
	out, err := conn.GetRepositoryPolicy(&ecr.GetRepositoryPolicyInput{
		RegistryId:     aws.String(d.Get("registry_id").(string)),
		RepositoryName: aws.String(d.Id()),
	})
	if err != nil {
		if ecrerr, ok := err.(awserr.Error); ok {
			code := ecrerr.Code()
			if code == "RepositoryNotFoundException" || code == "RepositoryPolicyNotFoundException" {
				d.SetId("")
				return nil
			}
		}
		return err
	}

	log.Printf("[DEBUG] Received repository policy %s", out)

	d.SetId(*out.RepositoryName)
	d.Set("registry_id", *out.RegistryId)

	return nil
}
// resourceAwsEcrRepositoryPolicyUpdate pushes a changed policy document to
// ECR. Only "policy" is updatable in place; it is a no-op when unchanged.
func resourceAwsEcrRepositoryPolicyUpdate(d *schema.ResourceData, meta interface{}) error {
	if !d.HasChange("policy") {
		return nil
	}

	conn := meta.(*AWSClient).ecrconn

	input := ecr.SetRepositoryPolicyInput{
		RepositoryName: aws.String(d.Get("repository").(string)),
		RegistryId:     aws.String(d.Get("registry_id").(string)),
		PolicyText:     aws.String(d.Get("policy").(string)),
	}

	out, err := conn.SetRepositoryPolicy(&input)
	if err != nil {
		return err
	}

	d.SetId(*out.RepositoryName)
	d.Set("registry_id", *out.RegistryId)

	return nil
}
// resourceAwsEcrRepositoryPolicyDelete removes the policy from its
// repository. A repository or policy that is already gone counts as success.
func resourceAwsEcrRepositoryPolicyDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ecrconn

	_, err := conn.DeleteRepositoryPolicy(&ecr.DeleteRepositoryPolicyInput{
		RepositoryName: aws.String(d.Id()),
		RegistryId:     aws.String(d.Get("registry_id").(string)),
	})
	if err != nil {
		if ecrerr, ok := err.(awserr.Error); ok {
			code := ecrerr.Code()
			if code == "RepositoryNotFoundException" || code == "RepositoryPolicyNotFoundException" {
				d.SetId("")
				return nil
			}
		}
		return err
	}

	log.Printf("[DEBUG] repository policy %s deleted.", d.Id())

	return nil
}

View File

@ -0,0 +1,92 @@
package aws
import (
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// Acceptance test: set a policy on an ECR repository and verify the resource
// is recorded in state.
func TestAccAWSEcrRepositoryPolicy_basic(t *testing.T) {
	step := resource.TestStep{
		Config: testAccAWSEcrRepositoryPolicy,
		Check:  testAccCheckAWSEcrRepositoryPolicyExists("aws_ecr_repository_policy.default"),
	}
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSEcrRepositoryPolicyDestroy,
		Steps:        []resource.TestStep{step},
	})
}
// testAccCheckAWSEcrRepositoryPolicyDestroy verifies that every
// aws_ecr_repository_policy in state has actually been removed from AWS.
func testAccCheckAWSEcrRepositoryPolicyDestroy(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).ecrconn

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_ecr_repository_policy" {
			continue
		}

		_, err := conn.GetRepositoryPolicy(&ecr.GetRepositoryPolicyInput{
			RegistryId:     aws.String(rs.Primary.Attributes["registry_id"]),
			RepositoryName: aws.String(rs.Primary.Attributes["repository"]),
		})
		if err == nil {
			// Fix: the original fell through on a successful read, so the
			// destroy check passed even when the policy still existed.
			return fmt.Errorf("ECR repository policy %q still exists", rs.Primary.ID)
		}
		if ecrerr, ok := err.(awserr.Error); ok &&
			(ecrerr.Code() == "RepositoryNotFoundException" || ecrerr.Code() == "RepositoryPolicyNotFoundException") {
			// Gone — keep checking the remaining resources instead of
			// returning early as the original did.
			continue
		}
		return err
	}

	return nil
}
// testAccCheckAWSEcrRepositoryPolicyExists asserts that the named resource is
// present in Terraform state. (It does not call the ECR API.)
func testAccCheckAWSEcrRepositoryPolicyExists(name string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if _, ok := s.RootModule().Resources[name]; !ok {
			return fmt.Errorf("Not found: %s", name)
		}
		return nil
	}
}
// Config for TestAccAWSEcrRepositoryPolicy_basic: one repository plus a
// policy granting everyone ecr:ListImages. Pinned to us-east-1 because ECR
// launched there first.
var testAccAWSEcrRepositoryPolicy = `
# ECR initially only available in us-east-1
# https://aws.amazon.com/blogs/aws/ec2-container-registry-now-generally-available/
provider "aws" {
	region = "us-east-1"
}
resource "aws_ecr_repository" "foo" {
	name = "bar"
}
resource "aws_ecr_repository_policy" "default" {
	repository = "${aws_ecr_repository.foo.name}"
	policy = <<EOF
{
    "Version": "2008-10-17",
    "Statement": [
        {
            "Sid": "testpolicy",
            "Effect": "Allow",
            "Principal": "*",
            "Action": [
                "ecr:ListImages"
            ]
        }
    ]
}
EOF
}
`

View File

@ -0,0 +1,82 @@
package aws
import (
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// Acceptance test: create an ECR repository and verify the resource is
// recorded in state.
func TestAccAWSEcrRepository_basic(t *testing.T) {
	step := resource.TestStep{
		Config: testAccAWSEcrRepository,
		Check:  testAccCheckAWSEcrRepositoryExists("aws_ecr_repository.default"),
	}
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSEcrRepositoryDestroy,
		Steps:        []resource.TestStep{step},
	})
}
// testAccCheckAWSEcrRepositoryDestroy verifies that every aws_ecr_repository
// in state has actually been removed from AWS.
func testAccCheckAWSEcrRepositoryDestroy(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).ecrconn

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_ecr_repository" {
			continue
		}

		input := ecr.DescribeRepositoriesInput{
			RegistryId:      aws.String(rs.Primary.Attributes["registry_id"]),
			RepositoryNames: []*string{aws.String(rs.Primary.Attributes["name"])},
		}

		out, err := conn.DescribeRepositories(&input)
		if err != nil {
			// RepositoryNotFoundException is the desired outcome; keep
			// checking the remaining resources instead of returning early
			// as the original did.
			if ecrerr, ok := err.(awserr.Error); ok && ecrerr.Code() == "RepositoryNotFoundException" {
				continue
			}
			return err
		}

		for _, repository := range out.Repositories {
			// Fix: the original compared two distinct *string pointers with
			// ==, which is always false, so the "still exists" branch could
			// never fire. Compare the dereferenced values instead.
			if aws.StringValue(repository.RepositoryName) == rs.Primary.Attributes["name"] {
				return fmt.Errorf("ECR repository still exists:\n%#v", repository)
			}
		}
	}

	return nil
}
// testAccCheckAWSEcrRepositoryExists asserts that the named resource is
// present in Terraform state. (It does not call the ECR API.)
func testAccCheckAWSEcrRepositoryExists(name string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if _, ok := s.RootModule().Resources[name]; !ok {
			return fmt.Errorf("Not found: %s", name)
		}
		return nil
	}
}
// Config for TestAccAWSEcrRepository_basic: a single repository, pinned to
// us-east-1 because ECR launched there first.
var testAccAWSEcrRepository = `
# ECR initially only available in us-east-1
# https://aws.amazon.com/blogs/aws/ec2-container-registry-now-generally-available/
provider "aws" {
	region = "us-east-1"
}
resource "aws_ecr_repository" "default" {
	name = "foo-repository-terraform"
}
`

View File

@ -1,6 +1,7 @@
package aws package aws
import ( import (
"fmt"
"log" "log"
"time" "time"
@ -61,6 +62,13 @@ func resourceAwsEcsClusterRead(d *schema.ResourceData, meta interface{}) error {
for _, c := range out.Clusters { for _, c := range out.Clusters {
if *c.ClusterName == clusterName { if *c.ClusterName == clusterName {
// Status==INACTIVE means deleted cluster
if *c.Status == "INACTIVE" {
log.Printf("[DEBUG] Removing ECS cluster %q because it's INACTIVE", *c.ClusterArn)
d.SetId("")
return nil
}
d.SetId(*c.ClusterArn) d.SetId(*c.ClusterArn)
d.Set("name", c.ClusterName) d.Set("name", c.ClusterName)
return nil return nil
@ -77,7 +85,7 @@ func resourceAwsEcsClusterDelete(d *schema.ResourceData, meta interface{}) error
log.Printf("[DEBUG] Deleting ECS cluster %s", d.Id()) log.Printf("[DEBUG] Deleting ECS cluster %s", d.Id())
return resource.Retry(10*time.Minute, func() error { err := resource.Retry(10*time.Minute, func() error {
out, err := conn.DeleteCluster(&ecs.DeleteClusterInput{ out, err := conn.DeleteCluster(&ecs.DeleteClusterInput{
Cluster: aws.String(d.Id()), Cluster: aws.String(d.Id()),
}) })
@ -104,4 +112,37 @@ func resourceAwsEcsClusterDelete(d *schema.ResourceData, meta interface{}) error
return resource.RetryError{Err: err} return resource.RetryError{Err: err}
}) })
if err != nil {
return err
}
clusterName := d.Get("name").(string)
err = resource.Retry(5*time.Minute, func() error {
log.Printf("[DEBUG] Checking if ECS Cluster %q is INACTIVE", d.Id())
out, err := conn.DescribeClusters(&ecs.DescribeClustersInput{
Clusters: []*string{aws.String(clusterName)},
})
for _, c := range out.Clusters {
if *c.ClusterName == clusterName {
if *c.Status == "INACTIVE" {
return nil
}
return fmt.Errorf("ECS Cluster %q is still %q", clusterName, *c.Status)
}
}
if err != nil {
return resource.RetryError{Err: err}
}
return nil
})
if err != nil {
return err
}
log.Printf("[DEBUG] ECS cluster %q deleted", d.Id())
return nil
} }

View File

@ -38,13 +38,15 @@ func testAccCheckAWSEcsClusterDestroy(s *terraform.State) error {
Clusters: []*string{aws.String(rs.Primary.ID)}, Clusters: []*string{aws.String(rs.Primary.ID)},
}) })
if err == nil { if err != nil {
if len(out.Clusters) != 0 { return err
return fmt.Errorf("ECS cluster still exists:\n%#v", out.Clusters)
}
} }
return err for _, c := range out.Clusters {
if *c.ClusterArn == rs.Primary.ID && *c.Status != "INACTIVE" {
return fmt.Errorf("ECS cluster still exists:\n%s", c)
}
}
} }
return nil return nil

View File

@ -51,27 +51,32 @@ func resourceAwsEcsService() *schema.Resource {
"iam_role": &schema.Schema{ "iam_role": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
ForceNew: true,
Optional: true, Optional: true,
}, },
"load_balancer": &schema.Schema{ "load_balancer": &schema.Schema{
Type: schema.TypeSet, Type: schema.TypeSet,
Optional: true, Optional: true,
ForceNew: true,
Elem: &schema.Resource{ Elem: &schema.Resource{
Schema: map[string]*schema.Schema{ Schema: map[string]*schema.Schema{
"elb_name": &schema.Schema{ "elb_name": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Required: true,
ForceNew: true,
}, },
"container_name": &schema.Schema{ "container_name": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Required: true,
ForceNew: true,
}, },
"container_port": &schema.Schema{ "container_port": &schema.Schema{
Type: schema.TypeInt, Type: schema.TypeInt,
Required: true, Required: true,
ForceNew: true,
}, },
}, },
}, },
@ -274,13 +279,33 @@ func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error
} }
} }
input := ecs.DeleteServiceInput{ // Wait until the ECS service is drained
Service: aws.String(d.Id()), err = resource.Retry(5*time.Minute, func() error {
Cluster: aws.String(d.Get("cluster").(string)), input := ecs.DeleteServiceInput{
} Service: aws.String(d.Id()),
Cluster: aws.String(d.Get("cluster").(string)),
}
log.Printf("[DEBUG] Deleting ECS service %s", input) log.Printf("[DEBUG] Trying to delete ECS service %s", input)
out, err := conn.DeleteService(&input) _, err := conn.DeleteService(&input)
if err == nil {
return nil
}
ec2err, ok := err.(awserr.Error)
if !ok {
return &resource.RetryError{Err: err}
}
if ec2err.Code() == "InvalidParameterException" {
// Prevent "The service cannot be stopped while deployments are active."
log.Printf("[DEBUG] Trying to delete ECS service again: %q",
ec2err.Message())
return err
}
return &resource.RetryError{Err: err}
})
if err != nil { if err != nil {
return err return err
} }
@ -288,7 +313,7 @@ func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error
// Wait until it's deleted // Wait until it's deleted
wait := resource.StateChangeConf{ wait := resource.StateChangeConf{
Pending: []string{"DRAINING"}, Pending: []string{"DRAINING"},
Target: "INACTIVE", Target: []string{"INACTIVE"},
Timeout: 5 * time.Minute, Timeout: 5 * time.Minute,
MinTimeout: 1 * time.Second, MinTimeout: 1 * time.Second,
Refresh: func() (interface{}, string, error) { Refresh: func() (interface{}, string, error) {
@ -301,6 +326,7 @@ func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error
return resp, "FAILED", err return resp, "FAILED", err
} }
log.Printf("[DEBUG] ECS service (%s) is currently %q", d.Id(), *resp.Services[0].Status)
return resp, *resp.Services[0].Status, nil return resp, *resp.Services[0].Status, nil
}, },
} }
@ -310,7 +336,7 @@ func resourceAwsEcsServiceDelete(d *schema.ResourceData, meta interface{}) error
return err return err
} }
log.Printf("[DEBUG] ECS service %s deleted.", *out.Service.ServiceArn) log.Printf("[DEBUG] ECS service %s deleted.", d.Id())
return nil return nil
} }

View File

@ -178,6 +178,29 @@ func TestAccAWSEcsService_withIamRole(t *testing.T) {
}) })
} }
// Regression for https://github.com/hashicorp/terraform/issues/3444
func TestAccAWSEcsService_withLbChanges(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSEcsServiceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSEcsService_withLbChanges,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSEcsServiceExists("aws_ecs_service.with_lb_changes"),
),
},
resource.TestStep{
Config: testAccAWSEcsService_withLbChanges_modified,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSEcsServiceExists("aws_ecs_service.with_lb_changes"),
),
},
},
})
}
// Regression for https://github.com/hashicorp/terraform/issues/3361 // Regression for https://github.com/hashicorp/terraform/issues/3361
func TestAccAWSEcsService_withEcsClusterName(t *testing.T) { func TestAccAWSEcsService_withEcsClusterName(t *testing.T) {
clusterName := regexp.MustCompile("^terraformecstestcluster$") clusterName := regexp.MustCompile("^terraformecstestcluster$")
@ -208,12 +231,24 @@ func testAccCheckAWSEcsServiceDestroy(s *terraform.State) error {
out, err := conn.DescribeServices(&ecs.DescribeServicesInput{ out, err := conn.DescribeServices(&ecs.DescribeServicesInput{
Services: []*string{aws.String(rs.Primary.ID)}, Services: []*string{aws.String(rs.Primary.ID)},
Cluster: aws.String(rs.Primary.Attributes["cluster"]),
}) })
if err == nil { if err == nil {
if len(out.Services) > 0 { if len(out.Services) > 0 {
return fmt.Errorf("ECS service still exists:\n%#v", out.Services) var activeServices []*ecs.Service
for _, svc := range out.Services {
if *svc.Status != "INACTIVE" {
activeServices = append(activeServices, svc)
}
}
if len(activeServices) == 0 {
return nil
}
return fmt.Errorf("ECS service still exists:\n%#v", activeServices)
} }
return nil
} }
return err return err
@ -356,7 +391,6 @@ EOF
} }
resource "aws_elb" "main" { resource "aws_elb" "main" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a"] availability_zones = ["us-west-2a"]
listener { listener {
@ -384,6 +418,107 @@ resource "aws_ecs_service" "ghost" {
} }
` `
var tpl_testAccAWSEcsService_withLbChanges = `
resource "aws_ecs_cluster" "main" {
name = "terraformecstest12"
}
resource "aws_ecs_task_definition" "with_lb_changes" {
family = "ghost_lbd"
container_definitions = <<DEFINITION
[
{
"cpu": 128,
"essential": true,
"image": "%s",
"memory": 128,
"name": "%s",
"portMappings": [
{
"containerPort": %d,
"hostPort": %d
}
]
}
]
DEFINITION
}
resource "aws_iam_role" "ecs_service" {
name = "EcsServiceLbd"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {"AWS": "*"},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_role_policy" "ecs_service" {
name = "EcsServiceLbd"
role = "${aws_iam_role.ecs_service.name}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"elasticloadbalancing:*",
"ec2:*",
"ecs:*"
],
"Resource": [
"*"
]
}
]
}
EOF
}
resource "aws_elb" "main" {
availability_zones = ["us-west-2a"]
listener {
instance_port = %d
instance_protocol = "http"
lb_port = 80
lb_protocol = "http"
}
}
resource "aws_ecs_service" "with_lb_changes" {
name = "ghost"
cluster = "${aws_ecs_cluster.main.id}"
task_definition = "${aws_ecs_task_definition.with_lb_changes.arn}"
desired_count = 1
iam_role = "${aws_iam_role.ecs_service.name}"
load_balancer {
elb_name = "${aws_elb.main.id}"
container_name = "%s"
container_port = "%d"
}
depends_on = ["aws_iam_role_policy.ecs_service"]
}
`
var testAccAWSEcsService_withLbChanges = fmt.Sprintf(
tpl_testAccAWSEcsService_withLbChanges,
"ghost:latest", "ghost", 2368, 8080, 8080, "ghost", 2368)
var testAccAWSEcsService_withLbChanges_modified = fmt.Sprintf(
tpl_testAccAWSEcsService_withLbChanges,
"nginx:latest", "nginx", 80, 8080, 8080, "nginx", 80)
var testAccAWSEcsServiceWithFamilyAndRevision = ` var testAccAWSEcsServiceWithFamilyAndRevision = `
resource "aws_ecs_cluster" "default" { resource "aws_ecs_cluster" "default" {
name = "terraformecstest2" name = "terraformecstest2"

View File

@ -82,17 +82,19 @@ func testAccCheckAWSEcsTaskDefinitionDestroy(s *terraform.State) error {
continue continue
} }
out, err := conn.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{ input := ecs.DescribeTaskDefinitionInput{
TaskDefinition: aws.String(rs.Primary.ID), TaskDefinition: aws.String(rs.Primary.Attributes["arn"]),
})
if err == nil {
if out.TaskDefinition != nil {
return fmt.Errorf("ECS task definition still exists:\n%#v", *out.TaskDefinition)
}
} }
return err out, err := conn.DescribeTaskDefinition(&input)
if err != nil {
return err
}
if out.TaskDefinition != nil && *out.TaskDefinition.Status != "INACTIVE" {
return fmt.Errorf("ECS task definition still exists:\n%#v", *out.TaskDefinition)
}
} }
return nil return nil

View File

@ -51,7 +51,7 @@ func resourceAwsEfsFileSystemCreate(d *schema.ResourceData, meta interface{}) er
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"creating"}, Pending: []string{"creating"},
Target: "available", Target: []string{"available"},
Refresh: func() (interface{}, string, error) { Refresh: func() (interface{}, string, error) {
resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{ resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{
FileSystemId: aws.String(d.Id()), FileSystemId: aws.String(d.Id()),
@ -127,7 +127,7 @@ func resourceAwsEfsFileSystemDelete(d *schema.ResourceData, meta interface{}) er
}) })
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"available", "deleting"}, Pending: []string{"available", "deleting"},
Target: "", Target: []string{},
Refresh: func() (interface{}, string, error) { Refresh: func() (interface{}, string, error) {
resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{ resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{
FileSystemId: aws.String(d.Id()), FileSystemId: aws.String(d.Id()),

View File

@ -81,7 +81,7 @@ func resourceAwsEfsMountTargetCreate(d *schema.ResourceData, meta interface{}) e
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"creating"}, Pending: []string{"creating"},
Target: "available", Target: []string{"available"},
Refresh: func() (interface{}, string, error) { Refresh: func() (interface{}, string, error) {
resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{
MountTargetId: aws.String(d.Id()), MountTargetId: aws.String(d.Id()),
@ -179,7 +179,7 @@ func resourceAwsEfsMountTargetDelete(d *schema.ResourceData, meta interface{}) e
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"available", "deleting", "deleted"}, Pending: []string{"available", "deleting", "deleted"},
Target: "", Target: []string{},
Refresh: func() (interface{}, string, error) { Refresh: func() (interface{}, string, error) {
resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{
MountTargetId: aws.String(d.Id()), MountTargetId: aws.String(d.Id()),

View File

@ -146,9 +146,13 @@ func resourceAwsEipRead(d *schema.ResourceData, meta interface{}) error {
d.Set("association_id", address.AssociationId) d.Set("association_id", address.AssociationId)
if address.InstanceId != nil { if address.InstanceId != nil {
d.Set("instance", address.InstanceId) d.Set("instance", address.InstanceId)
} else {
d.Set("instance", "")
} }
if address.NetworkInterfaceId != nil { if address.NetworkInterfaceId != nil {
d.Set("network_interface", address.NetworkInterfaceId) d.Set("network_interface", address.NetworkInterfaceId)
} else {
d.Set("network_interface", "")
} }
d.Set("private_ip", address.PrivateIpAddress) d.Set("private_ip", address.PrivateIpAddress)
d.Set("public_ip", address.PublicIp) d.Set("public_ip", address.PublicIp)

View File

@ -86,26 +86,38 @@ func testAccCheckAWSEIPDestroy(s *terraform.State) error {
continue continue
} }
req := &ec2.DescribeAddressesInput{ if strings.Contains(rs.Primary.ID, "eipalloc") {
PublicIps: []*string{aws.String(rs.Primary.ID)}, req := &ec2.DescribeAddressesInput{
} AllocationIds: []*string{aws.String(rs.Primary.ID)},
describe, err := conn.DescribeAddresses(req) }
describe, err := conn.DescribeAddresses(req)
if err == nil { if err != nil {
if len(describe.Addresses) != 0 && // Verify the error is what we want
*describe.Addresses[0].PublicIp == rs.Primary.ID { if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidAllocationID.NotFound" {
return fmt.Errorf("EIP still exists") continue
}
return err
} }
}
// Verify the error if len(describe.Addresses) > 0 {
providerErr, ok := err.(awserr.Error) return fmt.Errorf("still exists")
if !ok { }
return err } else {
} req := &ec2.DescribeAddressesInput{
PublicIps: []*string{aws.String(rs.Primary.ID)},
}
describe, err := conn.DescribeAddresses(req)
if err != nil {
// Verify the error is what we want
if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidAllocationID.NotFound" {
continue
}
return err
}
if providerErr.Code() != "InvalidAllocationID.NotFound" { if len(describe.Addresses) > 0 {
return fmt.Errorf("Unexpected error: %s", err) return fmt.Errorf("still exists")
}
} }
} }

View File

@ -120,6 +120,10 @@ func resourceAwsElasticacheCluster() *schema.Resource {
Type: schema.TypeInt, Type: schema.TypeInt,
Computed: true, Computed: true,
}, },
"availability_zone": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
}, },
}, },
}, },
@ -162,6 +166,30 @@ func resourceAwsElasticacheCluster() *schema.Resource {
}, },
}, },
"az_mode": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
"availability_zone": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
"availability_zones": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: func(v interface{}) int {
return hashcode.String(v.(string))
},
},
"tags": tagsSchema(), "tags": tagsSchema(),
// apply_immediately is used to determine when the update modifications // apply_immediately is used to determine when the update modifications
@ -234,6 +262,20 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{
log.Printf("[DEBUG] Restoring Redis cluster from S3 snapshot: %#v", s) log.Printf("[DEBUG] Restoring Redis cluster from S3 snapshot: %#v", s)
} }
if v, ok := d.GetOk("az_mode"); ok {
req.AZMode = aws.String(v.(string))
}
if v, ok := d.GetOk("availability_zone"); ok {
req.PreferredAvailabilityZone = aws.String(v.(string))
}
preferred_azs := d.Get("availability_zones").(*schema.Set).List()
if len(preferred_azs) > 0 {
azs := expandStringList(preferred_azs)
req.PreferredAvailabilityZones = azs
}
resp, err := conn.CreateCacheCluster(req) resp, err := conn.CreateCacheCluster(req)
if err != nil { if err != nil {
return fmt.Errorf("Error creating Elasticache: %s", err) return fmt.Errorf("Error creating Elasticache: %s", err)
@ -248,7 +290,7 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{
pending := []string{"creating"} pending := []string{"creating"}
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: pending, Pending: pending,
Target: "available", Target: []string{"available"},
Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending), Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending),
Timeout: 10 * time.Minute, Timeout: 10 * time.Minute,
Delay: 10 * time.Second, Delay: 10 * time.Second,
@ -306,6 +348,7 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{})
d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn) d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn)
} }
} }
d.Set("availability_zone", c.PreferredAvailabilityZone)
if err := setCacheNodeData(d, c); err != nil { if err := setCacheNodeData(d, c); err != nil {
return err return err
@ -395,8 +438,21 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{
} }
if d.HasChange("num_cache_nodes") { if d.HasChange("num_cache_nodes") {
oraw, nraw := d.GetChange("num_cache_nodes")
o := oraw.(int)
n := nraw.(int)
if v, ok := d.GetOk("az_mode"); ok && v.(string) == "cross-az" && n == 1 {
return fmt.Errorf("[WARN] Error updateing Elasticache cluster (%s), error: Cross-AZ mode is not supported in a single cache node.", d.Id())
}
if n < o {
log.Printf("[INFO] Cluster %s is marked for Decreasing cache nodes from %d to %d", d.Id(), o, n)
nodesToRemove := getCacheNodesToRemove(d, o, o-n)
req.CacheNodeIdsToRemove = nodesToRemove
}
req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int))) req.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int)))
requestUpdate = true requestUpdate = true
} }
if requestUpdate { if requestUpdate {
@ -410,7 +466,7 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{
pending := []string{"modifying", "rebooting cache cluster nodes", "snapshotting"} pending := []string{"modifying", "rebooting cache cluster nodes", "snapshotting"}
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: pending, Pending: pending,
Target: "available", Target: []string{"available"},
Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending), Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending),
Timeout: 5 * time.Minute, Timeout: 5 * time.Minute,
Delay: 5 * time.Second, Delay: 5 * time.Second,
@ -426,6 +482,16 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{
return resourceAwsElasticacheClusterRead(d, meta) return resourceAwsElasticacheClusterRead(d, meta)
} }
func getCacheNodesToRemove(d *schema.ResourceData, oldNumberOfNodes int, cacheNodesToRemove int) []*string {
nodesIdsToRemove := []*string{}
for i := oldNumberOfNodes; i > oldNumberOfNodes-cacheNodesToRemove && i > 0; i-- {
s := fmt.Sprintf("%04d", i)
nodesIdsToRemove = append(nodesIdsToRemove, &s)
}
return nodesIdsToRemove
}
func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error { func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error {
sortedCacheNodes := make([]*elasticache.CacheNode, len(c.CacheNodes)) sortedCacheNodes := make([]*elasticache.CacheNode, len(c.CacheNodes))
copy(sortedCacheNodes, c.CacheNodes) copy(sortedCacheNodes, c.CacheNodes)
@ -434,13 +500,14 @@ func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error
cacheNodeData := make([]map[string]interface{}, 0, len(sortedCacheNodes)) cacheNodeData := make([]map[string]interface{}, 0, len(sortedCacheNodes))
for _, node := range sortedCacheNodes { for _, node := range sortedCacheNodes {
if node.CacheNodeId == nil || node.Endpoint == nil || node.Endpoint.Address == nil || node.Endpoint.Port == nil { if node.CacheNodeId == nil || node.Endpoint == nil || node.Endpoint.Address == nil || node.Endpoint.Port == nil || node.CustomerAvailabilityZone == nil {
return fmt.Errorf("Unexpected nil pointer in: %s", node) return fmt.Errorf("Unexpected nil pointer in: %s", node)
} }
cacheNodeData = append(cacheNodeData, map[string]interface{}{ cacheNodeData = append(cacheNodeData, map[string]interface{}{
"id": *node.CacheNodeId, "id": *node.CacheNodeId,
"address": *node.Endpoint.Address, "address": *node.Endpoint.Address,
"port": int(*node.Endpoint.Port), "port": int(*node.Endpoint.Port),
"availability_zone": *node.CustomerAvailabilityZone,
}) })
} }
@ -470,7 +537,7 @@ func resourceAwsElasticacheClusterDelete(d *schema.ResourceData, meta interface{
log.Printf("[DEBUG] Waiting for deletion: %v", d.Id()) log.Printf("[DEBUG] Waiting for deletion: %v", d.Id())
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"creating", "available", "deleting", "incompatible-parameters", "incompatible-network", "restore-failed"}, Pending: []string{"creating", "available", "deleting", "incompatible-parameters", "incompatible-network", "restore-failed"},
Target: "", Target: []string{},
Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "", []string{}), Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "", []string{}),
Timeout: 10 * time.Minute, Timeout: 10 * time.Minute,
Delay: 10 * time.Second, Delay: 10 * time.Second,

View File

@ -8,6 +8,7 @@ import (
"time" "time"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elasticache" "github.com/aws/aws-sdk-go/service/elasticache"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -72,6 +73,41 @@ func TestAccAWSElasticacheCluster_snapshotsWithUpdates(t *testing.T) {
}) })
} }
func TestAccAWSElasticacheCluster_decreasingCacheNodes(t *testing.T) {
var ec elasticache.CacheCluster
ri := genRandInt()
preConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfigDecreasingNodes, ri, ri, ri)
postConfig := fmt.Sprintf(testAccAWSElasticacheClusterConfigDecreasingNodes_update, ri, ri, ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
resource.TestCheckResourceAttr(
"aws_elasticache_cluster.bar", "num_cache_nodes", "3"),
),
},
resource.TestStep{
Config: postConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
resource.TestCheckResourceAttr(
"aws_elasticache_cluster.bar", "num_cache_nodes", "1"),
),
},
},
})
}
func TestAccAWSElasticacheCluster_vpc(t *testing.T) { func TestAccAWSElasticacheCluster_vpc(t *testing.T) {
var csg elasticache.CacheSubnetGroup var csg elasticache.CacheSubnetGroup
var ec elasticache.CacheCluster var ec elasticache.CacheCluster
@ -86,6 +122,29 @@ func TestAccAWSElasticacheCluster_vpc(t *testing.T) {
testAccCheckAWSElasticacheSubnetGroupExists("aws_elasticache_subnet_group.bar", &csg), testAccCheckAWSElasticacheSubnetGroupExists("aws_elasticache_subnet_group.bar", &csg),
testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec), testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
testAccCheckAWSElasticacheClusterAttributes(&ec), testAccCheckAWSElasticacheClusterAttributes(&ec),
resource.TestCheckResourceAttr(
"aws_elasticache_cluster.bar", "availability_zone", "us-west-2a"),
),
},
},
})
}
func TestAccAWSElasticacheCluster_multiAZInVpc(t *testing.T) {
var csg elasticache.CacheSubnetGroup
var ec elasticache.CacheCluster
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSElasticacheClusterMultiAZInVPCConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheSubnetGroupExists("aws_elasticache_subnet_group.bar", &csg),
testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
resource.TestCheckResourceAttr(
"aws_elasticache_cluster.bar", "availability_zone", "Multiple"),
), ),
}, },
}, },
@ -117,6 +176,10 @@ func testAccCheckAWSElasticacheClusterDestroy(s *terraform.State) error {
CacheClusterId: aws.String(rs.Primary.ID), CacheClusterId: aws.String(rs.Primary.ID),
}) })
if err != nil { if err != nil {
// Verify the error is what we want
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "CacheClusterNotFound" {
continue
}
return err return err
} }
if len(res.CacheClusters) > 0 { if len(res.CacheClusters) > 0 {
@ -260,6 +323,71 @@ resource "aws_elasticache_cluster" "bar" {
} }
` `
var testAccAWSElasticacheClusterConfigDecreasingNodes = `
provider "aws" {
region = "us-east-1"
}
resource "aws_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
ingress {
from_port = -1
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_elasticache_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
security_group_names = ["${aws_security_group.bar.name}"]
}
resource "aws_elasticache_cluster" "bar" {
cluster_id = "tf-test-%03d"
engine = "memcached"
node_type = "cache.m1.small"
num_cache_nodes = 3
port = 11211
parameter_group_name = "default.memcached1.4"
security_group_names = ["${aws_elasticache_security_group.bar.name}"]
}
`
var testAccAWSElasticacheClusterConfigDecreasingNodes_update = `
provider "aws" {
region = "us-east-1"
}
resource "aws_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
ingress {
from_port = -1
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_elasticache_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
security_group_names = ["${aws_security_group.bar.name}"]
}
resource "aws_elasticache_cluster" "bar" {
cluster_id = "tf-test-%03d"
engine = "memcached"
node_type = "cache.m1.small"
num_cache_nodes = 1
port = 11211
parameter_group_name = "default.memcached1.4"
security_group_names = ["${aws_elasticache_security_group.bar.name}"]
apply_immediately = true
}
`
var testAccAWSElasticacheClusterInVPCConfig = fmt.Sprintf(` var testAccAWSElasticacheClusterInVPCConfig = fmt.Sprintf(`
resource "aws_vpc" "foo" { resource "aws_vpc" "foo" {
cidr_block = "192.168.0.0/16" cidr_block = "192.168.0.0/16"
@ -309,9 +437,74 @@ resource "aws_elasticache_cluster" "bar" {
security_group_ids = ["${aws_security_group.bar.id}"] security_group_ids = ["${aws_security_group.bar.id}"]
parameter_group_name = "default.redis2.8" parameter_group_name = "default.redis2.8"
notification_topic_arn = "${aws_sns_topic.topic_example.arn}" notification_topic_arn = "${aws_sns_topic.topic_example.arn}"
availability_zone = "us-west-2a"
} }
resource "aws_sns_topic" "topic_example" { resource "aws_sns_topic" "topic_example" {
name = "tf-ecache-cluster-test" name = "tf-ecache-cluster-test"
} }
`, genRandInt(), genRandInt(), genRandInt()) `, genRandInt(), genRandInt(), genRandInt())
var testAccAWSElasticacheClusterMultiAZInVPCConfig = fmt.Sprintf(`
resource "aws_vpc" "foo" {
cidr_block = "192.168.0.0/16"
tags {
Name = "tf-test"
}
}
resource "aws_subnet" "foo" {
vpc_id = "${aws_vpc.foo.id}"
cidr_block = "192.168.0.0/20"
availability_zone = "us-west-2a"
tags {
Name = "tf-test-%03d"
}
}
resource "aws_subnet" "bar" {
vpc_id = "${aws_vpc.foo.id}"
cidr_block = "192.168.16.0/20"
availability_zone = "us-west-2b"
tags {
Name = "tf-test-%03d"
}
}
resource "aws_elasticache_subnet_group" "bar" {
name = "tf-test-cache-subnet-%03d"
description = "tf-test-cache-subnet-group-descr"
subnet_ids = [
"${aws_subnet.foo.id}",
"${aws_subnet.bar.id}"
]
}
resource "aws_security_group" "bar" {
name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr"
vpc_id = "${aws_vpc.foo.id}"
ingress {
from_port = -1
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_elasticache_cluster" "bar" {
cluster_id = "tf-test-%03d"
engine = "memcached"
node_type = "cache.m1.small"
num_cache_nodes = 2
port = 11211
subnet_group_name = "${aws_elasticache_subnet_group.bar.name}"
security_group_ids = ["${aws_security_group.bar.id}"]
parameter_group_name = "default.memcached1.4"
az_mode = "cross-az"
availability_zones = [
"us-west-2a",
"us-west-2b"
]
}
`, genRandInt(), genRandInt(), genRandInt(), genRandInt(), genRandInt())

View File

@ -169,7 +169,7 @@ func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta int
func resourceAwsElasticacheParameterGroupDelete(d *schema.ResourceData, meta interface{}) error { func resourceAwsElasticacheParameterGroupDelete(d *schema.ResourceData, meta interface{}) error {
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"pending"}, Pending: []string{"pending"},
Target: "destroyed", Target: []string{"destroyed"},
Refresh: resourceAwsElasticacheParameterGroupDeleteRefreshFunc(d, meta), Refresh: resourceAwsElasticacheParameterGroupDeleteRefreshFunc(d, meta),
Timeout: 3 * time.Minute, Timeout: 3 * time.Minute,
MinTimeout: 1 * time.Second, MinTimeout: 1 * time.Second,

View File

@ -112,7 +112,7 @@ func testAccCheckAWSElasticacheParameterGroupDestroy(s *terraform.State) error {
if !ok { if !ok {
return err return err
} }
if newerr.Code() != "InvalidCacheParameterGroup.NotFound" { if newerr.Code() != "CacheParameterGroupNotFound" {
return err return err
} }
} }

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elasticache" "github.com/aws/aws-sdk-go/service/elasticache"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -36,12 +37,14 @@ func testAccCheckAWSElasticacheSecurityGroupDestroy(s *terraform.State) error {
res, err := conn.DescribeCacheSecurityGroups(&elasticache.DescribeCacheSecurityGroupsInput{ res, err := conn.DescribeCacheSecurityGroups(&elasticache.DescribeCacheSecurityGroupsInput{
CacheSecurityGroupName: aws.String(rs.Primary.ID), CacheSecurityGroupName: aws.String(rs.Primary.ID),
}) })
if err != nil { if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "CacheSecurityGroupNotFound" {
return err continue
} }
if len(res.CacheSecurityGroups) > 0 { if len(res.CacheSecurityGroups) > 0 {
return fmt.Errorf("still exist.") return fmt.Errorf("cache security group still exists")
} }
return err
} }
return nil return nil
} }
@ -69,6 +72,9 @@ func testAccCheckAWSElasticacheSecurityGroupExists(n string) resource.TestCheckF
} }
var testAccAWSElasticacheSecurityGroupConfig = fmt.Sprintf(` var testAccAWSElasticacheSecurityGroupConfig = fmt.Sprintf(`
provider "aws" {
region = "us-east-1"
}
resource "aws_security_group" "bar" { resource "aws_security_group" "bar" {
name = "tf-test-security-group-%03d" name = "tf-test-security-group-%03d"
description = "tf-test-security-group-descr" description = "tf-test-security-group-descr"

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elasticache" "github.com/aws/aws-sdk-go/service/elasticache"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -71,6 +72,10 @@ func testAccCheckAWSElasticacheSubnetGroupDestroy(s *terraform.State) error {
CacheSubnetGroupName: aws.String(rs.Primary.ID), CacheSubnetGroupName: aws.String(rs.Primary.ID),
}) })
if err != nil { if err != nil {
// Verify the error is what we want
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "CacheSubnetGroupNotFoundFault" {
continue
}
return err return err
} }
if len(res.CacheSubnetGroups) > 0 { if len(res.CacheSubnetGroups) > 0 {

View File

@ -247,7 +247,9 @@ func resourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface{}
ds := out.DomainStatus ds := out.DomainStatus
d.Set("access_policies", *ds.AccessPolicies) if ds.AccessPolicies != nil && *ds.AccessPolicies != "" {
d.Set("access_policies", normalizeJson(*ds.AccessPolicies))
}
err = d.Set("advanced_options", pointersMapToStringList(ds.AdvancedOptions)) err = d.Set("advanced_options", pointersMapToStringList(ds.AdvancedOptions))
if err != nil { if err != nil {
return err return err

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -85,8 +86,12 @@ func testAccCheckESDomainDestroy(s *terraform.State) error {
} }
_, err := conn.DescribeElasticsearchDomain(opts) _, err := conn.DescribeElasticsearchDomain(opts)
// Verify the error is what we want
if err != nil { if err != nil {
return fmt.Errorf("Error describing ES domains: %q", err.Error()) if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" {
continue
}
return err
} }
} }
return nil return nil

View File

@ -4,7 +4,6 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"log" "log"
"regexp"
"strings" "strings"
"time" "time"
@ -49,7 +48,6 @@ func resourceAwsElb() *schema.Resource {
Type: schema.TypeSet, Type: schema.TypeSet,
Elem: &schema.Schema{Type: schema.TypeString}, Elem: &schema.Schema{Type: schema.TypeString},
Optional: true, Optional: true,
ForceNew: true,
Computed: true, Computed: true,
Set: schema.HashString, Set: schema.HashString,
}, },
@ -85,7 +83,6 @@ func resourceAwsElb() *schema.Resource {
Type: schema.TypeSet, Type: schema.TypeSet,
Elem: &schema.Schema{Type: schema.TypeString}, Elem: &schema.Schema{Type: schema.TypeString},
Optional: true, Optional: true,
ForceNew: true,
Computed: true, Computed: true,
Set: schema.HashString, Set: schema.HashString,
}, },
@ -339,10 +336,10 @@ func resourceAwsElbRead(d *schema.ResourceData, meta interface{}) error {
d.Set("dns_name", *lb.DNSName) d.Set("dns_name", *lb.DNSName)
d.Set("zone_id", *lb.CanonicalHostedZoneNameID) d.Set("zone_id", *lb.CanonicalHostedZoneNameID)
d.Set("internal", *lb.Scheme == "internal") d.Set("internal", *lb.Scheme == "internal")
d.Set("availability_zones", lb.AvailabilityZones) d.Set("availability_zones", flattenStringList(lb.AvailabilityZones))
d.Set("instances", flattenInstances(lb.Instances)) d.Set("instances", flattenInstances(lb.Instances))
d.Set("listener", flattenListeners(lb.ListenerDescriptions)) d.Set("listener", flattenListeners(lb.ListenerDescriptions))
d.Set("security_groups", lb.SecurityGroups) d.Set("security_groups", flattenStringList(lb.SecurityGroups))
if lb.SourceSecurityGroup != nil { if lb.SourceSecurityGroup != nil {
d.Set("source_security_group", lb.SourceSecurityGroup.GroupName) d.Set("source_security_group", lb.SourceSecurityGroup.GroupName)
@ -350,15 +347,15 @@ func resourceAwsElbRead(d *schema.ResourceData, meta interface{}) error {
var elbVpc string var elbVpc string
if lb.VPCId != nil { if lb.VPCId != nil {
elbVpc = *lb.VPCId elbVpc = *lb.VPCId
} sgId, err := sourceSGIdByName(meta, *lb.SourceSecurityGroup.GroupName, elbVpc)
sgId, err := sourceSGIdByName(meta, *lb.SourceSecurityGroup.GroupName, elbVpc) if err != nil {
if err != nil { return fmt.Errorf("[WARN] Error looking up ELB Security Group ID: %s", err)
return fmt.Errorf("[WARN] Error looking up ELB Security Group ID: %s", err) } else {
} else { d.Set("source_security_group_id", sgId)
d.Set("source_security_group_id", sgId) }
} }
} }
d.Set("subnets", lb.Subnets) d.Set("subnets", flattenStringList(lb.Subnets))
d.Set("idle_timeout", lbAttrs.ConnectionSettings.IdleTimeout) d.Set("idle_timeout", lbAttrs.ConnectionSettings.IdleTimeout)
d.Set("connection_draining", lbAttrs.ConnectionDraining.Enabled) d.Set("connection_draining", lbAttrs.ConnectionDraining.Enabled)
d.Set("connection_draining_timeout", lbAttrs.ConnectionDraining.Timeout) d.Set("connection_draining_timeout", lbAttrs.ConnectionDraining.Timeout)
@ -600,6 +597,80 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {
d.SetPartial("security_groups") d.SetPartial("security_groups")
} }
if d.HasChange("availability_zones") {
o, n := d.GetChange("availability_zones")
os := o.(*schema.Set)
ns := n.(*schema.Set)
removed := expandStringList(os.Difference(ns).List())
added := expandStringList(ns.Difference(os).List())
if len(added) > 0 {
enableOpts := &elb.EnableAvailabilityZonesForLoadBalancerInput{
LoadBalancerName: aws.String(d.Id()),
AvailabilityZones: added,
}
log.Printf("[DEBUG] ELB enable availability zones opts: %s", enableOpts)
_, err := elbconn.EnableAvailabilityZonesForLoadBalancer(enableOpts)
if err != nil {
return fmt.Errorf("Failure enabling ELB availability zones: %s", err)
}
}
if len(removed) > 0 {
disableOpts := &elb.DisableAvailabilityZonesForLoadBalancerInput{
LoadBalancerName: aws.String(d.Id()),
AvailabilityZones: removed,
}
log.Printf("[DEBUG] ELB disable availability zones opts: %s", disableOpts)
_, err := elbconn.DisableAvailabilityZonesForLoadBalancer(disableOpts)
if err != nil {
return fmt.Errorf("Failure disabling ELB availability zones: %s", err)
}
}
d.SetPartial("availability_zones")
}
if d.HasChange("subnets") {
o, n := d.GetChange("subnets")
os := o.(*schema.Set)
ns := n.(*schema.Set)
removed := expandStringList(os.Difference(ns).List())
added := expandStringList(ns.Difference(os).List())
if len(added) > 0 {
attachOpts := &elb.AttachLoadBalancerToSubnetsInput{
LoadBalancerName: aws.String(d.Id()),
Subnets: added,
}
log.Printf("[DEBUG] ELB attach subnets opts: %s", attachOpts)
_, err := elbconn.AttachLoadBalancerToSubnets(attachOpts)
if err != nil {
return fmt.Errorf("Failure adding ELB subnets: %s", err)
}
}
if len(removed) > 0 {
detachOpts := &elb.DetachLoadBalancerFromSubnetsInput{
LoadBalancerName: aws.String(d.Id()),
Subnets: removed,
}
log.Printf("[DEBUG] ELB detach subnets opts: %s", detachOpts)
_, err := elbconn.DetachLoadBalancerFromSubnets(detachOpts)
if err != nil {
return fmt.Errorf("Failure removing ELB subnets: %s", err)
}
}
d.SetPartial("subnets")
}
if err := setTagsELB(elbconn, d); err != nil { if err := setTagsELB(elbconn, d); err != nil {
return err return err
} }
@ -673,29 +744,6 @@ func isLoadBalancerNotFound(err error) bool {
return ok && elberr.Code() == "LoadBalancerNotFound" return ok && elberr.Code() == "LoadBalancerNotFound"
} }
func validateElbName(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"only alphanumeric characters and hyphens allowed in %q: %q",
k, value))
}
if len(value) > 32 {
errors = append(errors, fmt.Errorf(
"%q cannot be longer than 32 characters: %q", k, value))
}
if regexp.MustCompile(`^-`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q cannot begin with a hyphen: %q", k, value))
}
if regexp.MustCompile(`-$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q cannot end with a hyphen: %q", k, value))
}
return
}
func sourceSGIdByName(meta interface{}, sg, vpcId string) (string, error) { func sourceSGIdByName(meta interface{}, sg, vpcId string) (string, error) {
conn := meta.(*AWSClient).ec2conn conn := meta.(*AWSClient).ec2conn
var filters []*ec2.Filter var filters []*ec2.Filter

View File

@ -2,22 +2,23 @@ package aws
import ( import (
"fmt" "fmt"
"os" "math/rand"
"reflect" "reflect"
"regexp" "regexp"
"sort" "sort"
"testing" "testing"
"time"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/elb"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
) )
func TestAccAWSELB_basic(t *testing.T) { func TestAccAWSELB_basic(t *testing.T) {
var conf elb.LoadBalancerDescription var conf elb.LoadBalancerDescription
ssl_certificate_id := os.Getenv("AWS_SSL_CERTIFICATE_ID")
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, PreCheck: func() { testAccPreCheck(t) },
@ -30,19 +31,20 @@ func TestAccAWSELB_basic(t *testing.T) {
testAccCheckAWSELBExists("aws_elb.bar", &conf), testAccCheckAWSELBExists("aws_elb.bar", &conf),
testAccCheckAWSELBAttributes(&conf), testAccCheckAWSELBAttributes(&conf),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_elb.bar", "name", "foobar-terraform-test"), "aws_elb.bar", "availability_zones.#", "3"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_elb.bar", "availability_zones.2487133097", "us-west-2a"), "aws_elb.bar", "availability_zones.2487133097", "us-west-2a"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_elb.bar", "availability_zones.221770259", "us-west-2b"), "aws_elb.bar", "availability_zones.221770259", "us-west-2b"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_elb.bar", "availability_zones.2050015877", "us-west-2c"), "aws_elb.bar", "availability_zones.2050015877", "us-west-2c"),
resource.TestCheckResourceAttr(
"aws_elb.bar", "subnets.#", "3"),
// NOTE: Subnet IDs are different across AWS accounts and cannot be checked.
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_elb.bar", "listener.206423021.instance_port", "8000"), "aws_elb.bar", "listener.206423021.instance_port", "8000"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_elb.bar", "listener.206423021.instance_protocol", "http"), "aws_elb.bar", "listener.206423021.instance_protocol", "http"),
resource.TestCheckResourceAttr(
"aws_elb.bar", "listener.206423021.ssl_certificate_id", ssl_certificate_id),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_elb.bar", "listener.206423021.lb_port", "80"), "aws_elb.bar", "listener.206423021.lb_port", "80"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
@ -58,17 +60,20 @@ func TestAccAWSELB_basic(t *testing.T) {
func TestAccAWSELB_fullCharacterRange(t *testing.T) { func TestAccAWSELB_fullCharacterRange(t *testing.T) {
var conf elb.LoadBalancerDescription var conf elb.LoadBalancerDescription
lbName := fmt.Sprintf("Tf-%d",
rand.New(rand.NewSource(time.Now().UnixNano())).Int())
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders, Providers: testAccProviders,
CheckDestroy: testAccCheckAWSELBDestroy, CheckDestroy: testAccCheckAWSELBDestroy,
Steps: []resource.TestStep{ Steps: []resource.TestStep{
resource.TestStep{ resource.TestStep{
Config: testAccAWSELBFullRangeOfCharacters, Config: fmt.Sprintf(testAccAWSELBFullRangeOfCharacters, lbName),
Check: resource.ComposeTestCheckFunc( Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.foo", &conf), testAccCheckAWSELBExists("aws_elb.foo", &conf),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_elb.foo", "name", "FoobarTerraform-test123"), "aws_elb.foo", "name", lbName),
), ),
}, },
}, },
@ -87,8 +92,6 @@ func TestAccAWSELB_AccessLogs(t *testing.T) {
Config: testAccAWSELBAccessLogs, Config: testAccAWSELBAccessLogs,
Check: resource.ComposeTestCheckFunc( Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.foo", &conf), testAccCheckAWSELBExists("aws_elb.foo", &conf),
resource.TestCheckResourceAttr(
"aws_elb.foo", "name", "FoobarTerraform-test123"),
), ),
}, },
@ -96,8 +99,6 @@ func TestAccAWSELB_AccessLogs(t *testing.T) {
Config: testAccAWSELBAccessLogsOn, Config: testAccAWSELBAccessLogsOn,
Check: resource.ComposeTestCheckFunc( Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.foo", &conf), testAccCheckAWSELBExists("aws_elb.foo", &conf),
resource.TestCheckResourceAttr(
"aws_elb.foo", "name", "FoobarTerraform-test123"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_elb.foo", "access_logs.#", "1"), "aws_elb.foo", "access_logs.#", "1"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
@ -111,8 +112,6 @@ func TestAccAWSELB_AccessLogs(t *testing.T) {
Config: testAccAWSELBAccessLogs, Config: testAccAWSELBAccessLogs,
Check: resource.ComposeTestCheckFunc( Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.foo", &conf), testAccCheckAWSELBExists("aws_elb.foo", &conf),
resource.TestCheckResourceAttr(
"aws_elb.foo", "name", "FoobarTerraform-test123"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"aws_elb.foo", "access_logs.#", "0"), "aws_elb.foo", "access_logs.#", "0"),
), ),
@ -142,6 +141,45 @@ func TestAccAWSELB_generatedName(t *testing.T) {
}) })
} }
func TestAccAWSELB_availabilityZones(t *testing.T) {
var conf elb.LoadBalancerDescription
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSELBDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSELBConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.bar", &conf),
resource.TestCheckResourceAttr(
"aws_elb.bar", "availability_zones.#", "3"),
resource.TestCheckResourceAttr(
"aws_elb.bar", "availability_zones.2487133097", "us-west-2a"),
resource.TestCheckResourceAttr(
"aws_elb.bar", "availability_zones.221770259", "us-west-2b"),
resource.TestCheckResourceAttr(
"aws_elb.bar", "availability_zones.2050015877", "us-west-2c"),
),
},
resource.TestStep{
Config: testAccAWSELBConfig_AvailabilityZonesUpdate,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.bar", &conf),
resource.TestCheckResourceAttr(
"aws_elb.bar", "availability_zones.#", "2"),
resource.TestCheckResourceAttr(
"aws_elb.bar", "availability_zones.2487133097", "us-west-2a"),
resource.TestCheckResourceAttr(
"aws_elb.bar", "availability_zones.221770259", "us-west-2b"),
),
},
},
})
}
func TestAccAWSELB_tags(t *testing.T) { func TestAccAWSELB_tags(t *testing.T) {
var conf elb.LoadBalancerDescription var conf elb.LoadBalancerDescription
var td elb.TagDescription var td elb.TagDescription
@ -156,8 +194,6 @@ func TestAccAWSELB_tags(t *testing.T) {
Check: resource.ComposeTestCheckFunc( Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.bar", &conf), testAccCheckAWSELBExists("aws_elb.bar", &conf),
testAccCheckAWSELBAttributes(&conf), testAccCheckAWSELBAttributes(&conf),
resource.TestCheckResourceAttr(
"aws_elb.bar", "name", "foobar-terraform-test"),
testAccLoadTags(&conf, &td), testAccLoadTags(&conf, &td),
testAccCheckELBTags(&td.Tags, "bar", "baz"), testAccCheckELBTags(&td.Tags, "bar", "baz"),
), ),
@ -168,8 +204,6 @@ func TestAccAWSELB_tags(t *testing.T) {
Check: resource.ComposeTestCheckFunc( Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.bar", &conf), testAccCheckAWSELBExists("aws_elb.bar", &conf),
testAccCheckAWSELBAttributes(&conf), testAccCheckAWSELBAttributes(&conf),
resource.TestCheckResourceAttr(
"aws_elb.bar", "name", "foobar-terraform-test"),
testAccLoadTags(&conf, &td), testAccLoadTags(&conf, &td),
testAccCheckELBTags(&td.Tags, "foo", "bar"), testAccCheckELBTags(&td.Tags, "foo", "bar"),
testAccCheckELBTags(&td.Tags, "new", "type"), testAccCheckELBTags(&td.Tags, "new", "type"),
@ -196,7 +230,8 @@ func TestAccAWSELB_iam_server_cert(t *testing.T) {
CheckDestroy: testAccCheckAWSELBDestroy, CheckDestroy: testAccCheckAWSELBDestroy,
Steps: []resource.TestStep{ Steps: []resource.TestStep{
resource.TestStep{ resource.TestStep{
Config: testAccELBIAMServerCertConfig, Config: testAccELBIAMServerCertConfig(
fmt.Sprintf("tf-acctest-%s", acctest.RandString(10))),
Check: resource.ComposeTestCheckFunc( Check: resource.ComposeTestCheckFunc(
testAccCheckAWSELBExists("aws_elb.bar", &conf), testAccCheckAWSELBExists("aws_elb.bar", &conf),
testCheck, testCheck,
@ -571,7 +606,7 @@ func testAccCheckAWSELBDestroy(s *terraform.State) error {
return err return err
} }
if providerErr.Code() != "InvalidLoadBalancerName.NotFound" { if providerErr.Code() != "LoadBalancerNotFound" {
return fmt.Errorf("Unexpected error: %s", err) return fmt.Errorf("Unexpected error: %s", err)
} }
} }
@ -591,10 +626,6 @@ func testAccCheckAWSELBAttributes(conf *elb.LoadBalancerDescription) resource.Te
return fmt.Errorf("bad availability_zones") return fmt.Errorf("bad availability_zones")
} }
if *conf.LoadBalancerName != "foobar-terraform-test" {
return fmt.Errorf("bad name")
}
l := elb.Listener{ l := elb.Listener{
InstancePort: aws.Int64(int64(8000)), InstancePort: aws.Int64(int64(8000)),
InstanceProtocol: aws.String("HTTP"), InstanceProtocol: aws.String("HTTP"),
@ -629,10 +660,6 @@ func testAccCheckAWSELBAttributesHealthCheck(conf *elb.LoadBalancerDescription)
return fmt.Errorf("bad availability_zones") return fmt.Errorf("bad availability_zones")
} }
if *conf.LoadBalancerName != "foobar-terraform-test" {
return fmt.Errorf("bad name")
}
check := &elb.HealthCheck{ check := &elb.HealthCheck{
Timeout: aws.Int64(int64(30)), Timeout: aws.Int64(int64(30)),
UnhealthyThreshold: aws.Int64(int64(5)), UnhealthyThreshold: aws.Int64(int64(5)),
@ -699,7 +726,6 @@ func testAccCheckAWSELBExists(n string, res *elb.LoadBalancerDescription) resour
const testAccAWSELBConfig = ` const testAccAWSELBConfig = `
resource "aws_elb" "bar" { resource "aws_elb" "bar" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener { listener {
@ -720,7 +746,7 @@ resource "aws_elb" "bar" {
const testAccAWSELBFullRangeOfCharacters = ` const testAccAWSELBFullRangeOfCharacters = `
resource "aws_elb" "foo" { resource "aws_elb" "foo" {
name = "FoobarTerraform-test123" name = "%s"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener { listener {
@ -734,7 +760,6 @@ resource "aws_elb" "foo" {
const testAccAWSELBAccessLogs = ` const testAccAWSELBAccessLogs = `
resource "aws_elb" "foo" { resource "aws_elb" "foo" {
name = "FoobarTerraform-test123"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener { listener {
@ -773,7 +798,6 @@ EOF
} }
resource "aws_elb" "foo" { resource "aws_elb" "foo" {
name = "FoobarTerraform-test123"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener { listener {
@ -803,9 +827,21 @@ resource "aws_elb" "foo" {
} }
` `
const testAccAWSELBConfig_AvailabilityZonesUpdate = `
resource "aws_elb" "bar" {
availability_zones = ["us-west-2a", "us-west-2b"]
listener {
instance_port = 8000
instance_protocol = "http"
lb_port = 80
lb_protocol = "http"
}
}
`
const testAccAWSELBConfig_TagUpdate = ` const testAccAWSELBConfig_TagUpdate = `
resource "aws_elb" "bar" { resource "aws_elb" "bar" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener { listener {
@ -826,7 +862,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigNewInstance = ` const testAccAWSELBConfigNewInstance = `
resource "aws_elb" "bar" { resource "aws_elb" "bar" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener { listener {
@ -848,7 +883,6 @@ resource "aws_instance" "foo" {
const testAccAWSELBConfigListenerSSLCertificateId = ` const testAccAWSELBConfigListenerSSLCertificateId = `
resource "aws_elb" "bar" { resource "aws_elb" "bar" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a"] availability_zones = ["us-west-2a"]
listener { listener {
@ -863,7 +897,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigHealthCheck = ` const testAccAWSELBConfigHealthCheck = `
resource "aws_elb" "bar" { resource "aws_elb" "bar" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener { listener {
@ -885,7 +918,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigHealthCheck_update = ` const testAccAWSELBConfigHealthCheck_update = `
resource "aws_elb" "bar" { resource "aws_elb" "bar" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a"] availability_zones = ["us-west-2a"]
listener { listener {
@ -907,7 +939,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigListener_update = ` const testAccAWSELBConfigListener_update = `
resource "aws_elb" "bar" { resource "aws_elb" "bar" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener { listener {
@ -921,7 +952,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigIdleTimeout = ` const testAccAWSELBConfigIdleTimeout = `
resource "aws_elb" "bar" { resource "aws_elb" "bar" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a"] availability_zones = ["us-west-2a"]
listener { listener {
@ -937,7 +967,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigIdleTimeout_update = ` const testAccAWSELBConfigIdleTimeout_update = `
resource "aws_elb" "bar" { resource "aws_elb" "bar" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a"] availability_zones = ["us-west-2a"]
listener { listener {
@ -953,7 +982,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigConnectionDraining = ` const testAccAWSELBConfigConnectionDraining = `
resource "aws_elb" "bar" { resource "aws_elb" "bar" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a"] availability_zones = ["us-west-2a"]
listener { listener {
@ -970,7 +998,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigConnectionDraining_update_timeout = ` const testAccAWSELBConfigConnectionDraining_update_timeout = `
resource "aws_elb" "bar" { resource "aws_elb" "bar" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a"] availability_zones = ["us-west-2a"]
listener { listener {
@ -987,7 +1014,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigConnectionDraining_update_disable = ` const testAccAWSELBConfigConnectionDraining_update_disable = `
resource "aws_elb" "bar" { resource "aws_elb" "bar" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a"] availability_zones = ["us-west-2a"]
listener { listener {
@ -1003,7 +1029,6 @@ resource "aws_elb" "bar" {
const testAccAWSELBConfigSecurityGroups = ` const testAccAWSELBConfigSecurityGroups = `
resource "aws_elb" "bar" { resource "aws_elb" "bar" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener { listener {
@ -1017,9 +1042,6 @@ resource "aws_elb" "bar" {
} }
resource "aws_security_group" "bar" { resource "aws_security_group" "bar" {
name = "terraform-elb-acceptance-test"
description = "Used in the terraform acceptance tests for the elb resource"
ingress { ingress {
protocol = "tcp" protocol = "tcp"
from_port = 80 from_port = 80
@ -1031,9 +1053,10 @@ resource "aws_security_group" "bar" {
// This IAM Server config is lifted from // This IAM Server config is lifted from
// builtin/providers/aws/resource_aws_iam_server_certificate_test.go // builtin/providers/aws/resource_aws_iam_server_certificate_test.go
var testAccELBIAMServerCertConfig = ` func testAccELBIAMServerCertConfig(certName string) string {
return fmt.Sprintf(`
resource "aws_iam_server_certificate" "test_cert" { resource "aws_iam_server_certificate" "test_cert" {
name = "terraform-test-cert" name = "%s"
certificate_body = <<EOF certificate_body = <<EOF
-----BEGIN CERTIFICATE----- -----BEGIN CERTIFICATE-----
MIIDCDCCAfACAQEwDQYJKoZIhvcNAQELBQAwgY4xCzAJBgNVBAYTAlVTMREwDwYD MIIDCDCCAfACAQEwDQYJKoZIhvcNAQELBQAwgY4xCzAJBgNVBAYTAlVTMREwDwYD
@ -1103,7 +1126,6 @@ EOF
} }
resource "aws_elb" "bar" { resource "aws_elb" "bar" {
name = "foobar-terraform-test"
availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
listener { listener {
@ -1121,4 +1143,5 @@ resource "aws_elb" "bar" {
cross_zone_load_balancing = true cross_zone_load_balancing = true
} }
` `, certName)
}

View File

@ -182,11 +182,26 @@ func testAccCheckVaultNotificationsMissing(name string) resource.TestCheckFunc {
} }
func testAccCheckGlacierVaultDestroy(s *terraform.State) error { func testAccCheckGlacierVaultDestroy(s *terraform.State) error {
if len(s.RootModule().Resources) > 0 { conn := testAccProvider.Meta().(*AWSClient).glacierconn
return fmt.Errorf("Expected all resources to be gone, but found: %#v",
s.RootModule().Resources)
}
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_glacier_vault" {
continue
}
input := &glacier.DescribeVaultInput{
VaultName: aws.String(rs.Primary.ID),
}
if _, err := conn.DescribeVault(input); err != nil {
// Verify the error is what we want
if ae, ok := err.(awserr.Error); ok && ae.Code() == "ResourceNotFoundException" {
continue
}
return err
}
return fmt.Errorf("still exists")
}
return nil return nil
} }

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -55,23 +56,18 @@ func testAccCheckAWSGroupMembershipDestroy(s *terraform.State) error {
group := rs.Primary.Attributes["group"] group := rs.Primary.Attributes["group"]
resp, err := conn.GetGroup(&iam.GetGroupInput{ _, err := conn.GetGroup(&iam.GetGroupInput{
GroupName: aws.String(group), GroupName: aws.String(group),
}) })
if err != nil { if err != nil {
// might error here // Verify the error is what we want
if ae, ok := err.(awserr.Error); ok && ae.Code() == "NoSuchEntity" {
continue
}
return err return err
} }
users := []string{"test-user", "test-user-two", "test-user-three"} return fmt.Errorf("still exists")
for _, u := range resp.Users {
for _, i := range users {
if i == *u.UserName {
return fmt.Errorf("Error: User (%s) still a member of Group (%s)", i, *resp.Group.GroupName)
}
}
}
} }
return nil return nil

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -39,8 +40,30 @@ func TestAccAWSIAMGroupPolicy_basic(t *testing.T) {
} }
func testAccCheckIAMGroupPolicyDestroy(s *terraform.State) error { func testAccCheckIAMGroupPolicyDestroy(s *terraform.State) error {
if len(s.RootModule().Resources) > 0 { conn := testAccProvider.Meta().(*AWSClient).iamconn
return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_iam_group_policy" {
continue
}
group, name := resourceAwsIamGroupPolicyParseId(rs.Primary.ID)
request := &iam.GetGroupPolicyInput{
PolicyName: aws.String(name),
GroupName: aws.String(group),
}
_, err := conn.GetGroupPolicy(request)
if err != nil {
// Verify the error is what we want
if ae, ok := err.(awserr.Error); ok && ae.Code() == "NoSuchEntity" {
continue
}
return err
}
return fmt.Errorf("still exists")
} }
return nil return nil

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -39,8 +40,33 @@ func TestAccAWSIAMRolePolicy_basic(t *testing.T) {
} }
func testAccCheckIAMRolePolicyDestroy(s *terraform.State) error { func testAccCheckIAMRolePolicyDestroy(s *terraform.State) error {
if len(s.RootModule().Resources) > 0 { iamconn := testAccProvider.Meta().(*AWSClient).iamconn
return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_iam_role_policy" {
continue
}
role, name := resourceAwsIamRolePolicyParseId(rs.Primary.ID)
request := &iam.GetRolePolicyInput{
PolicyName: aws.String(name),
RoleName: aws.String(role),
}
var err error
getResp, err := iamconn.GetRolePolicy(request)
if err != nil {
if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
// none found, that's good
return nil
}
return fmt.Errorf("Error reading IAM policy %s from role %s: %s", name, role, err)
}
if getResp != nil {
return fmt.Errorf("Found IAM Role, expected none: %s", getResp)
}
} }
return nil return nil

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -33,8 +34,28 @@ func TestAccAWSIAMSamlProvider_basic(t *testing.T) {
} }
func testAccCheckIAMSamlProviderDestroy(s *terraform.State) error { func testAccCheckIAMSamlProviderDestroy(s *terraform.State) error {
if len(s.RootModule().Resources) > 0 { iamconn := testAccProvider.Meta().(*AWSClient).iamconn
return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_iam_saml_provider" {
continue
}
input := &iam.GetSAMLProviderInput{
SAMLProviderArn: aws.String(rs.Primary.ID),
}
out, err := iamconn.GetSAMLProvider(input)
if err != nil {
if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
// none found, that's good
return nil
}
return fmt.Errorf("Error reading IAM SAML Provider, out: %s, err: %s", out, err)
}
if out != nil {
return fmt.Errorf("Found IAM SAML Provider, expected none: %s", out)
}
} }
return nil return nil

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -39,8 +40,33 @@ func TestAccAWSIAMUserPolicy_basic(t *testing.T) {
} }
func testAccCheckIAMUserPolicyDestroy(s *terraform.State) error { func testAccCheckIAMUserPolicyDestroy(s *terraform.State) error {
if len(s.RootModule().Resources) > 0 { iamconn := testAccProvider.Meta().(*AWSClient).iamconn
return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_iam_user_policy" {
continue
}
role, name := resourceAwsIamRolePolicyParseId(rs.Primary.ID)
request := &iam.GetRolePolicyInput{
PolicyName: aws.String(name),
RoleName: aws.String(role),
}
var err error
getResp, err := iamconn.GetRolePolicy(request)
if err != nil {
if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
// none found, that's good
return nil
}
return fmt.Errorf("Error reading IAM policy %s from role %s: %s", name, role, err)
}
if getResp != nil {
return fmt.Errorf("Found IAM Role, expected none: %s", getResp)
}
} }
return nil return nil

View File

@ -132,6 +132,11 @@ func resourceAwsInstance() *schema.Resource {
Computed: true, Computed: true,
}, },
"instance_state": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"private_dns": &schema.Schema{ "private_dns": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
Computed: true, Computed: true,
@ -140,6 +145,7 @@ func resourceAwsInstance() *schema.Resource {
"ebs_optimized": &schema.Schema{ "ebs_optimized": &schema.Schema{
Type: schema.TypeBool, Type: schema.TypeBool,
Optional: true, Optional: true,
ForceNew: true,
}, },
"disable_api_termination": &schema.Schema{ "disable_api_termination": &schema.Schema{
@ -364,12 +370,22 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
continue continue
} }
// Warn if the AWS Error involves group ids, to help identify situation
// where a user uses group ids in security_groups for the Default VPC.
// See https://github.com/hashicorp/terraform/issues/3798
if awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "groupId is invalid") {
return fmt.Errorf("Error launching instance, possible mismatch of Security Group IDs and Names. See AWS Instance docs here: %s.\n\n\tAWS Error: %s", "https://terraform.io/docs/providers/aws/r/instance.html", awsErr.Message())
}
} }
break break
} }
if err != nil { if err != nil {
return fmt.Errorf("Error launching source instance: %s", err) return fmt.Errorf("Error launching source instance: %s", err)
} }
if runResp == nil || len(runResp.Instances) == 0 {
return fmt.Errorf("Error launching source instance: no instances returned in response")
}
instance := runResp.Instances[0] instance := runResp.Instances[0]
log.Printf("[INFO] Instance ID: %s", *instance.InstanceId) log.Printf("[INFO] Instance ID: %s", *instance.InstanceId)
@ -385,7 +401,7 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"pending"}, Pending: []string{"pending"},
Target: "running", Target: []string{"running"},
Refresh: InstanceStateRefreshFunc(conn, *instance.InstanceId), Refresh: InstanceStateRefreshFunc(conn, *instance.InstanceId),
Timeout: 10 * time.Minute, Timeout: 10 * time.Minute,
Delay: 10 * time.Second, Delay: 10 * time.Second,
@ -444,10 +460,14 @@ func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error {
instance := resp.Reservations[0].Instances[0] instance := resp.Reservations[0].Instances[0]
// If the instance is terminated, then it is gone if instance.State != nil {
if *instance.State.Name == "terminated" { // If the instance is terminated, then it is gone
d.SetId("") if *instance.State.Name == "terminated" {
return nil d.SetId("")
return nil
}
d.Set("instance_state", instance.State.Name)
} }
if instance.Placement != nil { if instance.Placement != nil {
@ -1062,7 +1082,7 @@ func awsTerminateInstance(conn *ec2.EC2, id string) error {
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"}, Pending: []string{"pending", "running", "shutting-down", "stopped", "stopping"},
Target: "terminated", Target: []string{"terminated"},
Refresh: InstanceStateRefreshFunc(conn, id), Refresh: InstanceStateRefreshFunc(conn, id),
Timeout: 10 * time.Minute, Timeout: 10 * time.Minute,
Delay: 10 * time.Second, Delay: 10 * time.Second,
@ -1082,5 +1102,6 @@ func iamInstanceProfileArnToName(ip *ec2.IamInstanceProfile) string {
if ip == nil || ip.Arn == nil { if ip == nil || ip.Arn == nil {
return "" return ""
} }
return strings.Split(*ip.Arn, "/")[1] parts := strings.Split(*ip.Arn, "/")
return parts[len(parts)-1]
} }

View File

@ -19,12 +19,10 @@ func resourceAwsInstanceMigrateState(
default: default:
return is, fmt.Errorf("Unexpected schema version: %d", v) return is, fmt.Errorf("Unexpected schema version: %d", v)
} }
return is, nil
} }
func migrateStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { func migrateStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
if is.Empty() { if is.Empty() || is.Attributes == nil {
log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
return is, nil return is, nil
} }

View File

@ -112,22 +112,22 @@ func TestAccAWSInstance_blockDevices(t *testing.T) {
// Check if the root block device exists. // Check if the root block device exists.
if _, ok := blockDevices["/dev/sda1"]; !ok { if _, ok := blockDevices["/dev/sda1"]; !ok {
fmt.Errorf("block device doesn't exist: /dev/sda1") return fmt.Errorf("block device doesn't exist: /dev/sda1")
} }
// Check if the secondary block device exists. // Check if the secondary block device exists.
if _, ok := blockDevices["/dev/sdb"]; !ok { if _, ok := blockDevices["/dev/sdb"]; !ok {
fmt.Errorf("block device doesn't exist: /dev/sdb") return fmt.Errorf("block device doesn't exist: /dev/sdb")
} }
// Check if the third block device exists. // Check if the third block device exists.
if _, ok := blockDevices["/dev/sdc"]; !ok { if _, ok := blockDevices["/dev/sdc"]; !ok {
fmt.Errorf("block device doesn't exist: /dev/sdc") return fmt.Errorf("block device doesn't exist: /dev/sdc")
} }
// Check if the encrypted block device exists // Check if the encrypted block device exists
if _, ok := blockDevices["/dev/sdd"]; !ok { if _, ok := blockDevices["/dev/sdd"]; !ok {
fmt.Errorf("block device doesn't exist: /dev/sdd") return fmt.Errorf("block device doesn't exist: /dev/sdd")
} }
return nil return nil
@ -513,6 +513,41 @@ func TestAccAWSInstance_rootBlockDeviceMismatch(t *testing.T) {
}) })
} }
// This test reproduces the bug here:
// https://github.com/hashicorp/terraform/issues/1752
//
// I wish there were a way to exercise resources built with helper.Schema in a
// unit context, in which case this test could be moved there, but for now this
// will cover the bugfix.
//
// The following triggers "diffs didn't match during apply" without the fix in to
// set NewRemoved on the .# field when it changes to 0.
func TestAccAWSInstance_forceNewAndTagsDrift(t *testing.T) {
	var v ec2.Instance

	// Step 1 tags the instance out of band (drift); step 2 changes the
	// instance type, which forces a new resource on top of the drift.
	steps := []resource.TestStep{
		resource.TestStep{
			Config: testAccInstanceConfigForceNewAndTagsDrift,
			Check: resource.ComposeTestCheckFunc(
				testAccCheckInstanceExists("aws_instance.foo", &v),
				driftTags(&v),
			),
			ExpectNonEmptyPlan: true,
		},
		resource.TestStep{
			Config: testAccInstanceConfigForceNewAndTagsDrift_Update,
			Check: resource.ComposeTestCheckFunc(
				testAccCheckInstanceExists("aws_instance.foo", &v),
			),
		},
	}

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckInstanceDestroy,
		Steps:        steps,
	})
}
func testAccCheckInstanceDestroy(s *terraform.State) error { func testAccCheckInstanceDestroy(s *terraform.State) error {
return testAccCheckInstanceDestroyWithProvider(s, testAccProvider) return testAccCheckInstanceDestroyWithProvider(s, testAccProvider)
} }
@ -540,26 +575,25 @@ func testAccCheckInstanceDestroyWithProvider(s *terraform.State, provider *schem
} }
// Try to find the resource // Try to find the resource
var err error
resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{ resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String(rs.Primary.ID)}, InstanceIds: []*string{aws.String(rs.Primary.ID)},
}) })
if err == nil { if err == nil {
if len(resp.Reservations) > 0 { for _, r := range resp.Reservations {
return fmt.Errorf("still exist.") for _, i := range r.Instances {
if i.State != nil && *i.State.Name != "terminated" {
return fmt.Errorf("Found unterminated instance: %s", i)
}
}
} }
return nil
} }
// Verify the error is what we want // Verify the error is what we want
ec2err, ok := err.(awserr.Error) if ae, ok := err.(awserr.Error); ok && ae.Code() == "InvalidInstanceID.NotFound" {
if !ok { continue
return err
}
if ec2err.Code() != "InvalidInstanceID.NotFound" {
return err
} }
return err
} }
return nil return nil
@ -623,6 +657,22 @@ func TestInstanceTenancySchema(t *testing.T) {
} }
} }
// driftTags returns a check that simulates out-of-band tag drift by tagging
// the given instance directly through the EC2 API, behind Terraform's back.
func driftTags(instance *ec2.Instance) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		conn := testAccProvider.Meta().(*AWSClient).ec2conn

		driftTag := &ec2.Tag{
			Key:   aws.String("Drift"),
			Value: aws.String("Happens"),
		}
		_, err := conn.CreateTags(&ec2.CreateTagsInput{
			Resources: []*string{instance.InstanceId},
			Tags:      []*ec2.Tag{driftTag},
		})
		return err
	}
}
const testAccInstanceConfig_pre = ` const testAccInstanceConfig_pre = `
resource "aws_security_group" "tf_test_foo" { resource "aws_security_group" "tf_test_foo" {
name = "tf_test_foo" name = "tf_test_foo"
@ -989,3 +1039,37 @@ resource "aws_instance" "foo" {
} }
} }
` `
// testAccInstanceConfigForceNewAndTagsDrift launches a t2.nano instance in a
// dedicated VPC/subnet. Paired with the _Update config below, which differs
// only in instance_type, to exercise a forced replacement after tag drift.
const testAccInstanceConfigForceNewAndTagsDrift = `
resource "aws_vpc" "foo" {
cidr_block = "10.1.0.0/16"
}
resource "aws_subnet" "foo" {
cidr_block = "10.1.1.0/24"
vpc_id = "${aws_vpc.foo.id}"
}
resource "aws_instance" "foo" {
ami = "ami-22b9a343"
instance_type = "t2.nano"
subnet_id = "${aws_subnet.foo.id}"
}
`

// testAccInstanceConfigForceNewAndTagsDrift_Update is identical to the config
// above except for instance_type (t2.micro), which forces a new instance.
const testAccInstanceConfigForceNewAndTagsDrift_Update = `
resource "aws_vpc" "foo" {
cidr_block = "10.1.0.0/16"
}
resource "aws_subnet" "foo" {
cidr_block = "10.1.1.0/24"
vpc_id = "${aws_vpc.foo.id}"
}
resource "aws_instance" "foo" {
ami = "ami-22b9a343"
instance_type = "t2.micro"
subnet_id = "${aws_subnet.foo.id}"
}
`

View File

@ -170,7 +170,7 @@ func resourceAwsInternetGatewayAttach(d *schema.ResourceData, meta interface{})
log.Printf("[DEBUG] Waiting for internet gateway (%s) to attach", d.Id()) log.Printf("[DEBUG] Waiting for internet gateway (%s) to attach", d.Id())
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"detached", "attaching"}, Pending: []string{"detached", "attaching"},
Target: "available", Target: []string{"available"},
Refresh: IGAttachStateRefreshFunc(conn, d.Id(), "available"), Refresh: IGAttachStateRefreshFunc(conn, d.Id(), "available"),
Timeout: 1 * time.Minute, Timeout: 1 * time.Minute,
} }
@ -205,7 +205,7 @@ func resourceAwsInternetGatewayDetach(d *schema.ResourceData, meta interface{})
log.Printf("[DEBUG] Waiting for internet gateway (%s) to detach", d.Id()) log.Printf("[DEBUG] Waiting for internet gateway (%s) to detach", d.Id())
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"detaching"}, Pending: []string{"detaching"},
Target: "detached", Target: []string{"detached"},
Refresh: detachIGStateRefreshFunc(conn, d.Id(), vpcID.(string)), Refresh: detachIGStateRefreshFunc(conn, d.Id(), vpcID.(string)),
Timeout: 5 * time.Minute, Timeout: 5 * time.Minute,
Delay: 10 * time.Second, Delay: 10 * time.Second,

View File

@ -17,8 +17,6 @@ func resourceAwsKeyPairMigrateState(
default: default:
return is, fmt.Errorf("Unexpected schema version: %d", v) return is, fmt.Errorf("Unexpected schema version: %d", v)
} }
return is, nil
} }
func migrateKeyPairStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { func migrateKeyPairStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {

View File

@ -2,6 +2,7 @@ package aws
import ( import (
"fmt" "fmt"
"log"
"strings" "strings"
"time" "time"
@ -102,7 +103,7 @@ func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta
DeliveryStreamName: aws.String(sn), DeliveryStreamName: aws.String(sn),
} }
s3_config := &firehose.S3DestinationConfiguration{ s3Config := &firehose.S3DestinationConfiguration{
BucketARN: aws.String(d.Get("s3_bucket_arn").(string)), BucketARN: aws.String(d.Get("s3_bucket_arn").(string)),
RoleARN: aws.String(d.Get("role_arn").(string)), RoleARN: aws.String(d.Get("role_arn").(string)),
BufferingHints: &firehose.BufferingHints{ BufferingHints: &firehose.BufferingHints{
@ -112,12 +113,25 @@ func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta
CompressionFormat: aws.String(d.Get("s3_data_compression").(string)), CompressionFormat: aws.String(d.Get("s3_data_compression").(string)),
} }
if v, ok := d.GetOk("s3_prefix"); ok { if v, ok := d.GetOk("s3_prefix"); ok {
s3_config.Prefix = aws.String(v.(string)) s3Config.Prefix = aws.String(v.(string))
} }
input.S3DestinationConfiguration = s3_config input.S3DestinationConfiguration = s3Config
_, err := conn.CreateDeliveryStream(input) var err error
for i := 0; i < 5; i++ {
_, err := conn.CreateDeliveryStream(input)
if awsErr, ok := err.(awserr.Error); ok {
// IAM roles can take ~10 seconds to propagate in AWS:
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
if awsErr.Code() == "InvalidArgumentException" && strings.Contains(awsErr.Message(), "Firehose is unable to assume role") {
log.Printf("[DEBUG] Firehose could not assume role referenced, retrying...")
time.Sleep(2 * time.Second)
continue
}
}
break
}
if err != nil { if err != nil {
if awsErr, ok := err.(awserr.Error); ok { if awsErr, ok := err.(awserr.Error); ok {
return fmt.Errorf("[WARN] Error creating Kinesis Firehose Delivery Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code()) return fmt.Errorf("[WARN] Error creating Kinesis Firehose Delivery Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code())
@ -127,7 +141,7 @@ func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"CREATING"}, Pending: []string{"CREATING"},
Target: "ACTIVE", Target: []string{"ACTIVE"},
Refresh: firehoseStreamStateRefreshFunc(conn, sn), Refresh: firehoseStreamStateRefreshFunc(conn, sn),
Timeout: 5 * time.Minute, Timeout: 5 * time.Minute,
Delay: 10 * time.Second, Delay: 10 * time.Second,
@ -242,7 +256,7 @@ func resourceAwsKinesisFirehoseDeliveryStreamDelete(d *schema.ResourceData, meta
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"DELETING"}, Pending: []string{"DELETING"},
Target: "DESTROYED", Target: []string{"DESTROYED"},
Refresh: firehoseStreamStateRefreshFunc(conn, sn), Refresh: firehoseStreamStateRefreshFunc(conn, sn),
Timeout: 5 * time.Minute, Timeout: 5 * time.Minute,
Delay: 10 * time.Second, Delay: 10 * time.Second,

View File

@ -4,6 +4,7 @@ import (
"fmt" "fmt"
"log" "log"
"math/rand" "math/rand"
"os"
"strings" "strings"
"testing" "testing"
"time" "time"
@ -16,12 +17,17 @@ import (
func TestAccAWSKinesisFirehoseDeliveryStream_basic(t *testing.T) { func TestAccAWSKinesisFirehoseDeliveryStream_basic(t *testing.T) {
var stream firehose.DeliveryStreamDescription var stream firehose.DeliveryStreamDescription
ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int() ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
config := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_basic, ri, ri) config := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_basic,
os.Getenv("AWS_ACCOUNT_ID"), ri, ri)
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, PreCheck: func() {
testAccPreCheck(t)
if os.Getenv("AWS_ACCOUNT_ID") == "" {
t.Fatal("AWS_ACCOUNT_ID must be set")
}
},
Providers: testAccProviders, Providers: testAccProviders,
CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy,
Steps: []resource.TestStep{ Steps: []resource.TestStep{
@ -40,11 +46,18 @@ func TestAccAWSKinesisFirehoseDeliveryStream_s3ConfigUpdates(t *testing.T) {
var stream firehose.DeliveryStreamDescription var stream firehose.DeliveryStreamDescription
ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int() ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
preconfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3, ri, ri) preconfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3,
postConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3Updates, ri, ri) os.Getenv("AWS_ACCOUNT_ID"), ri, ri)
postConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_s3Updates,
os.Getenv("AWS_ACCOUNT_ID"), ri, ri)
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, PreCheck: func() {
testAccPreCheck(t)
if os.Getenv("AWS_ACCOUNT_ID") == "" {
t.Fatal("AWS_ACCOUNT_ID must be set")
}
},
Providers: testAccProviders, Providers: testAccProviders,
CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy,
Steps: []resource.TestStep{ Steps: []resource.TestStep{
@ -147,41 +160,200 @@ func testAccCheckKinesisFirehoseDeliveryStreamDestroy(s *terraform.State) error
} }
var testAccKinesisFirehoseDeliveryStreamConfig_basic = ` var testAccKinesisFirehoseDeliveryStreamConfig_basic = `
resource "aws_iam_role" "firehose" {
name = "terraform_acctest_firehose_delivery_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "firehose.amazonaws.com"
},
"Action": "sts:AssumeRole",
"Condition": {
"StringEquals": {
"sts:ExternalId": "%s"
}
}
}
]
}
EOF
}
resource "aws_s3_bucket" "bucket" { resource "aws_s3_bucket" "bucket" {
bucket = "tf-test-bucket-%d" bucket = "tf-test-bucket-%d"
acl = "private" acl = "private"
} }
resource "aws_iam_role_policy" "firehose" {
name = "terraform_acctest_firehose_delivery_policy"
role = "${aws_iam_role.firehose.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Action": [
"s3:AbortMultipartUpload",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:ListBucket",
"s3:ListBucketMultipartUploads",
"s3:PutObject"
],
"Resource": [
"arn:aws:s3:::${aws_s3_bucket.bucket.id}",
"arn:aws:s3:::${aws_s3_bucket.bucket.id}/*"
]
}
]
}
EOF
}
resource "aws_kinesis_firehose_delivery_stream" "test_stream" { resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
depends_on = ["aws_iam_role_policy.firehose"]
name = "terraform-kinesis-firehose-basictest-%d" name = "terraform-kinesis-firehose-basictest-%d"
destination = "s3" destination = "s3"
role_arn = "arn:aws:iam::946579370547:role/firehose_delivery_role" role_arn = "${aws_iam_role.firehose.arn}"
s3_bucket_arn = "${aws_s3_bucket.bucket.arn}" s3_bucket_arn = "${aws_s3_bucket.bucket.arn}"
}` }`
var testAccKinesisFirehoseDeliveryStreamConfig_s3 = ` var testAccKinesisFirehoseDeliveryStreamConfig_s3 = `
resource "aws_iam_role" "firehose" {
name = "terraform_acctest_firehose_delivery_role_s3"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "firehose.amazonaws.com"
},
"Action": "sts:AssumeRole",
"Condition": {
"StringEquals": {
"sts:ExternalId": "%s"
}
}
}
]
}
EOF
}
resource "aws_s3_bucket" "bucket" { resource "aws_s3_bucket" "bucket" {
bucket = "tf-test-bucket-%d" bucket = "tf-test-bucket-%d"
acl = "private" acl = "private"
} }
resource "aws_iam_role_policy" "firehose" {
name = "terraform_acctest_firehose_delivery_policy_s3"
role = "${aws_iam_role.firehose.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Action": [
"s3:AbortMultipartUpload",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:ListBucket",
"s3:ListBucketMultipartUploads",
"s3:PutObject"
],
"Resource": [
"arn:aws:s3:::${aws_s3_bucket.bucket.id}",
"arn:aws:s3:::${aws_s3_bucket.bucket.id}/*"
]
}
]
}
EOF
}
resource "aws_kinesis_firehose_delivery_stream" "test_stream" { resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
depends_on = ["aws_iam_role_policy.firehose"]
name = "terraform-kinesis-firehose-s3test-%d" name = "terraform-kinesis-firehose-s3test-%d"
destination = "s3" destination = "s3"
role_arn = "arn:aws:iam::946579370547:role/firehose_delivery_role" role_arn = "${aws_iam_role.firehose.arn}"
s3_bucket_arn = "${aws_s3_bucket.bucket.arn}" s3_bucket_arn = "${aws_s3_bucket.bucket.arn}"
}` }`
var testAccKinesisFirehoseDeliveryStreamConfig_s3Updates = ` var testAccKinesisFirehoseDeliveryStreamConfig_s3Updates = `
resource "aws_iam_role" "firehose" {
name = "terraform_acctest_firehose_delivery_role_s3"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "firehose.amazonaws.com"
},
"Action": "sts:AssumeRole",
"Condition": {
"StringEquals": {
"sts:ExternalId": "%s"
}
}
}
]
}
EOF
}
resource "aws_s3_bucket" "bucket" { resource "aws_s3_bucket" "bucket" {
bucket = "tf-test-bucket-01-%d" bucket = "tf-test-bucket-%d"
acl = "private" acl = "private"
} }
resource "aws_iam_role_policy" "firehose" {
name = "terraform_acctest_firehose_delivery_policy_s3"
role = "${aws_iam_role.firehose.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Action": [
"s3:AbortMultipartUpload",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:ListBucket",
"s3:ListBucketMultipartUploads",
"s3:PutObject"
],
"Resource": [
"arn:aws:s3:::${aws_s3_bucket.bucket.id}",
"arn:aws:s3:::${aws_s3_bucket.bucket.id}/*"
]
}
]
}
EOF
}
resource "aws_kinesis_firehose_delivery_stream" "test_stream" { resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
depends_on = ["aws_iam_role_policy.firehose"]
name = "terraform-kinesis-firehose-s3test-%d" name = "terraform-kinesis-firehose-s3test-%d"
destination = "s3" destination = "s3"
role_arn = "arn:aws:iam::946579370547:role/firehose_delivery_role" role_arn = "${aws_iam_role.firehose.arn}"
s3_bucket_arn = "${aws_s3_bucket.bucket.arn}" s3_bucket_arn = "${aws_s3_bucket.bucket.arn}"
s3_buffer_size = 10 s3_buffer_size = 10
s3_buffer_interval = 400 s3_buffer_interval = 400

View File

@ -60,7 +60,7 @@ func resourceAwsKinesisStreamCreate(d *schema.ResourceData, meta interface{}) er
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"CREATING"}, Pending: []string{"CREATING"},
Target: "ACTIVE", Target: []string{"ACTIVE"},
Refresh: streamStateRefreshFunc(conn, sn), Refresh: streamStateRefreshFunc(conn, sn),
Timeout: 5 * time.Minute, Timeout: 5 * time.Minute,
Delay: 10 * time.Second, Delay: 10 * time.Second,
@ -74,9 +74,10 @@ func resourceAwsKinesisStreamCreate(d *schema.ResourceData, meta interface{}) er
sn, err) sn, err)
} }
s := streamRaw.(*kinesis.StreamDescription) s := streamRaw.(kinesisStreamState)
d.SetId(*s.StreamARN) d.SetId(s.arn)
d.Set("arn", s.StreamARN) d.Set("arn", s.arn)
d.Set("shard_count", s.shardCount)
return resourceAwsKinesisStreamUpdate(d, meta) return resourceAwsKinesisStreamUpdate(d, meta)
} }
@ -98,10 +99,8 @@ func resourceAwsKinesisStreamUpdate(d *schema.ResourceData, meta interface{}) er
func resourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) error { func resourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).kinesisconn conn := meta.(*AWSClient).kinesisconn
sn := d.Get("name").(string) sn := d.Get("name").(string)
describeOpts := &kinesis.DescribeStreamInput{
StreamName: aws.String(sn), state, err := readKinesisStreamState(conn, sn)
}
resp, err := conn.DescribeStream(describeOpts)
if err != nil { if err != nil {
if awsErr, ok := err.(awserr.Error); ok { if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "ResourceNotFoundException" { if awsErr.Code() == "ResourceNotFoundException" {
@ -111,11 +110,10 @@ func resourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) erro
return fmt.Errorf("[WARN] Error reading Kinesis Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code()) return fmt.Errorf("[WARN] Error reading Kinesis Stream: \"%s\", code: \"%s\"", awsErr.Message(), awsErr.Code())
} }
return err return err
}
s := resp.StreamDescription }
d.Set("arn", *s.StreamARN) d.Set("arn", state.arn)
d.Set("shard_count", len(s.Shards)) d.Set("shard_count", state.shardCount)
// set tags // set tags
describeTagsOpts := &kinesis.ListTagsForStreamInput{ describeTagsOpts := &kinesis.ListTagsForStreamInput{
@ -144,7 +142,7 @@ func resourceAwsKinesisStreamDelete(d *schema.ResourceData, meta interface{}) er
stateConf := &resource.StateChangeConf{ stateConf := &resource.StateChangeConf{
Pending: []string{"DELETING"}, Pending: []string{"DELETING"},
Target: "DESTROYED", Target: []string{"DESTROYED"},
Refresh: streamStateRefreshFunc(conn, sn), Refresh: streamStateRefreshFunc(conn, sn),
Timeout: 5 * time.Minute, Timeout: 5 * time.Minute,
Delay: 10 * time.Second, Delay: 10 * time.Second,
@ -162,12 +160,30 @@ func resourceAwsKinesisStreamDelete(d *schema.ResourceData, meta interface{}) er
return nil return nil
} }
// kinesisStreamState is a condensed view of a Kinesis stream's description,
// accumulated across all pages of a DescribeStream call.
type kinesisStreamState struct {
	arn        string // stream ARN from the stream description
	status     string // stream status as reported by DescribeStream
	shardCount int    // total shards, summed over every description page
}
// readKinesisStreamState pages through DescribeStream for the named stream
// and folds every page into a single kinesisStreamState summary.
func readKinesisStreamState(conn *kinesis.Kinesis, sn string) (kinesisStreamState, error) {
	var summary kinesisStreamState

	input := &kinesis.DescribeStreamInput{
		StreamName: aws.String(sn),
	}
	// ARN and status are the same on every page; shard counts accumulate.
	err := conn.DescribeStreamPages(input, func(page *kinesis.DescribeStreamOutput, last bool) bool {
		desc := page.StreamDescription
		summary.arn = aws.StringValue(desc.StreamARN)
		summary.status = aws.StringValue(desc.StreamStatus)
		summary.shardCount += len(desc.Shards)
		return !last
	})

	return summary, err
}
func streamStateRefreshFunc(conn *kinesis.Kinesis, sn string) resource.StateRefreshFunc { func streamStateRefreshFunc(conn *kinesis.Kinesis, sn string) resource.StateRefreshFunc {
return func() (interface{}, string, error) { return func() (interface{}, string, error) {
describeOpts := &kinesis.DescribeStreamInput{ state, err := readKinesisStreamState(conn, sn)
StreamName: aws.String(sn),
}
resp, err := conn.DescribeStream(describeOpts)
if err != nil { if err != nil {
if awsErr, ok := err.(awserr.Error); ok { if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "ResourceNotFoundException" { if awsErr.Code() == "ResourceNotFoundException" {
@ -178,6 +194,6 @@ func streamStateRefreshFunc(conn *kinesis.Kinesis, sn string) resource.StateRefr
return nil, "failed", err return nil, "failed", err
} }
return resp.StreamDescription, *resp.StreamDescription.StreamStatus, nil return state, state.status, nil
} }
} }

View File

@ -0,0 +1,133 @@
package aws
import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/lambda"
	"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsLambdaAlias defines the schema and CRUD handlers for the
// aws_lambda_alias resource, which maps onto the Lambda Alias API.
func resourceAwsLambdaAlias() *schema.Resource {
	aliasSchema := map[string]*schema.Schema{
		"description": &schema.Schema{
			Type:     schema.TypeString,
			Optional: true,
		},
		"function_name": &schema.Schema{
			Type:     schema.TypeString,
			Required: true,
		},
		"function_version": &schema.Schema{
			Type:     schema.TypeString,
			Required: true,
		},
		"name": &schema.Schema{
			Type:     schema.TypeString,
			Required: true,
		},
	}

	return &schema.Resource{
		Create: resourceAwsLambdaAliasCreate,
		Read:   resourceAwsLambdaAliasRead,
		Update: resourceAwsLambdaAliasUpdate,
		Delete: resourceAwsLambdaAliasDelete,
		Schema: aliasSchema,
	}
}
// resourceAwsLambdaAliasCreate maps to:
// CreateAlias in the API / SDK
func resourceAwsLambdaAliasCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	fn := d.Get("function_name").(string)
	alias := d.Get("name").(string)
	log.Printf("[DEBUG] Creating Lambda alias: alias %s for function %s", alias, fn)

	created, err := conn.CreateAlias(&lambda.CreateAliasInput{
		Description:     aws.String(d.Get("description").(string)),
		FunctionName:    aws.String(fn),
		FunctionVersion: aws.String(d.Get("function_version").(string)),
		Name:            aws.String(alias),
	})
	if err != nil {
		return fmt.Errorf("Error creating Lambda alias: %s", err)
	}

	// The alias ARN is the unique identifier for this resource.
	d.SetId(*created.AliasArn)

	return resourceAwsLambdaAliasRead(d, meta)
}
// resourceAwsLambdaAliasRead maps to:
// GetAlias in the API / SDK
//
// If the alias no longer exists in AWS, the resource is removed from state
// instead of returning an error, so Terraform can plan its re-creation.
func resourceAwsLambdaAliasRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	log.Printf("[DEBUG] Fetching Lambda alias: %s:%s", d.Get("function_name"), d.Get("name"))

	params := &lambda.GetAliasInput{
		FunctionName: aws.String(d.Get("function_name").(string)),
		Name:         aws.String(d.Get("name").(string)),
	}

	aliasConfiguration, err := conn.GetAlias(params)
	if err != nil {
		// Alias was deleted out of band: drop it from state rather than
		// failing the refresh.
		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" {
			d.SetId("")
			return nil
		}
		return err
	}

	d.Set("description", aliasConfiguration.Description)
	d.Set("function_version", aliasConfiguration.FunctionVersion)
	d.Set("name", aliasConfiguration.Name)

	return nil
}
// resourceAwsLambdaAliasDelete maps to:
// DeleteAlias in the API / SDK
func resourceAwsLambdaAliasDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	log.Printf("[INFO] Deleting Lambda alias: %s:%s", d.Get("function_name"), d.Get("name"))

	req := &lambda.DeleteAliasInput{
		FunctionName: aws.String(d.Get("function_name").(string)),
		Name:         aws.String(d.Get("name").(string)),
	}
	if _, err := conn.DeleteAlias(req); err != nil {
		return fmt.Errorf("Error deleting Lambda alias: %s", err)
	}

	d.SetId("")

	return nil
}
// resourceAwsLambdaAliasUpdate maps to:
// UpdateAlias in the API / SDK
func resourceAwsLambdaAliasUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	log.Printf("[DEBUG] Updating Lambda alias: %s:%s", d.Get("function_name"), d.Get("name"))

	req := &lambda.UpdateAliasInput{
		Description:     aws.String(d.Get("description").(string)),
		FunctionName:    aws.String(d.Get("function_name").(string)),
		FunctionVersion: aws.String(d.Get("function_version").(string)),
		Name:            aws.String(d.Get("name").(string)),
	}
	if _, err := conn.UpdateAlias(req); err != nil {
		return fmt.Errorf("Error updating Lambda alias: %s", err)
	}

	return nil
}

View File

@ -0,0 +1,157 @@
package aws
import (
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/lambda"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccAWSLambdaAlias_basic provisions a Lambda function plus an alias
// (see testAccAwsLambdaAliasConfig) and verifies that the alias exists and
// carries a non-empty name and ARN.
func TestAccAWSLambdaAlias_basic(t *testing.T) {
	var conf lambda.AliasConfiguration

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAwsLambdaAliasDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAwsLambdaAliasConfig,
				Check: resource.ComposeTestCheckFunc(
					// Populates conf for the attribute check below.
					testAccCheckAwsLambdaAliasExists("aws_lambda_alias.lambda_alias_test", &conf),
					testAccCheckAwsLambdaAttributes(&conf),
				),
			},
		},
	})
}
// testAccCheckAwsLambdaAliasDestroy asserts that no aws_lambda_alias
// resources tracked in state survive the destroy step.
func testAccCheckAwsLambdaAliasDestroy(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).lambdaconn

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_lambda_alias" {
			continue
		}

		// A successful GetAlias means the alias leaked past destroy.
		if _, err := conn.GetAlias(&lambda.GetAliasInput{
			FunctionName: aws.String(rs.Primary.ID),
		}); err == nil {
			return fmt.Errorf("Lambda alias was not deleted")
		}
	}

	return nil
}
// testAccCheckAwsLambdaAliasExists looks up the alias for resource n in
// state, fetches it from the API, and copies the result into mapping.
func testAccCheckAwsLambdaAliasExists(n string, mapping *lambda.AliasConfiguration) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Lambda alias not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("Lambda alias not set")
		}

		conn := testAccProvider.Meta().(*AWSClient).lambdaconn
		got, err := conn.GetAlias(&lambda.GetAliasInput{
			FunctionName: aws.String(rs.Primary.ID),
			Name:         aws.String("testalias"),
		})
		if err != nil {
			return err
		}

		*mapping = *got
		return nil
	}
}
// testAccCheckAwsLambdaAttributes verifies that the alias configuration
// returned by the API carries both a non-empty name and a non-empty ARN.
func testAccCheckAwsLambdaAttributes(mapping *lambda.AliasConfiguration) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		// Guard against nil pointers so a field missing from the API
		// response produces a test failure rather than a panic (the
		// original dereferenced *mapping.Name / *mapping.AliasArn
		// unconditionally).
		if mapping.AliasArn == nil || *mapping.AliasArn == "" {
			return fmt.Errorf("Could not read Lambda alias ARN")
		}
		if mapping.Name == nil || *mapping.Name == "" {
			return fmt.Errorf("Could not read Lambda alias name")
		}
		return nil
	}
}
// testAccAwsLambdaAliasConfig provisions an IAM role with an attached
// lambda:* policy, a Lambda function built from the lambdatest.zip fixture,
// and an alias "testalias" pointing at that function's $LATEST version.
const testAccAwsLambdaAliasConfig = `
resource "aws_iam_role" "iam_for_lambda" {
name = "iam_for_lambda"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_policy" "policy_for_role" {
name = "policy_for_role"
path = "/"
description = "IAM policy for for Lamda alias testing"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"lambda:*"
],
"Resource": "*"
}
]
}
EOF
}
resource "aws_iam_policy_attachment" "policy_attachment_for_role" {
name = "policy_attachment_for_role"
roles = ["${aws_iam_role.iam_for_lambda.name}"]
policy_arn = "${aws_iam_policy.policy_for_role.arn}"
}
resource "aws_lambda_function" "lambda_function_test_create" {
filename = "test-fixtures/lambdatest.zip"
function_name = "example_lambda_name_create"
role = "${aws_iam_role.iam_for_lambda.arn}"
handler = "exports.example"
}
resource "aws_lambda_alias" "lambda_alias_test" {
name = "testalias"
description = "a sample description"
function_name = "${aws_lambda_function.lambda_function_test_create.arn}"
function_version = "$LATEST"
}
`

View File

@ -0,0 +1,210 @@
package aws
import (
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/lambda"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsLambdaEventSourceMapping defines the
// aws_lambda_event_source_mapping resource: the link between an event
// stream (identified by event_source_arn) and a Lambda function.
// event_source_arn and starting_position cannot be changed in place and
// force re-creation; the Computed-only attributes mirror fields reported
// back by the Lambda API.
func resourceAwsLambdaEventSourceMapping() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsLambdaEventSourceMappingCreate,
		Read:   resourceAwsLambdaEventSourceMappingRead,
		Update: resourceAwsLambdaEventSourceMappingUpdate,
		Delete: resourceAwsLambdaEventSourceMappingDelete,

		Schema: map[string]*schema.Schema{
			// ARN of the stream acting as the event source.
			"event_source_arn": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"function_name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// Where in the stream to begin reading; immutable after create.
			"starting_position": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"batch_size": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Default:  100,
			},
			"enabled": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  true,
			},
			"function_arn": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"last_modified": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"last_processing_result": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"state": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"state_transition_reason": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			// Server-assigned identifier; also used as the resource ID.
			"uuid": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
// resourceAwsLambdaEventSourceMappingCreate maps to:
// CreateEventSourceMapping in the API / SDK
func resourceAwsLambdaEventSourceMappingCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	fn := d.Get("function_name").(string)
	source := d.Get("event_source_arn").(string)
	log.Printf("[DEBUG] Creating Lambda event source mapping: source %s to function %s", source, fn)

	input := &lambda.CreateEventSourceMappingInput{
		EventSourceArn:   aws.String(source),
		FunctionName:     aws.String(fn),
		StartingPosition: aws.String(d.Get("starting_position").(string)),
		BatchSize:        aws.Int64(int64(d.Get("batch_size").(int))),
		Enabled:          aws.Bool(d.Get("enabled").(bool)),
	}

	// IAM profiles and roles can take some time to propagate in AWS:
	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
	//
	// Error creating Lambda function: InvalidParameterValueException: The
	// function defined for the task cannot be assumed by Lambda.
	//
	// The role may exist but its permissions may not have propagated yet,
	// so keep retrying for up to a minute on that specific error code.
	err := resource.Retry(1*time.Minute, func() error {
		mapping, err := conn.CreateEventSourceMapping(input)
		if err != nil {
			if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "InvalidParameterValueException" {
				// Retryable
				return awserr
			}
			// Not retryable
			return resource.RetryError{Err: err}
		}

		// Success: the server-assigned UUID becomes the resource ID.
		d.Set("uuid", mapping.UUID)
		d.SetId(*mapping.UUID)
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error creating Lambda event source mapping: %s", err)
	}

	return resourceAwsLambdaEventSourceMappingRead(d, meta)
}
// resourceAwsLambdaEventSourceMappingRead maps to:
// GetEventSourceMapping in the API / SDK
func resourceAwsLambdaEventSourceMappingRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	log.Printf("[DEBUG] Fetching Lambda event source mapping: %s", d.Id())

	mapping, err := conn.GetEventSourceMapping(&lambda.GetEventSourceMappingInput{
		UUID: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}

	// Copy every attribute reported by the API into state.
	d.Set("batch_size", mapping.BatchSize)
	d.Set("event_source_arn", mapping.EventSourceArn)
	d.Set("function_arn", mapping.FunctionArn)
	d.Set("last_modified", mapping.LastModified)
	d.Set("last_processing_result", mapping.LastProcessingResult)
	d.Set("state", mapping.State)
	d.Set("state_transition_reason", mapping.StateTransitionReason)
	d.Set("uuid", mapping.UUID)

	return nil
}
// resourceAwsLambdaEventSourceMappingDelete maps to:
// DeleteEventSourceMapping in the API / SDK
func resourceAwsLambdaEventSourceMappingDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	log.Printf("[INFO] Deleting Lambda event source mapping: %s", d.Id())

	input := &lambda.DeleteEventSourceMappingInput{
		UUID: aws.String(d.Id()),
	}
	if _, err := conn.DeleteEventSourceMapping(input); err != nil {
		return fmt.Errorf("Error deleting Lambda event source mapping: %s", err)
	}

	// Clearing the ID removes the resource from state.
	d.SetId("")

	return nil
}
// resourceAwsLambdaEventSourceMappingUpdate maps to:
// UpdateEventSourceMapping in the API / SDK
func resourceAwsLambdaEventSourceMappingUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).lambdaconn

	log.Printf("[DEBUG] Updating Lambda event source mapping: %s", d.Id())

	input := &lambda.UpdateEventSourceMappingInput{
		UUID:         aws.String(d.Id()),
		BatchSize:    aws.Int64(int64(d.Get("batch_size").(int))),
		FunctionName: aws.String(d.Get("function_name").(string)),
		Enabled:      aws.Bool(d.Get("enabled").(bool)),
	}

	// Retry for up to a minute to ride out IAM propagation delays; only
	// InvalidParameterValueException is treated as transient.
	err := resource.Retry(1*time.Minute, func() error {
		_, err := conn.UpdateEventSourceMapping(input)
		if err != nil {
			if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "InvalidParameterValueException" {
				// Retryable
				return awserr
			}
			// Not retryable
			return resource.RetryError{Err: err}
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error updating Lambda event source mapping: %s", err)
	}

	return resourceAwsLambdaEventSourceMappingRead(d, meta)
}

View File

@ -0,0 +1,279 @@
package aws
import (
"fmt"
"regexp"
"strconv"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/lambda"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccAWSLambdaEventSourceMapping_basic creates a Kinesis-backed event
// source mapping, then applies an updated configuration and verifies that
// batch_size, enabled, and the target function ARN were changed in place.
func TestAccAWSLambdaEventSourceMapping_basic(t *testing.T) {
	var conf lambda.EventSourceMappingConfiguration

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckLambdaEventSourceMappingDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSLambdaEventSourceMappingConfig,
				Check: resource.ComposeTestCheckFunc(
					// Populates conf for the attribute check below.
					testAccCheckAwsLambdaEventSourceMappingExists("aws_lambda_event_source_mapping.lambda_event_source_mapping_test", &conf),
					testAccCheckAWSLambdaEventSourceMappingAttributes(&conf),
				),
			},
			// Second step: the update config switches batch_size to 200,
			// disables the mapping, and repoints it at the _update function.
			resource.TestStep{
				Config: testAccAWSLambdaEventSourceMappingConfigUpdate,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAwsLambdaEventSourceMappingExists("aws_lambda_event_source_mapping.lambda_event_source_mapping_test", &conf),
					resource.TestCheckResourceAttr("aws_lambda_event_source_mapping.lambda_event_source_mapping_test",
						"batch_size",
						strconv.Itoa(200)),
					resource.TestCheckResourceAttr("aws_lambda_event_source_mapping.lambda_event_source_mapping_test",
						"enabled",
						strconv.FormatBool(false)),
					resource.TestMatchResourceAttr(
						"aws_lambda_event_source_mapping.lambda_event_source_mapping_test",
						"function_arn",
						regexp.MustCompile("example_lambda_name_update$"),
					),
				),
			},
		},
	})
}
// testAccCheckLambdaEventSourceMappingDestroy asserts that no
// aws_lambda_event_source_mapping resources tracked in state survive the
// destroy step.
func testAccCheckLambdaEventSourceMappingDestroy(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).lambdaconn

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_lambda_event_source_mapping" {
			continue
		}

		// A successful lookup means the mapping leaked past destroy.
		if _, err := conn.GetEventSourceMapping(&lambda.GetEventSourceMappingInput{
			UUID: aws.String(rs.Primary.ID),
		}); err == nil {
			return fmt.Errorf("Lambda event source mapping was not deleted")
		}
	}

	return nil
}
// testAccCheckAwsLambdaEventSourceMappingExists fetches the mapping
// identified by resource n's state ID and copies it into mapping.
func testAccCheckAwsLambdaEventSourceMappingExists(n string, mapping *lambda.EventSourceMappingConfiguration) resource.TestCheckFunc {
	// Wait for IAM role
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Lambda event source mapping not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("Lambda event source mapping ID not set")
		}

		conn := testAccProvider.Meta().(*AWSClient).lambdaconn
		got, err := conn.GetEventSourceMapping(&lambda.GetEventSourceMappingInput{
			UUID: aws.String(rs.Primary.ID),
		})
		if err != nil {
			return err
		}

		*mapping = *got
		return nil
	}
}
// testAccCheckAWSLambdaEventSourceMappingAttributes verifies that the
// mapping returned by the API carries a non-empty UUID.
func testAccCheckAWSLambdaEventSourceMappingAttributes(mapping *lambda.EventSourceMappingConfiguration) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		// Guard against a nil pointer so a missing UUID fails the test
		// instead of panicking (the original dereferenced *mapping.UUID
		// unconditionally).
		if mapping.UUID == nil || *mapping.UUID == "" {
			return fmt.Errorf("Could not read Lambda event source mapping's UUID")
		}
		return nil
	}
}
// testAccAWSLambdaEventSourceMappingConfig provisions an IAM role with
// Kinesis read permissions, a one-shard Kinesis stream, two Lambda
// functions, and an enabled mapping (batch_size 100) from the stream to
// the _create function.
const testAccAWSLambdaEventSourceMappingConfig = `
resource "aws_iam_role" "iam_for_lambda" {
name = "iam_for_lambda"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_policy" "policy_for_role" {
name = "policy_for_role"
path = "/"
description = "IAM policy for for Lamda event mapping testing"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"kinesis:GetRecords",
"kinesis:GetShardIterator",
"kinesis:DescribeStream"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"kinesis:ListStreams"
],
"Resource": "*"
}
]
}
EOF
}
resource "aws_iam_policy_attachment" "policy_attachment_for_role" {
name = "policy_attachment_for_role"
roles = ["${aws_iam_role.iam_for_lambda.name}"]
policy_arn = "${aws_iam_policy.policy_for_role.arn}"
}
resource "aws_kinesis_stream" "kinesis_stream_test" {
name = "kinesis_stream_test"
shard_count = 1
}
resource "aws_lambda_function" "lambda_function_test_create" {
filename = "test-fixtures/lambdatest.zip"
function_name = "example_lambda_name_create"
role = "${aws_iam_role.iam_for_lambda.arn}"
handler = "exports.example"
}
resource "aws_lambda_function" "lambda_function_test_update" {
filename = "test-fixtures/lambdatest.zip"
function_name = "example_lambda_name_update"
role = "${aws_iam_role.iam_for_lambda.arn}"
handler = "exports.example"
}
resource "aws_lambda_event_source_mapping" "lambda_event_source_mapping_test" {
batch_size = 100
event_source_arn = "${aws_kinesis_stream.kinesis_stream_test.arn}"
enabled = true
depends_on = ["aws_iam_policy_attachment.policy_attachment_for_role"]
function_name = "${aws_lambda_function.lambda_function_test_create.arn}"
starting_position = "TRIM_HORIZON"
}
`
// testAccAWSLambdaEventSourceMappingConfigUpdate is the second-step config:
// identical infrastructure, but the mapping is disabled, batch_size is
// raised to 200, and it is repointed at the _update function.
const testAccAWSLambdaEventSourceMappingConfigUpdate = `
resource "aws_iam_role" "iam_for_lambda" {
name = "iam_for_lambda"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_policy" "policy_for_role" {
name = "policy_for_role"
path = "/"
description = "IAM policy for for Lamda event mapping testing"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"kinesis:GetRecords",
"kinesis:GetShardIterator",
"kinesis:DescribeStream"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"kinesis:ListStreams"
],
"Resource": "*"
}
]
}
EOF
}
resource "aws_iam_policy_attachment" "policy_attachment_for_role" {
name = "policy_attachment_for_role"
roles = ["${aws_iam_role.iam_for_lambda.name}"]
policy_arn = "${aws_iam_policy.policy_for_role.arn}"
}
resource "aws_kinesis_stream" "kinesis_stream_test" {
name = "kinesis_stream_test"
shard_count = 1
}
resource "aws_lambda_function" "lambda_function_test_create" {
filename = "test-fixtures/lambdatest.zip"
function_name = "example_lambda_name_create"
role = "${aws_iam_role.iam_for_lambda.arn}"
handler = "exports.example"
}
resource "aws_lambda_function" "lambda_function_test_update" {
filename = "test-fixtures/lambdatest.zip"
function_name = "example_lambda_name_update"
role = "${aws_iam_role.iam_for_lambda.arn}"
handler = "exports.example"
}
resource "aws_lambda_event_source_mapping" "lambda_event_source_mapping_test" {
batch_size = 200
event_source_arn = "${aws_kinesis_stream.kinesis_stream_test.arn}"
enabled = false
depends_on = ["aws_iam_policy_attachment.policy_attachment_for_role"]
function_name = "${aws_lambda_function.lambda_function_test_update.arn}"
starting_position = "TRIM_HORIZON"
}
`

View File

@ -5,7 +5,6 @@ import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"log" "log"
"strings"
"time" "time"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
@ -15,6 +14,7 @@ import (
"errors" "errors"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
) )
@ -149,22 +149,24 @@ func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) e
Timeout: aws.Int64(int64(d.Get("timeout").(int))), Timeout: aws.Int64(int64(d.Get("timeout").(int))),
} }
var err error // IAM profiles can take ~10 seconds to propagate in AWS:
for i := 0; i < 5; i++ { // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
_, err = conn.CreateFunction(params) // Error creating Lambda function: InvalidParameterValueException: The role defined for the task cannot be assumed by Lambda.
if awsErr, ok := err.(awserr.Error); ok { err := resource.Retry(1*time.Minute, func() error {
_, err := conn.CreateFunction(params)
// IAM profiles can take ~10 seconds to propagate in AWS: if err != nil {
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console if awserr, ok := err.(awserr.Error); ok {
// Error creating Lambda function: InvalidParameterValueException: The role defined for the task cannot be assumed by Lambda. if awserr.Code() == "InvalidParameterValueException" {
if awsErr.Code() == "InvalidParameterValueException" && strings.Contains(awsErr.Message(), "The role defined for the task cannot be assumed by Lambda.") { // Retryable
log.Printf("[DEBUG] Invalid IAM Instance Profile referenced, retrying...") return awserr
time.Sleep(2 * time.Second) }
continue
} }
// Not retryable
return resource.RetryError{Err: err}
} }
break // No error
} return nil
})
if err != nil { if err != nil {
return fmt.Errorf("Error creating Lambda function: %s", err) return fmt.Errorf("Error creating Lambda function: %s", err)
} }

View File

@ -185,6 +185,13 @@ func resourceAwsLaunchConfiguration() *schema.Resource {
Computed: true, Computed: true,
ForceNew: true, ForceNew: true,
}, },
"encrypted": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Computed: true,
ForceNew: true,
},
}, },
}, },
Set: func(v interface{}) int { Set: func(v interface{}) int {
@ -326,6 +333,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
bd := v.(map[string]interface{}) bd := v.(map[string]interface{})
ebs := &autoscaling.Ebs{ ebs := &autoscaling.Ebs{
DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)), DeleteOnTermination: aws.Bool(bd["delete_on_termination"].(bool)),
Encrypted: aws.Bool(bd["encrypted"].(bool)),
} }
if v, ok := bd["snapshot_id"].(string); ok && v != "" { if v, ok := bd["snapshot_id"].(string); ok && v != "" {
@ -386,6 +394,11 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface
} }
if dn, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn); err == nil { if dn, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn); err == nil {
if dn == nil {
return fmt.Errorf(
"Expected to find a Root Device name for AMI (%s), but got none",
d.Get("image_id").(string))
}
blockDevices = append(blockDevices, &autoscaling.BlockDeviceMapping{ blockDevices = append(blockDevices, &autoscaling.BlockDeviceMapping{
DeviceName: dn, DeviceName: dn,
Ebs: ebs, Ebs: ebs,
@ -565,6 +578,9 @@ func readBlockDevicesFromLaunchConfiguration(d *schema.ResourceData, lc *autosca
if bdm.Ebs != nil && bdm.Ebs.Iops != nil { if bdm.Ebs != nil && bdm.Ebs.Iops != nil {
bd["iops"] = *bdm.Ebs.Iops bd["iops"] = *bdm.Ebs.Iops
} }
if bdm.Ebs != nil && bdm.Ebs.Encrypted != nil {
bd["encrypted"] = *bdm.Ebs.Encrypted
}
if bdm.DeviceName != nil && *bdm.DeviceName == *rootDeviceName { if bdm.DeviceName != nil && *bdm.DeviceName == *rootDeviceName {
blockDevices["root"] = bd blockDevices["root"] = bd
} else { } else {

View File

@ -89,6 +89,52 @@ func TestAccAWSLaunchConfiguration_withSpotPrice(t *testing.T) {
}) })
} }
func testAccCheckAWSLaunchConfigurationWithEncryption(conf *autoscaling.LaunchConfiguration) resource.TestCheckFunc {
return func(s *terraform.State) error {
// Map out the block devices by name, which should be unique.
blockDevices := make(map[string]*autoscaling.BlockDeviceMapping)
for _, blockDevice := range conf.BlockDeviceMappings {
blockDevices[*blockDevice.DeviceName] = blockDevice
}
// Check if the root block device exists.
if _, ok := blockDevices["/dev/sda1"]; !ok {
return fmt.Errorf("block device doesn't exist: /dev/sda1")
} else if blockDevices["/dev/sda1"].Ebs.Encrypted != nil {
return fmt.Errorf("root device should not include value for Encrypted")
}
// Check if the secondary block device exists.
if _, ok := blockDevices["/dev/sdb"]; !ok {
return fmt.Errorf("block device doesn't exist: /dev/sdb")
} else if !*blockDevices["/dev/sdb"].Ebs.Encrypted {
return fmt.Errorf("block device isn't encrypted as expected: /dev/sdb")
}
return nil
}
}
func TestAccAWSLaunchConfiguration_withEncryption(t *testing.T) {
var conf autoscaling.LaunchConfiguration
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSLaunchConfigurationDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSLaunchConfigurationWithEncryption,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.baz", &conf),
testAccCheckAWSLaunchConfigurationWithEncryption(&conf),
),
},
},
})
}
func testAccCheckAWSLaunchConfigurationGeneratedNamePrefix( func testAccCheckAWSLaunchConfigurationGeneratedNamePrefix(
resource, prefix string) resource.TestCheckFunc { resource, prefix string) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {
@ -162,17 +208,17 @@ func testAccCheckAWSLaunchConfigurationAttributes(conf *autoscaling.LaunchConfig
// Check if the root block device exists. // Check if the root block device exists.
if _, ok := blockDevices["/dev/sda1"]; !ok { if _, ok := blockDevices["/dev/sda1"]; !ok {
fmt.Errorf("block device doesn't exist: /dev/sda1") return fmt.Errorf("block device doesn't exist: /dev/sda1")
} }
// Check if the secondary block device exists. // Check if the secondary block device exists.
if _, ok := blockDevices["/dev/sdb"]; !ok { if _, ok := blockDevices["/dev/sdb"]; !ok {
fmt.Errorf("block device doesn't exist: /dev/sdb") return fmt.Errorf("block device doesn't exist: /dev/sdb")
} }
// Check if the third block device exists. // Check if the third block device exists.
if _, ok := blockDevices["/dev/sdc"]; !ok { if _, ok := blockDevices["/dev/sdc"]; !ok {
fmt.Errorf("block device doesn't exist: /dev/sdc") return fmt.Errorf("block device doesn't exist: /dev/sdc")
} }
// Check if the secondary block device exists. // Check if the secondary block device exists.
@ -273,3 +319,21 @@ resource "aws_launch_configuration" "baz" {
associate_public_ip_address = false associate_public_ip_address = false
} }
` `
const testAccAWSLaunchConfigurationWithEncryption = `
resource "aws_launch_configuration" "baz" {
image_id = "ami-5189a661"
instance_type = "t2.micro"
associate_public_ip_address = false
root_block_device {
volume_type = "gp2"
volume_size = 11
}
ebs_block_device {
device_name = "/dev/sdb"
volume_size = 9
encrypted = true
}
}
`

View File

@ -5,13 +5,14 @@ import (
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/elb"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
) )
func TestAccAwsLBCookieStickinessPolicy_basic(t *testing.T) { func TestAccAWSLBCookieStickinessPolicy_basic(t *testing.T) {
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders, Providers: testAccProviders,
@ -40,8 +41,29 @@ func TestAccAwsLBCookieStickinessPolicy_basic(t *testing.T) {
} }
func testAccCheckLBCookieStickinessPolicyDestroy(s *terraform.State) error { func testAccCheckLBCookieStickinessPolicyDestroy(s *terraform.State) error {
if len(s.RootModule().Resources) > 0 { conn := testAccProvider.Meta().(*AWSClient).elbconn
return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_lb_cookie_stickiness_policy" {
continue
}
lbName, _, policyName := resourceAwsLBCookieStickinessPolicyParseId(rs.Primary.ID)
out, err := conn.DescribeLoadBalancerPolicies(
&elb.DescribeLoadBalancerPoliciesInput{
LoadBalancerName: aws.String(lbName),
PolicyNames: []*string{aws.String(policyName)},
})
if err != nil {
if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "PolicyNotFound" || ec2err.Code() == "LoadBalancerNotFound") {
continue
}
return err
}
if len(out.PolicyDescriptions) > 0 {
return fmt.Errorf("Policy still exists")
}
} }
return nil return nil

View File

@ -4,6 +4,7 @@ import (
"fmt" "fmt"
"testing" "testing"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
) )
@ -39,8 +40,28 @@ func TestAccAWSMainRouteTableAssociation_basic(t *testing.T) {
} }
func testAccCheckMainRouteTableAssociationDestroy(s *terraform.State) error { func testAccCheckMainRouteTableAssociationDestroy(s *terraform.State) error {
if len(s.RootModule().Resources) > 0 { conn := testAccProvider.Meta().(*AWSClient).ec2conn
return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources)
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_main_route_table_association" {
continue
}
mainAssociation, err := findMainRouteTableAssociation(
conn,
rs.Primary.Attributes["vpc_id"],
)
if err != nil {
// Verify the error is what we want
if ae, ok := err.(awserr.Error); ok && ae.Code() == "ApplicationDoesNotExistException" {
continue
}
return err
}
if mainAssociation != nil {
return fmt.Errorf("still exists")
}
} }
return nil return nil

View File

@ -0,0 +1,181 @@
package aws
import (
"fmt"
"log"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsNatGateway defines the aws_nat_gateway resource. A NAT gateway
// is immutable once created — there is no Update function, and both
// allocation_id and subnet_id force re-creation. The remaining attributes
// are populated from the API after creation.
func resourceAwsNatGateway() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsNatGatewayCreate,
		Read:   resourceAwsNatGatewayRead,
		Delete: resourceAwsNatGatewayDelete,

		Schema: map[string]*schema.Schema{
			// Elastic IP allocation providing the gateway's public address.
			"allocation_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Subnet the gateway is placed in.
			"subnet_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"network_interface_id": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"private_ip": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			"public_ip": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
		},
	}
}
// resourceAwsNatGatewayCreate creates the NAT Gateway and blocks until EC2
// reports it as available.
func resourceAwsNatGatewayCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	// Ask EC2 for a new NAT Gateway in the requested subnet, backed by
	// the given Elastic IP allocation.
	opts := &ec2.CreateNatGatewayInput{
		AllocationId: aws.String(d.Get("allocation_id").(string)),
		SubnetId:     aws.String(d.Get("subnet_id").(string)),
	}
	log.Printf("[DEBUG] Create NAT Gateway: %s", *opts)

	resp, err := conn.CreateNatGateway(opts)
	if err != nil {
		return fmt.Errorf("Error creating NAT Gateway: %s", err)
	}

	d.SetId(*resp.NatGateway.NatGatewayId)
	log.Printf("[INFO] NAT Gateway ID: %s", d.Id())

	// Creation is asynchronous; poll until the gateway leaves "pending".
	log.Printf("[DEBUG] Waiting for NAT Gateway (%s) to become available", d.Id())
	stateConf := &resource.StateChangeConf{
		Pending: []string{"pending"},
		Target:  []string{"available"},
		Refresh: NGStateRefreshFunc(conn, d.Id()),
		Timeout: 10 * time.Minute,
	}
	if _, err := stateConf.WaitForState(); err != nil {
		return fmt.Errorf("Error waiting for NAT Gateway (%s) to become available: %s", d.Id(), err)
	}

	return resourceAwsNatGatewayRead(d, meta)
}
// resourceAwsNatGatewayRead refreshes the NAT Gateway from the API and
// mirrors its address attributes into state. A gateway that is missing or
// in the "deleted" state is dropped from state.
func resourceAwsNatGatewayRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	// Refresh the NAT Gateway state
	ngRaw, state, err := NGStateRefreshFunc(conn, d.Id())()
	if err != nil {
		return err
	}
	if ngRaw == nil || strings.ToLower(state) == "deleted" {
		log.Printf("[INFO] Removing %s from Terraform state as it is not found or in the deleted state.", d.Id())
		d.SetId("")
		return nil
	}

	// Set NAT Gateway attributes
	ng := ngRaw.(*ec2.NatGateway)
	// Guard against an index-out-of-range panic: the API can in principle
	// return a gateway with no addresses attached yet.
	if len(ng.NatGatewayAddresses) == 0 {
		return fmt.Errorf("NAT Gateway (%s) has no addresses", d.Id())
	}
	address := ng.NatGatewayAddresses[0]
	d.Set("network_interface_id", address.NetworkInterfaceId)
	d.Set("private_ip", address.PrivateIp)
	d.Set("public_ip", address.PublicIp)

	return nil
}
// resourceAwsNatGatewayDelete tears down the NAT Gateway and waits for EC2
// to report it as deleted. A gateway that is already gone is treated as
// success.
func resourceAwsNatGatewayDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn
	deleteOpts := &ec2.DeleteNatGatewayInput{
		NatGatewayId: aws.String(d.Id()),
	}
	log.Printf("[INFO] Deleting NAT Gateway: %s", d.Id())

	_, err := conn.DeleteNatGateway(deleteOpts)
	if err != nil {
		ec2err, ok := err.(awserr.Error)
		if !ok {
			return err
		}
		// Already deleted out of band: nothing to do.
		if ec2err.Code() == "NatGatewayNotFound" {
			return nil
		}
		return err
	}

	// Deletion is asynchronous; poll until the gateway reaches "deleted".
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"deleting"},
		Target:     []string{"deleted"},
		Refresh:    NGStateRefreshFunc(conn, d.Id()),
		Timeout:    30 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 10 * time.Second,
	}

	_, stateErr := stateConf.WaitForState()
	if stateErr != nil {
		// Fix: report stateErr here. The original formatted `err`, which
		// is necessarily nil at this point, producing a useless message.
		return fmt.Errorf("Error waiting for NAT Gateway (%s) to delete: %s", d.Id(), stateErr)
	}

	return nil
}
// NGStateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
// a NAT Gateway. It reports (nil, "", nil) — "not there yet" — both for a
// NatGatewayNotFound error and for an empty result set, so callers can ride
// out AWS's eventual consistency.
func NGStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		opts := &ec2.DescribeNatGatewaysInput{
			NatGatewayIds: []*string{aws.String(id)},
		}
		resp, err := conn.DescribeNatGateways(opts)
		if err != nil {
			if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "NatGatewayNotFound" {
				resp = nil
			} else {
				log.Printf("Error on NGStateRefresh: %s", err)
				return nil, "", err
			}
		}

		// Sometimes AWS just has consistency issues and doesn't see
		// our instance yet. Return an empty state. The len check also
		// prevents an index-out-of-range panic on an empty NatGateways
		// slice, which the original did not guard against.
		if resp == nil || len(resp.NatGateways) == 0 {
			return nil, "", nil
		}

		ng := resp.NatGateways[0]
		return ng, *ng.State, nil
	}
}

View File

@ -0,0 +1,154 @@
package aws
import (
"fmt"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccAWSNatGateway_basic provisions a VPC with public and private
// subnets, an internet gateway, and an EIP (see testAccNatGatewayConfig),
// then verifies that the NAT gateway was created.
func TestAccAWSNatGateway_basic(t *testing.T) {
	var natGateway ec2.NatGateway

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckNatGatewayDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccNatGatewayConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckNatGatewayExists("aws_nat_gateway.gateway", &natGateway),
				),
			},
		},
	})
}
// testAccCheckNatGatewayDestroy verifies that every NAT gateway recorded in
// the test state has been deleted (or is reported in the "deleted" state)
// after the test tears down.
func testAccCheckNatGatewayDestroy(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).ec2conn

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_nat_gateway" {
			continue
		}

		// Try to find the resource
		resp, err := conn.DescribeNatGateways(&ec2.DescribeNatGatewaysInput{
			NatGatewayIds: []*string{aws.String(rs.Primary.ID)},
		})
		if err == nil {
			if len(resp.NatGateways) > 0 && strings.ToLower(*resp.NatGateways[0].State) != "deleted" {
				return fmt.Errorf("still exists")
			}
			// This gateway is gone (or marked deleted). Keep iterating
			// instead of returning early, which would skip the destroy
			// check for any remaining NAT gateways in the state.
			continue
		}

		// Verify the error is what we want
		ec2err, ok := err.(awserr.Error)
		if !ok {
			return err
		}
		if ec2err.Code() != "NatGatewayNotFound" {
			return err
		}
	}

	return nil
}
// testAccCheckNatGatewayExists returns a check that looks up the named
// resource in the Terraform state, fetches the corresponding NAT gateway
// from AWS, and copies it into ng for later assertions.
func testAccCheckNatGatewayExists(n string, ng *ec2.NatGateway) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).ec2conn
		input := &ec2.DescribeNatGatewaysInput{
			NatGatewayIds: []*string{aws.String(rs.Primary.ID)},
		}

		resp, err := conn.DescribeNatGateways(input)
		if err != nil {
			return err
		}
		if len(resp.NatGateways) == 0 {
			return fmt.Errorf("NatGateway not found")
		}

		*ng = *resp.NatGateways[0]
		return nil
	}
}
// testAccNatGatewayConfig is the acceptance-test fixture for the NAT gateway:
// a VPC with one public and one private subnet, an internet gateway and an
// EIP, the NAT gateway under test in the public subnet, and route tables that
// send the private subnet's default route through the NAT gateway and the
// public subnet's default route through the internet gateway.
const testAccNatGatewayConfig = `
resource "aws_vpc" "vpc" {
cidr_block = "10.0.0.0/16"
}
resource "aws_subnet" "private" {
vpc_id = "${aws_vpc.vpc.id}"
cidr_block = "10.0.1.0/24"
map_public_ip_on_launch = false
}
resource "aws_subnet" "public" {
vpc_id = "${aws_vpc.vpc.id}"
cidr_block = "10.0.2.0/24"
map_public_ip_on_launch = true
}
resource "aws_internet_gateway" "gw" {
vpc_id = "${aws_vpc.vpc.id}"
}
resource "aws_eip" "nat_gateway" {
vpc = true
}
// Actual SUT
resource "aws_nat_gateway" "gateway" {
allocation_id = "${aws_eip.nat_gateway.id}"
subnet_id = "${aws_subnet.public.id}"
depends_on = ["aws_internet_gateway.gw"]
}
resource "aws_route_table" "private" {
vpc_id = "${aws_vpc.vpc.id}"
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = "${aws_nat_gateway.gateway.id}"
}
}
resource "aws_route_table_association" "private" {
subnet_id = "${aws_subnet.private.id}"
route_table_id = "${aws_route_table.private.id}"
}
resource "aws_route_table" "public" {
vpc_id = "${aws_vpc.vpc.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.gw.id}"
}
}
resource "aws_route_table_association" "public" {
subnet_id = "${aws_subnet.public.id}"
route_table_id = "${aws_route_table.public.id}"
}
`

View File

@ -50,6 +50,7 @@ func resourceAwsNetworkAcl() *schema.Resource {
Type: schema.TypeSet, Type: schema.TypeSet,
Required: false, Required: false,
Optional: true, Optional: true,
Computed: true,
Elem: &schema.Resource{ Elem: &schema.Resource{
Schema: map[string]*schema.Schema{ Schema: map[string]*schema.Schema{
"from_port": &schema.Schema{ "from_port": &schema.Schema{
@ -92,6 +93,7 @@ func resourceAwsNetworkAcl() *schema.Resource {
Type: schema.TypeSet, Type: schema.TypeSet,
Required: false, Required: false,
Optional: true, Optional: true,
Computed: true,
Elem: &schema.Resource{ Elem: &schema.Resource{
Schema: map[string]*schema.Schema{ Schema: map[string]*schema.Schema{
"from_port": &schema.Schema{ "from_port": &schema.Schema{
@ -316,87 +318,89 @@ func resourceAwsNetworkAclUpdate(d *schema.ResourceData, meta interface{}) error
func updateNetworkAclEntries(d *schema.ResourceData, entryType string, conn *ec2.EC2) error { func updateNetworkAclEntries(d *schema.ResourceData, entryType string, conn *ec2.EC2) error {
o, n := d.GetChange(entryType) if d.HasChange(entryType) {
o, n := d.GetChange(entryType)
if o == nil { if o == nil {
o = new(schema.Set) o = new(schema.Set)
} }
if n == nil { if n == nil {
n = new(schema.Set) n = new(schema.Set)
}
os := o.(*schema.Set)
ns := n.(*schema.Set)
toBeDeleted, err := expandNetworkAclEntries(os.Difference(ns).List(), entryType)
if err != nil {
return err
}
for _, remove := range toBeDeleted {
// AWS includes default rules with all network ACLs that can be
// neither modified nor destroyed. They have a custom rule
// number that is out of bounds for any other rule. If we
// encounter it, just continue. There's no work to be done.
if *remove.RuleNumber == 32767 {
continue
} }
// Delete old Acl os := o.(*schema.Set)
_, err := conn.DeleteNetworkAclEntry(&ec2.DeleteNetworkAclEntryInput{ ns := n.(*schema.Set)
NetworkAclId: aws.String(d.Id()),
RuleNumber: remove.RuleNumber, toBeDeleted, err := expandNetworkAclEntries(os.Difference(ns).List(), entryType)
Egress: remove.Egress,
})
if err != nil { if err != nil {
return fmt.Errorf("Error deleting %s entry: %s", entryType, err)
}
}
toBeCreated, err := expandNetworkAclEntries(ns.Difference(os).List(), entryType)
if err != nil {
return err
}
for _, add := range toBeCreated {
// Protocol -1 rules don't store ports in AWS. Thus, they'll always
// hash differently when being read out of the API. Force the user
// to set from_port and to_port to 0 for these rules, to keep the
// hashing consistent.
if *add.Protocol == "-1" {
to := *add.PortRange.To
from := *add.PortRange.From
expected := &expectedPortPair{
to_port: 0,
from_port: 0,
}
if ok := validatePorts(to, from, *expected); !ok {
return fmt.Errorf(
"to_port (%d) and from_port (%d) must both be 0 to use the the 'all' \"-1\" protocol!",
to, from)
}
}
// AWS mutates the CIDR block into a network implied by the IP and
// mask provided. This results in hashing inconsistencies between
// the local config file and the state returned by the API. Error
// if the user provides a CIDR block with an inappropriate mask
if err := validateCIDRBlock(*add.CidrBlock); err != nil {
return err return err
} }
for _, remove := range toBeDeleted {
// Add new Acl entry // AWS includes default rules with all network ACLs that can be
_, connErr := conn.CreateNetworkAclEntry(&ec2.CreateNetworkAclEntryInput{ // neither modified nor destroyed. They have a custom rule
NetworkAclId: aws.String(d.Id()), // number that is out of bounds for any other rule. If we
CidrBlock: add.CidrBlock, // encounter it, just continue. There's no work to be done.
Egress: add.Egress, if *remove.RuleNumber == 32767 {
PortRange: add.PortRange, continue
Protocol: add.Protocol, }
RuleAction: add.RuleAction,
RuleNumber: add.RuleNumber, // Delete old Acl
IcmpTypeCode: add.IcmpTypeCode, _, err := conn.DeleteNetworkAclEntry(&ec2.DeleteNetworkAclEntryInput{
}) NetworkAclId: aws.String(d.Id()),
if connErr != nil { RuleNumber: remove.RuleNumber,
return fmt.Errorf("Error creating %s entry: %s", entryType, connErr) Egress: remove.Egress,
})
if err != nil {
return fmt.Errorf("Error deleting %s entry: %s", entryType, err)
}
}
toBeCreated, err := expandNetworkAclEntries(ns.Difference(os).List(), entryType)
if err != nil {
return err
}
for _, add := range toBeCreated {
// Protocol -1 rules don't store ports in AWS. Thus, they'll always
// hash differently when being read out of the API. Force the user
// to set from_port and to_port to 0 for these rules, to keep the
// hashing consistent.
if *add.Protocol == "-1" {
to := *add.PortRange.To
from := *add.PortRange.From
expected := &expectedPortPair{
to_port: 0,
from_port: 0,
}
if ok := validatePorts(to, from, *expected); !ok {
return fmt.Errorf(
"to_port (%d) and from_port (%d) must both be 0 to use the the 'all' \"-1\" protocol!",
to, from)
}
}
// AWS mutates the CIDR block into a network implied by the IP and
// mask provided. This results in hashing inconsistencies between
// the local config file and the state returned by the API. Error
// if the user provides a CIDR block with an inappropriate mask
if err := validateCIDRBlock(*add.CidrBlock); err != nil {
return err
}
// Add new Acl entry
_, connErr := conn.CreateNetworkAclEntry(&ec2.CreateNetworkAclEntryInput{
NetworkAclId: aws.String(d.Id()),
CidrBlock: add.CidrBlock,
Egress: add.Egress,
PortRange: add.PortRange,
Protocol: add.Protocol,
RuleAction: add.RuleAction,
RuleNumber: add.RuleNumber,
IcmpTypeCode: add.IcmpTypeCode,
})
if connErr != nil {
return fmt.Errorf("Error creating %s entry: %s", entryType, connErr)
}
} }
} }
return nil return nil

View File

@ -0,0 +1,247 @@
package aws
import (
"bytes"
"fmt"
"log"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
// resourceAwsNetworkAclRule defines the standalone aws_network_acl_rule
// resource: a single entry on an existing network ACL. There is no Update
// function — every attribute is ForceNew, so any change deletes and
// recreates the rule.
func resourceAwsNetworkAclRule() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsNetworkAclRuleCreate,
		Read:   resourceAwsNetworkAclRuleRead,
		Delete: resourceAwsNetworkAclRuleDelete,

		Schema: map[string]*schema.Schema{
			// ID of the network ACL this entry belongs to.
			"network_acl_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Rule number; together with egress it uniquely identifies
			// the entry within the ACL.
			"rule_number": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
				ForceNew: true,
			},
			// Whether the rule applies to outbound (true) or inbound
			// (false, the default) traffic.
			"egress": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
				Default:  false,
			},
			// Protocol, either by name (e.g. "tcp") or number (e.g. "6");
			// Create normalizes it to the numeric form for the API.
			"protocol": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// "allow" or "deny".
			"rule_action": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// CIDR block the rule matches.
			"cidr_block": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Port range for protocols that use ports.
			"from_port": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				ForceNew: true,
			},
			"to_port": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				ForceNew: true,
			},
			// ICMP type/code; only consulted when the protocol is ICMP.
			"icmp_type": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				ForceNew: true,
			},
			"icmp_code": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				ForceNew: true,
			},
		},
	}
}
// resourceAwsNetworkAclRuleCreate creates a single entry on an existing
// network ACL, then waits until the new entry is visible through the API
// before handing off to Read.
func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	// The protocol may be given by name ("tcp") or by number ("6");
	// normalize it to its numeric form for the AWS API.
	protocol := d.Get("protocol").(string)
	p, protocolErr := strconv.Atoi(protocol)
	if protocolErr != nil {
		var ok bool
		p, ok = protocolIntegers()[protocol]
		if !ok {
			return fmt.Errorf("Invalid Protocol %s for rule %#v", protocol, d.Get("rule_number").(int))
		}
	}
	log.Printf("[INFO] Transformed Protocol %s into %d", protocol, p)

	params := &ec2.CreateNetworkAclEntryInput{
		NetworkAclId: aws.String(d.Get("network_acl_id").(string)),
		Egress:       aws.Bool(d.Get("egress").(bool)),
		RuleNumber:   aws.Int64(int64(d.Get("rule_number").(int))),
		Protocol:     aws.String(strconv.Itoa(p)),
		CidrBlock:    aws.String(d.Get("cidr_block").(string)),
		RuleAction:   aws.String(d.Get("rule_action").(string)),
		PortRange: &ec2.PortRange{
			From: aws.Int64(int64(d.Get("from_port").(int))),
			To:   aws.Int64(int64(d.Get("to_port").(int))),
		},
	}

	// Specify additional required fields for ICMP (protocol number 1).
	// NOTE(review): GetOk skips zero values, so an explicit icmp_type = 0
	// or icmp_code = 0 is never sent to the API — confirm whether that is
	// intended for echo-reply (type 0) rules.
	if p == 1 {
		params.IcmpTypeCode = &ec2.IcmpTypeCode{}
		if v, ok := d.GetOk("icmp_code"); ok {
			params.IcmpTypeCode.Code = aws.Int64(int64(v.(int)))
		}
		if v, ok := d.GetOk("icmp_type"); ok {
			params.IcmpTypeCode.Type = aws.Int64(int64(v.(int)))
		}
	}

	log.Printf("[INFO] Creating Network Acl Rule: %d (%t)", d.Get("rule_number").(int), d.Get("egress").(bool))
	_, err := conn.CreateNetworkAclEntry(params)
	if err != nil {
		return fmt.Errorf("Error Creating Network Acl Rule: %s", err.Error())
	}

	d.SetId(networkAclIdRuleNumberEgressHash(d.Get("network_acl_id").(string), d.Get("rule_number").(int), d.Get("egress").(bool), d.Get("protocol").(string)))

	// It appears it might be a while until the newly created rule is visible via the
	// API (see issue GH-4721). Retry the `findNetworkAclRule` function until it is
	// visible (which in most cases is likely immediately).
	err = resource.Retry(3*time.Minute, func() error {
		_, findErr := findNetworkAclRule(d, meta)
		return findErr
	})
	if err != nil {
		// Include the underlying lookup error so the user can tell an
		// eventual-consistency timeout apart from a real API failure.
		return fmt.Errorf("Created Network ACL Rule was not visible in API within 3 minute period. Running 'terraform apply' again will resume infrastructure creation: %s", err)
	}

	return resourceAwsNetworkAclRuleRead(d, meta)
}
// resourceAwsNetworkAclRuleRead refreshes state from the matching entry
// found on the network ACL.
func resourceAwsNetworkAclRuleRead(d *schema.ResourceData, meta interface{}) error {
	resp, err := findNetworkAclRule(d, meta)
	if err != nil {
		return err
	}

	d.Set("rule_number", resp.RuleNumber)
	d.Set("cidr_block", resp.CidrBlock)
	d.Set("egress", resp.Egress)
	if resp.IcmpTypeCode != nil {
		d.Set("icmp_code", resp.IcmpTypeCode.Code)
		d.Set("icmp_type", resp.IcmpTypeCode.Type)
	}
	if resp.PortRange != nil {
		d.Set("from_port", resp.PortRange.From)
		d.Set("to_port", resp.PortRange.To)
	}

	d.Set("rule_action", resp.RuleAction)

	// AWS returns the protocol as its number; map it back to its name.
	// NOTE(review): if the user configured the numeric form (e.g. "6"),
	// this rewrites it to the name ("tcp"), which may produce a spurious
	// diff — confirm against the expected plan behavior.
	p, protocolErr := strconv.Atoi(*resp.Protocol)
	log.Printf("[INFO] Converting the protocol %v", p)
	if protocolErr == nil {
		// `:=` declares both protocol and ok here; the original's extra
		// `var ok bool` predeclaration was redundant and has been dropped.
		protocol, ok := protocolStrings(protocolIntegers())[p]
		if !ok {
			return fmt.Errorf("Invalid Protocol %s for rule %#v", *resp.Protocol, d.Get("rule_number").(int))
		}
		log.Printf("[INFO] Transformed Protocol %s back into %s", *resp.Protocol, protocol)
		d.Set("protocol", protocol)
	}

	return nil
}
// resourceAwsNetworkAclRuleDelete removes this entry from its network ACL,
// identifying it by rule number and egress flag.
func resourceAwsNetworkAclRuleDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	req := &ec2.DeleteNetworkAclEntryInput{
		NetworkAclId: aws.String(d.Get("network_acl_id").(string)),
		RuleNumber:   aws.Int64(int64(d.Get("rule_number").(int))),
		Egress:       aws.Bool(d.Get("egress").(bool)),
	}

	log.Printf("[INFO] Deleting Network Acl Rule: %s", d.Id())
	if _, err := conn.DeleteNetworkAclEntry(req); err != nil {
		return fmt.Errorf("Error Deleting Network Acl Rule: %s", err.Error())
	}

	return nil
}
// findNetworkAclRule locates the ACL entry matching this resource's rule
// number and egress flag on the configured network ACL, returning an error
// when the ACL or the entry cannot be found.
func findNetworkAclRule(d *schema.ResourceData, meta interface{}) (*ec2.NetworkAclEntry, error) {
	conn := meta.(*AWSClient).ec2conn

	ruleNumber := d.Get("rule_number").(int)
	egress := d.Get("egress").(bool)
	aclID := d.Get("network_acl_id").(string)

	// Narrow the describe call to entries matching our rule number and
	// direction; the final match is still confirmed field-by-field below.
	params := &ec2.DescribeNetworkAclsInput{
		NetworkAclIds: []*string{aws.String(aclID)},
		Filters: []*ec2.Filter{
			&ec2.Filter{
				Name:   aws.String("entry.rule-number"),
				Values: []*string{aws.String(fmt.Sprintf("%v", ruleNumber))},
			},
			&ec2.Filter{
				Name:   aws.String("entry.egress"),
				Values: []*string{aws.String(fmt.Sprintf("%v", egress))},
			},
		},
	}

	log.Printf("[INFO] Describing Network Acl: %s", aclID)
	log.Printf("[INFO] Describing Network Acl with the Filters %#v", params)
	resp, err := conn.DescribeNetworkAcls(params)
	if err != nil {
		return nil, fmt.Errorf("Error Finding Network Acl Rule %d: %s", ruleNumber, err.Error())
	}

	if resp == nil || len(resp.NetworkAcls) != 1 || resp.NetworkAcls[0] == nil {
		return nil, fmt.Errorf(
			"Expected to find one Network ACL, got: %#v",
			resp.NetworkAcls)
	}

	networkAcl := resp.NetworkAcls[0]
	if networkAcl.Entries != nil {
		for _, entry := range networkAcl.Entries {
			if *entry.RuleNumber == int64(ruleNumber) && *entry.Egress == egress {
				return entry, nil
			}
		}
	}
	return nil, fmt.Errorf(
		"Expected the Network ACL to have Entries, got: %#v",
		networkAcl)
}
func networkAclIdRuleNumberEgressHash(networkAclId string, ruleNumber int, egress bool, protocol string) string {
var buf bytes.Buffer
buf.WriteString(fmt.Sprintf("%s-", networkAclId))
buf.WriteString(fmt.Sprintf("%d-", ruleNumber))
buf.WriteString(fmt.Sprintf("%t-", egress))
buf.WriteString(fmt.Sprintf("%s-", protocol))
return fmt.Sprintf("nacl-%d", hashcode.String(buf.String()))
}

Some files were not shown because too many files have changed in this diff Show More