mirror of
https://github.com/opentofu/opentofu.git
synced 2025-02-20 11:48:24 -06:00
* provider/aws: Provide the option to skip_destroy on aws_volume_attachment When you want to attach and detach pre-existing EBS volumes to an instance, we would do that as follows: ``` resource "aws_instance" "web" { ami = "ami-21f78e11" availability_zone = "us-west-2a" instance_type = "t1.micro" tags { Name = "HelloWorld" } } data "aws_ebs_volume" "ebs_volume" { filter { name = "size" values = ["${aws_ebs_volume.example.size}"] } filter { name = "availability-zone" values = ["${aws_ebs_volume.example.availability_zone}"] } filter { name = "tag:Name" values = ["TestVolume"] } } resource "aws_volume_attachment" "ebs_att" { device_name = "/dev/sdh" volume_id = "${data.aws_ebs_volume.ebs_volume.id}" instance_id = "${aws_instance.web.id}" skip_destroy = true } ``` The issue here is that when we run a terraform destroy command, the volume tries to get detached from a running instance and goes into a non-responsive state. We would have to force_destroy the volume at that point and risk losing any data on it. This PR introduces the idea of `skip_destroy` on a volume attachment. tl;dr: We want the volume to be detached from the instane when the instance itself has been destroyed. This way the normal shut procedures will happen and protect the disk for attachment to another instance Volume Attachment Tests: ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSVolumeAttachment_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /terraform/vendor/) 2016/11/02 00:47:27 Generated command/internal_plugin_list.go TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSVolumeAttachment_ -timeout 120m === RUN TestAccAWSVolumeAttachment_basic --- PASS: TestAccAWSVolumeAttachment_basic (133.49s) === RUN TestAccAWSVolumeAttachment_skipDestroy --- PASS: TestAccAWSVolumeAttachment_skipDestroy (119.64s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 253.158s ``` EBS Volume Tests: ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSEBSVolume_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /terraform/vendor/) 2016/11/02 01:00:18 Generated command/internal_plugin_list.go TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSEBSVolume_ -timeout 120m === RUN TestAccAWSEBSVolume_importBasic --- PASS: TestAccAWSEBSVolume_importBasic (26.38s) === RUN TestAccAWSEBSVolume_basic --- PASS: TestAccAWSEBSVolume_basic (26.86s) === RUN TestAccAWSEBSVolume_NoIops --- PASS: TestAccAWSEBSVolume_NoIops (27.89s) === RUN TestAccAWSEBSVolume_withTags --- PASS: TestAccAWSEBSVolume_withTags (26.88s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 108.032s ``` * Update volume_attachment.html.markdown
213 lines
5.6 KiB
Go
213 lines
5.6 KiB
Go
package aws

import (
	"bytes"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ec2"

	"github.com/hashicorp/terraform/helper/hashcode"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)
|
|
|
|
func resourceAwsVolumeAttachment() *schema.Resource {
|
|
return &schema.Resource{
|
|
Create: resourceAwsVolumeAttachmentCreate,
|
|
Read: resourceAwsVolumeAttachmentRead,
|
|
Delete: resourceAwsVolumeAttachmentDelete,
|
|
|
|
Schema: map[string]*schema.Schema{
|
|
"device_name": {
|
|
Type: schema.TypeString,
|
|
Required: true,
|
|
ForceNew: true,
|
|
},
|
|
|
|
"instance_id": {
|
|
Type: schema.TypeString,
|
|
Required: true,
|
|
ForceNew: true,
|
|
},
|
|
|
|
"volume_id": {
|
|
Type: schema.TypeString,
|
|
Required: true,
|
|
ForceNew: true,
|
|
},
|
|
|
|
"force_detach": {
|
|
Type: schema.TypeBool,
|
|
Optional: true,
|
|
Computed: true,
|
|
},
|
|
"skip_destroy": {
|
|
Type: schema.TypeBool,
|
|
Optional: true,
|
|
Computed: true,
|
|
},
|
|
},
|
|
}
|
|
}
|
|
|
|
func resourceAwsVolumeAttachmentCreate(d *schema.ResourceData, meta interface{}) error {
|
|
conn := meta.(*AWSClient).ec2conn
|
|
name := d.Get("device_name").(string)
|
|
iID := d.Get("instance_id").(string)
|
|
vID := d.Get("volume_id").(string)
|
|
|
|
opts := &ec2.AttachVolumeInput{
|
|
Device: aws.String(name),
|
|
InstanceId: aws.String(iID),
|
|
VolumeId: aws.String(vID),
|
|
}
|
|
|
|
log.Printf("[DEBUG] Attaching Volume (%s) to Instance (%s)", vID, iID)
|
|
_, err := conn.AttachVolume(opts)
|
|
if err != nil {
|
|
if awsErr, ok := err.(awserr.Error); ok {
|
|
return fmt.Errorf("[WARN] Error attaching volume (%s) to instance (%s), message: \"%s\", code: \"%s\"",
|
|
vID, iID, awsErr.Message(), awsErr.Code())
|
|
}
|
|
return err
|
|
}
|
|
|
|
stateConf := &resource.StateChangeConf{
|
|
Pending: []string{"attaching"},
|
|
Target: []string{"attached"},
|
|
Refresh: volumeAttachmentStateRefreshFunc(conn, vID, iID),
|
|
Timeout: 5 * time.Minute,
|
|
Delay: 10 * time.Second,
|
|
MinTimeout: 3 * time.Second,
|
|
}
|
|
|
|
_, err = stateConf.WaitForState()
|
|
if err != nil {
|
|
return fmt.Errorf(
|
|
"Error waiting for Volume (%s) to attach to Instance: %s, error: %s",
|
|
vID, iID, err)
|
|
}
|
|
|
|
d.SetId(volumeAttachmentID(name, vID, iID))
|
|
return resourceAwsVolumeAttachmentRead(d, meta)
|
|
}
|
|
|
|
func volumeAttachmentStateRefreshFunc(conn *ec2.EC2, volumeID, instanceID string) resource.StateRefreshFunc {
|
|
return func() (interface{}, string, error) {
|
|
|
|
request := &ec2.DescribeVolumesInput{
|
|
VolumeIds: []*string{aws.String(volumeID)},
|
|
Filters: []*ec2.Filter{
|
|
&ec2.Filter{
|
|
Name: aws.String("attachment.instance-id"),
|
|
Values: []*string{aws.String(instanceID)},
|
|
},
|
|
},
|
|
}
|
|
|
|
resp, err := conn.DescribeVolumes(request)
|
|
if err != nil {
|
|
if awsErr, ok := err.(awserr.Error); ok {
|
|
return nil, "failed", fmt.Errorf("code: %s, message: %s", awsErr.Code(), awsErr.Message())
|
|
}
|
|
return nil, "failed", err
|
|
}
|
|
|
|
if len(resp.Volumes) > 0 {
|
|
v := resp.Volumes[0]
|
|
for _, a := range v.Attachments {
|
|
if a.InstanceId != nil && *a.InstanceId == instanceID {
|
|
return a, *a.State, nil
|
|
}
|
|
}
|
|
}
|
|
// assume detached if volume count is 0
|
|
return 42, "detached", nil
|
|
}
|
|
}
|
|
func resourceAwsVolumeAttachmentRead(d *schema.ResourceData, meta interface{}) error {
|
|
conn := meta.(*AWSClient).ec2conn
|
|
|
|
request := &ec2.DescribeVolumesInput{
|
|
VolumeIds: []*string{aws.String(d.Get("volume_id").(string))},
|
|
Filters: []*ec2.Filter{
|
|
&ec2.Filter{
|
|
Name: aws.String("attachment.instance-id"),
|
|
Values: []*string{aws.String(d.Get("instance_id").(string))},
|
|
},
|
|
},
|
|
}
|
|
|
|
vols, err := conn.DescribeVolumes(request)
|
|
if err != nil {
|
|
if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidVolume.NotFound" {
|
|
d.SetId("")
|
|
return nil
|
|
}
|
|
return fmt.Errorf("Error reading EC2 volume %s for instance: %s: %#v", d.Get("volume_id").(string), d.Get("instance_id").(string), err)
|
|
}
|
|
|
|
if len(vols.Volumes) == 0 || *vols.Volumes[0].State == "available" {
|
|
log.Printf("[DEBUG] Volume Attachment (%s) not found, removing from state", d.Id())
|
|
d.SetId("")
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func resourceAwsVolumeAttachmentDelete(d *schema.ResourceData, meta interface{}) error {
|
|
conn := meta.(*AWSClient).ec2conn
|
|
|
|
if _, ok := d.GetOk("skip_destroy"); ok {
|
|
log.Printf("[INFO] Found skip_destroy to be true, removing attachment %q from state", d.Id())
|
|
d.SetId("")
|
|
return nil
|
|
}
|
|
|
|
vID := d.Get("volume_id").(string)
|
|
iID := d.Get("instance_id").(string)
|
|
|
|
opts := &ec2.DetachVolumeInput{
|
|
Device: aws.String(d.Get("device_name").(string)),
|
|
InstanceId: aws.String(iID),
|
|
VolumeId: aws.String(vID),
|
|
Force: aws.Bool(d.Get("force_detach").(bool)),
|
|
}
|
|
|
|
_, err := conn.DetachVolume(opts)
|
|
if err != nil {
|
|
return fmt.Errorf("Failed to detach Volume (%s) from Instance (%s): %s",
|
|
vID, iID, err)
|
|
}
|
|
stateConf := &resource.StateChangeConf{
|
|
Pending: []string{"detaching"},
|
|
Target: []string{"detached"},
|
|
Refresh: volumeAttachmentStateRefreshFunc(conn, vID, iID),
|
|
Timeout: 5 * time.Minute,
|
|
Delay: 10 * time.Second,
|
|
MinTimeout: 3 * time.Second,
|
|
}
|
|
|
|
log.Printf("[DEBUG] Detaching Volume (%s) from Instance (%s)", vID, iID)
|
|
_, err = stateConf.WaitForState()
|
|
if err != nil {
|
|
return fmt.Errorf(
|
|
"Error waiting for Volume (%s) to detach from Instance: %s",
|
|
vID, iID)
|
|
}
|
|
d.SetId("")
|
|
return nil
|
|
}
|
|
|
|
func volumeAttachmentID(name, volumeID, instanceID string) string {
|
|
var buf bytes.Buffer
|
|
buf.WriteString(fmt.Sprintf("%s-", name))
|
|
buf.WriteString(fmt.Sprintf("%s-", instanceID))
|
|
buf.WriteString(fmt.Sprintf("%s-", volumeID))
|
|
|
|
return fmt.Sprintf("vai-%d", hashcode.String(buf.String()))
|
|
}
|