From 87cde8834ec176e5b1a86bec65853a345bd56890 Mon Sep 17 00:00:00 2001 From: Jean Mertz Date: Sun, 3 May 2015 16:00:00 +0200 Subject: [PATCH 001/335] OpenStack: add functionality to attach FloatingIP to Port --- ...urce_openstack_networking_floatingip_v2.go | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go b/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go index 1b81c6a96e..37f1ca7cfe 100644 --- a/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go +++ b/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go @@ -14,6 +14,7 @@ func resourceNetworkingFloatingIPV2() *schema.Resource { return &schema.Resource{ Create: resourceNetworkFloatingIPV2Create, Read: resourceNetworkFloatingIPV2Read, + Update: resourceNetworkFloatingIPV2Update, Delete: resourceNetworkFloatingIPV2Delete, Schema: map[string]*schema.Schema{ @@ -33,6 +34,11 @@ func resourceNetworkingFloatingIPV2() *schema.Resource { ForceNew: true, DefaultFunc: envDefaultFunc("OS_POOL_NAME"), }, + "port_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, }, } } @@ -53,6 +59,7 @@ func resourceNetworkFloatingIPV2Create(d *schema.ResourceData, meta interface{}) } createOpts := floatingips.CreateOpts{ FloatingNetworkID: poolID, + PortID: d.Get("port_id").(string), } log.Printf("[DEBUG] Create Options: %#v", createOpts) floatingIP, err := floatingips.Create(networkClient, createOpts).Extract() @@ -78,6 +85,7 @@ func resourceNetworkFloatingIPV2Read(d *schema.ResourceData, meta interface{}) e } d.Set("address", floatingIP.FloatingIP) + d.Set("port_id", floatingIP.PortID) poolName, err := getNetworkName(d, meta, floatingIP.FloatingNetworkID) if err != nil { return fmt.Errorf("Error retrieving floating IP pool name: %s", err) @@ -87,6 +95,29 @@ func resourceNetworkFloatingIPV2Read(d *schema.ResourceData, meta interface{}) e return nil } +func resourceNetworkFloatingIPV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + var updateOpts floatingips.UpdateOpts + + if d.HasChange("port_id") { + updateOpts.PortID = d.Get("port_id").(string) + } + + log.Printf("[DEBUG] Update Options: %#v", updateOpts) + + _, err = floatingips.Update(networkClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating floating IP: %s", err) + } + + return resourceNetworkFloatingIPV2Read(d, meta) +} + func resourceNetworkFloatingIPV2Delete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) networkClient, err := config.networkingV2Client(d.Get("region").(string)) From 739a411b4d867bf7036a38f3fd4feeca867cd939 Mon Sep 17 00:00:00 2001 From: Alexander Dupuy Date: Mon, 18 May 2015 21:45:50 +0200 Subject: [PATCH 002/335] debug security group ids --- .../resource_openstack_compute_instance_v2.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go index 44fca923f6..066fa637ff 100644 --- a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go +++ b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go @@ -557,13 +557,6 @@ func 
resourceComputeInstanceV2Update(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Security groups to remove: %v", secgroupsToRemove) - for _, g := range secgroupsToAdd.List() { - err := secgroups.AddServerToGroup(computeClient, d.Id(), g.(string)).ExtractErr() - if err != nil { - return fmt.Errorf("Error adding security group to OpenStack server (%s): %s", d.Id(), err) - } - log.Printf("[DEBUG] Added security group (%s) to instance (%s)", g.(string), d.Id()) - } for _, g := range secgroupsToRemove.List() { err := secgroups.RemoveServerFromGroup(computeClient, d.Id(), g.(string)).ExtractErr() @@ -581,6 +574,14 @@ func resourceComputeInstanceV2Update(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Removed security group (%s) from instance (%s)", g.(string), d.Id()) } } + for _, g := range secgroupsToAdd.List() { + err := secgroups.AddServerToGroup(computeClient, d.Id(), g.(string)).ExtractErr() + if err != nil { + return fmt.Errorf("Error adding security group to OpenStack server (%s): %s", d.Id(), err) + } + log.Printf("[DEBUG] Added security group (%s) to instance (%s)", g.(string), d.Id()) + } + } if d.HasChange("admin_pass") { From 07ad320960e3fd61e80f00b919dc1a5217d0d321 Mon Sep 17 00:00:00 2001 From: Aaron Welch Date: Sun, 31 May 2015 15:58:14 +0100 Subject: [PATCH 003/335] Packet bare metal cloud hosting platform provider --- builtin/bins/provider-packet/main.go | 12 + builtin/providers/packet/config.go | 18 ++ builtin/providers/packet/provider.go | 36 +++ builtin/providers/packet/provider_test.go | 35 ++ .../packet/resource_packet_device.go | 302 ++++++++++++++++++ .../packet/resource_packet_project.go | 123 +++++++ .../packet/resource_packet_project_test.go | 95 ++++++ .../packet/resource_packet_ssh_key.go | 128 ++++++++ .../packet/resource_packet_ssh_key_test.go | 104 ++++++ .../docs/providers/packet/index.html.markdown | 47 +++ .../providers/packet/r/device.html.markdown | 55 ++++ .../providers/packet/r/project.html.markdown | 40 +++ .../providers/packet/r/ssh_key.html.markdown | 43 +++ 13 files changed, 1038 insertions(+) create mode 100644 builtin/bins/provider-packet/main.go create mode 100644 builtin/providers/packet/config.go create mode 100644 builtin/providers/packet/provider.go create mode 100644 builtin/providers/packet/provider_test.go create mode 100644 builtin/providers/packet/resource_packet_device.go create mode 100644 builtin/providers/packet/resource_packet_project.go create mode 100644 builtin/providers/packet/resource_packet_project_test.go create mode 100644 builtin/providers/packet/resource_packet_ssh_key.go create mode 100644 builtin/providers/packet/resource_packet_ssh_key_test.go create mode 100644 website/source/docs/providers/packet/index.html.markdown create mode 100644 website/source/docs/providers/packet/r/device.html.markdown create mode 100644 website/source/docs/providers/packet/r/project.html.markdown create mode 100644 website/source/docs/providers/packet/r/ssh_key.html.markdown diff --git a/builtin/bins/provider-packet/main.go b/builtin/bins/provider-packet/main.go new file mode 100644 index 0000000000..6d8198ef2b --- /dev/null +++ b/builtin/bins/provider-packet/main.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/hashicorp/terraform/builtin/providers/packet" + "github.com/hashicorp/terraform/plugin" +) + +func main() { + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: packet.Provider, + }) +} diff --git a/builtin/providers/packet/config.go b/builtin/providers/packet/config.go new file mode 100644 index 
0000000000..659ee9ebc8 --- /dev/null +++ b/builtin/providers/packet/config.go @@ -0,0 +1,18 @@ +package packet + +import ( + "github.com/packethost/packngo" +) + +const ( + consumerToken = "aZ9GmqHTPtxevvFq9SK3Pi2yr9YCbRzduCSXF2SNem5sjB91mDq7Th3ZwTtRqMWZ" +) + +type Config struct { + AuthToken string +} + +// Client() returns a new client for accessing packet. +func (c *Config) Client() *packngo.Client { + return packngo.NewClient(consumerToken, c.AuthToken) +} diff --git a/builtin/providers/packet/provider.go b/builtin/providers/packet/provider.go new file mode 100644 index 0000000000..c1efd6e838 --- /dev/null +++ b/builtin/providers/packet/provider.go @@ -0,0 +1,36 @@ +package packet + +import ( + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +// Provider returns a schema.Provider for Packet. +func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "auth_token": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("PACKET_AUTH_TOKEN", nil), + Description: "The API auth key for API operations.", + }, + }, + + ResourcesMap: map[string]*schema.Resource{ + "packet_device": resourcePacketDevice(), + "packet_ssh_key": resourcePacketSSHKey(), + "packet_project": resourcePacketProject(), + }, + + ConfigureFunc: providerConfigure, + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + AuthToken: d.Get("auth_token").(string), + } + + return config.Client(), nil +} diff --git a/builtin/providers/packet/provider_test.go b/builtin/providers/packet/provider_test.go new file mode 100644 index 0000000000..5483c4fb08 --- /dev/null +++ b/builtin/providers/packet/provider_test.go @@ -0,0 +1,35 @@ +package packet + +import ( + "os" + "testing" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +var testAccProviders map[string]terraform.ResourceProvider +var testAccProvider *schema.Provider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "packet": testAccProvider, + } +} + +func TestProvider(t *testing.T) { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + if v := os.Getenv("PACKET_AUTH_TOKEN"); v == "" { + t.Fatal("PACKET_AUTH_TOKEN must be set for acceptance tests") + } +} diff --git a/builtin/providers/packet/resource_packet_device.go b/builtin/providers/packet/resource_packet_device.go new file mode 100644 index 0000000000..56fc7afe55 --- /dev/null +++ b/builtin/providers/packet/resource_packet_device.go @@ -0,0 +1,302 @@ +package packet + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketDevice() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketDeviceCreate, + Read: resourcePacketDeviceRead, + Update: resourcePacketDeviceUpdate, + Delete: resourcePacketDeviceDelete, + + Schema: map[string]*schema.Schema{ + "project_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "hostname": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "operating_system": 
&schema.Schema{
+                Type:     schema.TypeString,
+                Required: true,
+                ForceNew: true,
+            },
+
+            "facility": &schema.Schema{
+                Type:     schema.TypeString,
+                Required: true,
+                ForceNew: true,
+            },
+
+            "plan": &schema.Schema{
+                Type:     schema.TypeString,
+                Required: true,
+                ForceNew: true,
+            },
+
+            "billing_cycle": &schema.Schema{
+                Type:     schema.TypeString,
+                Required: true,
+                ForceNew: true,
+            },
+
+            "state": &schema.Schema{
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "locked": &schema.Schema{
+                Type:     schema.TypeBool,
+                Computed: true,
+            },
+
+            "network": &schema.Schema{
+                Type:     schema.TypeList,
+                Computed: true,
+                Elem: &schema.Resource{
+                    Schema: map[string]*schema.Schema{
+                        "address": &schema.Schema{
+                            Type:     schema.TypeString,
+                            Computed: true,
+                        },
+
+                        "gateway": &schema.Schema{
+                            Type:     schema.TypeString,
+                            Computed: true,
+                        },
+
+                        "family": &schema.Schema{
+                            Type:     schema.TypeInt,
+                            Computed: true,
+                        },
+
+                        "cidr": &schema.Schema{
+                            Type:     schema.TypeInt,
+                            Computed: true,
+                        },
+
+                        "public": &schema.Schema{
+                            Type:     schema.TypeBool,
+                            Computed: true,
+                        },
+                    },
+                },
+            },
+
+            "created": &schema.Schema{
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "updated": &schema.Schema{
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "user_data": &schema.Schema{
+                Type:     schema.TypeString,
+                Optional: true,
+            },
+
+            "tags": &schema.Schema{
+                Type:     schema.TypeList,
+                Optional: true,
+                Elem:     &schema.Schema{Type: schema.TypeString},
+            },
+        },
+    }
+}
+
+func resourcePacketDeviceCreate(d *schema.ResourceData, meta interface{}) error {
+    client := meta.(*packngo.Client)
+
+    createRequest := &packngo.DeviceCreateRequest{
+        HostName:     d.Get("hostname").(string),
+        Plan:         d.Get("plan").(string),
+        Facility:     d.Get("facility").(string),
+        OS:           d.Get("operating_system").(string),
+        BillingCycle: d.Get("billing_cycle").(string),
+        ProjectID:    d.Get("project_id").(string),
+    }
+
+    if attr, ok := d.GetOk("user_data"); ok {
+        createRequest.UserData = attr.(string)
+    }
+
+    tags := d.Get("tags.#").(int)
+    if tags > 0 {
+        createRequest.Tags = make([]string, 0, tags)
+        for i := 0; i < tags; i++ {
+            key := fmt.Sprintf("tags.%d", i)
+            createRequest.Tags = append(createRequest.Tags, d.Get(key).(string))
+        }
+    }
+
+    log.Printf("[DEBUG] Device create configuration: %#v", createRequest)
+
+    newDevice, _, err := client.Devices.Create(createRequest)
+    if err != nil {
+        return fmt.Errorf("Error creating device: %s", err)
+    }
+
+    // Assign the device id
+    d.SetId(newDevice.ID)
+
+    log.Printf("[INFO] Device ID: %s", d.Id())
+
+    _, err = WaitForDeviceAttribute(d, "active", []string{"provisioning"}, "state", meta)
+    if err != nil {
+        return fmt.Errorf(
+            "Error waiting for device (%s) to become ready: %s", d.Id(), err)
+    }
+
+    return resourcePacketDeviceRead(d, meta)
+}
+
+func resourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error {
+    client := meta.(*packngo.Client)
+
+    // Retrieve the device properties for updating the state
+    device, _, err := client.Devices.Get(d.Id())
+    if err != nil {
+        return fmt.Errorf("Error retrieving device: %s", err)
+    }
+
+    d.Set("hostname", device.Hostname)
+    d.Set("plan", device.Plan.Slug)
+    d.Set("facility", device.Facility.Code)
+    d.Set("operating_system", device.OS.Slug)
+    d.Set("state", device.State)
+    d.Set("billing_cycle", device.BillingCycle)
+    d.Set("locked", device.Locked)
+    d.Set("created", device.Created)
+    d.Set("updated", device.Updated)
+
+    tags := make([]string, 0)
+    for _, tag := range device.Tags {
+        tags = append(tags, tag)
+    }
+    d.Set("tags", tags)
+
+    networks := 
make([]map[string]interface{}, 0, 1) + for _, ip := range device.Network { + network := make(map[string]interface{}) + network["address"] = ip.Address + network["gateway"] = ip.Gateway + network["family"] = ip.Family + network["cidr"] = ip.Cidr + network["public"] = ip.Public + networks = append(networks, network) + } + d.Set("network", networks) + + return nil +} + +func resourcePacketDeviceUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + if d.HasChange("locked") && d.Get("locked").(bool) { + _, err := client.Devices.Lock(d.Id()) + + if err != nil { + return fmt.Errorf( + "Error locking device (%s): %s", d.Id(), err) + } + } else if d.HasChange("locked") { + _, err := client.Devices.Unlock(d.Id()) + + if err != nil { + return fmt.Errorf( + "Error unlocking device (%s): %s", d.Id(), err) + } + } + + return resourcePacketDeviceRead(d, meta) +} + +func resourcePacketDeviceDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + log.Printf("[INFO] Deleting device: %s", d.Id()) + if _, err := client.Devices.Delete(d.Id()); err != nil { + return fmt.Errorf("Error deleting device: %s", err) + } + + return nil +} + +func WaitForDeviceAttribute( + d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) { + // Wait for the device so we can get the networking attributes + // that show up after a while + log.Printf( + "[INFO] Waiting for device (%s) to have %s of %s", + d.Id(), attribute, target) + + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: target, + Refresh: newDeviceStateRefreshFunc(d, attribute, meta), + Timeout: 60 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + return stateConf.WaitForState() +} + +func newDeviceStateRefreshFunc( + d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc { + client := meta.(*packngo.Client) + return func() (interface{}, string, error) { + err := resourcePacketDeviceRead(d, meta) + if err != nil { + return nil, "", err + } + + // See if we can access our attribute + if attr, ok := d.GetOk(attribute); ok { + // Retrieve the device properties + device, _, err := client.Devices.Get(d.Id()) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving device: %s", err) + } + + return &device, attr.(string), nil + } + + return nil, "", nil + } +} + +// Powers on the device and waits for it to be active +func powerOnAndWait(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + _, err := client.Devices.PowerOn(d.Id()) + if err != nil { + return err + } + + // Wait for power on + _, err = WaitForDeviceAttribute(d, "active", []string{"off"}, "state", client) + if err != nil { + return err + } + + return nil +} diff --git a/builtin/providers/packet/resource_packet_project.go b/builtin/providers/packet/resource_packet_project.go new file mode 100644 index 0000000000..e41ef1381a --- /dev/null +++ b/builtin/providers/packet/resource_packet_project.go @@ -0,0 +1,123 @@ +package packet + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketProject() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketProjectCreate, + Read: resourcePacketProjectRead, + Update: resourcePacketProjectUpdate, + Delete: resourcePacketProjectDelete, + + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: 
schema.TypeString,
+                Computed: true,
+            },
+
+            "name": &schema.Schema{
+                Type:     schema.TypeString,
+                Required: true,
+            },
+
+            "payment_method": &schema.Schema{
+                Type:     schema.TypeString,
+                Optional: true,
+            },
+
+            "created": &schema.Schema{
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "updated": &schema.Schema{
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+        },
+    }
+}
+
+func resourcePacketProjectCreate(d *schema.ResourceData, meta interface{}) error {
+    client := meta.(*packngo.Client)
+
+    createRequest := &packngo.ProjectCreateRequest{
+        Name:          d.Get("name").(string),
+        PaymentMethod: d.Get("payment_method").(string),
+    }
+
+    log.Printf("[DEBUG] Project create configuration: %#v", createRequest)
+    project, _, err := client.Projects.Create(createRequest)
+    if err != nil {
+        return fmt.Errorf("Error creating Project: %s", err)
+    }
+
+    d.SetId(project.ID)
+    log.Printf("[INFO] Project created: %s", project.ID)
+
+    return resourcePacketProjectRead(d, meta)
+}
+
+func resourcePacketProjectRead(d *schema.ResourceData, meta interface{}) error {
+    client := meta.(*packngo.Client)
+
+    project, _, err := client.Projects.Get(d.Id())
+    if err != nil {
+        // If the project is somehow already destroyed, mark it as
+        // successfully gone
+        if strings.Contains(err.Error(), "404") {
+            d.SetId("")
+            return nil
+        }
+
+        return fmt.Errorf("Error retrieving Project: %s", err)
+    }
+
+    d.Set("id", project.ID)
+    d.Set("name", project.Name)
+    d.Set("created", project.Created)
+    d.Set("updated", project.Updated)
+
+    return nil
+}
+
+func resourcePacketProjectUpdate(d *schema.ResourceData, meta interface{}) error {
+    client := meta.(*packngo.Client)
+
+    updateRequest := &packngo.ProjectUpdateRequest{
+        ID:   d.Get("id").(string),
+        Name: d.Get("name").(string),
+    }
+
+    if attr, ok := d.GetOk("payment_method"); ok {
+        updateRequest.PaymentMethod = attr.(string)
+    }
+
+    log.Printf("[DEBUG] Project update: %#v", d.Get("id"))
+    _, _, err := client.Projects.Update(updateRequest)
+    if err != nil {
+        return fmt.Errorf("Failed to update Project: %s", err)
+    }
+
+    return resourcePacketProjectRead(d, meta)
+}
+
+func resourcePacketProjectDelete(d *schema.ResourceData, meta interface{}) error {
+    client := meta.(*packngo.Client)
+
+    log.Printf("[INFO] Deleting Project: %s", d.Id())
+    _, err := client.Projects.Delete(d.Id())
+    if err != nil {
+        return fmt.Errorf("Error deleting project: %s", err)
+    }
+
+    d.SetId("")
+    return nil
+}
diff --git a/builtin/providers/packet/resource_packet_project_test.go b/builtin/providers/packet/resource_packet_project_test.go
new file mode 100644
index 0000000000..b0179cfbec
--- /dev/null
+++ b/builtin/providers/packet/resource_packet_project_test.go
@@ -0,0 +1,95 @@
+package packet
+
+import (
+    "fmt"
+    "testing"
+
+    "github.com/hashicorp/terraform/helper/resource"
+    "github.com/hashicorp/terraform/terraform"
+    "github.com/packethost/packngo"
+)
+
+func TestAccPacketProject_Basic(t *testing.T) {
+    var project packngo.Project
+
+    resource.Test(t, resource.TestCase{
+        PreCheck:     func() { testAccPreCheck(t) },
+        Providers:    testAccProviders,
+        CheckDestroy: testAccCheckPacketProjectDestroy,
+        Steps: []resource.TestStep{
+            resource.TestStep{
+                Config: testAccCheckPacketProjectConfig_basic,
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckPacketProjectExists("packet_project.foobar", &project),
+                    testAccCheckPacketProjectAttributes(&project),
+                    resource.TestCheckResourceAttr(
+                        "packet_project.foobar", "name", "foobar"),
+                ),
+            },
+        },
+    })
+}
+
+func testAccCheckPacketProjectDestroy(s *terraform.State) error {
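+    // Walk every packet_project left in the state; if the API can still
+    // fetch it, the resource was not actually destroyed.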
+    client := testAccProvider.Meta().(*packngo.Client)
+
+    for _, rs := range s.RootModule().Resources {
+        if rs.Type != "packet_project" {
+            continue
+        }
+
+        _, _, err := client.Projects.Get(rs.Primary.ID)
+
+        if err == nil {
+            return fmt.Errorf("Project still exists")
+        }
+    }
+
+    return nil
+}
+
+func testAccCheckPacketProjectAttributes(project *packngo.Project) resource.TestCheckFunc {
+    return func(s *terraform.State) error {
+
+        if project.Name != "foobar" {
+            return fmt.Errorf("Bad name: %s", project.Name)
+        }
+
+        return nil
+    }
+}
+
+func testAccCheckPacketProjectExists(n string, project *packngo.Project) resource.TestCheckFunc {
+    return func(s *terraform.State) error {
+        rs, ok := s.RootModule().Resources[n]
+
+        if !ok {
+            return fmt.Errorf("Not found: %s", n)
+        }
+
+        if rs.Primary.ID == "" {
+            return fmt.Errorf("No Record ID is set")
+        }
+
+        client := testAccProvider.Meta().(*packngo.Client)
+
+        foundProject, _, err := client.Projects.Get(rs.Primary.ID)
+
+        if err != nil {
+            return err
+        }
+
+        if foundProject.ID != rs.Primary.ID {
+            return fmt.Errorf("Record not found: %v - %v", rs.Primary.ID, foundProject)
+        }
+
+        *project = *foundProject
+
+        return nil
+    }
+}
+
+var testAccCheckPacketProjectConfig_basic = `
+resource "packet_project" "foobar" {
+    name = "foobar"
+}`
diff --git a/builtin/providers/packet/resource_packet_ssh_key.go b/builtin/providers/packet/resource_packet_ssh_key.go
new file mode 100644
index 0000000000..95e04bd8ca
--- /dev/null
+++ b/builtin/providers/packet/resource_packet_ssh_key.go
@@ -0,0 +1,128 @@
+package packet
+
+import (
+    "fmt"
+    "log"
+    "strings"
+
+    "github.com/hashicorp/terraform/helper/schema"
+    "github.com/packethost/packngo"
+)
+
+func resourcePacketSSHKey() *schema.Resource {
+    return &schema.Resource{
+        Create: resourcePacketSSHKeyCreate,
+        Read:   resourcePacketSSHKeyRead,
+        Update: resourcePacketSSHKeyUpdate,
+        Delete: resourcePacketSSHKeyDelete,
+
+        Schema: map[string]*schema.Schema{
+            "id": &schema.Schema{
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "name": &schema.Schema{
+                Type:     schema.TypeString,
+                Required: true,
+            },
+
+            "public_key": &schema.Schema{
+                Type:     schema.TypeString,
+                Required: true,
+                ForceNew: true,
+            },
+
+            "fingerprint": &schema.Schema{
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "created": &schema.Schema{
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "updated": &schema.Schema{
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+        },
+    }
+}
+
+func resourcePacketSSHKeyCreate(d *schema.ResourceData, meta interface{}) error {
+    client := meta.(*packngo.Client)
+
+    createRequest := &packngo.SSHKeyCreateRequest{
+        Label: d.Get("name").(string),
+        Key:   d.Get("public_key").(string),
+    }
+
+    log.Printf("[DEBUG] SSH Key create configuration: %#v", createRequest)
+    key, _, err := client.SSHKeys.Create(createRequest)
+    if err != nil {
+        return fmt.Errorf("Error creating SSH Key: %s", err)
+    }
+
+    d.SetId(key.ID)
+    log.Printf("[INFO] SSH Key: %s", key.ID)
+
+    return resourcePacketSSHKeyRead(d, meta)
+}
+
+func resourcePacketSSHKeyRead(d *schema.ResourceData, meta interface{}) error {
+    client := meta.(*packngo.Client)
+
+    key, _, err := client.SSHKeys.Get(d.Id())
+    if err != nil {
+        // If the key is somehow already destroyed, mark it as
+        // successfully gone
+        if strings.Contains(err.Error(), "404") {
+            d.SetId("")
+            return nil
+        }
+
+        return fmt.Errorf("Error retrieving SSH key: %s", err)
+    }
+
+    d.Set("id", key.ID)
+    d.Set("name", key.Label)
+    d.Set("public_key", key.Key)
+    d.Set("fingerprint", 
key.FingerPrint)
+    d.Set("created", key.Created)
+    d.Set("updated", key.Updated)
+
+    return nil
+}
+
+func resourcePacketSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error {
+    client := meta.(*packngo.Client)
+
+    updateRequest := &packngo.SSHKeyUpdateRequest{
+        ID:    d.Get("id").(string),
+        Label: d.Get("name").(string),
+        Key:   d.Get("public_key").(string),
+    }
+
+    log.Printf("[DEBUG] SSH key update: %#v", d.Get("id"))
+    _, _, err := client.SSHKeys.Update(updateRequest)
+    if err != nil {
+        return fmt.Errorf("Failed to update SSH key: %s", err)
+    }
+
+    return resourcePacketSSHKeyRead(d, meta)
+}
+
+func resourcePacketSSHKeyDelete(d *schema.ResourceData, meta interface{}) error {
+    client := meta.(*packngo.Client)
+
+    log.Printf("[INFO] Deleting SSH key: %s", d.Id())
+    _, err := client.SSHKeys.Delete(d.Id())
+    if err != nil {
+        return fmt.Errorf("Error deleting SSH key: %s", err)
+    }
+
+    d.SetId("")
+    return nil
+}
diff --git a/builtin/providers/packet/resource_packet_ssh_key_test.go b/builtin/providers/packet/resource_packet_ssh_key_test.go
new file mode 100644
index 0000000000..765086d4fa
--- /dev/null
+++ b/builtin/providers/packet/resource_packet_ssh_key_test.go
@@ -0,0 +1,104 @@
+package packet
+
+import (
+    "fmt"
+    "strings"
+    "testing"
+
+    "github.com/hashicorp/terraform/helper/resource"
+    "github.com/hashicorp/terraform/terraform"
+    "github.com/packethost/packngo"
+)
+
+func TestAccPacketSSHKey_Basic(t *testing.T) {
+    var key packngo.SSHKey
+
+    resource.Test(t, resource.TestCase{
+        PreCheck:     func() { testAccPreCheck(t) },
+        Providers:    testAccProviders,
+        CheckDestroy: testAccCheckPacketSSHKeyDestroy,
+        Steps: []resource.TestStep{
+            resource.TestStep{
+                Config: testAccCheckPacketSSHKeyConfig_basic,
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckPacketSSHKeyExists("packet_ssh_key.foobar", &key),
+                    testAccCheckPacketSSHKeyAttributes(&key),
+                    resource.TestCheckResourceAttr(
+                        "packet_ssh_key.foobar", "name", "foobar"),
+                    resource.TestCheckResourceAttr(
+                        "packet_ssh_key.foobar", "public_key", testAccValidPublicKey),
+                ),
+            },
+        },
+    })
+}
+
+func testAccCheckPacketSSHKeyDestroy(s *terraform.State) error {
+    client := testAccProvider.Meta().(*packngo.Client)
+
+    for _, rs := range s.RootModule().Resources {
+        if rs.Type != "packet_ssh_key" {
+            continue
+        }
+
+        _, _, err := client.SSHKeys.Get(rs.Primary.ID)
+
+        if err == nil {
+            return fmt.Errorf("SSH key still exists")
+        }
+    }
+
+    return nil
+}
+
+func testAccCheckPacketSSHKeyAttributes(key *packngo.SSHKey) resource.TestCheckFunc {
+    return func(s *terraform.State) error {
+
+        if key.Label != "foobar" {
+            return fmt.Errorf("Bad name: %s", key.Label)
+        }
+
+        return nil
+    }
+}
+
+func testAccCheckPacketSSHKeyExists(n string, key *packngo.SSHKey) resource.TestCheckFunc {
+    return func(s *terraform.State) error {
+        rs, ok := s.RootModule().Resources[n]
+
+        if !ok {
+            return fmt.Errorf("Not found: %s", n)
+        }
+
+        if rs.Primary.ID == "" {
+            return fmt.Errorf("No Record ID is set")
+        }
+
+        client := testAccProvider.Meta().(*packngo.Client)
+
+        foundKey, _, err := client.SSHKeys.Get(rs.Primary.ID)
+
+        if err != nil {
+            return err
+        }
+
+        if foundKey.ID != rs.Primary.ID {
+            return fmt.Errorf("SSH key not found: %v - %v", rs.Primary.ID, foundKey)
+        }
+
+        *key = *foundKey
+
+        fmt.Printf("key: %v", key)
+        return nil
+    }
+}
+
+var testAccCheckPacketSSHKeyConfig_basic = fmt.Sprintf(`
+resource "packet_ssh_key" "foobar" {
+    name = "foobar"
+    public_key = "%s"
+}`, testAccValidPublicKey)
+
+var testAccValidPublicKey = strings.TrimSpace(`
+ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVERRN7/9484SOBJ3HSKxxNG5JN8owAjy5f9yYwcUg+JaUVuytn5Pv3aeYROHGGg+5G346xaq3DAwX6Y5ykr2fvjObgncQBnuU5KHWCECO/4h8uWuwh/kfniXPVjFToc+gnkqA+3RKpAecZhFXwfalQ9mMuYGFxn+fwn8cYEApsJbsEmb0iJwPiZ5hjFC8wREuiTlhPHDgkBLOiycd20op2nXzDbHfCHInquEe/gYxEitALONxm0swBOwJZwlTDOB7C6y2dzlrtxr1L59m7pCkWI4EtTRLvleehBoj3u7jB4usR +`) diff --git a/website/source/docs/providers/packet/index.html.markdown b/website/source/docs/providers/packet/index.html.markdown new file mode 100644 index 0000000000..bbe9f5d1ea --- /dev/null +++ b/website/source/docs/providers/packet/index.html.markdown @@ -0,0 +1,47 @@ +--- +layout: "packet" +page_title: "Provider: Packet" +sidebar_current: "docs-packet-index" +description: |- + The Packet provider is used to interact with the resources supported by Packet. The provider needs to be configured with the proper credentials before it can be used. +--- + +# Packet Provider + +The Packet provider is used to interact with the resources supported by Packet. +The provider needs to be configured with the proper credentials before it can be used. + +Use the navigation to the left to read about the available resources. + +## Example Usage + +``` +# Configure the Packet Provider +provider "packet" { + auth_token = "${var.auth_token}" +} + +# Create a project +resource "packet_project" "tf_project_1" { + name = "My First Terraform Project" + payment_method = "PAYMENT_METHOD_ID" +} + +# Create a device and add it to tf_project_1 +resource "packet_device" "web1" { + hostname = "tf.coreos2" + plan = "baremetal_1" + facility = "ewr1" + operating_system = "coreos_stable" + billing_cycle = "hourly" + project_id = "${packet_project.tf_project_1.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `auth_token` - (Required) This is your Packet API Auth token. This can also be specified + with the `PACKET_AUTH_TOKEN` shell environment variable. + diff --git a/website/source/docs/providers/packet/r/device.html.markdown b/website/source/docs/providers/packet/r/device.html.markdown new file mode 100644 index 0000000000..6d57dcbb51 --- /dev/null +++ b/website/source/docs/providers/packet/r/device.html.markdown @@ -0,0 +1,55 @@ +--- +layout: "packet" +page_title: "Packet: packet_device" +sidebar_current: "docs-packet-resource-device" +description: |- + Provides a Packet device resource. This can be used to create, modify, and delete devices. +--- + +# packet\_device + +Provides a Packet device resource. This can be used to create, +modify, and delete devices. + +## Example Usage + +``` +# Create a device and add it to tf_project_1 +resource "packet_device" "web1" { + hostname = "tf.coreos2" + plan = "baremetal_1" + facility = "ewr1" + operating_system = "coreos_stable" + billing_cycle = "hourly" + project_id = "${packet_project.tf_project_1.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `hostname` - (Required) The device name +* `project_id` - (Required) The id of the project in which to create the device +* `operating_system` - (Required) The operating system slug +* `facility` - (Required) The facility in which to create the device +* `plan` - (Required) The config type slug +* `billing_cycle` - (Required) monthly or hourly +* `user_data` (Optional) - A string of the desired User Data for the device. 
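+
+As a sketch of passing user data from a local file (the resource name and
+the file name here are illustrative, not part of the provider):
+
+```
+resource "packet_device" "worker1" {
+    hostname         = "tf.worker1"
+    plan             = "baremetal_1"
+    facility         = "ewr1"
+    operating_system = "coreos_stable"
+    billing_cycle    = "hourly"
+    project_id       = "${packet_project.tf_project_1.id}"
+    user_data        = "${file("cloud-config.yml")}"
+}
+```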
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The ID of the device
+* `hostname` - The hostname of the device
+* `project_id` - The ID of the project the device belongs to
+* `facility` - The facility the device is in
+* `plan` - The config type of the device
+* `network` - The private and public v4 and v6 IPs assigned to the device
+* `locked` - Is the device locked
+* `billing_cycle` - The billing cycle of the device (monthly or hourly)
+* `operating_system` - The operating system running on the device
+* `state` - The state of the device
+* `created` - The timestamp for when the device was created
+* `updated` - The timestamp for the last time the device was updated
diff --git a/website/source/docs/providers/packet/r/project.html.markdown b/website/source/docs/providers/packet/r/project.html.markdown
new file mode 100644
index 0000000000..d17190eec5
--- /dev/null
+++ b/website/source/docs/providers/packet/r/project.html.markdown
@@ -0,0 +1,40 @@
+---
+layout: "packet"
+page_title: "Packet: packet_project"
+sidebar_current: "docs-packet-resource-project"
+description: |-
+  Provides a Packet Project resource.
+---
+
+# packet\_project
+
+Provides a Packet Project resource to allow you to manage devices
+in your projects.
+
+## Example Usage
+
+```
+# Create a new Project
+resource "packet_project" "tf_project_1" {
+    name = "Terraform Fun"
+    payment_method = "payment-method-id"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the project for identification
+* `payment_method` - (Required) The id of the payment method on file to use for services created
+on this project.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The unique ID of the project
+* `payment_method` - The id of the payment method on file to use for services created
+on this project.
+* `created` - The timestamp for when the project was created
+* `updated` - The timestamp for the last time the project was updated
diff --git a/website/source/docs/providers/packet/r/ssh_key.html.markdown b/website/source/docs/providers/packet/r/ssh_key.html.markdown
new file mode 100644
index 0000000000..cb27aaa774
--- /dev/null
+++ b/website/source/docs/providers/packet/r/ssh_key.html.markdown
@@ -0,0 +1,43 @@
+---
+layout: "packet"
+page_title: "Packet: packet_ssh_key"
+sidebar_current: "docs-packet-resource-ssh-key"
+description: |-
+  Provides a Packet SSH key resource.
+---
+
+# packet\_ssh_key
+
+Provides a Packet SSH key resource to allow you to manage SSH
+keys on your account. All SSH keys on your account are loaded onto
+all new devices; they do not have to be explicitly declared on
+device creation.
+
+## Example Usage
+
+```
+# Create a new SSH key
+resource "packet_ssh_key" "key1" {
+    name = "terraform-1"
+    public_key = "${file("/home/terraform/.ssh/id_rsa.pub")}"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the SSH key for identification
+* `public_key` - (Required) The public key. 
If this is a file, it
+can be read using the file interpolation function.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The unique ID of the key
+* `name` - The name of the SSH key
+* `public_key` - The text of the public key
+* `fingerprint` - The fingerprint of the SSH key
+* `created` - The timestamp for when the SSH key was created
+* `updated` - The timestamp for the last time the SSH key was updated

From 09e336a80a6afac5a8c998a704c59db54d87259f Mon Sep 17 00:00:00 2001
From: Matti Savolainen
Date: Fri, 3 Jul 2015 12:58:05 +0300
Subject: [PATCH 004/335] Fix Repository attribute in docker client PullOptions
 for private registries.

---
 .../docker/resource_docker_image_funcs.go     |  4 +--
 .../docker/resource_docker_image_test.go      | 28 +++++++++++++++++--
 2 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/builtin/providers/docker/resource_docker_image_funcs.go b/builtin/providers/docker/resource_docker_image_funcs.go
index f45dd22264..454113c5fd 100644
--- a/builtin/providers/docker/resource_docker_image_funcs.go
+++ b/builtin/providers/docker/resource_docker_image_funcs.go
@@ -83,7 +83,7 @@ func pullImage(data *Data, client *dc.Client, image string) error {
 		splitPortRepo := strings.Split(splitImageName[1], "/")
 		pullOpts.Registry = splitImageName[0] + ":" + splitPortRepo[0]
 		pullOpts.Tag = splitImageName[2]
-		pullOpts.Repository = strings.Join(splitPortRepo[1:], "/")
+		pullOpts.Repository = pullOpts.Registry + "/" + strings.Join(splitPortRepo[1:], "/")

 	// It's either registry:port/username/repo, registry:port/repo,
 	// or repo:tag with default registry
@@ -98,7 +98,7 @@ func pullImage(data *Data, client *dc.Client, image string) error {
 		// registry:port/username/repo or registry:port/repo
 		default:
 			pullOpts.Registry = splitImageName[0] + ":" + splitPortRepo[0]
-			pullOpts.Repository = strings.Join(splitPortRepo[1:], "/")
+			pullOpts.Repository = pullOpts.Registry + "/" + strings.Join(splitPortRepo[1:], "/")
 			pullOpts.Tag = "latest"
 	}

diff --git a/builtin/providers/docker/resource_docker_image_test.go b/builtin/providers/docker/resource_docker_image_test.go
index 14dfb29b7c..844b56329e 100644
--- a/builtin/providers/docker/resource_docker_image_test.go
+++ b/builtin/providers/docker/resource_docker_image_test.go
@@ -1,9 +1,8 @@
 package docker

 import (
-	"testing"
-
 	"github.com/hashicorp/terraform/helper/resource"
+	"testing"
 )

 func TestAccDockerImage_basic(t *testing.T) {
@@ -24,9 +23,34 @@ func TestAccDockerImage_basic(t *testing.T) {
 	})
 }

+func TestAccDockerImage_private(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccDockerPrivateImageConfig,
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(
+						"docker_image.foobar",
+						"latest",
+						"2c40b0526b6358710fd09e7b8c022429268cc61703b4777e528ac9d469a07ca1"),
+				),
+			},
+		},
+	})
+}
+
 const testAccDockerImageConfig = `
 resource "docker_image" "foo" {
	name = "ubuntu:trusty-20150320"
	keep_updated = true
}
`
+
+const testAccDockerPrivateImageConfig = `
+resource "docker_image" "foobar" {
+	name = "gcr.io:443/google_containers/pause:0.8.0"
+	keep_updated = true
+}
+`

From c617445fec616bbc9e92013869d33d4b43294642 Mon Sep 17 00:00:00 2001
From: Paul Forman
Date: Wed, 29 Jul 2015 15:44:02 -0600
Subject: [PATCH 005/335] Update AWS ASG termination policy code and tests

The initial commit of AWS autoscaling group termination policy
was unfinished. 
It only worked on "create", and so had a needless ForceNew that would rebuild autoscaling groups on any change. It also used a HashString set, so it didn't preserve ordering of multiple policies correctly. Added the "update" operation, and converted to a TypeList to preserve ordering. In addition, removing the policy or setting it to a null list will reset the policy to "Default", the standard AWS policy. Updated the acceptance tests to verify the update, but the null case is difficult to test. --- .../aws/resource_aws_autoscaling_group.go | 28 ++++++++++++++----- .../resource_aws_autoscaling_group_test.go | 9 ++++-- 2 files changed, 28 insertions(+), 9 deletions(-) diff --git a/builtin/providers/aws/resource_aws_autoscaling_group.go b/builtin/providers/aws/resource_aws_autoscaling_group.go index 88fa2561de..52aab5acd4 100644 --- a/builtin/providers/aws/resource_aws_autoscaling_group.go +++ b/builtin/providers/aws/resource_aws_autoscaling_group.go @@ -112,12 +112,9 @@ func resourceAwsAutoscalingGroup() *schema.Resource { }, "termination_policies": &schema.Schema{ - Type: schema.TypeSet, + Type: schema.TypeList, Optional: true, - Computed: true, - ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, }, "tag": autoscalingTagsSchema(), @@ -169,9 +166,8 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) autoScalingGroupOpts.VPCZoneIdentifier = expandVpcZoneIdentifiers(v.(*schema.Set).List()) } - if v, ok := d.GetOk("termination_policies"); ok && v.(*schema.Set).Len() > 0 { - autoScalingGroupOpts.TerminationPolicies = expandStringList( - v.(*schema.Set).List()) + if v, ok := d.GetOk("termination_policies"); ok && len(v.([]interface{})) > 0 { + autoScalingGroupOpts.TerminationPolicies = expandStringList(v.([]interface{})) } log.Printf("[DEBUG] AutoScaling Group create configuration: %#v", autoScalingGroupOpts) @@ -262,6 +258,24 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) } } + if d.HasChange("termination_policies") { + // If the termination policy is set to null, we need to explicitly set + // it back to "Default", or the API won't reset it for us. + // This means GetOk() will fail us on the zero check. + v := d.Get("termination_policies") + if len(v.([]interface{})) > 0 { + opts.TerminationPolicies = expandStringList(v.([]interface{})) + } else { + // Policies is a slice of string pointers, so build one. + // Maybe there's a better idiom for this? 
+			log.Printf("[DEBUG] Explicitly setting null termination policy to 'Default'")
+			pol := "Default"
+			s := make([]*string, 1, 1)
+			s[0] = &pol
+			opts.TerminationPolicies = s
+		}
+	}
+
 	if err := setAutoscalingTags(conn, d); err != nil {
 		return err
 	} else {
diff --git a/builtin/providers/aws/resource_aws_autoscaling_group_test.go b/builtin/providers/aws/resource_aws_autoscaling_group_test.go
index 814a51bc72..1bc1cea883 100644
--- a/builtin/providers/aws/resource_aws_autoscaling_group_test.go
+++ b/builtin/providers/aws/resource_aws_autoscaling_group_test.go
@@ -45,7 +45,9 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) {
 					resource.TestCheckResourceAttr(
 						"aws_autoscaling_group.bar", "force_delete", "true"),
 					resource.TestCheckResourceAttr(
-						"aws_autoscaling_group.bar", "termination_policies.912102603", "OldestInstance"),
+						"aws_autoscaling_group.bar", "termination_policies.0", "OldestInstance"),
+					resource.TestCheckResourceAttr(
+						"aws_autoscaling_group.bar", "termination_policies.1", "ClosestToNextInstanceHour"),
 				),
 			},

@@ -56,6 +58,8 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) {
 					testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.new", &lc),
 					resource.TestCheckResourceAttr(
 						"aws_autoscaling_group.bar", "desired_capacity", "5"),
+					resource.TestCheckResourceAttr(
+						"aws_autoscaling_group.bar", "termination_policies.0", "ClosestToNextInstanceHour"),
 					testLaunchConfigurationName("aws_autoscaling_group.bar", &lc),
 					testAccCheckAutoscalingTags(&group.Tags, "Bar", map[string]interface{}{
 						"value": "bar-foo",
@@ -359,7 +363,7 @@ resource "aws_autoscaling_group" "bar" {
   health_check_type = "ELB"
   desired_capacity = 4
   force_delete = true
-  termination_policies = ["OldestInstance"]
+  termination_policies = ["OldestInstance","ClosestToNextInstanceHour"]

   launch_configuration = "${aws_launch_configuration.foobar.name}"

@@ -391,6 +395,7 @@ resource "aws_autoscaling_group" "bar" {
   health_check_type = "ELB"
   desired_capacity = 5
   force_delete = true
+  termination_policies = ["ClosestToNextInstanceHour"]

   launch_configuration = "${aws_launch_configuration.new.name}"


From b928777cace13e4e1cc322e5688708f3a26d4e5b Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Tue, 11 Aug 2015 11:36:56 -0500
Subject: [PATCH 006/335] core: don't error on computed value during input
 walk

fixes #2987
---
 terraform/context_input_test.go               | 59 +++++++++++++++++++
 terraform/interpolate.go                      | 16 ++++-
 .../child/main.tf                             |  5 ++
 .../input-var-partially-computed/main.tf      |  7 +++
 4 files changed, 85 insertions(+), 2 deletions(-)
 create mode 100644 terraform/test-fixtures/input-var-partially-computed/child/main.tf
 create mode 100644 terraform/test-fixtures/input-var-partially-computed/main.tf

diff --git a/terraform/context_input_test.go b/terraform/context_input_test.go
index 155f4c72fa..404ef0ffc4 100644
--- a/terraform/context_input_test.go
+++ b/terraform/context_input_test.go
@@ -510,3 +510,62 @@ aws_instance.foo:
 		t.Fatalf("expected: \n%s\ngot: \n%s\n", expectedStr, actualStr)
 	}
 }
+
+func TestContext2Input_varPartiallyComputed(t *testing.T) {
+	input := new(MockUIInput)
+	m := testModule(t, "input-var-partially-computed")
+	p := testProvider("aws")
+	p.ApplyFn = testApplyFn
+	p.DiffFn = testDiffFn
+	ctx := testContext2(t, &ContextOpts{
+		Module: m,
+		Providers: map[string]ResourceProviderFactory{
+			"aws": testProviderFuncFixed(p),
+		},
+		Variables: map[string]string{
+			"foo": "foovalue",
+		},
+		UIInput: input,
+		State: &State{
+			Modules: []*ModuleState{
+				&ModuleState{
+					Path: rootModulePath,
+					Resources: 
map[string]*ResourceState{ + "aws_instance.foo": &ResourceState{ + Type: "aws_instance", + Primary: &InstanceState{ + ID: "i-abc123", + Attributes: map[string]string{ + "id": "i-abc123", + }, + }, + }, + }, + }, + &ModuleState{ + Path: append(rootModulePath, "child"), + Resources: map[string]*ResourceState{ + "aws_instance.mod": &ResourceState{ + Type: "aws_instance", + Primary: &InstanceState{ + ID: "i-bcd345", + Attributes: map[string]string{ + "id": "i-bcd345", + "value": "one,i-abc123", + }, + }, + }, + }, + }, + }, + }, + }) + + if err := ctx.Input(InputModeStd); err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := ctx.Plan(); err != nil { + t.Fatalf("err: %s", err) + } +} diff --git a/terraform/interpolate.go b/terraform/interpolate.go index 6d103cd802..0197c0e422 100644 --- a/terraform/interpolate.go +++ b/terraform/interpolate.go @@ -370,7 +370,13 @@ MISSING: // be unknown. Instead, we return that the value is computed so // that the graph can continue to refresh other nodes. It doesn't // matter because the config isn't interpolated anyways. - if i.Operation == walkRefresh || i.Operation == walkPlanDestroy { + // + // For a Destroy, we're also fine with computed values, since our goal is + // only to get destroy nodes for existing resources. + // + // For an input walk, computed values are okay to return because we're only + // looking for missing variables to prompt the user for. + if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput { return config.UnknownVariableValue, nil } @@ -446,7 +452,13 @@ func (i *Interpolater) computeResourceMultiVariable( // be unknown. Instead, we return that the value is computed so // that the graph can continue to refresh other nodes. It doesn't // matter because the config isn't interpolated anyways. - if i.Operation == walkRefresh || i.Operation == walkPlanDestroy { + // + // For a Destroy, we're also fine with computed values, since our goal is + // only to get destroy nodes for existing resources. + // + // For an input walk, computed values are okay to return because we're only + // looking for missing variables to prompt the user for. 
+	if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput {
 		return config.UnknownVariableValue, nil
 	}

diff --git a/terraform/test-fixtures/input-var-partially-computed/child/main.tf b/terraform/test-fixtures/input-var-partially-computed/child/main.tf
new file mode 100644
index 0000000000..a11cc5e835
--- /dev/null
+++ b/terraform/test-fixtures/input-var-partially-computed/child/main.tf
@@ -0,0 +1,5 @@
+variable "in" {}
+
+resource "aws_instance" "mod" {
+    value = "${var.in}"
+}
diff --git a/terraform/test-fixtures/input-var-partially-computed/main.tf b/terraform/test-fixtures/input-var-partially-computed/main.tf
new file mode 100644
index 0000000000..ada6f0cead
--- /dev/null
+++ b/terraform/test-fixtures/input-var-partially-computed/main.tf
@@ -0,0 +1,7 @@
+resource "aws_instance" "foo" { }
+resource "aws_instance" "bar" { }
+
+module "child" {
+    source = "./child"
+    in = "one,${aws_instance.foo.id},${aws_instance.bar.id}"
+}

From a29ee391eeeba31d8607928ab419ce914f664633 Mon Sep 17 00:00:00 2001
From: Christian Berendt
Date: Fri, 21 Aug 2015 18:34:22 +0200
Subject: [PATCH 007/335] [Vagrantfile] upgrade all packages while provisioning

---
 Vagrantfile | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Vagrantfile b/Vagrantfile
index 5b2d70bcce..061c6316ae 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -13,6 +13,7 @@ ARCH=`uname -m | sed 's|i686|386|' | sed 's|x86_64|amd64|'`

 # Install Prereq Packages
 sudo apt-get update
+sudo apt-get upgrade -y
 sudo apt-get install -y build-essential curl git-core libpcre3-dev mercurial pkg-config zip

 # Install Go

From 8411e5e5bd16b1247541d52d9781b1302cddf3c3 Mon Sep 17 00:00:00 2001
From: Christian Berendt
Date: Fri, 21 Aug 2015 19:54:16 +0200
Subject: [PATCH 008/335] [Vagrantfile] set resources for the provider
 'virtualbox'

The box's default resources (384 MB of memory and 1 vCPU) are not
sufficient to build Terraform binaries for local testing. 
---
 Vagrantfile | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/Vagrantfile b/Vagrantfile
index 5b2d70bcce..be5eefddf9 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -53,4 +53,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       v.vmx["numvcpus"] = "2"
     end
   end
+
+  config.vm.provider "virtualbox" do |v|
+    v.memory = 4096
+    v.cpus = 2
+  end
 end

From f6d69164e84a0e1efd396025f79e3743d28a0034 Mon Sep 17 00:00:00 2001
From: Christian Berendt
Date: Thu, 27 Aug 2015 17:13:59 +0200
Subject: [PATCH 009/335] examples: add OpenStack configuration with networking

---
 examples/openstack-with-networking/README.md  | 63 +++++++++++++++
 examples/openstack-with-networking/main.tf    | 79 +++++++++++++++++++
 .../openstack-with-networking/openrc.sample   |  7 ++
 examples/openstack-with-networking/outputs.tf |  3 +
 .../openstack-with-networking/variables.tf    | 22 ++++++
 5 files changed, 174 insertions(+)
 create mode 100644 examples/openstack-with-networking/README.md
 create mode 100644 examples/openstack-with-networking/main.tf
 create mode 100644 examples/openstack-with-networking/openrc.sample
 create mode 100644 examples/openstack-with-networking/outputs.tf
 create mode 100644 examples/openstack-with-networking/variables.tf

diff --git a/examples/openstack-with-networking/README.md b/examples/openstack-with-networking/README.md
new file mode 100644
index 0000000000..2f9d381ca3
--- /dev/null
+++ b/examples/openstack-with-networking/README.md
@@ -0,0 +1,63 @@
+# Basic OpenStack architecture with networking

+This provides a template for running a simple architecture on an OpenStack
+cloud.

+To simplify the example, this intentionally ignores deploying and
+getting your application onto the servers. However, you could do so either via
+[provisioners](https://www.terraform.io/docs/provisioners/) and a configuration
+management tool, or by pre-baking configured images with
+[Packer](http://www.packer.io).

+After you run `terraform apply` on this configuration, it will output the
+floating IP address assigned to the instance. Once the instance has started,
+it should respond with the default nginx web page.

+First set the required environment variables for the OpenStack provider by
+sourcing the [credentials file](http://docs.openstack.org/cli-reference/content/cli_openrc.html). 
+ +``` +source openrc +``` + +Afterwards run with a command like this: + +``` +terraform apply \ + -var 'external_gateway=c1901f39-f76e-498a-9547-c29ba45f64df' \ + -var 'pool=public' +``` + +To get a list of usable floating IP pools run this command: + +``` +$ nova floating-ip-pool-list ++--------+ +| name | ++--------+ +| public | ++--------+ +``` + +To get the UUID of the external gateway run this command: + +``` +$ neutron net-show FLOATING_IP_POOL ++---------------------------+--------------------------------------+ +| Field | Value | ++---------------------------+--------------------------------------+ +| admin_state_up | True | +| id | c1901f39-f76e-498a-9547-c29ba45f64df | +| mtu | 0 | +| name | public | +| port_security_enabled | True | +| provider:network_type | vxlan | +| provider:physical_network | | +| provider:segmentation_id | 1092 | +| router:external | True | +| shared | False | +| status | ACTIVE | +| subnets | 42b672ae-8d51-4a18-a028-ddae7859ec4c | +| tenant_id | 1bde0a49d2ff44ffb44e6339a8cefe3a | ++---------------------------+--------------------------------------+ +``` diff --git a/examples/openstack-with-networking/main.tf b/examples/openstack-with-networking/main.tf new file mode 100644 index 0000000000..d579252632 --- /dev/null +++ b/examples/openstack-with-networking/main.tf @@ -0,0 +1,79 @@ +resource "openstack_compute_keypair_v2" "terraform" { + name = "terraform" + public_key = "${file("${var.ssh_key_file}.pub")}" +} + +resource "openstack_networking_network_v2" "terraform" { + name = "terraform" + admin_state_up = "true" +} + +resource "openstack_networking_subnet_v2" "terraform" { + name = "terraform" + network_id = "${openstack_networking_network_v2.terraform.id}" + cidr = "10.0.0.0/24" + ip_version = 4 + dns_nameservers = ["8.8.8.8","8.8.4.4"] +} + +resource "openstack_networking_router_v2" "terraform" { + name = "terraform" + admin_state_up = "true" + external_gateway = "${var.external_gateway}" +} + +resource "openstack_networking_router_interface_v2" "terraform" { + router_id = "${openstack_networking_router_v2.terraform.id}" + subnet_id = "${openstack_networking_subnet_v2.terraform.id}" +} + +resource "openstack_compute_secgroup_v2" "terraform" { + name = "terraform" + description = "Security group for the Terraform example instances" + rule { + from_port = 22 + to_port = 22 + ip_protocol = "tcp" + cidr = "0.0.0.0/0" + } + rule { + from_port = 80 + to_port = 80 + ip_protocol = "tcp" + cidr = "0.0.0.0/0" + } + rule { + from_port = -1 + to_port = -1 + ip_protocol = "icmp" + cidr = "0.0.0.0/0" + } +} + +resource "openstack_compute_floatingip_v2" "terraform" { + pool = "${var.pool}" + depends_on = ["openstack_networking_router_interface_v2.terraform"] +} + +resource "openstack_compute_instance_v2" "terraform" { + name = "terraform" + image_name = "${var.image}" + flavor_name = "${var.flavor}" + key_pair = "${openstack_compute_keypair_v2.terraform.name}" + security_groups = [ "${openstack_compute_secgroup_v2.terraform.name}" ] + floating_ip = "${openstack_compute_floatingip_v2.terraform.address}" + network { + uuid = "${openstack_networking_network_v2.terraform.id}" + } + provisioner "remote-exec" { + connection { + user = "${var.ssh_user_name}" + key_file = "${var.ssh_key_file}" + } + inline = [ + "sudo apt-get -y update", + "sudo apt-get -y install nginx", + "sudo service nginx start" + ] + } +} diff --git a/examples/openstack-with-networking/openrc.sample b/examples/openstack-with-networking/openrc.sample new file mode 100644 index 0000000000..c9a38e0a13 
--- /dev/null +++ b/examples/openstack-with-networking/openrc.sample @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +export OS_AUTH_URL=http://KEYSTONE.ENDPOINT.URL:5000/v2.0 +export OS_TENANT_NAME=YOUR_TENANT_NAME +export OS_USERNAME=YOUR_USERNAME +export OS_PASSWORD=YOUR_PASSWORD +export OS_REGION_NAME=YOUR_REGION_NAME diff --git a/examples/openstack-with-networking/outputs.tf b/examples/openstack-with-networking/outputs.tf new file mode 100644 index 0000000000..42f923fe28 --- /dev/null +++ b/examples/openstack-with-networking/outputs.tf @@ -0,0 +1,3 @@ +output "address" { + value = "${openstack_compute_floatingip_v2.terraform.address}" +} diff --git a/examples/openstack-with-networking/variables.tf b/examples/openstack-with-networking/variables.tf new file mode 100644 index 0000000000..3477cf67e9 --- /dev/null +++ b/examples/openstack-with-networking/variables.tf @@ -0,0 +1,22 @@ +variable "image" { + default = "Ubuntu 14.04" +} + +variable "flavor" { + default = "m1.small" +} + +variable "ssh_key_file" { + default = "~/.ssh/id_rsa.terraform" +} + +variable "ssh_user_name" { + default = "ubuntu" +} + +variable "external_gateway" { +} + +variable "pool" { + default = "public" +} From 2e51915431c74d59e6cdb3fd8eccd156bdc38353 Mon Sep 17 00:00:00 2001 From: Sharif Nassar Date: Mon, 31 Aug 2015 15:37:09 -0700 Subject: [PATCH 010/335] Colorize the 'forces new resource' message. Sometimes in all the output from ```terraform plan```, it is difficult to see the ```(forces new resource)``` message. This patch adds a little bit of color. --- command/format_plan.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/format_plan.go b/command/format_plan.go index 66df5f8c28..daf3f60aa0 100644 --- a/command/format_plan.go +++ b/command/format_plan.go @@ -131,7 +131,7 @@ func formatPlanModuleExpand( newResource := "" if attrDiff.RequiresNew && rdiff.Destroy { - newResource = " (forces new resource)" + newResource = opts.Color.Color(" [red](forces new resource)") } buf.WriteString(fmt.Sprintf( From 72e421942e3bda361362b6f349e816566d0c8ef7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcello=20Lagan=C3=A0?= Date: Tue, 1 Sep 2015 17:21:49 +0200 Subject: [PATCH 011/335] Support tags for aws_db_subnet_group --- .../aws/resource_aws_db_subnet_group.go | 26 +++++++++++++++++++ .../aws/resource_aws_db_subnet_group_test.go | 3 +++ .../aws/r/db_subnet_group.html.markdown | 4 +++ 3 files changed, 33 insertions(+) diff --git a/builtin/providers/aws/resource_aws_db_subnet_group.go b/builtin/providers/aws/resource_aws_db_subnet_group.go index 9c09b72d79..709809c4a3 100644 --- a/builtin/providers/aws/resource_aws_db_subnet_group.go +++ b/builtin/providers/aws/resource_aws_db_subnet_group.go @@ -56,12 +56,15 @@ func resourceAwsDbSubnetGroup() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, + + "tags": tagsSchema(), }, } } func resourceAwsDbSubnetGroupCreate(d *schema.ResourceData, meta interface{}) error { rdsconn := meta.(*AWSClient).rdsconn + tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) subnetIdsSet := d.Get("subnet_ids").(*schema.Set) subnetIds := make([]*string, subnetIdsSet.Len()) @@ -73,6 +76,7 @@ func resourceAwsDbSubnetGroupCreate(d *schema.ResourceData, meta interface{}) er DBSubnetGroupName: aws.String(d.Get("name").(string)), DBSubnetGroupDescription: aws.String(d.Get("description").(string)), SubnetIds: subnetIds, + Tags: tags, } log.Printf("[DEBUG] Create DB Subnet Group: %#v", createOpts) @@ -130,6 +134,28 @@ func 
resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) erro
 }
 d.Set("subnet_ids", subnets)

+ // list tags for resource
+ // set tags
+ conn := meta.(*AWSClient).rdsconn
+ arn, err := buildRDSARN(d, meta)
+ if err != nil {
+ log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", subnetGroup.DBSubnetGroupName)
+ } else {
+ resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
+ ResourceName: aws.String(arn),
+ })
+
+ if err != nil {
+ log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
+ }
+
+ var dt []*rds.Tag
+ if len(resp.TagList) > 0 {
+ dt = resp.TagList
+ }
+ d.Set("tags", tagsToMapRDS(dt))
+ }
+
 return nil
 }

diff --git a/builtin/providers/aws/resource_aws_db_subnet_group_test.go b/builtin/providers/aws/resource_aws_db_subnet_group_test.go
index cbf1f84978..e189b1e217 100644
--- a/builtin/providers/aws/resource_aws_db_subnet_group_test.go
+++ b/builtin/providers/aws/resource_aws_db_subnet_group_test.go
@@ -150,6 +150,9 @@ resource "aws_db_subnet_group" "foo" {
 name = "FOO"
 description = "foo description"
 subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
+ tags {
+ Name = "tf-dbsubnet-group-test"
+ }
 }
 `

diff --git a/website/source/docs/providers/aws/r/db_subnet_group.html.markdown b/website/source/docs/providers/aws/r/db_subnet_group.html.markdown
index 2937b54e72..e3dcd18ed9 100644
--- a/website/source/docs/providers/aws/r/db_subnet_group.html.markdown
+++ b/website/source/docs/providers/aws/r/db_subnet_group.html.markdown
@@ -17,6 +17,9 @@ resource "aws_db_subnet_group" "default" {
 name = "main"
 description = "Our main group of subnets"
 subnet_ids = ["${aws_subnet.frontend.id}", "${aws_subnet.backend.id}"]
+ tags {
+ Name = "My DB subnet group"
+ }
 }
 ```

@@ -27,6 +30,7 @@ The following arguments are supported:
 * `name` - (Required) The name of the DB subnet group.
 * `description` - (Required) The description of the DB subnet group.
 * `subnet_ids` - (Required) A list of VPC subnet IDs.
+* `tags` - (Optional) A mapping of tags to assign to the resource.
## Attributes Reference

From d9c4afce216cad4e9c1a89a11c8ae01deab597b8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marcello=20Lagan=C3=A0?=
Date: Tue, 1 Sep 2015 17:38:51 +0200
Subject: [PATCH 012/335] Modify tags on update and fix tests

---
 builtin/providers/aws/resource_aws_db_subnet_group.go | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/builtin/providers/aws/resource_aws_db_subnet_group.go b/builtin/providers/aws/resource_aws_db_subnet_group.go
index 709809c4a3..3de717e66c 100644
--- a/builtin/providers/aws/resource_aws_db_subnet_group.go
+++ b/builtin/providers/aws/resource_aws_db_subnet_group.go
@@ -139,7 +139,7 @@ func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) erro
 conn := meta.(*AWSClient).rdsconn
 arn, err := buildRDSARN(d, meta)
 if err != nil {
- log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", subnetGroup.DBSubnetGroupName)
+ log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", *subnetGroup.DBSubnetGroupName)
 } else {
 resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
 ResourceName: aws.String(arn),
 })
@@ -182,6 +182,15 @@ func resourceAwsDbSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) er
 return err
 }
 }
+
+ if arn, err := buildRDSARN(d, meta); err == nil {
+ if err := setTagsRDS(conn, d, arn); err != nil {
+ return err
+ } else {
+ d.SetPartial("tags")
+ }
+ }
+
 return resourceAwsDbSubnetGroupRead(d, meta)
 }

From 98808cb9b8f7f1126aebe9d1e1da715ec3ef1224 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marcello=20Lagan=C3=A0?=
Date: Wed, 2 Sep 2015 09:24:34 +0200
Subject: [PATCH 013/335] Build RDS subgrp ARN

---
 .../aws/resource_aws_db_subnet_group.go       | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/builtin/providers/aws/resource_aws_db_subnet_group.go b/builtin/providers/aws/resource_aws_db_subnet_group.go
index 3de717e66c..e6b17ea1fe 100644
--- a/builtin/providers/aws/resource_aws_db_subnet_group.go
+++ b/builtin/providers/aws/resource_aws_db_subnet_group.go
@@ -9,6 +9,7 @@ import (

 "github.com/aws/aws-sdk-go/aws"
 "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/iam"
 "github.com/aws/aws-sdk-go/service/rds"
 "github.com/hashicorp/terraform/helper/resource"
 "github.com/hashicorp/terraform/helper/schema"
@@ -137,7 +138,7 @@ func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) erro
 // list tags for resource
 // set tags
 conn := meta.(*AWSClient).rdsconn
- arn, err := buildRDSARN(d, meta)
+ arn, err := buildRDSsubgrpARN(d, meta)
 if err != nil {
 log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", *subnetGroup.DBSubnetGroupName)
 } else {
@@ -183,7 +184,7 @@ func resourceAwsDbSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) er
 }
 }

- if arn, err := buildRDSARN(d, meta); err == nil {
+ if arn, err := buildRDSsubgrpARN(d, meta); err == nil {
 if err := setTagsRDS(conn, d, arn); err != nil {
 return err
 } else {
 d.SetPartial("tags")
@@ -231,3 +232,17 @@ func resourceAwsDbSubnetGroupDeleteRefreshFunc(
 return d, "destroyed", nil
 }
 }
+
+func buildRDSsubgrpARN(d *schema.ResourceData, meta interface{}) (string, error) {
+ iamconn := meta.(*AWSClient).iamconn
+ region := meta.(*AWSClient).region
+ // A zero-value GetUserInput{} defers to the currently logged-in user
+ resp, err := iamconn.GetUser(&iam.GetUserInput{})
+ if err != nil {
+ return "", err
+ }
+ userARN := *resp.User.Arn
+ accountID := strings.Split(userARN, ":")[4]
+ arn := fmt.Sprintf("arn:aws:rds:%s:%s:subgrp:%s", region, accountID, d.Id()) + return arn, nil +} From 5001bb078e06566d2f9e7dd438aaafa103a6c8d7 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 2 Sep 2015 14:44:12 +0100 Subject: [PATCH 014/335] provider/aws: Add new resource - aws_iam_saml_provider --- builtin/providers/aws/provider.go | 1 + .../aws/resource_aws_iam_saml_provider.go | 101 ++++++++++++++++++ 2 files changed, 102 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_iam_saml_provider.go diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index a5029b400a..9a00edffc3 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -187,6 +187,7 @@ func Provider() terraform.ResourceProvider { "aws_iam_policy_attachment": resourceAwsIamPolicyAttachment(), "aws_iam_role_policy": resourceAwsIamRolePolicy(), "aws_iam_role": resourceAwsIamRole(), + "aws_iam_saml_provider": resourceAwsIamSamlProvider(), "aws_iam_server_certificate": resourceAwsIAMServerCertificate(), "aws_iam_user_policy": resourceAwsIamUserPolicy(), "aws_iam_user": resourceAwsIamUser(), diff --git a/builtin/providers/aws/resource_aws_iam_saml_provider.go b/builtin/providers/aws/resource_aws_iam_saml_provider.go new file mode 100644 index 0000000000..6a166d711e --- /dev/null +++ b/builtin/providers/aws/resource_aws_iam_saml_provider.go @@ -0,0 +1,101 @@ +package aws + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamSamlProvider() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamSamlProviderCreate, + Read: resourceAwsIamSamlProviderRead, + Update: resourceAwsIamSamlProviderUpdate, + Delete: resourceAwsIamSamlProviderDelete, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "valid_until": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "saml_metadata_document": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceAwsIamSamlProviderCreate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.CreateSAMLProviderInput{ + Name: aws.String(d.Get("name").(string)), + SAMLMetadataDocument: aws.String(d.Get("saml_metadata_document").(string)), + } + + out, err := iamconn.CreateSAMLProvider(input) + if err != nil { + return err + } + + d.SetId(*out.SAMLProviderArn) + + return resourceAwsIamSamlProviderRead(d, meta) +} + +func resourceAwsIamSamlProviderRead(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.GetSAMLProviderInput{ + SAMLProviderArn: aws.String(d.Id()), + } + out, err := iamconn.GetSAMLProvider(input) + if err != nil { + return err + } + + validUntil := out.ValidUntil.Format(time.RFC1123) + d.Set("valid_until", validUntil) + d.Set("saml_metadata_document", *out.SAMLMetadataDocument) + + return nil +} + +func resourceAwsIamSamlProviderUpdate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.UpdateSAMLProviderInput{ + SAMLProviderArn: aws.String(d.Id()), + SAMLMetadataDocument: aws.String(d.Get("saml_metadata_document").(string)), + } + _, err := iamconn.UpdateSAMLProvider(input) + if err != nil { + return err + } + + 
return resourceAwsIamSamlProviderRead(d, meta)
+}
+
+func resourceAwsIamSamlProviderDelete(d *schema.ResourceData, meta interface{}) error {
+ iamconn := meta.(*AWSClient).iamconn
+
+ input := &iam.DeleteSAMLProviderInput{
+ SAMLProviderArn: aws.String(d.Id()),
+ }
+ _, err := iamconn.DeleteSAMLProvider(input)
+
+ return err
+}

From ac762e5503b1c0661329efec543614a3a0fe34a3 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Wed, 2 Sep 2015 20:01:36 +0100
Subject: [PATCH 015/335] provider/aws: Add docs for aws_iam_saml_provider

---
 .../aws/r/iam_saml_provider.html.markdown     | 34 +++++++++++++++++++
 website/source/layouts/aws.erb                |  4 +++
 2 files changed, 38 insertions(+)
 create mode 100644 website/source/docs/providers/aws/r/iam_saml_provider.html.markdown

diff --git a/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown b/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown
new file mode 100644
index 0000000000..adba6d350d
--- /dev/null
+++ b/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown
@@ -0,0 +1,34 @@
+---
+layout: "aws"
+page_title: "AWS: aws_iam_saml_provider"
+sidebar_current: "docs-aws-resource-iam-saml-provider"
+description: |-
+  Provides an IAM SAML provider.
+---
+
+# aws\_iam\_saml\_provider
+
+Provides an IAM SAML provider.
+
+## Example Usage
+
+```
+resource "aws_iam_saml_provider" "default" {
+  name = "myprovider"
+  saml_metadata_document = "${file("saml-metadata.xml")}"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the provider to create.
+* `saml_metadata_document` - (Required) An XML document generated by an identity provider that supports SAML 2.0.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `arn` - The ARN assigned by AWS for this provider.
+* `valid_until` - The expiration date and time for the SAML provider in RFC1123 format, e.g. `Mon, 02 Jan 2006 15:04:05 MST`.
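+
+As a usage sketch only (the role name and SAML audience below are
+illustrative assumptions, not part of this resource's documentation), the
+exported `arn` is typically referenced from an IAM role trust policy that
+allows `sts:AssumeRoleWithSAML`:
+
+```
+resource "aws_iam_role" "saml" {
+  name = "saml-example"
+  assume_role_policy = <<EOF
+{
+  "Version": "2012-10-17",
+  "Statement": [{
+    "Effect": "Allow",
+    "Principal": {"Federated": "${aws_iam_saml_provider.default.arn}"},
+    "Action": "sts:AssumeRoleWithSAML",
+    "Condition": {"StringEquals": {"SAML:aud": "https://signin.aws.amazon.com/saml"}}
+  }]
+}
+EOF
+}
+```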
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 2bbff22f4b..e07992b842 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -179,6 +179,10 @@ aws_iam_role_policy + > + aws_iam_saml_provider + + > aws_iam_server_certificate From 5d215c42db6aef7b7cf86bc3dee37c41fa8327a1 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 2 Sep 2015 20:02:00 +0100 Subject: [PATCH 016/335] provider/aws: Add acceptance test for aws_iam_saml_provider --- .../resource_aws_iam_saml_provider_test.go | 79 +++++++++++++++++++ .../test-fixtures/saml-metadata-modified.xml | 14 ++++ .../aws/test-fixtures/saml-metadata.xml | 14 ++++ 3 files changed, 107 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_iam_saml_provider_test.go create mode 100644 builtin/providers/aws/test-fixtures/saml-metadata-modified.xml create mode 100644 builtin/providers/aws/test-fixtures/saml-metadata.xml diff --git a/builtin/providers/aws/resource_aws_iam_saml_provider_test.go b/builtin/providers/aws/resource_aws_iam_saml_provider_test.go new file mode 100644 index 0000000000..63ed395883 --- /dev/null +++ b/builtin/providers/aws/resource_aws_iam_saml_provider_test.go @@ -0,0 +1,79 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSIAMSamlProvider_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIAMSamlProviderDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccIAMSamlProviderConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckIAMSamlProvider("aws_iam_saml_provider.salesforce"), + ), + }, + resource.TestStep{ + Config: testAccIAMSamlProviderConfigUpdate, + Check: resource.ComposeTestCheckFunc( + testAccCheckIAMSamlProvider("aws_iam_saml_provider.salesforce"), + ), + }, + }, + }) +} + +func testAccCheckIAMSamlProviderDestroy(s *terraform.State) error { + if len(s.RootModule().Resources) > 0 { + return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources) + } + + return nil +} + +func testAccCheckIAMSamlProvider(id string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[id] + if !ok { + return fmt.Errorf("Not Found: %s", id) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + iamconn := testAccProvider.Meta().(*AWSClient).iamconn + _, err := iamconn.GetSAMLProvider(&iam.GetSAMLProviderInput{ + SAMLProviderArn: aws.String(rs.Primary.ID), + }) + + if err != nil { + return err + } + + return nil + } +} + +const testAccIAMSamlProviderConfig = ` +resource "aws_iam_saml_provider" "salesforce" { + name = "tf-salesforce-test" + saml_metadata_document = "${file("./test-fixtures/saml-metadata.xml")}" +} +` + +const testAccIAMSamlProviderConfigUpdate = ` +resource "aws_iam_saml_provider" "salesforce" { + name = "tf-salesforce-test" + saml_metadata_document = "${file("./test-fixtures/saml-metadata-modified.xml")}" +} +` diff --git a/builtin/providers/aws/test-fixtures/saml-metadata-modified.xml b/builtin/providers/aws/test-fixtures/saml-metadata-modified.xml new file mode 100644 index 0000000000..aaca7afc0b --- /dev/null +++ b/builtin/providers/aws/test-fixtures/saml-metadata-modified.xml @@ -0,0 +1,14 @@ + + + + + + 
MIIErDCCA5SgAwIBAgIOAU+PT8RBAAAAAHxJXEcwDQYJKoZIhvcNAQELBQAwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0EwHhcNMTUwOTAyMTgyNjUzWhcNMTcwOTAyMTIwMDAwWjCBkDEoMCYGA1UEAwwfU2VsZlNpZ25lZENlcnRfMDJTZXAyMDE1XzE4MjY1MzEYMBYGA1UECwwPMDBEMjQwMDAwMDBwQW9BMRcwFQYDVQQKDA5TYWxlc2ZvcmNlLmNvbTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzELMAkGA1UECAwCQ0ExDDAKBgNVBAYTA1VTQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJp/wTRr9n1IWJpkRTjNpep47OKJrD2E6rGbJ18TG2RxtIz+zCn2JwH2aP3TULh0r0hhcg/pecv51RRcG7O19DBBaTQ5+KuoICQyKZy07/yDXSiZontTwkEYs06ssTwTHUcRXbcwTKv16L7omt0MjIhTTGfvtLOYiPwyvKvzAHg4eNuAcli0duVM78UIBORtdmy9C9ZcMh8yRJo5aPBq85wsE3JXU58ytyZzCHTBLH+2xFQrjYnUSEW+FOEEpI7o33MVdFBvWWg1R17HkWzcve4C30lqOHqvxBzyESZ/N1mMlmSt8gPFyB+mUXY99StJDJpnytbY8DwSzMQUo/sOVB0CAwEAAaOCAQAwgf0wHQYDVR0OBBYEFByu1EQqRQS0bYQBKS9K5qwKi+6IMA8GA1UdEwEB/wQFMAMBAf8wgcoGA1UdIwSBwjCBv4AUHK7URCpFBLRthAEpL0rmrAqL7oihgZakgZMwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0GCDgFPj0/EQQAAAAB8SVxHMA0GCSqGSIb3DQEBCwUAA4IBAQA9O5o1tC71qJnkq+ABPo4A1aFKZVT/07GcBX4/wetcbYySL4Q2nR9pMgfPYYS1j+P2E3viPsQwPIWDUBwFkNsjjX5DSGEkLAioVGKRwJshRSCSynMcsVZbQkfBUiZXqhM0wzvoa/ALvGD+aSSb1m+x7lEpDYNwQKWaUW2VYcHWv9wjujMyy7dlj8E/jqM71mw7ThNl6k4+3RQ802dMa14txm8pkF0vZgfpV3tkqhBqtjBAicVCaveqr3r3iGqjvyilBgdY+0NR8szqzm7CD/Bkb22+/IgM/mXQuL9KHD/WADlSGmYKmG3SSahmcZxznYCnzcRNN9LVuXlz5cbljmBj + + + + urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified + + + + diff --git a/builtin/providers/aws/test-fixtures/saml-metadata.xml b/builtin/providers/aws/test-fixtures/saml-metadata.xml new file mode 100644 index 0000000000..69e353b770 --- /dev/null +++ b/builtin/providers/aws/test-fixtures/saml-metadata.xml @@ -0,0 +1,14 @@ + + + + + + MIIErDCCA5SgAwIBAgIOAU+PT8RBAAAAAHxJXEcwDQYJKoZIhvcNAQELBQAwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0EwHhcNMTUwOTAyMTgyNjUzWhcNMTcwOTAyMTIwMDAwWjCBkDEoMCYGA1UEAwwfU2VsZlNpZ25lZENlcnRfMDJTZXAyMDE1XzE4MjY1MzEYMBYGA1UECwwPMDBEMjQwMDAwMDBwQW9BMRcwFQYDVQQKDA5TYWxlc2ZvcmNlLmNvbTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzELMAkGA1UECAwCQ0ExDDAKBgNVBAYTA1VTQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJp/wTRr9n1IWJpkRTjNpep47OKJrD2E6rGbJ18TG2RxtIz+zCn2JwH2aP3TULh0r0hhcg/pecv51RRcG7O19DBBaTQ5+KuoICQyKZy07/yDXSiZontTwkEYs06ssTwTHUcRXbcwTKv16L7omt0MjIhTTGfvtLOYiPwyvKvzAHg4eNuAcli0duVM78UIBORtdmy9C9ZcMh8yRJo5aPBq85wsE3JXU58ytyZzCHTBLH+2xFQrjYnUSEW+FOEEpI7o33MVdFBvWWg1R17HkWzcve4C30lqOHqvxBzyESZ/N1mMlmSt8gPFyB+mUXY99StJDJpnytbY8DwSzMQUo/sOVB0CAwEAAaOCAQAwgf0wHQYDVR0OBBYEFByu1EQqRQS0bYQBKS9K5qwKi+6IMA8GA1UdEwEB/wQFMAMBAf8wgcoGA1UdIwSBwjCBv4AUHK7URCpFBLRthAEpL0rmrAqL7oihgZakgZMwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0GCDgFPj0/EQQAAAAB8SVxHMA0GCSqGSIb3DQEBCwUAA4IBAQA9O5o1tC71qJnkq+ABPo4A1aFKZVT/07GcBX4/wetcbYySL4Q2nR9pMgfPYYS1j+P2E3viPsQwPIWDUBwFkNsjjX5DSGEkLAioVGKRwJshRSCSynMcsVZbQkfBUiZXqhM0wzvoa/ALvGD+aSSb1m+x7lEpDYNwQKWaUW2VYcHWv9wjujMyy7dlj8E/jqM71mw7ThNl6k4+3RQ802dMa14txm8pkF0vZgfpV3tkqhBqtjBAicVCaveqr3r3iGqjvyilBgdY+0NR8szqzm7CD/Bkb22+/IgM/mXQuL9KHD/WADlSGmYKmG3SSahmcZxznYCnzcRNN9LVuXlz5cbljmBj + + + 
+ urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified
+
+
+
+

From b06f0bbf4aa4d2e09b5715adf4fb6a455735a807 Mon Sep 17 00:00:00 2001
From: Joshua Semar
Date: Thu, 3 Sep 2015 10:33:59 -0500
Subject: [PATCH 017/335] fix documentation

---
 .../aws/r/launch_configuration.html.markdown  | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/website/source/docs/providers/aws/r/launch_configuration.html.markdown b/website/source/docs/providers/aws/r/launch_configuration.html.markdown
index ea96503dc0..85d45bcb03 100644
--- a/website/source/docs/providers/aws/r/launch_configuration.html.markdown
+++ b/website/source/docs/providers/aws/r/launch_configuration.html.markdown
@@ -23,10 +23,10 @@ resource "aws_launch_configuration" "as_conf" {
 ## Using with AutoScaling Groups

 Launch Configurations cannot be updated after creation with the Amazon
-Web Service API. In order to update a Launch Configuration, Terraform will
-destroy the existing resource and create a replacement. If order to effectively
-use a Launch Configuration resource with an[AutoScaling Group resource][1],
-it's recommend to omit the Launch Configuration `name` attribute, and
+Web Service API. In order to update a Launch Configuration, Terraform will
+destroy the existing resource and create a replacement. In order to effectively
+use a Launch Configuration resource with an [AutoScaling Group resource][1],
+it's recommended to omit the Launch Configuration `name` attribute, and
 specify `create_before_destroy` in a [lifecycle][2] block, as shown:

 ```
@@ -69,7 +69,12 @@ The following arguments are supported:
 * `user_data` - (Optional) The user data to provide when launching the instance.
 * `enable_monitoring` - (Optional) Enables/disables detailed monitoring. This is enabled by default.
 * `ebs_optimized` - (Optional) If true, the launched EC2 instance will be EBS-optimized.
-* `block_device_mapping` - (Optional) A list of block devices to add. Their keys are documented below.
+* `root_block_device` - (Optional) Customize details about the root block
+  device of the instance. See [Block Devices](#block-devices) below for details.
+* `ebs_block_device` - (Optional) Additional EBS block devices to attach to the
+  instance. See [Block Devices](#block-devices) below for details.
+* `ephemeral_block_device` - (Optional) Customize Ephemeral (also known as
+  "Instance Store") volumes on the instance. See [Block Devices](#block-devices) below for details.

 ## Block devices

From 10c96afa9b3daade66d1911b1c8575f35c8aa786 Mon Sep 17 00:00:00 2001
From: Mike Fiedler
Date: Tue, 8 Sep 2015 09:10:54 -0400
Subject: [PATCH 018/335] Update aws_db_instance `db_subnet_group_name`

When launching a new RDS instance in a VPC-default AWS account, it is not
apparent from the available parameters how to control which VPC the new RDS
instance lands in.

The following works:

```
resource "aws_db_subnet_group" "foo" {
 name = "foo"
 description = "DB Subnet for foo"
 subnet_ids = ["${aws_subnet.foo_1a.id}", "${aws_subnet.foo_1b.id}"]
}

resource "aws_db_instance" "bar" {
 ...
 db_subnet_group_name = "${aws_db_subnet_group.foo.name}"
 ...
} ``` Hopefully this doc update will help others --- website/source/docs/providers/aws/r/db_instance.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/db_instance.html.markdown b/website/source/docs/providers/aws/r/db_instance.html.markdown index c2f0063f4c..adf2dafe6b 100644 --- a/website/source/docs/providers/aws/r/db_instance.html.markdown +++ b/website/source/docs/providers/aws/r/db_instance.html.markdown @@ -65,7 +65,7 @@ The following arguments are supported: * `vpc_security_group_ids` - (Optional) List of VPC security groups to associate. * `security_group_names` - (Optional/Deprecated) List of DB Security Groups to associate. Only used for [DB Instances on the _EC2-Classic_ Platform](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html#USER_VPC.FindDefaultVPC). -* `db_subnet_group_name` - (Optional) Name of DB subnet group +* `db_subnet_group_name` - (Optional) Name of DB subnet group. DB instance will be created in the VPC associated with the DB subnet group. If unspecified, will be created in the `default` VPC, or in EC2 Classic, if available. * `parameter_group_name` - (Optional) Name of the DB parameter group to associate. * `storage_encrypted` - (Optional) Specifies whether the DB instance is encrypted. The default is `false` if not specified. * `apply_immediately` - (Optional) Specifies whether any database modifications From 506aae2f285ea216ec6f93a5b7b441b8b091981b Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 8 Sep 2015 13:15:30 -0500 Subject: [PATCH 019/335] provider/aws: configurable capacity waiting duration move wait for capacity timeout from a constant to a configurable --- .../aws/resource_aws_autoscaling_group.go | 36 ++++++++++++++++--- .../aws/r/autoscaling_group.html.markdown | 23 ++++++++---- 2 files changed, 48 insertions(+), 11 deletions(-) diff --git a/builtin/providers/aws/resource_aws_autoscaling_group.go b/builtin/providers/aws/resource_aws_autoscaling_group.go index 771bda2e3a..b96d6885a7 100644 --- a/builtin/providers/aws/resource_aws_autoscaling_group.go +++ b/builtin/providers/aws/resource_aws_autoscaling_group.go @@ -120,6 +120,25 @@ func resourceAwsAutoscalingGroup() *schema.Resource { Set: schema.HashString, }, + "wait_for_capacity_timeout": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "10m", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + duration, err := time.ParseDuration(value) + if err != nil { + errors = append(errors, fmt.Errorf( + "%q cannot be parsed as a duration: %s", k, err)) + } + if duration < 0 { + errors = append(errors, fmt.Errorf( + "%q must be greater than zero", k)) + } + return + }, + }, + "tag": autoscalingTagsSchema(), }, } @@ -445,8 +464,6 @@ func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) }) } -var waitForASGCapacityTimeout = 10 * time.Minute - // Waits for a minimum number of healthy instances to show up as healthy in the // ASG before continuing. Waits up to `waitForASGCapacityTimeout` for // "desired_capacity", or "min_size" if desired capacity is not specified. 
@@ -461,9 +478,20 @@ func waitForASGCapacity(d *schema.ResourceData, meta interface{}) error {
 }
 wantELB := d.Get("min_elb_capacity").(int)

- log.Printf("[DEBUG] Waiting for capacity: %d ASG, %d ELB", wantASG, wantELB)
+ wait, err := time.ParseDuration(d.Get("wait_for_capacity_timeout").(string))
+ if err != nil {
+ return err
+ }

- return resource.Retry(waitForASGCapacityTimeout, func() error {
+ if wait == 0 {
+ log.Printf("[DEBUG] Capacity timeout set to 0, skipping capacity waiting.")
+ return nil
+ }
+
+ log.Printf("[DEBUG] Waiting %s for capacity: %d ASG, %d ELB",
+ wait, wantASG, wantELB)
+
+ return resource.Retry(wait, func() error {
 g, err := getAwsAutoscalingGroup(d, meta)
 if err != nil {
 return resource.RetryError{Err: err}
diff --git a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown
index 022b1cf715..caf272c946 100644
--- a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown
+++ b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown
@@ -63,6 +63,11 @@ The following arguments are supported:
 * `vpc_zone_identifier` (Optional) A list of subnet IDs to launch resources in.
 * `termination_policies` (Optional) A list of policies to decide how the instances in the auto scale group should be terminated.
 * `tag` (Optional) A list of tag blocks. Tags documented below.
+* `wait_for_capacity_timeout` (Default: "10m") A maximum
+  [duration](https://golang.org/pkg/time/#ParseDuration) that Terraform should
+  wait for ASG instances to be healthy before timing out. (See also [Waiting
+  for Capacity](#waiting-for-capacity) below.) Setting this to "0" causes
+  Terraform to skip all Capacity Waiting behavior.

 Tags support the following:

@@ -110,9 +115,12 @@ Terraform considers an instance "healthy" when the ASG reports `HealthStatus:
 Docs](https://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html)
 for more information on an ASG's lifecycle.

-Terraform will wait for healthy instances for up to 10 minutes. If ASG creation
-is taking more than a few minutes, it's worth investigating for scaling activity
-errors, which can be caused by problems with the selected Launch Configuration.
+Terraform will wait for healthy instances for up to
+`wait_for_capacity_timeout`. If ASG creation is taking more than a few minutes,
+it's worth investigating for scaling activity errors, which can be caused by
+problems with the selected Launch Configuration.
+
+Setting `wait_for_capacity_timeout` to `"0"` disables ASG Capacity waiting.

 #### Waiting for ELB Capacity

@@ -121,8 +129,9 @@ Balancers. If `min_elb_capacity` is set, Terraform will wait for that number of
 Instances to be `"InService"` in all attached `load_balancers`. This can be
 used to ensure that service is being provided before Terraform moves on.

-As with ASG Capacity, Terraform will wait for up to 10 minutes for
-`"InService"` instances. If ASG creation takes more than a few minutes, this
-could indicate one of a number of configuration problems. See the [AWS Docs on
-Load Balancer Troubleshooting](https://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-troubleshooting.html)
+As with ASG Capacity, Terraform will wait for up to `wait_for_capacity_timeout`
+for `"InService"` instances. If ASG creation takes more than a few minutes,
+this could indicate one of a number of configuration problems.
See the [AWS +Docs on Load Balancer +Troubleshooting](https://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-troubleshooting.html) for more information. From 03f94d66aef7fde266ad8e2f831c373ea37b99ad Mon Sep 17 00:00:00 2001 From: zpatrick Date: Wed, 9 Sep 2015 21:13:36 +0000 Subject: [PATCH 020/335] adding content field to s3_bucket_object --- .../aws/resource_aws_s3_bucket_object.go | 31 ++++++++++++--- .../aws/resource_aws_s3_bucket_object_test.go | 39 +++++++++++++++++-- 2 files changed, 61 insertions(+), 9 deletions(-) diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object.go b/builtin/providers/aws/resource_aws_s3_bucket_object.go index 9d46952d07..8a2e8370b0 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object.go @@ -4,6 +4,8 @@ import ( "fmt" "log" "os" + "io" + "bytes" "github.com/hashicorp/terraform/helper/schema" @@ -34,10 +36,18 @@ func resourceAwsS3BucketObject() *schema.Resource { "source": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, + ConflictsWith: []string{"content"}, }, + "content": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"source"}, + }, + "etag": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -51,19 +61,28 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro bucket := d.Get("bucket").(string) key := d.Get("key").(string) - source := d.Get("source").(string) + var body io.ReadSeeker - file, err := os.Open(source) + if v, ok := d.GetOk("source"); ok { + source := v.(string) + file, err := os.Open(source) + if err != nil { + return fmt.Errorf("Error opening S3 bucket object source (%s): %s", source, err) + } - if err != nil { - return fmt.Errorf("Error opening S3 bucket object source (%s): %s", source, err) + body = file + } else if v, ok := d.GetOk("content"); ok { + content := v.(string) + body = bytes.NewReader([]byte(content)) + } else { + return fmt.Errorf("Must specify \"source\" or \"content\" field") } resp, err := s3conn.PutObject( &s3.PutObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), - Body: file, + Body: body, }) if err != nil { diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go index 4f947736ae..6311dd7c32 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go @@ -15,7 +15,7 @@ import ( var tf, err = ioutil.TempFile("", "tf") -func TestAccAWSS3BucketObject_basic(t *testing.T) { +func TestAccAWSS3BucketObject_source(t *testing.T) { // first write some data to the tempfile just so it's not 0 bytes. 
ioutil.WriteFile(tf.Name(), []byte("{anything will do }"), 0644) resource.Test(t, resource.TestCase{ @@ -29,7 +29,26 @@ func TestAccAWSS3BucketObject_basic(t *testing.T) { CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSS3BucketObjectConfig, + Config: testAccAWSS3BucketObjectConfigSource, + Check: testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object"), + }, + }, + }) +} + +func TestAccAWSS3BucketObject_content(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if err != nil { + panic(err) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSS3BucketObjectConfigContent, Check: testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object"), }, }, @@ -86,7 +105,7 @@ func testAccCheckAWSS3BucketObjectExists(n string) resource.TestCheckFunc { } var randomBucket = randInt -var testAccAWSS3BucketObjectConfig = fmt.Sprintf(` +var testAccAWSS3BucketObjectConfigSource = fmt.Sprintf(` resource "aws_s3_bucket" "object_bucket" { bucket = "tf-object-test-bucket-%d" } @@ -97,3 +116,17 @@ resource "aws_s3_bucket_object" "object" { source = "%s" } `, randomBucket, tf.Name()) + + +var testAccAWSS3BucketObjectConfigContent = fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" +} + +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + content = "some_bucket_content" +} +`, randomBucket) + From 141c419cc70827ecc97211889913f6cdd1b59cb3 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 9 Sep 2015 23:17:57 -0700 Subject: [PATCH 021/335] Docs for aws_s3_bucket content argument. --- .../docs/providers/aws/r/s3_bucket_object.html.markdown | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown b/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown index 63d201b826..14286a603f 100644 --- a/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown +++ b/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown @@ -28,7 +28,11 @@ The following arguments are supported: * `bucket` - (Required) The name of the bucket to put the file in. * `key` - (Required) The name of the object once it is in the bucket. -* `source` - (Required) The path to the source file being uploaded to the bucket. +* `source` - (Required unless `content` given) The path to the source file being uploaded to the bucket. +* `content` - (Required unless `source` given) The literal content being uploaded to the bucket. + +Either `source` or `content` must be provided to specify the bucket content. +These two arguments are mutually-exclusive. 
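+
+As an illustrative sketch (the bucket and key names below are placeholders),
+`content` suits small inline payloads, while `source` suits files that
+already exist on disk:
+
+```
+resource "aws_s3_bucket_object" "inline" {
+  bucket  = "my-example-bucket"
+  key     = "config/app.json"
+  content = "{\"enabled\": true}"
+}
+```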
## Attributes Reference From 5256a6df6b7677d26b63efe1c5a932e2cce884b3 Mon Sep 17 00:00:00 2001 From: zpatrick Date: Thu, 10 Sep 2015 18:37:17 +0000 Subject: [PATCH 022/335] fix formatting --- .../aws/resource_aws_s3_bucket_object.go | 21 ++++++++++--------- .../aws/resource_aws_s3_bucket_object_test.go | 11 ++++------ 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object.go b/builtin/providers/aws/resource_aws_s3_bucket_object.go index 8a2e8370b0..3a4cc4df25 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object.go @@ -1,11 +1,11 @@ package aws import ( + "bytes" "fmt" + "io" "log" "os" - "io" - "bytes" "github.com/hashicorp/terraform/helper/schema" @@ -35,18 +35,18 @@ func resourceAwsS3BucketObject() *schema.Resource { }, "source": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, ConflictsWith: []string{"content"}, }, - "content": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, + "content": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, ConflictsWith: []string{"source"}, - }, + }, "etag": &schema.Schema{ Type: schema.TypeString, @@ -138,3 +138,4 @@ func resourceAwsS3BucketObjectDelete(d *schema.ResourceData, meta interface{}) e } return nil } + diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go index 6311dd7c32..0e0651ad00 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go @@ -107,22 +107,19 @@ func testAccCheckAWSS3BucketObjectExists(n string) resource.TestCheckFunc { var randomBucket = randInt var testAccAWSS3BucketObjectConfigSource = fmt.Sprintf(` resource "aws_s3_bucket" "object_bucket" { - bucket = "tf-object-test-bucket-%d" + bucket = "tf-object-test-bucket-%d" } - resource "aws_s3_bucket_object" "object" { - bucket = "${aws_s3_bucket.object_bucket.bucket}" - key = "test-key" - source = "%s" + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + source = "%s" } `, randomBucket, tf.Name()) - var testAccAWSS3BucketObjectConfigContent = fmt.Sprintf(` resource "aws_s3_bucket" "object_bucket" { bucket = "tf-object-test-bucket-%d" } - resource "aws_s3_bucket_object" "object" { bucket = "${aws_s3_bucket.object_bucket.bucket}" key = "test-key" From 863a7383aa5b50ab23d497d7d895d1da392539e8 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Thu, 10 Sep 2015 16:08:48 -0500 Subject: [PATCH 023/335] doc: module sources from private github repos --- .../source/docs/modules/sources.html.markdown | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/website/source/docs/modules/sources.html.markdown b/website/source/docs/modules/sources.html.markdown index b0a2b4d0cc..d9e6a13169 100644 --- a/website/source/docs/modules/sources.html.markdown +++ b/website/source/docs/modules/sources.html.markdown @@ -81,6 +81,30 @@ You can use the same parameters to GitHub repositories as you can generic Git repositories (such as tags or branches). See the documentation for generic Git repositories for more information. 
+#### Private GitHub Repos
+
+If you need Terraform to be able to fetch modules from private GitHub repos on
+a remote machine (like Atlas or a CI server), you'll need to provide
+Terraform with credentials that can be used to authenticate as a user with read
+access to the private repo.
+
+First, create a [machine
+user](https://developer.github.com/guides/managing-deploy-keys/#machine-users)
+with access to read from the private repo in question, then embed this user's
+credentials into the source field:
+
+```
+module "private-infra" {
+  source = "git::https://MACHINE-USER:MACHINE-PASS@github.com/org/privatemodules//modules/foo"
+}
+```
+
+Note that Terraform does not yet support interpolations in the `source` field,
+so the machine username and password will have to be embedded directly into the
+source string. You can track
+[GH-1439](https://github.com/hashicorp/terraform/issues/1439) to learn when this
+limitation is lifted.
+
 ## BitBucket

 Terraform will automatically recognize BitBucket URLs and turn them into

From 3d77d158f7270472446b9e1fe461487c9763c91c Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Mon, 14 Sep 2015 10:38:29 +0100
Subject: [PATCH 024/335] remote/s3: Add support for ACL

---
 state/remote/s3.go | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/state/remote/s3.go b/state/remote/s3.go
index c2d897dd00..26330d1126 100644
--- a/state/remote/s3.go
+++ b/state/remote/s3.go
@@ -4,6 +4,7 @@ import (
 "bytes"
 "fmt"
 "io"
+ "log"
 "os"
 "strconv"

@@ -45,6 +46,11 @@ func s3Factory(conf map[string]string) (Client, error) {
 serverSideEncryption = v
 }

+ acl := ""
+ if raw, ok := conf["acl"]; ok {
+ acl = raw
+ }
+
 accessKeyId := conf["access_key"]
 secretAccessKey := conf["secret_key"]

@@ -77,6 +83,7 @@ func s3Factory(conf map[string]string) (Client, error) {
 bucketName: bucketName,
 keyName: keyName,
 serverSideEncryption: serverSideEncryption,
+ acl: acl,
 }, nil
}

@@ -85,6 +92,7 @@ type S3Client struct {
 bucketName string
 keyName string
 serverSideEncryption bool
+ acl string
}

func (c *S3Client) Get() (*Payload, error) {
@@ -140,6 +148,12 @@ func (c *S3Client) Put(data []byte) error {
 i.ServerSideEncryption = aws.String("AES256")
 }

+ if c.acl != "" {
+ i.ACL = aws.String(c.acl)
+ }
+
+ log.Printf("[DEBUG] Uploading remote state to S3: %#v", i)
+
 if _, err := c.nativeClient.PutObject(i); err == nil {
 return nil
 } else {

From 4f7f20ba23b3b46680005a0efd4f06c80ad9a2b5 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Mon, 14 Sep 2015 10:36:55 +0100
Subject: [PATCH 025/335] remote/s3: Add some docs for supported parameters

---
 website/source/docs/commands/remote-config.html.markdown | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/website/source/docs/commands/remote-config.html.markdown b/website/source/docs/commands/remote-config.html.markdown
index c7586ac0e4..73a06f8211 100644
--- a/website/source/docs/commands/remote-config.html.markdown
+++ b/website/source/docs/commands/remote-config.html.markdown
@@ -57,6 +57,13 @@ The following backends are supported:
 in the `access_key`, `secret_key` and `region` variables
 respectively, but passing credentials this way is not recommended since they
 will be included in cleartext inside the persisted state.
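+
+  As an illustrative sketch (the bucket, key, and ACL values below are only
+  examples), the ACL added by this change is passed like any other backend
+  parameter:
+
+  ```
+  terraform remote config \
+      -backend=S3 \
+      -backend-config="bucket=example-state-bucket" \
+      -backend-config="key=network/terraform.tfstate" \
+      -backend-config="encrypt=true" \
+      -backend-config="acl=bucket-owner-full-control"
+  ```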
+ Other supported parameters include: + * `bucket` - the name of the S3 bucket + * `key` - path where to place/look for state file inside the bucket + * `encrypt` - whether to enable [server side encryption](http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) + of the state file + * `acl` - [Canned ACL](http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) + to be applied to the state file. * HTTP - Stores the state using a simple REST client. State will be fetched via GET, updated via POST, and purged with DELETE. Requires the `address` variable. From 55f3c8c76498cf181a1f3605b0e796e5cac6be07 Mon Sep 17 00:00:00 2001 From: thrashr888 Date: Mon, 14 Sep 2015 16:50:53 -0700 Subject: [PATCH 026/335] provider/aws: aws_elasticache_cluster normalizes name to lowercase --- .../aws/resource_aws_elasticache_cluster.go | 12 +++++++++++- .../aws/resource_aws_elasticache_cluster_test.go | 5 ++++- .../aws/r/elasticache_cluster.html.markdown | 4 ++-- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index 080c56ac9c..520ea13427 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -28,6 +28,12 @@ func resourceAwsElasticacheCluster() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, + StateFunc: func(val interface{}) string { + // Elasticache normalizes cluster ids to lowercase, + // so we have to do this too or else we can end up + // with non-converging diffs. + return strings.ToLower(val.(string)) + }, }, "engine": &schema.Schema{ Type: schema.TypeString, @@ -190,7 +196,11 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error creating Elasticache: %s", err) } - d.SetId(*resp.CacheCluster.CacheClusterId) + // Assign the cluster id as the resource ID + // Elasticache always retains the id in lower case, so we have to + // mimic that or else we won't be able to refresh a resource whose + // name contained uppercase characters. + d.SetId(strings.ToLower(*resp.CacheCluster.CacheClusterId)) pending := []string{"creating"} stateConf := &resource.StateChangeConf{ diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go index caa14a8df7..173ca21ea7 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go @@ -163,7 +163,10 @@ resource "aws_security_group" "bar" { } resource "aws_elasticache_cluster" "bar" { - cluster_id = "tf-test-%03d" + // Including uppercase letters in this name to ensure + // that we correctly handle the fact that the API + // normalizes names to lowercase. + cluster_id = "tf-TEST-%03d" node_type = "cache.m1.small" num_cache_nodes = 1 engine = "redis" diff --git a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown index 953b78a9c0..dc4df4c2a0 100644 --- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown @@ -27,8 +27,8 @@ resource "aws_elasticache_cluster" "bar" { The following arguments are supported: -* `cluster_id` – (Required) Group identifier. 
This parameter is stored as a -lowercase string +* `cluster_id` – (Required) Group identifier. Elasticache converts + this name to lowercase * `engine` – (Required) Name of the cache engine to be used for this cache cluster. Valid values for this parameter are `memcached` or `redis` From 4fd5c7254046bd7ef3d1b6872dcefe45f15fcbc2 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Tue, 15 Sep 2015 15:52:43 -0400 Subject: [PATCH 027/335] Fix "malformed url" bug in instance template when using network name --- .../providers/google/resource_compute_instance_template.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/builtin/providers/google/resource_compute_instance_template.go b/builtin/providers/google/resource_compute_instance_template.go index 060f4bb393..ce2c727349 100644 --- a/builtin/providers/google/resource_compute_instance_template.go +++ b/builtin/providers/google/resource_compute_instance_template.go @@ -305,11 +305,9 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) (error, []*compute. for i := 0; i < networksCount; i++ { prefix := fmt.Sprintf("network_interface.%d", i) - source := "global/networks/default" + source := "global/networks/" if v, ok := d.GetOk(prefix + ".network"); ok { - if v.(string) != "default" { - source = v.(string) - } + source += v.(string) } // Build the networkInterface From 32832ba030c1d2b53274c134b6125ab7cd37d653 Mon Sep 17 00:00:00 2001 From: Kevin Nuckolls Date: Tue, 15 Sep 2015 16:00:12 -0500 Subject: [PATCH 028/335] adds triggers to the null resource --- builtin/providers/null/resource.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/builtin/providers/null/resource.go b/builtin/providers/null/resource.go index 0badf346cd..bd1e6f89c7 100644 --- a/builtin/providers/null/resource.go +++ b/builtin/providers/null/resource.go @@ -19,7 +19,13 @@ func resource() *schema.Resource { Update: resourceUpdate, Delete: resourceDelete, - Schema: map[string]*schema.Schema{}, + Schema: map[string]*schema.Schema{ + "triggers": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, } } From b224abb7a9b09247c3913c28d68870a6471efe86 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 16 Sep 2015 22:02:28 +0100 Subject: [PATCH 029/335] provider/aws: Add cloudwatch_log_group --- builtin/providers/aws/config.go | 37 +++-- builtin/providers/aws/provider.go | 1 + .../aws/resource_aws_cloudwatch_log_group.go | 146 ++++++++++++++++++ 3 files changed, 168 insertions(+), 16 deletions(-) create mode 100644 builtin/providers/aws/resource_aws_cloudwatch_log_group.go diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index a57c65c1b2..c1fc7ca92f 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -12,6 +12,7 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ecs" @@ -41,22 +42,23 @@ type Config struct { } type AWSClient struct { - cloudwatchconn *cloudwatch.CloudWatch - dynamodbconn *dynamodb.DynamoDB - ec2conn *ec2.EC2 - ecsconn *ecs.ECS - elbconn *elb.ELB - autoscalingconn *autoscaling.AutoScaling - s3conn *s3.S3 - sqsconn *sqs.SQS - snsconn *sns.SNS - r53conn *route53.Route53 - region string - rdsconn *rds.RDS - iamconn *iam.IAM - kinesisconn *kinesis.Kinesis - 
elasticacheconn *elasticache.ElastiCache - lambdaconn *lambda.Lambda + cloudwatchconn *cloudwatch.CloudWatch + cloudwatchlogsconn *cloudwatchlogs.CloudWatchLogs + dynamodbconn *dynamodb.DynamoDB + ec2conn *ec2.EC2 + ecsconn *ecs.ECS + elbconn *elb.ELB + autoscalingconn *autoscaling.AutoScaling + s3conn *s3.S3 + sqsconn *sqs.SQS + snsconn *sns.SNS + r53conn *route53.Route53 + region string + rdsconn *rds.RDS + iamconn *iam.IAM + kinesisconn *kinesis.Kinesis + elasticacheconn *elasticache.ElastiCache + lambdaconn *lambda.Lambda } // Client configures and returns a fully initialized AWSClient @@ -156,6 +158,9 @@ func (c *Config) Client() (interface{}, error) { log.Println("[INFO] Initializing CloudWatch SDK connection") client.cloudwatchconn = cloudwatch.New(awsConfig) + + log.Println("[INFO] Initializing CloudWatch Logs connection") + client.cloudwatchlogsconn = cloudwatchlogs.New(awsConfig) } if len(errs) > 0 { diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 6b2c16c7ab..16e4f3789d 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -163,6 +163,7 @@ func Provider() terraform.ResourceProvider { "aws_autoscaling_group": resourceAwsAutoscalingGroup(), "aws_autoscaling_notification": resourceAwsAutoscalingNotification(), "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(), + "aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(), "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), "aws_customer_gateway": resourceAwsCustomerGateway(), "aws_db_instance": resourceAwsDbInstance(), diff --git a/builtin/providers/aws/resource_aws_cloudwatch_log_group.go b/builtin/providers/aws/resource_aws_cloudwatch_log_group.go new file mode 100644 index 0000000000..e4f7236b2d --- /dev/null +++ b/builtin/providers/aws/resource_aws_cloudwatch_log_group.go @@ -0,0 +1,146 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +) + +func resourceAwsCloudWatchLogGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudWatchLogGroupCreate, + Read: resourceAwsCloudWatchLogGroupRead, + Update: resourceAwsCloudWatchLogGroupUpdate, + Delete: resourceAwsCloudWatchLogGroupDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "retention_in_days": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsCloudWatchLogGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + log.Printf("[DEBUG] Creating CloudWatch Log Group: %s", d.Get("name").(string)) + _, err := conn.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String(d.Get("name").(string)), + }) + if err != nil { + return fmt.Errorf("Creating CloudWatch Log Group failed: %s", err) + } + + d.SetId(d.Get("name").(string)) + + log.Println("[INFO] CloudWatch Log Group created") + + return resourceAwsCloudWatchLogGroupUpdate(d, meta) +} + +func resourceAwsCloudWatchLogGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + log.Printf("[DEBUG] Reading CloudWatch Log Group: %q", d.Get("name").(string)) + lg, err := lookupCloudWatchLogGroup(conn, d.Get("name").(string), nil) + if err != 
nil { + return err + } + + log.Printf("[DEBUG] Found Log Group: %#v", *lg) + + d.Set("arn", *lg.Arn) + d.Set("name", *lg.LogGroupName) + + if lg.RetentionInDays != nil { + d.Set("retention_in_days", *lg.RetentionInDays) + } + + return nil +} + +func lookupCloudWatchLogGroup(conn *cloudwatchlogs.CloudWatchLogs, + name string, nextToken *string) (*cloudwatchlogs.LogGroup, error) { + input := &cloudwatchlogs.DescribeLogGroupsInput{ + LogGroupNamePrefix: aws.String(name), + NextToken: nextToken, + } + resp, err := conn.DescribeLogGroups(input) + if err != nil { + return nil, err + } + + for _, lg := range resp.LogGroups { + if *lg.LogGroupName == name { + return lg, nil + } + } + + if resp.NextToken != nil { + return lookupCloudWatchLogGroup(conn, name, resp.NextToken) + } + + return nil, fmt.Errorf("CloudWatch Log Group %q not found", name) +} + +func resourceAwsCloudWatchLogGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + name := d.Get("name").(string) + log.Printf("[DEBUG] Updating CloudWatch Log Group: %q", name) + + if d.HasChange("retention_in_days") { + var err error + + if v, ok := d.GetOk("retention_in_days"); ok { + input := cloudwatchlogs.PutRetentionPolicyInput{ + LogGroupName: aws.String(name), + RetentionInDays: aws.Int64(int64(v.(int))), + } + log.Printf("[DEBUG] Setting retention for CloudWatch Log Group: %q: %s", name, input) + _, err = conn.PutRetentionPolicy(&input) + } else { + log.Printf("[DEBUG] Deleting retention for CloudWatch Log Group: %q", name) + _, err = conn.DeleteRetentionPolicy(&cloudwatchlogs.DeleteRetentionPolicyInput{ + LogGroupName: aws.String(name), + }) + } + + return err + } + + return resourceAwsCloudWatchLogGroupRead(d, meta) +} + +func resourceAwsCloudWatchLogGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + log.Printf("[INFO] Deleting CloudWatch Log Group: %s", d.Id()) + _, err := conn.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String(d.Get("name").(string)), + }) + if err != nil { + return fmt.Errorf("Error deleting CloudWatch Log Group: %s", err) + } + log.Println("[INFO] CloudWatch Log Group deleted") + + d.SetId("") + + return nil +} From 7b0626adb6fa59fbefbc648c8456e4a8796fe274 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 16 Sep 2015 22:02:45 +0100 Subject: [PATCH 030/335] provider/aws: Add docs for CloudWatch Log Group --- .../aws/r/cloudwatch_log_group.html.markdown | 33 +++++++++++++++++++ website/source/layouts/aws.erb | 4 +++ 2 files changed, 37 insertions(+) create mode 100644 website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown new file mode 100644 index 0000000000..e784c63893 --- /dev/null +++ b/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown @@ -0,0 +1,33 @@ +--- +layout: "aws" +page_title: "AWS: aws_cloudwatch_log_group" +sidebar_current: "docs-aws-resource-cloudwatch-log-group" +description: |- + Provides a CloudWatch Log Group resource. +--- + +# aws\_cloudwatch\_log\_group + +Provides a CloudWatch Log Group resource. 
+ +## Example Usage + +``` +resource "aws_cloudwatch_log_group" "yada" { + name = "Yada" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the log group +* `retention_in_days` - (Optional) Specifies the number of days + you want to retain log events in the specified log group. + +## Attributes Reference + +The following attributes are exported: + +* `arn` - The Amazon Resource Name (ARN) specifying the log group. diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 22801a5075..5c67ad58ef 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -15,6 +15,10 @@ CloudWatch Resources From 9f106bc98e9dc985d2f1ed5824925ded8ef918c3 Mon Sep 17 00:00:00 2001 From: Clint Date: Mon, 12 Oct 2015 14:24:14 -0500 Subject: [PATCH 211/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d04f464e22..9160afcc8c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ IMPROVEMENTS: * provider/aws: Support IAM role names (previously just ARNs) in `aws_ecs_service.iam_role` [GH-3061] * provider/aws: Add update method to RDS Subnet groups, can modify subnets without recreating [GH-3053] * provider/aws: Paginate notifications returned for ASG Notifications [GH-3043] + * provider/aws: Adds additional S3 Bucket Object inputs [GH-3265] * provider/aws: add `ses_smtp_password` to `aws_iam_access_key` [GH-3165] * provider/aws: read `iam_instance_profile` for `aws_instance` and save to state [GH-3167] * provider/aws: allow `instance` to be computed in `aws_eip` [GH-3036] From ed25948651799ff7a03575faa2a9d3f5b016900b Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 12 Oct 2015 14:24:57 -0500 Subject: [PATCH 212/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9160afcc8c..4ca088c721 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ FEATURES: * **New resource: `aws_elasticsearch_domain`** [GH-3443] * **New resource: `aws_directory_service_directory`** [GH-3228] * **New resource: `aws_autoscaling_lifecycle_hook`** [GH-3351] + * **New resource: `aws_placement_group`** [GH-3457] IMPROVEMENTS: From 810d0882792cedbb5ce3898b6d93673e5c461265 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 12 Oct 2015 15:50:04 -0500 Subject: [PATCH 213/335] Fix whitespace formatting with go fmt --- builtin/providers/aws/resource_aws_eip.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/builtin/providers/aws/resource_aws_eip.go b/builtin/providers/aws/resource_aws_eip.go index bf7a9e3c5c..4b369ee606 100644 --- a/builtin/providers/aws/resource_aws_eip.go +++ b/builtin/providers/aws/resource_aws_eip.go @@ -30,13 +30,13 @@ func resourceAwsEip() *schema.Resource { "instance": &schema.Schema{ Type: schema.TypeString, Optional: true, - Computed: true, + Computed: true, }, "network_interface": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, + Type: schema.TypeString, + Optional: true, + Computed: true, }, "allocation_id": &schema.Schema{ From d3c5c0d85f72025536152921f80d72f63ba3580b Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 18 Aug 2015 09:56:54 -0500 Subject: [PATCH 214/335] provider/aws: Update Security Group Rules to Version 2 --- .../aws/resource_aws_security_group_rule.go | 83 +++-- ...esource_aws_security_group_rule_migrate.go | 8 +- .../resource_aws_security_group_rule_test.go | 284 
++++++++++++++++-- 3 files changed, 332 insertions(+), 43 deletions(-) diff --git a/builtin/providers/aws/resource_aws_security_group_rule.go b/builtin/providers/aws/resource_aws_security_group_rule.go index 97b6d40250..bd40c284f4 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule.go +++ b/builtin/providers/aws/resource_aws_security_group_rule.go @@ -20,7 +20,7 @@ func resourceAwsSecurityGroupRule() *schema.Resource { Read: resourceAwsSecurityGroupRuleRead, Delete: resourceAwsSecurityGroupRuleDelete, - SchemaVersion: 1, + SchemaVersion: 2, MigrateState: resourceAwsSecurityGroupRuleMigrateState, Schema: map[string]*schema.Schema{ @@ -67,14 +67,15 @@ func resourceAwsSecurityGroupRule() *schema.Resource { Optional: true, ForceNew: true, Computed: true, - ConflictsWith: []string{"cidr_blocks"}, + ConflictsWith: []string{"cidr_blocks", "self"}, }, "self": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + ConflictsWith: []string{"cidr_blocks"}, }, }, } @@ -142,7 +143,7 @@ information and instructions for recovery. Error message: %s`, awsErr.Message()) ruleType, autherr) } - d.SetId(ipPermissionIDHash(ruleType, perm)) + d.SetId(ipPermissionIDHash(sg_id, ruleType, perm)) return resourceAwsSecurityGroupRuleRead(d, meta) } @@ -158,24 +159,67 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) } var rule *ec2.IpPermission + var rules []*ec2.IpPermission ruleType := d.Get("type").(string) - var rl []*ec2.IpPermission switch ruleType { case "ingress": - rl = sg.IpPermissions + rules = sg.IpPermissions default: - rl = sg.IpPermissionsEgress + rules = sg.IpPermissionsEgress } - for _, r := range rl { - if d.Id() == ipPermissionIDHash(ruleType, r) { - rule = r + p := expandIPPerm(d, sg) + + if len(rules) == 0 { + return fmt.Errorf("No IPPerms") + } + + for _, r := range rules { + if r.ToPort != nil && *p.ToPort != *r.ToPort { + continue + } + + if r.FromPort != nil && *p.FromPort != *r.FromPort { + continue + } + + if r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol { + continue + } + + remaining := len(p.IpRanges) + for _, ip := range p.IpRanges { + for _, rip := range r.IpRanges { + if *ip.CidrIp == *rip.CidrIp { + remaining-- + } + } + } + + if remaining > 0 { + continue } + + remaining = len(p.UserIdGroupPairs) + for _, ip := range p.UserIdGroupPairs { + for _, rip := range r.UserIdGroupPairs { + if *ip.GroupId == *rip.GroupId { + remaining-- + } + } + } + + if remaining > 0 { + continue + } + + log.Printf("[DEBUG] Found rule for Security Group Rule (%s): %s", d.Id(), r) + rule = r } if rule == nil { - log.Printf("[DEBUG] Unable to find matching %s Security Group Rule for Group %s", - ruleType, sg_id) + log.Printf("[DEBUG] Unable to find matching %s Security Group Rule (%s) for Group %s", + ruleType, d.Id(), sg_id) d.SetId("") return nil } @@ -186,14 +230,14 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) d.Set("type", ruleType) var cb []string - for _, c := range rule.IpRanges { + for _, c := range p.IpRanges { cb = append(cb, *c.CidrIp) } d.Set("cidr_blocks", cb) - if len(rule.UserIdGroupPairs) > 0 { - s := rule.UserIdGroupPairs[0] + if len(p.UserIdGroupPairs) > 0 { + s := p.UserIdGroupPairs[0] d.Set("source_security_group_id", *s.GroupId) } @@ -285,8 +329,9 @@ func (b ByGroupPair) Less(i, j int) bool { panic("mismatched security group rules, may be a terraform bug") } -func 
ipPermissionIDHash(ruleType string, ip *ec2.IpPermission) string { +func ipPermissionIDHash(sg_id, ruleType string, ip *ec2.IpPermission) string { var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("%s-", sg_id)) if ip.FromPort != nil && *ip.FromPort > 0 { buf.WriteString(fmt.Sprintf("%d-", *ip.FromPort)) } diff --git a/builtin/providers/aws/resource_aws_security_group_rule_migrate.go b/builtin/providers/aws/resource_aws_security_group_rule_migrate.go index 98ecced70f..3dd6f5f726 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_migrate.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_migrate.go @@ -17,6 +17,12 @@ func resourceAwsSecurityGroupRuleMigrateState( case 0: log.Println("[INFO] Found AWS Security Group State v0; migrating to v1") return migrateSGRuleStateV0toV1(is) + case 1: + log.Println("[INFO] Found AWS Security Group State v1; migrating to v2") + // migrating to version 2 of the schema is the same as 0->1, since the + // method signature has changed now and will use the security group id in + // the hash + return migrateSGRuleStateV0toV1(is) default: return is, fmt.Errorf("Unexpected schema version: %d", v) } @@ -37,7 +43,7 @@ func migrateSGRuleStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceS } log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - newID := ipPermissionIDHash(is.Attributes["type"], perm) + newID := ipPermissionIDHash(is.Attributes["security_group_id"], is.Attributes["type"], perm) is.Attributes["id"] = newID is.ID = newID log.Printf("[DEBUG] Attributes after migration: %#v, new id: %s", is.Attributes, newID) diff --git a/builtin/providers/aws/resource_aws_security_group_rule_test.go b/builtin/providers/aws/resource_aws_security_group_rule_test.go index c160703f36..a00385ba79 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_test.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_test.go @@ -2,7 +2,7 @@ package aws import ( "fmt" - "reflect" + "log" "testing" "github.com/aws/aws-sdk-go/aws" @@ -90,15 +90,15 @@ func TestIpPermissionIDHash(t *testing.T) { Type string Output string }{ - {simple, "ingress", "sg-82613597"}, - {egress, "egress", "sg-363054720"}, - {egress_all, "egress", "sg-2766285362"}, - {vpc_security_group_source, "egress", "sg-2661404947"}, - {security_group_source, "egress", "sg-1841245863"}, + {simple, "ingress", "sg-3403497314"}, + {egress, "egress", "sg-1173186295"}, + {egress_all, "egress", "sg-766323498"}, + {vpc_security_group_source, "egress", "sg-351225364"}, + {security_group_source, "egress", "sg-2198807188"}, } for _, tc := range cases { - actual := ipPermissionIDHash(tc.Type, tc.Input) + actual := ipPermissionIDHash("sg-12345", tc.Type, tc.Input) if actual != tc.Output { t.Errorf("input: %s - %s\noutput: %s", tc.Type, tc.Input, actual) } @@ -132,7 +132,7 @@ func TestAccAWSSecurityGroupRule_Ingress_VPC(t *testing.T) { Config: testAccAWSSecurityGroupRuleIngressConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes(&group, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"), resource.TestCheckResourceAttr( "aws_security_group_rule.ingress_1", "from_port", "80"), testRuleCount, @@ -169,7 +169,7 @@ func TestAccAWSSecurityGroupRule_Ingress_Classic(t *testing.T) { Config: testAccAWSSecurityGroupRuleIngressClassicConfig, Check: resource.ComposeTestCheckFunc( 
testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes(&group, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"), resource.TestCheckResourceAttr( "aws_security_group_rule.ingress_1", "from_port", "80"), testRuleCount, @@ -231,7 +231,7 @@ func TestAccAWSSecurityGroupRule_Egress(t *testing.T) { Config: testAccAWSSecurityGroupRuleEgressConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes(&group, "egress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.egress_1", &group, nil, "egress"), ), }, }, @@ -256,6 +256,92 @@ func TestAccAWSSecurityGroupRule_SelfReference(t *testing.T) { }) } +// testing partial match implementation +func TestAccAWSSecurityGroupRule_PartialMatching_Basic(t *testing.T) { + var group ec2.SecurityGroup + + p := ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(80), + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{ + &ec2.IpRange{CidrIp: aws.String("10.0.2.0/24")}, + &ec2.IpRange{CidrIp: aws.String("10.0.3.0/24")}, + &ec2.IpRange{CidrIp: aws.String("10.0.4.0/24")}, + }, + } + + o := ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(80), + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{ + &ec2.IpRange{CidrIp: aws.String("10.0.5.0/24")}, + }, + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSecurityGroupRulePartialMatching, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress", &group, &p, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.other", &group, &o, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.nat_ingress", &group, &o, "ingress"), + ), + }, + }, + }) +} + +func TestAccAWSSecurityGroupRule_PartialMatching_Source(t *testing.T) { + var group ec2.SecurityGroup + var nat ec2.SecurityGroup + var p ec2.IpPermission + + // This function creates the expected IPPermission with the group id from an + // external security group, needed because Security Group IDs are generated on + // AWS side and can't be known ahead of time. 
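+	// Note: resource.ComposeTestCheckFunc runs its check functions in order,
+	// so this setup closure is wired into the test step ahead of the
+	// attribute check that reads p.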
+ setupSG := func(*terraform.State) error { + if nat.GroupId == nil { + return fmt.Errorf("Error: nat group has nil GroupID") + } + + p = ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(80), + IpProtocol: aws.String("tcp"), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + &ec2.UserIdGroupPair{GroupId: nat.GroupId}, + }, + } + + return nil + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSecurityGroupRulePartialMatching_Source, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), + testAccCheckAWSSecurityGroupRuleExists("aws_security_group.nat", &nat), + setupSG, + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.source_ingress", &group, &p, "ingress"), + ), + }, + }, + }) + +} + func testAccCheckAWSSecurityGroupRuleDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).ec2conn @@ -319,14 +405,27 @@ func testAccCheckAWSSecurityGroupRuleExists(n string, group *ec2.SecurityGroup) } } -func testAccCheckAWSSecurityGroupRuleAttributes(group *ec2.SecurityGroup, ruleType string) resource.TestCheckFunc { +func testAccCheckAWSSecurityGroupRuleAttributes(n string, group *ec2.SecurityGroup, p *ec2.IpPermission, ruleType string) resource.TestCheckFunc { return func(s *terraform.State) error { - p := &ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(8000), - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}}, + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Security Group Rule Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Security Group Rule is set") } + + if p == nil { + p = &ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(8000), + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}}, + } + } + + var matchingRule *ec2.IpPermission var rules []*ec2.IpPermission if ruleType == "ingress" { rules = group.IpPermissions @@ -338,15 +437,53 @@ func testAccCheckAWSSecurityGroupRuleAttributes(group *ec2.SecurityGroup, ruleTy return fmt.Errorf("No IPPerms") } - // Compare our ingress - if !reflect.DeepEqual(rules[0], p) { - return fmt.Errorf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - rules[0], - p) + for _, r := range rules { + if r.ToPort != nil && *p.ToPort != *r.ToPort { + continue + } + + if r.FromPort != nil && *p.FromPort != *r.FromPort { + continue + } + + if r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol { + continue + } + + remaining := len(p.IpRanges) + for _, ip := range p.IpRanges { + for _, rip := range r.IpRanges { + if *ip.CidrIp == *rip.CidrIp { + remaining-- + } + } + } + + if remaining > 0 { + continue + } + + remaining = len(p.UserIdGroupPairs) + for _, ip := range p.UserIdGroupPairs { + for _, rip := range r.UserIdGroupPairs { + if *ip.GroupId == *rip.GroupId { + remaining-- + } + } + } + + if remaining > 0 { + continue + } + matchingRule = r } - return nil + if matchingRule != nil { + log.Printf("[DEBUG] Matching rule found : %s", matchingRule) + return nil + } + + return fmt.Errorf("Error here\n\tlooking for %s, wasn't found in %s", p, rules) } } @@ -480,3 +617,104 @@ resource "aws_security_group_rule" "self" { security_group_id = "${aws_security_group.web.id}" } ` + +const 
testAccAWSSecurityGroupRulePartialMatching = `
+resource "aws_vpc" "default" {
+  cidr_block = "10.0.0.0/16"
+  tags {
+    Name = "tf-sg-rule-bug"
+  }
+}
+
+resource "aws_security_group" "web" {
+  name = "tf-other"
+  vpc_id = "${aws_vpc.default.id}"
+  tags {
+    Name = "tf-other-sg"
+  }
+}
+
+resource "aws_security_group" "nat" {
+  name = "tf-nat"
+  vpc_id = "${aws_vpc.default.id}"
+  tags {
+    Name = "tf-nat-sg"
+  }
+}
+
+resource "aws_security_group_rule" "ingress" {
+  type = "ingress"
+  from_port = 80
+  to_port = 80
+  protocol = "tcp"
+  cidr_blocks = ["10.0.2.0/24", "10.0.3.0/24", "10.0.4.0/24"]
+
+  security_group_id = "${aws_security_group.web.id}"
+}
+
+resource "aws_security_group_rule" "other" {
+  type = "ingress"
+  from_port = 80
+  to_port = 80
+  protocol = "tcp"
+  cidr_blocks = ["10.0.5.0/24"]
+
+  security_group_id = "${aws_security_group.web.id}"
+}
+
+// same as above, but different group, to guard against bad hashing
+resource "aws_security_group_rule" "nat_ingress" {
+  type = "ingress"
+  from_port = 80
+  to_port = 80
+  protocol = "tcp"
+  cidr_blocks = ["10.0.2.0/24", "10.0.3.0/24", "10.0.4.0/24"]
+
+  security_group_id = "${aws_security_group.nat.id}"
+}
+`
+
+const testAccAWSSecurityGroupRulePartialMatching_Source = `
+resource "aws_vpc" "default" {
+  cidr_block = "10.0.0.0/16"
+  tags {
+    Name = "tf-sg-rule-bug"
+  }
+}
+
+resource "aws_security_group" "web" {
+  name = "tf-other"
+  vpc_id = "${aws_vpc.default.id}"
+  tags {
+    Name = "tf-other-sg"
+  }
+}
+
+resource "aws_security_group" "nat" {
+  name = "tf-nat"
+  vpc_id = "${aws_vpc.default.id}"
+  tags {
+    Name = "tf-nat-sg"
+  }
+}
+
+resource "aws_security_group_rule" "source_ingress" {
+  type = "ingress"
+  from_port = 80
+  to_port = 80
+  protocol = "tcp"
+
+  source_security_group_id = "${aws_security_group.nat.id}"
+  security_group_id = "${aws_security_group.web.id}"
+}
+
+resource "aws_security_group_rule" "other_ingress" {
+  type = "ingress"
+  from_port = 80
+  to_port = 80
+  protocol = "tcp"
+  cidr_blocks = ["10.0.2.0/24", "10.0.3.0/24", "10.0.4.0/24"]
+
+  security_group_id = "${aws_security_group.web.id}"
+}
+`

From e0bb04b82287565f33baa1dc4cdef8953000616e Mon Sep 17 00:00:00 2001
From: Clint Shryock
Date: Tue, 18 Aug 2015 10:17:15 -0500
Subject: [PATCH 215/335] update expected hash for migration test

---
 .../aws/resource_aws_security_group_rule_migrate_test.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go b/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go
index 664f050393..f9352fa278 100644
--- a/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go
+++ b/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go
@@ -27,7 +27,7 @@ func TestAWSSecurityGroupRuleMigrateState(t *testing.T) {
 				"from_port":                "0",
 				"source_security_group_id": "sg-11877275",
 			},
-			Expected: "sg-3766347571",
+			Expected: "sg-2889201120",
 		},
 		"v0_2": {
 			StateVersion: 0,
@@ -44,7 +44,7 @@ func TestAWSSecurityGroupRuleMigrateState(t *testing.T) {
 				"cidr_blocks.2": "172.16.3.0/24",
 				"cidr_blocks.3": "172.16.4.0/24",
 				"cidr_blocks.#": "4"},
-			Expected: "sg-4100229787",
+			Expected: "sg-1826358977",
 		},
 	}

From 03aac9f42b7c1159abe681951acb5a3ac1aea34b Mon Sep 17 00:00:00 2001
From: Clint Shryock
Date: Fri, 21 Aug 2015 09:58:56 -0500
Subject: [PATCH 216/335] Expand on an error case with more descriptive error

---
 builtin/providers/aws/resource_aws_eip.go    | 22 +++++++++----------
 .../aws/resource_aws_security_group_rule.go  |  8 ++++---
.../resource_aws_security_group_rule_test.go | 2 +- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/builtin/providers/aws/resource_aws_eip.go b/builtin/providers/aws/resource_aws_eip.go index 4b369ee606..0a7801beed 100644 --- a/builtin/providers/aws/resource_aws_eip.go +++ b/builtin/providers/aws/resource_aws_eip.go @@ -27,19 +27,19 @@ func resourceAwsEip() *schema.Resource { ForceNew: true, }, - "instance": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, + "instance": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, - "network_interface": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, + "network_interface": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, - "allocation_id": &schema.Schema{ + "allocation_id": &schema.Schema{ Type: schema.TypeString, Computed: true, }, diff --git a/builtin/providers/aws/resource_aws_security_group_rule.go b/builtin/providers/aws/resource_aws_security_group_rule.go index bd40c284f4..a1f078a827 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule.go +++ b/builtin/providers/aws/resource_aws_security_group_rule.go @@ -171,7 +171,9 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) p := expandIPPerm(d, sg) if len(rules) == 0 { - return fmt.Errorf("No IPPerms") + return fmt.Errorf( + "[WARN] No %s rules were found for Security Group (%s) looking for Security Group Rule (%s)", + ruleType, *sg.GroupName, d.Id()) } for _, r := range rules { @@ -198,7 +200,7 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) if remaining > 0 { continue - } + } remaining = len(p.UserIdGroupPairs) for _, ip := range p.UserIdGroupPairs { @@ -211,7 +213,7 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) if remaining > 0 { continue - } + } log.Printf("[DEBUG] Found rule for Security Group Rule (%s): %s", d.Id(), r) rule = r diff --git a/builtin/providers/aws/resource_aws_security_group_rule_test.go b/builtin/providers/aws/resource_aws_security_group_rule_test.go index a00385ba79..29e831446c 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_test.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_test.go @@ -257,7 +257,7 @@ func TestAccAWSSecurityGroupRule_SelfReference(t *testing.T) { } // testing partial match implementation -func TestAccAWSSecurityGroupRule_PartialMatching_Basic(t *testing.T) { +func TestAccAWSSecurityGroupRule_PartialMatching_basic(t *testing.T) { var group ec2.SecurityGroup p := ec2.IpPermission{ From 9f3a17e9b4074b420431a8e345b377f543a654d2 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 12 Oct 2015 15:19:42 -0500 Subject: [PATCH 217/335] update sg rule ids --- builtin/providers/aws/resource_aws_eip.go | 22 +- .../aws/resource_aws_security_group_rule.go | 126 ++++---- ...esource_aws_security_group_rule_migrate.go | 14 +- ...ce_aws_security_group_rule_migrate_test.go | 4 +- .../resource_aws_security_group_rule_test.go | 270 +++++++++--------- 5 files changed, 218 insertions(+), 218 deletions(-) diff --git a/builtin/providers/aws/resource_aws_eip.go b/builtin/providers/aws/resource_aws_eip.go index 0a7801beed..4b369ee606 100644 --- a/builtin/providers/aws/resource_aws_eip.go +++ b/builtin/providers/aws/resource_aws_eip.go @@ -27,19 +27,19 @@ func resourceAwsEip() *schema.Resource { ForceNew: true, }, - "instance": &schema.Schema{ - Type: schema.TypeString, - 
Optional: true, - Computed: true, - }, + "instance": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, - "network_interface": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, + "network_interface": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, - "allocation_id": &schema.Schema{ + "allocation_id": &schema.Schema{ Type: schema.TypeString, Computed: true, }, diff --git a/builtin/providers/aws/resource_aws_security_group_rule.go b/builtin/providers/aws/resource_aws_security_group_rule.go index a1f078a827..55499cfd58 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule.go +++ b/builtin/providers/aws/resource_aws_security_group_rule.go @@ -20,7 +20,7 @@ func resourceAwsSecurityGroupRule() *schema.Resource { Read: resourceAwsSecurityGroupRuleRead, Delete: resourceAwsSecurityGroupRuleDelete, - SchemaVersion: 2, + SchemaVersion: 2, MigrateState: resourceAwsSecurityGroupRuleMigrateState, Schema: map[string]*schema.Schema{ @@ -67,15 +67,15 @@ func resourceAwsSecurityGroupRule() *schema.Resource { Optional: true, ForceNew: true, Computed: true, - ConflictsWith: []string{"cidr_blocks", "self"}, + ConflictsWith: []string{"cidr_blocks", "self"}, }, "self": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - ConflictsWith: []string{"cidr_blocks"}, + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + ConflictsWith: []string{"cidr_blocks"}, }, }, } @@ -143,7 +143,7 @@ information and instructions for recovery. Error message: %s`, awsErr.Message()) ruleType, autherr) } - d.SetId(ipPermissionIDHash(sg_id, ruleType, perm)) + d.SetId(ipPermissionIDHash(sg_id, ruleType, perm)) return resourceAwsSecurityGroupRuleRead(d, meta) } @@ -159,69 +159,69 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) } var rule *ec2.IpPermission - var rules []*ec2.IpPermission + var rules []*ec2.IpPermission ruleType := d.Get("type").(string) switch ruleType { case "ingress": - rules = sg.IpPermissions + rules = sg.IpPermissions default: - rules = sg.IpPermissionsEgress + rules = sg.IpPermissionsEgress } - p := expandIPPerm(d, sg) + p := expandIPPerm(d, sg) - if len(rules) == 0 { - return fmt.Errorf( - "[WARN] No %s rules were found for Security Group (%s) looking for Security Group Rule (%s)", - ruleType, *sg.GroupName, d.Id()) - } + if len(rules) == 0 { + return fmt.Errorf( + "[WARN] No %s rules were found for Security Group (%s) looking for Security Group Rule (%s)", + ruleType, *sg.GroupName, d.Id()) + } - for _, r := range rules { - if r.ToPort != nil && *p.ToPort != *r.ToPort { - continue - } - - if r.FromPort != nil && *p.FromPort != *r.FromPort { - continue - } - - if r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol { - continue - } - - remaining := len(p.IpRanges) - for _, ip := range p.IpRanges { - for _, rip := range r.IpRanges { - if *ip.CidrIp == *rip.CidrIp { - remaining-- - } - } - } - - if remaining > 0 { - continue - } - - remaining = len(p.UserIdGroupPairs) - for _, ip := range p.UserIdGroupPairs { - for _, rip := range r.UserIdGroupPairs { - if *ip.GroupId == *rip.GroupId { - remaining-- - } - } - } - - if remaining > 0 { - continue + for _, r := range rules { + if r.ToPort != nil && *p.ToPort != *r.ToPort { + continue } - log.Printf("[DEBUG] Found rule for Security Group Rule (%s): %s", d.Id(), r) - rule = r + if r.FromPort != nil && *p.FromPort != *r.FromPort { + continue + } + + if 
r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol { + continue + } + + remaining := len(p.IpRanges) + for _, ip := range p.IpRanges { + for _, rip := range r.IpRanges { + if *ip.CidrIp == *rip.CidrIp { + remaining-- + } + } + } + + if remaining > 0 { + continue + } + + remaining = len(p.UserIdGroupPairs) + for _, ip := range p.UserIdGroupPairs { + for _, rip := range r.UserIdGroupPairs { + if *ip.GroupId == *rip.GroupId { + remaining-- + } + } + } + + if remaining > 0 { + continue + } + + log.Printf("[DEBUG] Found rule for Security Group Rule (%s): %s", d.Id(), r) + rule = r } if rule == nil { - log.Printf("[DEBUG] Unable to find matching %s Security Group Rule (%s) for Group %s", - ruleType, d.Id(), sg_id) + log.Printf("[DEBUG] Unable to find matching %s Security Group Rule (%s) for Group %s", + ruleType, d.Id(), sg_id) d.SetId("") return nil } @@ -232,14 +232,14 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) d.Set("type", ruleType) var cb []string - for _, c := range p.IpRanges { + for _, c := range p.IpRanges { cb = append(cb, *c.CidrIp) } d.Set("cidr_blocks", cb) - if len(p.UserIdGroupPairs) > 0 { - s := p.UserIdGroupPairs[0] + if len(p.UserIdGroupPairs) > 0 { + s := p.UserIdGroupPairs[0] d.Set("source_security_group_id", *s.GroupId) } @@ -333,7 +333,7 @@ func (b ByGroupPair) Less(i, j int) bool { func ipPermissionIDHash(sg_id, ruleType string, ip *ec2.IpPermission) string { var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("%s-", sg_id)) + buf.WriteString(fmt.Sprintf("%s-", sg_id)) if ip.FromPort != nil && *ip.FromPort > 0 { buf.WriteString(fmt.Sprintf("%d-", *ip.FromPort)) } @@ -373,7 +373,7 @@ func ipPermissionIDHash(sg_id, ruleType string, ip *ec2.IpPermission) string { } } - return fmt.Sprintf("sg-%d", hashcode.String(buf.String())) + return fmt.Sprintf("sgrule-%d", hashcode.String(buf.String())) } func expandIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup) *ec2.IpPermission { diff --git a/builtin/providers/aws/resource_aws_security_group_rule_migrate.go b/builtin/providers/aws/resource_aws_security_group_rule_migrate.go index 3dd6f5f726..0b57f3f171 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_migrate.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_migrate.go @@ -17,12 +17,12 @@ func resourceAwsSecurityGroupRuleMigrateState( case 0: log.Println("[INFO] Found AWS Security Group State v0; migrating to v1") return migrateSGRuleStateV0toV1(is) - case 1: - log.Println("[INFO] Found AWS Security Group State v1; migrating to v2") - // migrating to version 2 of the schema is the same as 0->1, since the - // method signature has changed now and will use the security group id in - // the hash - return migrateSGRuleStateV0toV1(is) + case 1: + log.Println("[INFO] Found AWS Security Group State v1; migrating to v2") + // migrating to version 2 of the schema is the same as 0->1, since the + // method signature has changed now and will use the security group id in + // the hash + return migrateSGRuleStateV0toV1(is) default: return is, fmt.Errorf("Unexpected schema version: %d", v) } @@ -43,7 +43,7 @@ func migrateSGRuleStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceS } log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - newID := ipPermissionIDHash(is.Attributes["security_group_id"], is.Attributes["type"], perm) + newID := ipPermissionIDHash(is.Attributes["security_group_id"], is.Attributes["type"], perm) is.Attributes["id"] = newID is.ID = newID log.Printf("[DEBUG] Attributes 
after migration: %#v, new id: %s", is.Attributes, newID) diff --git a/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go b/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go index f9352fa278..87e3a1d63c 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go @@ -27,7 +27,7 @@ func TestAWSSecurityGroupRuleMigrateState(t *testing.T) { "from_port": "0", "source_security_group_id": "sg-11877275", }, - Expected: "sg-2889201120", + Expected: "sg-2889201120", }, "v0_2": { StateVersion: 0, @@ -44,7 +44,7 @@ func TestAWSSecurityGroupRuleMigrateState(t *testing.T) { "cidr_blocks.2": "172.16.3.0/24", "cidr_blocks.3": "172.16.4.0/24", "cidr_blocks.#": "4"}, - Expected: "sg-1826358977", + Expected: "sg-1826358977", }, } diff --git a/builtin/providers/aws/resource_aws_security_group_rule_test.go b/builtin/providers/aws/resource_aws_security_group_rule_test.go index 29e831446c..f06dd3e137 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_test.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_test.go @@ -2,7 +2,7 @@ package aws import ( "fmt" - "log" + "log" "testing" "github.com/aws/aws-sdk-go/aws" @@ -90,15 +90,15 @@ func TestIpPermissionIDHash(t *testing.T) { Type string Output string }{ - {simple, "ingress", "sg-3403497314"}, - {egress, "egress", "sg-1173186295"}, - {egress_all, "egress", "sg-766323498"}, - {vpc_security_group_source, "egress", "sg-351225364"}, - {security_group_source, "egress", "sg-2198807188"}, + {simple, "ingress", "sgrule-3403497314"}, + {egress, "egress", "sgrule-1173186295"}, + {egress_all, "egress", "sgrule-766323498"}, + {vpc_security_group_source, "egress", "sgrule-351225364"}, + {security_group_source, "egress", "sgrule-2198807188"}, } for _, tc := range cases { - actual := ipPermissionIDHash("sg-12345", tc.Type, tc.Input) + actual := ipPermissionIDHash("sg-12345", tc.Type, tc.Input) if actual != tc.Output { t.Errorf("input: %s - %s\noutput: %s", tc.Type, tc.Input, actual) } @@ -132,7 +132,7 @@ func TestAccAWSSecurityGroupRule_Ingress_VPC(t *testing.T) { Config: testAccAWSSecurityGroupRuleIngressConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"), resource.TestCheckResourceAttr( "aws_security_group_rule.ingress_1", "from_port", "80"), testRuleCount, @@ -169,7 +169,7 @@ func TestAccAWSSecurityGroupRule_Ingress_Classic(t *testing.T) { Config: testAccAWSSecurityGroupRuleIngressClassicConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress_1", &group, nil, "ingress"), resource.TestCheckResourceAttr( "aws_security_group_rule.ingress_1", "from_port", "80"), testRuleCount, @@ -231,7 +231,7 @@ func TestAccAWSSecurityGroupRule_Egress(t *testing.T) { Config: testAccAWSSecurityGroupRuleEgressConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.egress_1", 
&group, nil, "egress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.egress_1", &group, nil, "egress"), ), }, }, @@ -258,87 +258,87 @@ func TestAccAWSSecurityGroupRule_SelfReference(t *testing.T) { // testing partial match implementation func TestAccAWSSecurityGroupRule_PartialMatching_basic(t *testing.T) { - var group ec2.SecurityGroup + var group ec2.SecurityGroup - p := ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(80), - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{ - &ec2.IpRange{CidrIp: aws.String("10.0.2.0/24")}, - &ec2.IpRange{CidrIp: aws.String("10.0.3.0/24")}, - &ec2.IpRange{CidrIp: aws.String("10.0.4.0/24")}, - }, - } + p := ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(80), + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{ + &ec2.IpRange{CidrIp: aws.String("10.0.2.0/24")}, + &ec2.IpRange{CidrIp: aws.String("10.0.3.0/24")}, + &ec2.IpRange{CidrIp: aws.String("10.0.4.0/24")}, + }, + } - o := ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(80), - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{ - &ec2.IpRange{CidrIp: aws.String("10.0.5.0/24")}, - }, - } + o := ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(80), + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{ + &ec2.IpRange{CidrIp: aws.String("10.0.5.0/24")}, + }, + } - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSecurityGroupRulePartialMatching, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress", &group, &p, "ingress"), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.other", &group, &o, "ingress"), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.nat_ingress", &group, &o, "ingress"), - ), - }, - }, - }) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSecurityGroupRulePartialMatching, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.ingress", &group, &p, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.other", &group, &o, "ingress"), + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.nat_ingress", &group, &o, "ingress"), + ), + }, + }, + }) } func TestAccAWSSecurityGroupRule_PartialMatching_Source(t *testing.T) { - var group ec2.SecurityGroup - var nat ec2.SecurityGroup - var p ec2.IpPermission + var group ec2.SecurityGroup + var nat ec2.SecurityGroup + var p ec2.IpPermission - // This function creates the expected IPPermission with the group id from an - // external security group, needed because Security Group IDs are generated on - // AWS side and can't be known ahead of time. 
- setupSG := func(*terraform.State) error { - if nat.GroupId == nil { - return fmt.Errorf("Error: nat group has nil GroupID") - } + // This function creates the expected IPPermission with the group id from an + // external security group, needed because Security Group IDs are generated on + // AWS side and can't be known ahead of time. + setupSG := func(*terraform.State) error { + if nat.GroupId == nil { + return fmt.Errorf("Error: nat group has nil GroupID") + } - p = ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(80), - IpProtocol: aws.String("tcp"), - UserIdGroupPairs: []*ec2.UserIdGroupPair{ - &ec2.UserIdGroupPair{GroupId: nat.GroupId}, - }, - } + p = ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(80), + IpProtocol: aws.String("tcp"), + UserIdGroupPairs: []*ec2.UserIdGroupPair{ + &ec2.UserIdGroupPair{GroupId: nat.GroupId}, + }, + } - return nil - } + return nil + } - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSSecurityGroupRulePartialMatching_Source, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), - testAccCheckAWSSecurityGroupRuleExists("aws_security_group.nat", &nat), - setupSG, - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.source_ingress", &group, &p, "ingress"), - ), - }, - }, - }) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSecurityGroupRulePartialMatching_Source, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), + testAccCheckAWSSecurityGroupRuleExists("aws_security_group.nat", &nat), + setupSG, + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.source_ingress", &group, &p, "ingress"), + ), + }, + }, + }) } @@ -407,25 +407,25 @@ func testAccCheckAWSSecurityGroupRuleExists(n string, group *ec2.SecurityGroup) func testAccCheckAWSSecurityGroupRuleAttributes(n string, group *ec2.SecurityGroup, p *ec2.IpPermission, ruleType string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Security Group Rule Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Security Group Rule is set") + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Security Group Rule Not found: %s", n) } - if p == nil { - p = &ec2.IpPermission{ - FromPort: aws.Int64(80), - ToPort: aws.Int64(8000), - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}}, - } - } + if rs.Primary.ID == "" { + return fmt.Errorf("No Security Group Rule is set") + } - var matchingRule *ec2.IpPermission + if p == nil { + p = &ec2.IpPermission{ + FromPort: aws.Int64(80), + ToPort: aws.Int64(8000), + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{&ec2.IpRange{CidrIp: aws.String("10.0.0.0/8")}}, + } + } + + var matchingRule *ec2.IpPermission var rules []*ec2.IpPermission if ruleType == "ingress" { rules = group.IpPermissions @@ -437,53 +437,53 @@ func testAccCheckAWSSecurityGroupRuleAttributes(n string, group *ec2.SecurityGro return fmt.Errorf("No IPPerms") } - 
for _, r := range rules { - if r.ToPort != nil && *p.ToPort != *r.ToPort { - continue - } + for _, r := range rules { + if r.ToPort != nil && *p.ToPort != *r.ToPort { + continue + } - if r.FromPort != nil && *p.FromPort != *r.FromPort { - continue - } + if r.FromPort != nil && *p.FromPort != *r.FromPort { + continue + } - if r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol { - continue - } + if r.IpProtocol != nil && *p.IpProtocol != *r.IpProtocol { + continue + } - remaining := len(p.IpRanges) - for _, ip := range p.IpRanges { - for _, rip := range r.IpRanges { - if *ip.CidrIp == *rip.CidrIp { - remaining-- - } - } - } + remaining := len(p.IpRanges) + for _, ip := range p.IpRanges { + for _, rip := range r.IpRanges { + if *ip.CidrIp == *rip.CidrIp { + remaining-- + } + } + } - if remaining > 0 { - continue - } + if remaining > 0 { + continue + } - remaining = len(p.UserIdGroupPairs) - for _, ip := range p.UserIdGroupPairs { - for _, rip := range r.UserIdGroupPairs { - if *ip.GroupId == *rip.GroupId { - remaining-- - } - } - } + remaining = len(p.UserIdGroupPairs) + for _, ip := range p.UserIdGroupPairs { + for _, rip := range r.UserIdGroupPairs { + if *ip.GroupId == *rip.GroupId { + remaining-- + } + } + } - if remaining > 0 { - continue - } - matchingRule = r + if remaining > 0 { + continue + } + matchingRule = r } - if matchingRule != nil { - log.Printf("[DEBUG] Matching rule found : %s", matchingRule) - return nil - } + if matchingRule != nil { + log.Printf("[DEBUG] Matching rule found : %s", matchingRule) + return nil + } - return fmt.Errorf("Error here\n\tlooking for %s, wasn't found in %s", p, rules) + return fmt.Errorf("Error here\n\tlooking for %s, wasn't found in %s", p, rules) } } From 8d84369738340701912a538c07d1b7926ef5c695 Mon Sep 17 00:00:00 2001 From: Clint Date: Mon, 12 Oct 2015 16:03:43 -0500 Subject: [PATCH 218/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ca088c721..5bc6260f58 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,6 +68,7 @@ BUG FIXES: * provider/aws: Allow `weight = 0` in Route53 records [GH-3196] * provider/aws: Normalize aws_elasticache_cluster id to lowercase, allowing convergence. 
[GH-3235] * provider/aws: Fix ValidateAccountId for IAM Instance Profiles [GH-3313] + * provider/aws: Update Security Group Rules to Version 2 [GH-3019] * provider/docker: Fix issue preventing private images from being referenced [GH-2619] * provider/digitalocean: Fix issue causing unnecessary diffs based on droplet slugsize case [GH-3284] * provider/openstack: add state 'downloading' to list of expected states in From 31b8f04bda7234c37bd01589bdb5f0c3d7a10c3c Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Fri, 9 Oct 2015 11:49:36 -0500 Subject: [PATCH 219/335] provider/aws: Migrate KeyPair to version 1 --- .../providers/aws/resource_aws_key_pair.go | 13 +++++ .../aws/resource_aws_key_pair_migrate.go | 38 +++++++++++++ .../aws/resource_aws_key_pair_migrate_test.go | 55 +++++++++++++++++++ 3 files changed, 106 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_key_pair_migrate.go create mode 100644 builtin/providers/aws/resource_aws_key_pair_migrate_test.go diff --git a/builtin/providers/aws/resource_aws_key_pair.go b/builtin/providers/aws/resource_aws_key_pair.go index e747fbfc50..0d6c51fcf9 100644 --- a/builtin/providers/aws/resource_aws_key_pair.go +++ b/builtin/providers/aws/resource_aws_key_pair.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "strings" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" @@ -18,6 +19,9 @@ func resourceAwsKeyPair() *schema.Resource { Update: nil, Delete: resourceAwsKeyPairDelete, + SchemaVersion: 1, + MigrateState: resourceAwsKeyPairMigrateState, + Schema: map[string]*schema.Schema{ "key_name": &schema.Schema{ Type: schema.TypeString, @@ -29,6 +33,14 @@ func resourceAwsKeyPair() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, + StateFunc: func(v interface{}) string { + switch v.(type) { + case string: + return strings.TrimSpace(v.(string)) + default: + return "" + } + }, }, "fingerprint": &schema.Schema{ Type: schema.TypeString, @@ -45,6 +57,7 @@ func resourceAwsKeyPairCreate(d *schema.ResourceData, meta interface{}) error { if keyName == "" { keyName = resource.UniqueId() } + publicKey := d.Get("public_key").(string) req := &ec2.ImportKeyPairInput{ KeyName: aws.String(keyName), diff --git a/builtin/providers/aws/resource_aws_key_pair_migrate.go b/builtin/providers/aws/resource_aws_key_pair_migrate.go new file mode 100644 index 0000000000..0d56123aab --- /dev/null +++ b/builtin/providers/aws/resource_aws_key_pair_migrate.go @@ -0,0 +1,38 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceAwsKeyPairMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AWS Key Pair State v0; migrating to v1") + return migrateKeyPairStateV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } + + return is, nil +} + +func migrateKeyPairStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + // replace public_key with a stripped version, removing `\n` from the end + // see https://github.com/hashicorp/terraform/issues/3455 + is.Attributes["public_key"] = strings.TrimSpace(is.Attributes["public_key"]) + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, 
nil +} diff --git a/builtin/providers/aws/resource_aws_key_pair_migrate_test.go b/builtin/providers/aws/resource_aws_key_pair_migrate_test.go new file mode 100644 index 0000000000..825d3c40fe --- /dev/null +++ b/builtin/providers/aws/resource_aws_key_pair_migrate_test.go @@ -0,0 +1,55 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +func TestAWSKeyPairMigrateState(t *testing.T) { + cases := map[string]struct { + StateVersion int + ID string + Attributes map[string]string + Expected string + Meta interface{} + }{ + "v0_1": { + StateVersion: 0, + ID: "tf-testing-file", + Attributes: map[string]string{ + "fingerprint": "1d:cd:46:31:a9:4a:e0:06:8a:a1:22:cb:3b:bf:8e:42", + "key_name": "tf-testing-file", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock", + }, + Expected: "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock", + }, + "v0_2": { + StateVersion: 0, + ID: "tf-testing-file", + Attributes: map[string]string{ + "fingerprint": "1d:cd:46:31:a9:4a:e0:06:8a:a1:22:cb:3b:bf:8e:42", + "key_name": "tf-testing-file", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock\n", + }, + Expected: "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4LBtwcFsQAYWw1cnOwRTZCJCzPSzq0dl3== ctshryock", + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: tc.ID, + Attributes: tc.Attributes, + } + is, err := resourceAwsKeyPairMigrateState( + tc.StateVersion, is, tc.Meta) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + if is.Attributes["public_key"] != tc.Expected { + t.Fatalf("Bad public_key migration: %s\n\n expected: %s", is.Attributes["public_key"], tc.Expected) + } + } +} From c44e9d10a48994d0b2136592f420e275131b2384 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 12 Oct 2015 16:26:49 -0500 Subject: [PATCH 220/335] update migration test --- .../aws/resource_aws_security_group_rule_migrate_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go b/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go index 87e3a1d63c..496834b8c0 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_migrate_test.go @@ -27,7 +27,7 @@ func TestAWSSecurityGroupRuleMigrateState(t *testing.T) { "from_port": "0", "source_security_group_id": "sg-11877275", }, - Expected: "sg-2889201120", + Expected: "sgrule-2889201120", }, "v0_2": { StateVersion: 0, @@ -44,7 +44,7 @@ func TestAWSSecurityGroupRuleMigrateState(t *testing.T) { "cidr_blocks.2": "172.16.3.0/24", "cidr_blocks.3": "172.16.4.0/24", "cidr_blocks.#": "4"}, - Expected: "sg-1826358977", + Expected: "sgrule-1826358977", }, } From 307902ec2d8ec5728fdbc3e135e38343775ded48 Mon Sep 17 00:00:00 2001 From: Clint Date: Mon, 12 Oct 2015 16:34:16 -0500 Subject: [PATCH 221/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5bc6260f58..119e027b3b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -69,6 +69,7 @@ BUG FIXES: * provider/aws: Normalize aws_elasticache_cluster id to lowercase, allowing convergence. 
[GH-3235] * provider/aws: Fix ValidateAccountId for IAM Instance Profiles [GH-3313] * provider/aws: Update Security Group Rules to Version 2 [GH-3019] + * provider/aws: Migrate KeyPair to version 1, fixing issue with using `file()` [GH-3470] * provider/docker: Fix issue preventing private images from being referenced [GH-2619] * provider/digitalocean: Fix issue causing unnecessary diffs based on droplet slugsize case [GH-3284] * provider/openstack: add state 'downloading' to list of expected states in From a811a72f11e327dde17a8c5db06e5b44765da482 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 12 Oct 2015 15:50:07 -0500 Subject: [PATCH 222/335] provider/aws: fix force_delete on ASGs The `ForceDelete` parameter was getting sent to the upstream API call, but only after we had already finished draining instances from Terraform, so it was a moot point by then. This fixes that by skipping the drain when force_delete is true, and it also simplifies the field config a bit: * set a default of false to simplify the logic * remove `ForceNew` since there's no need to replace the resource to flip this value * pull a detail comment from code into the docs --- .../aws/resource_aws_autoscaling_group.go | 20 +++++++++---------- .../aws/r/autoscaling_group.html.markdown | 5 ++++- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/builtin/providers/aws/resource_aws_autoscaling_group.go b/builtin/providers/aws/resource_aws_autoscaling_group.go index 771bda2e3a..e6d62b61a0 100644 --- a/builtin/providers/aws/resource_aws_autoscaling_group.go +++ b/builtin/providers/aws/resource_aws_autoscaling_group.go @@ -73,8 +73,7 @@ func resourceAwsAutoscalingGroup() *schema.Resource { "force_delete": &schema.Schema{ Type: schema.TypeBool, Optional: true, - Computed: true, - ForceNew: true, + Default: false, }, "health_check_grace_period": &schema.Schema{ @@ -334,15 +333,9 @@ func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) } log.Printf("[DEBUG] AutoScaling Group destroy: %v", d.Id()) - deleteopts := autoscaling.DeleteAutoScalingGroupInput{AutoScalingGroupName: aws.String(d.Id())} - - // You can force an autoscaling group to delete - // even if it's in the process of scaling a resource. - // Normally, you would set the min-size and max-size to 0,0 - // and then delete the group. This bypasses that and leaves - // resources potentially dangling. 
- if d.Get("force_delete").(bool) { - deleteopts.ForceDelete = aws.Bool(true) + deleteopts := autoscaling.DeleteAutoScalingGroupInput{ + AutoScalingGroupName: aws.String(d.Id()), + ForceDelete: aws.Bool(d.Get("force_delete").(bool)), } // We retry the delete operation to handle InUse/InProgress errors coming @@ -414,6 +407,11 @@ func getAwsAutoscalingGroup( func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).autoscalingconn + if d.Get("force_delete").(bool) { + log.Printf("[DEBUG] Skipping ASG drain, force_delete was set.") + return nil + } + // First, set the capacity to zero so the group will drain log.Printf("[DEBUG] Reducing autoscaling group capacity to zero") opts := autoscaling.UpdateAutoScalingGroupInput{ diff --git a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown index 022b1cf715..40831e99a1 100644 --- a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown +++ b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown @@ -57,7 +57,10 @@ The following arguments are supported: for this number of healthy instances all attached load balancers. (See also [Waiting for Capacity](#waiting-for-capacity) below.) * `force_delete` - (Optional) Allows deleting the autoscaling group without waiting - for all instances in the pool to terminate. + for all instances in the pool to terminate. You can force an autoscaling group to delete + even if it's in the process of scaling a resource. Normally, Terraform + drains all the instances before deleting the group. This bypasses that + behavior and potentially leaves resources dangling. * `load_balancers` (Optional) A list of load balancer names to add to the autoscaling group names. * `vpc_zone_identifier` (Optional) A list of subnet IDs to launch resources in. From 7549872780675a25474fbad4a9cab5746a032706 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 12 Oct 2015 17:20:05 -0500 Subject: [PATCH 223/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 119e027b3b..a61e8af63f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -70,6 +70,7 @@ BUG FIXES: * provider/aws: Fix ValidateAccountId for IAM Instance Profiles [GH-3313] * provider/aws: Update Security Group Rules to Version 2 [GH-3019] * provider/aws: Migrate KeyPair to version 1, fixing issue with using `file()` [GH-3470] + * provider/aws: Fix force_delete on autoscaling groups [GH-3485] * provider/docker: Fix issue preventing private images from being referenced [GH-2619] * provider/digitalocean: Fix issue causing unnecessary diffs based on droplet slugsize case [GH-3284] * provider/openstack: add state 'downloading' to list of expected states in From aaac9435ecf4fa139e5160c2e39295ccc5d223d8 Mon Sep 17 00:00:00 2001 From: Geert Theys Date: Tue, 13 Oct 2015 12:57:22 +0200 Subject: [PATCH 224/335] fix illegal char in the policy name aws_lb_cookie_stickiness_policy.elbland: Error creating LBCookieStickinessPolicy: ValidationError: Policy name cannot contain characters that are not letters, or digits or the dash. 
--- .../providers/aws/r/lb_cookie_stickiness_policy.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/lb_cookie_stickiness_policy.html.markdown b/website/source/docs/providers/aws/r/lb_cookie_stickiness_policy.html.markdown index bb4ad524e3..59e581c124 100644 --- a/website/source/docs/providers/aws/r/lb_cookie_stickiness_policy.html.markdown +++ b/website/source/docs/providers/aws/r/lb_cookie_stickiness_policy.html.markdown @@ -25,7 +25,7 @@ resource "aws_elb" "lb" { } resource "aws_lb_cookie_stickiness_policy" "foo" { - name = "foo_policy" + name = "foo-policy" load_balancer = "${aws_elb.lb.id}" lb_port = 80 cookie_expiration_period = 600 From 60b7037cdd9e6d903a662f3dbdf4c48b0b58dcc5 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 13 Oct 2015 06:20:46 -0500 Subject: [PATCH 225/335] provider/aws: Additional error checking to VPC Peering conn --- .../aws/resource_aws_vpc_peering_connection.go | 12 +++++++----- .../aws/resource_aws_vpc_peering_connection_test.go | 1 + 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/builtin/providers/aws/resource_aws_vpc_peering_connection.go b/builtin/providers/aws/resource_aws_vpc_peering_connection.go index b279797f6d..6b7c4dc52c 100644 --- a/builtin/providers/aws/resource_aws_vpc_peering_connection.go +++ b/builtin/providers/aws/resource_aws_vpc_peering_connection.go @@ -127,6 +127,9 @@ func resourceVPCPeeringConnectionAccept(conn *ec2.EC2, id string) (string, error } resp, err := conn.AcceptVpcPeeringConnection(req) + if err != nil { + return "", err + } pc := resp.VpcPeeringConnection return *pc.Status.Code, err } @@ -153,16 +156,15 @@ func resourceAwsVPCPeeringUpdate(d *schema.ResourceData, meta interface{}) error } pc := pcRaw.(*ec2.VpcPeeringConnection) - if *pc.Status.Code == "pending-acceptance" { + if pc.Status != nil && *pc.Status.Code == "pending-acceptance" { status, err := resourceVPCPeeringConnectionAccept(conn, d.Id()) - - log.Printf( - "[DEBUG] VPC Peering connection accept status %s", - status) if err != nil { return err } + log.Printf( + "[DEBUG] VPC Peering connection accept status: %s", + status) } } diff --git a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go b/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go index dc78a70826..8f73602505 100644 --- a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go +++ b/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go @@ -117,6 +117,7 @@ resource "aws_vpc" "bar" { resource "aws_vpc_peering_connection" "foo" { vpc_id = "${aws_vpc.foo.id}" peer_vpc_id = "${aws_vpc.bar.id}" + auto_accept = true } ` From 5266db31e26712f29d950abe46e22a9a925934d6 Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 3 Sep 2015 22:57:56 +0100 Subject: [PATCH 226/335] Adding the ability to manage a glacier vault --- builtin/providers/aws/config.go | 5 + builtin/providers/aws/provider.go | 1 + .../aws/resource_aws_glacier_vault.go | 380 ++++++++++++++++++ 3 files changed, 386 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_glacier_vault.go diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index 5eac34e8aa..f8f443b73d 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -21,6 +21,7 @@ import ( "github.com/aws/aws-sdk-go/service/elasticache" elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go/service/glacier" 
"github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/lambda" @@ -67,6 +68,7 @@ type AWSClient struct { elasticacheconn *elasticache.ElastiCache lambdaconn *lambda.Lambda opsworksconn *opsworks.OpsWorks + glacierconn *glacier.Glacier } // Client configures and returns a fully initialized AWSClient @@ -184,6 +186,9 @@ func (c *Config) Client() (interface{}, error) { log.Println("[INFO] Initializing Directory Service connection") client.dsconn = directoryservice.New(awsConfig) + + log.Println("[INFO] Initializing Glacier connection") + client.glacierconn = glacier.New(awsConfig) } if len(errs) > 0 { diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index c740e4bc8c..f73580d0f7 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -187,6 +187,7 @@ func Provider() terraform.ResourceProvider { "aws_elasticsearch_domain": resourceAwsElasticSearchDomain(), "aws_elb": resourceAwsElb(), "aws_flow_log": resourceAwsFlowLog(), + "aws_glacier_vault": resourceAwsGlacierVault(), "aws_iam_access_key": resourceAwsIamAccessKey(), "aws_iam_group_policy": resourceAwsIamGroupPolicy(), "aws_iam_group": resourceAwsIamGroup(), diff --git a/builtin/providers/aws/resource_aws_glacier_vault.go b/builtin/providers/aws/resource_aws_glacier_vault.go new file mode 100644 index 0000000000..b077a35cd9 --- /dev/null +++ b/builtin/providers/aws/resource_aws_glacier_vault.go @@ -0,0 +1,380 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/glacier" +) + +func resourceAwsGlacierVault() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsGlacierVaultCreate, + Read: resourceAwsGlacierVaultRead, + Update: resourceAwsGlacierVaultUpdate, + Delete: resourceAwsGlacierVaultDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[.0-9A-Za-z-_]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "only alphanumeric characters, hyphens, underscores, and periods allowed in %q", k)) + } + if len(value) > 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 255 characters", k)) + } + return + }, + }, + + "location": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "access_policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + StateFunc: normalizeJson, + }, + + "notification": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "events": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "sns_topic": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsGlacierVaultCreate(d *schema.ResourceData, meta interface{}) error { + glacierconn := meta.(*AWSClient).glacierconn + + input := &glacier.CreateVaultInput{ + VaultName: aws.String(d.Get("name").(string)), + } + + out, err := glacierconn.CreateVault(input) + if err != nil { + return 
fmt.Errorf("Error creating Glacier Vault: %s", err) + } + + d.SetId(d.Get("name").(string)) + d.Set("location", *out.Location) + + return resourceAwsGlacierVaultUpdate(d, meta) +} + +func resourceAwsGlacierVaultUpdate(d *schema.ResourceData, meta interface{}) error { + glacierconn := meta.(*AWSClient).glacierconn + + if err := setGlacierVaultTags(glacierconn, d); err != nil { + return err + } + + if d.HasChange("access_policy") { + if err := resourceAwsGlacierVaultPolicyUpdate(glacierconn, d); err != nil { + return err + } + } + + if d.HasChange("notification") { + if err := resourceAwsGlacierVaultNotificationUpdate(glacierconn, d); err != nil { + return err + } + } + + return resourceAwsGlacierVaultRead(d, meta) +} + +func resourceAwsGlacierVaultRead(d *schema.ResourceData, meta interface{}) error { + glacierconn := meta.(*AWSClient).glacierconn + + input := &glacier.DescribeVaultInput{ + VaultName: aws.String(d.Id()), + } + + out, err := glacierconn.DescribeVault(input) + if err != nil { + return fmt.Errorf("Error reading Glacier Vault: %s", err.Error()) + } + + d.Set("arn", *out.VaultARN) + + tags, err := getGlacierVaultTags(glacierconn, d.Id()) + if err != nil { + return err + } + d.Set("tags", tags) + + log.Printf("[DEBUG] Getting the access_policy for Vault %s", d.Id()) + pol, err := glacierconn.GetVaultAccessPolicy(&glacier.GetVaultAccessPolicyInput{ + VaultName: aws.String(d.Id()), + }) + + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ResourceNotFoundException" { + d.Set("access_policy", "") + } else if pol != nil { + d.Set("access_policy", normalizeJson(*pol.Policy.Policy)) + } else { + return err + } + + notifications, err := getGlacierVaultNotification(glacierconn, d.Id()) + if err != nil { + return err + } + d.Set("notification", notifications) + + return nil +} + +func resourceAwsGlacierVaultDelete(d *schema.ResourceData, meta interface{}) error { + glacierconn := meta.(*AWSClient).glacierconn + + log.Printf("[DEBUG] Glacier Delete Vault: %s", d.Id()) + _, err := glacierconn.DeleteVault(&glacier.DeleteVaultInput{ + VaultName: aws.String(d.Id()), + }) + if err != nil { + return fmt.Errorf("Error deleting Glacier Vault: %s", err.Error()) + } + return nil +} + +func resourceAwsGlacierVaultNotificationUpdate(glacierconn *glacier.Glacier, d *schema.ResourceData) error { + + if v, ok := d.GetOk("notification"); ok { + settings := v.([]interface{}) + + if len(settings) > 1 { + return fmt.Errorf("Only a single Notification setup is allowed for Glacier Vault") + } else if len(settings) == 1 { + s := settings[0].(map[string]interface{}) + var events []*string + for _, id := range s["events"].(*schema.Set).List() { + event := id.(string) + if event != "ArchiveRetrievalCompleted" && event != "InventoryRetrievalCompleted" { + return fmt.Errorf("Glacier Vault Notification Events can only be 'ArchiveRetrievalCompleted' or 'InventoryRetrievalCompleted'") + } else { + events = append(events, aws.String(event)) + } + } + + _, err := glacierconn.SetVaultNotifications(&glacier.SetVaultNotificationsInput{ + VaultName: aws.String(d.Id()), + VaultNotificationConfig: &glacier.VaultNotificationConfig{ + SNSTopic: aws.String(s["sns_topic"].(string)), + Events: events, + }, + }) + + if err != nil { + return fmt.Errorf("Error Updating Glacier Vault Notifications: %s", err.Error()) + } + } + } + + return nil +} + +func resourceAwsGlacierVaultPolicyUpdate(glacierconn *glacier.Glacier, d *schema.ResourceData) error { + vaultName := d.Id() + policyContents := d.Get("access_policy").(string) + 
+ policy := &glacier.VaultAccessPolicy{ + Policy: aws.String(policyContents), + } + + if policyContents != "" { + log.Printf("[DEBUG] Glacier Vault: %s, put policy", vaultName) + + _, err := glacierconn.SetVaultAccessPolicy(&glacier.SetVaultAccessPolicyInput{ + VaultName: aws.String(d.Id()), + Policy: policy, + }) + + if err != nil { + return fmt.Errorf("Error putting Glacier Vault policy: %s", err.Error()) + } + } else { + log.Printf("[DEBUG] Glacier Vault: %s, delete policy: %s", vaultName, policy) + _, err := glacierconn.DeleteVaultAccessPolicy(&glacier.DeleteVaultAccessPolicyInput{ + VaultName: aws.String(d.Id()), + }) + + if err != nil { + return fmt.Errorf("Error deleting Glacier Vault policy: %s", err.Error()) + } + } + + return nil +} + +func setGlacierVaultTags(conn *glacier.Glacier, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffGlacierVaultTags(mapGlacierVaultTags(o), mapGlacierVaultTags(n)) + + // Set tags + if len(remove) > 0 { + tagsToRemove := &glacier.RemoveTagsFromVaultInput{ + VaultName: aws.String(d.Id()), + TagKeys: glacierStringsToPointyString(remove), + } + + log.Printf("[DEBUG] Removing tags: from %s", d.Id()) + _, err := conn.RemoveTagsFromVault(tagsToRemove) + if err != nil { + return err + } + } + if len(create) > 0 { + tagsToAdd := &glacier.AddTagsToVaultInput{ + VaultName: aws.String(d.Id()), + Tags: glacierVaultTagsFromMap(create), + } + + log.Printf("[DEBUG] Creating tags: for %s", d.Id()) + _, err := conn.AddTagsToVault(tagsToAdd) + if err != nil { + return err + } + } + } + + return nil +} + +func mapGlacierVaultTags(m map[string]interface{}) map[string]string { + results := make(map[string]string) + for k, v := range m { + results[k] = v.(string) + } + + return results +} + +func diffGlacierVaultTags(oldTags, newTags map[string]string) (map[string]string, []string) { + + create := make(map[string]string) + for k, v := range newTags { + create[k] = v + } + + // Build the list of what to remove + var remove []string + for k, v := range oldTags { + old, ok := create[k] + if !ok || old != v { + // Delete it! 
+      remove = append(remove, k)
+    }
+  }
+
+  return create, remove
+}
+
+func getGlacierVaultTags(glacierconn *glacier.Glacier, vaultName string) (map[string]string, error) {
+  request := &glacier.ListTagsForVaultInput{
+    VaultName: aws.String(vaultName),
+  }
+
+  log.Printf("[DEBUG] Getting the tags: for %s", vaultName)
+  response, err := glacierconn.ListTagsForVault(request)
+  if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "NoSuchTagSet" {
+    return map[string]string{}, nil
+  } else if err != nil {
+    return nil, err
+  }
+
+  return glacierVaultTagsToMap(response.Tags), nil
+}
+
+func glacierVaultTagsToMap(responseTags map[string]*string) map[string]string {
+  results := make(map[string]string, len(responseTags))
+  for k, v := range responseTags {
+    results[k] = *v
+  }
+
+  return results
+}
+
+func glacierVaultTagsFromMap(responseTags map[string]string) map[string]*string {
+  results := make(map[string]*string, len(responseTags))
+  for k, v := range responseTags {
+    results[k] = aws.String(v)
+  }
+
+  return results
+}
+
+func glacierStringsToPointyString(s []string) []*string {
+  results := make([]*string, len(s))
+  for i, x := range s {
+    results[i] = aws.String(x)
+  }
+
+  return results
+}
+
+func glacierPointersToStringList(pointers []*string) []interface{} {
+  list := make([]interface{}, len(pointers))
+  for i, v := range pointers {
+    list[i] = *v
+  }
+  return list
+}
+
+func getGlacierVaultNotification(glacierconn *glacier.Glacier, vaultName string) ([]map[string]interface{}, error) {
+  request := &glacier.GetVaultNotificationsInput{
+    VaultName: aws.String(vaultName),
+  }
+
+  response, err := glacierconn.GetVaultNotifications(request)
+  if err != nil {
+    return nil, fmt.Errorf("Error reading Glacier Vault Notifications: %s", err.Error())
+  }
+
+  notifications := make(map[string]interface{})
+
+  log.Print("[DEBUG] Flattening Glacier Vault Notifications")
+
+  notifications["events"] = schema.NewSet(schema.HashString, glacierPointersToStringList(response.VaultNotificationConfig.Events))
+  notifications["sns_topic"] = *response.VaultNotificationConfig.SNSTopic
+
+  return []map[string]interface{}{notifications}, nil
+}
From 95d35ad77f5609a54f59b454b4860f3aa7dba33c Mon Sep 17 00:00:00 2001
From: stack72
Date: Tue, 15 Sep 2015 23:32:54 +0100
Subject: [PATCH 227/335] Adding the docs for the Glacier Vault resource

Updating the glacier docs to include a link to the AWS developer guide
---
 .../aws/r/glacier_vault.html.markdown | 68 +++++++++++++++++++
 website/source/layouts/aws.erb        |  9 +++
 2 files changed, 77 insertions(+)
 create mode 100644 website/source/docs/providers/aws/r/glacier_vault.html.markdown

diff --git a/website/source/docs/providers/aws/r/glacier_vault.html.markdown b/website/source/docs/providers/aws/r/glacier_vault.html.markdown
new file mode 100644
index 0000000000..ad7e2a6d14
--- /dev/null
+++ b/website/source/docs/providers/aws/r/glacier_vault.html.markdown
@@ -0,0 +1,68 @@
+---
+layout: "aws"
+page_title: "AWS: aws_glacier_vault"
+sidebar_current: "docs-aws-resource-glacier-vault"
+description: |-
+  Provides a Glacier Vault.
+---
+
+# aws\_glacier\_vault
+
+Provides a Glacier Vault Resource.
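+As a minimal starting point, only a vault name needs to be supplied. A
+minimal, illustrative sketch (the vault name here is a placeholder):
+
+```
+resource "aws_glacier_vault" "archive" {
+  name = "my-archive-vault"
+}
+```
+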
You can refer to the [Glacier Developer Guide](http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-vaults.html) for a full explanation of the Glacier Vault functionality + +## Example Usage + +``` +resource "aws_glacier_vault" "my_archive" { + name = "MyArchive" + + notification { + sns_topic = "arn:aws:sns:us-west-2:432981146916:MyArchiveTopic" + events = ["ArchiveRetrievalCompleted","InventoryRetrievalCompleted"] + } + + access_policy = < + > + Glacier Resources + + + > IAM Resources From 2a7b8be9f3aae116c5168f10aa19ea6c7273e643 Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 17 Sep 2015 01:46:10 +0100 Subject: [PATCH 228/335] Gofmt of the aws glacier vault resource --- .../aws/resource_aws_glacier_vault.go | 29 +-- .../aws/resource_aws_glacier_vault_test.go | 175 ++++++++++++++++++ .../aws/r/glacier_vault.html.markdown | 19 +- 3 files changed, 206 insertions(+), 17 deletions(-) create mode 100644 builtin/providers/aws/resource_aws_glacier_vault_test.go diff --git a/builtin/providers/aws/resource_aws_glacier_vault.go b/builtin/providers/aws/resource_aws_glacier_vault.go index b077a35cd9..21ac4d7cc1 100644 --- a/builtin/providers/aws/resource_aws_glacier_vault.go +++ b/builtin/providers/aws/resource_aws_glacier_vault.go @@ -143,7 +143,7 @@ func resourceAwsGlacierVaultRead(d *schema.ResourceData, meta interface{}) error VaultName: aws.String(d.Id()), }) - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "ResourceNotFoundException" { + if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" { d.Set("access_policy", "") } else if pol != nil { d.Set("access_policy", normalizeJson(*pol.Policy.Policy)) @@ -152,10 +152,13 @@ func resourceAwsGlacierVaultRead(d *schema.ResourceData, meta interface{}) error } notifications, err := getGlacierVaultNotification(glacierconn, d.Id()) - if err != nil { + if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" { + d.Set("notification", "") + } else if pol != nil { + d.Set("notification", notifications) + } else { return err } - d.Set("notification", notifications) return nil } @@ -179,17 +182,12 @@ func resourceAwsGlacierVaultNotificationUpdate(glacierconn *glacier.Glacier, d * settings := v.([]interface{}) if len(settings) > 1 { - return fmt.Errorf("Only a single Notification setup is allowed for Glacier Vault") + return fmt.Errorf("Only a single Notification Block is allowed for Glacier Vault") } else if len(settings) == 1 { s := settings[0].(map[string]interface{}) var events []*string for _, id := range s["events"].(*schema.Set).List() { - event := id.(string) - if event != "ArchiveRetrievalCompleted" && event != "InventoryRetrievalCompleted" { - return fmt.Errorf("Glacier Vault Notification Events can only be 'ArchiveRetrievalCompleted' or 'InventoryRetrievalCompleted'") - } else { - events = append(events, aws.String(event)) - } + events = append(events, aws.String(id.(string))) } _, err := glacierconn.SetVaultNotifications(&glacier.SetVaultNotificationsInput{ @@ -204,6 +202,15 @@ func resourceAwsGlacierVaultNotificationUpdate(glacierconn *glacier.Glacier, d * return fmt.Errorf("Error Updating Glacier Vault Notifications: %s", err.Error()) } } + } else { + _, err := glacierconn.DeleteVaultNotifications(&glacier.DeleteVaultNotificationsInput{ + VaultName: aws.String(d.Id()), + }) + + if err != nil { + return fmt.Errorf("Error Removing Glacier Vault Notifications: %s", err.Error()) + } + } return nil @@ -315,7 +322,7 @@ func getGlacierVaultTags(glacierconn 
*glacier.Glacier, vaultName string) (map[st log.Printf("[DEBUG] Getting the tags: for %s", vaultName) response, err := glacierconn.ListTagsForVault(request) - if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "NoSuchTagSet" { + if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "NoSuchTagSet" { return map[string]string{}, nil } else if err != nil { return nil, err diff --git a/builtin/providers/aws/resource_aws_glacier_vault_test.go b/builtin/providers/aws/resource_aws_glacier_vault_test.go new file mode 100644 index 0000000000..fc5e150d94 --- /dev/null +++ b/builtin/providers/aws/resource_aws_glacier_vault_test.go @@ -0,0 +1,175 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/glacier" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSGlacierVault_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckGlacierVaultDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccGlacierVault_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckGlacierVaultExists("aws_glacier_vault.test"), + ), + }, + }, + }) +} + +func TestAccAWSGlacierVault_full(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckGlacierVaultDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccGlacierVault_full, + Check: resource.ComposeTestCheckFunc( + testAccCheckGlacierVaultExists("aws_glacier_vault.full"), + ), + }, + }, + }) +} + +func TestAccAWSGlacierVault_RemoveNotifications(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckGlacierVaultDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccGlacierVault_full, + Check: resource.ComposeTestCheckFunc( + testAccCheckGlacierVaultExists("aws_glacier_vault.full"), + ), + }, + resource.TestStep{ + Config: testAccGlacierVault_withoutNotification, + Check: resource.ComposeTestCheckFunc( + testAccCheckGlacierVaultExists("aws_glacier_vault.full"), + testAccCheckVaultNotificationsMissing("aws_glacier_vault.full"), + ), + }, + }, + }) +} + +func testAccCheckGlacierVaultExists(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + glacierconn := testAccProvider.Meta().(*AWSClient).glacierconn + out, err := glacierconn.DescribeVault(&glacier.DescribeVaultInput{ + VaultName: aws.String(rs.Primary.ID), + }) + + if err != nil { + return err + } + + if out.VaultARN == nil { + return fmt.Errorf("No Glacier Vault Found") + } + + if *out.VaultName != rs.Primary.ID { + return fmt.Errorf("Glacier Vault Mismatch - existing: %q, state: %q", + *out.VaultName, rs.Primary.ID) + } + + return nil + } +} + +func testAccCheckVaultNotificationsMissing(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + glacierconn := 
testAccProvider.Meta().(*AWSClient).glacierconn + out, err := glacierconn.GetVaultNotifications(&glacier.GetVaultNotificationsInput{ + VaultName: aws.String(rs.Primary.ID), + }) + + if awserr, ok := err.(awserr.Error); ok && awserr.Code() != "ResourceNotFoundException" { + return fmt.Errorf("Expected ResourceNotFoundException for Vault %s Notification Block but got %s", rs.Primary.ID, awserr.Code()) + } + + if out.VaultNotificationConfig != nil { + return fmt.Errorf("Vault Notification Block has been found for %s", rs.Primary.ID) + } + + return nil + } + +} + +func testAccCheckGlacierVaultDestroy(s *terraform.State) error { + if len(s.RootModule().Resources) > 0 { + return fmt.Errorf("Expected all resources to be gone, but found: %#v", + s.RootModule().Resources) + } + + return nil +} + +const testAccGlacierVault_basic = ` +resource "aws_glacier_vault" "test" { + name = "my_test_vault" +} +` + +const testAccGlacierVault_full = ` +resource "aws_sns_topic" "aws_sns_topic" { + name = "glacier-sns-topic" +} + +resource "aws_glacier_vault" "full" { + name = "my_test_vault" + notification { + sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" + events = ["ArchiveRetrievalCompleted","InventoryRetrievalCompleted"] + } + tags { + Test="Test1" + } +} +` + +const testAccGlacierVault_withoutNotification = ` +resource "aws_sns_topic" "aws_sns_topic" { + name = "glacier-sns-topic" +} + +resource "aws_glacier_vault" "full" { + name = "my_test_vault" + tags { + Test="Test1" + } +} +` diff --git a/website/source/docs/providers/aws/r/glacier_vault.html.markdown b/website/source/docs/providers/aws/r/glacier_vault.html.markdown index ad7e2a6d14..920bee4f54 100644 --- a/website/source/docs/providers/aws/r/glacier_vault.html.markdown +++ b/website/source/docs/providers/aws/r/glacier_vault.html.markdown @@ -10,14 +10,21 @@ description: |- Provides a Glacier Vault Resource. You can refer to the [Glacier Developer Guide](http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-vaults.html) for a full explanation of the Glacier Vault functionality +~> **NOTE:** When trying to remove a Glacier Vault, the Vault must be empty. + ## Example Usage ``` + +resource "aws_sns_topic" "aws_sns_topic" { + name = "glacier-sns-topic" +} + resource "aws_glacier_vault" "my_archive" { name = "MyArchive" notification { - sns_topic = "arn:aws:sns:us-west-2:432981146916:MyArchiveTopic" + sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" events = ["ArchiveRetrievalCompleted","InventoryRetrievalCompleted"] } @@ -51,15 +58,15 @@ EOF The following arguments are supported: -* `name` - (Required) The name of the Vault. Names can be between 1 and 255 characters long and the valid characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), and '.' (period). -* `access_policy` - (Required) The policy document. This is a JSON formatted string. - The heredoc syntax or `file` function is helpful here. -* `notification` - (Required) The notifications for the Vault. Fields documented below. +* `name` - (Required) The name of the Vault. Names can be between 1 and 255 characters long and the valid characters are a-z, A-Z, 0-9, '\_' (underscore), '-' (hyphen), and '.' (period). +* `access_policy` - (Optional) The policy document. This is a JSON formatted string. + The heredoc syntax or `file` function is helpful here. Use the [Glacier Developer Guide](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-access-policy.html) for more information on Glacier Vault Policy +* `notification` - (Optional) The notifications for the Vault. 
Fields documented below. * `tags` - (Optional) A mapping of tags to assign to the resource. **notification** supports the following: -* `events` - (Required) You can configure a vault to public a notification for `ArchiveRetrievalCompleted` and `InventoryRetrievalCompleted` events. +* `events` - (Required) You can configure a vault to publish a notification for `ArchiveRetrievalCompleted` and `InventoryRetrievalCompleted` events. * `sns_topic` - (Required) The SNS Topic ARN. The following attributes are exported: From 9f01efae6f027ce6cd646ebb7c018f95a410104d Mon Sep 17 00:00:00 2001 From: stack72 Date: Mon, 5 Oct 2015 11:24:09 +0100 Subject: [PATCH 229/335] Adding a test to make sure that the diffGlacierVaultTags func works as expected --- .../aws/resource_aws_glacier_vault_test.go | 52 +++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/builtin/providers/aws/resource_aws_glacier_vault_test.go b/builtin/providers/aws/resource_aws_glacier_vault_test.go index fc5e150d94..4f5c26bf28 100644 --- a/builtin/providers/aws/resource_aws_glacier_vault_test.go +++ b/builtin/providers/aws/resource_aws_glacier_vault_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "reflect" "testing" "github.com/aws/aws-sdk-go/aws" @@ -67,6 +68,57 @@ func TestAccAWSGlacierVault_RemoveNotifications(t *testing.T) { }) } +func TestDiffGlacierVaultTags(t *testing.T) { + cases := []struct { + Old, New map[string]interface{} + Create map[string]string + Remove []string + }{ + // Basic add/remove + { + Old: map[string]interface{}{ + "foo": "bar", + }, + New: map[string]interface{}{ + "bar": "baz", + }, + Create: map[string]string{ + "bar": "baz", + }, + Remove: []string{ + "foo", + }, + }, + + // Modify + { + Old: map[string]interface{}{ + "foo": "bar", + }, + New: map[string]interface{}{ + "foo": "baz", + }, + Create: map[string]string{ + "foo": "baz", + }, + Remove: []string{ + "foo", + }, + }, + } + + for i, tc := range cases { + c, r := diffGlacierVaultTags(mapGlacierVaultTags(tc.Old), mapGlacierVaultTags(tc.New)) + + if !reflect.DeepEqual(c, tc.Create) { + t.Fatalf("%d: bad create: %#v", i, c) + } + if !reflect.DeepEqual(r, tc.Remove) { + t.Fatalf("%d: bad remove: %#v", i, r) + } + } +} + func testAccCheckGlacierVaultExists(name string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] From 2f42f58256b0a6f4e6921f2d1f8a1e63a0545442 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 13 Oct 2015 17:15:34 +0200 Subject: [PATCH 230/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a61e8af63f..cfe4dd75ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ FEATURES: * **New resource: `aws_directory_service_directory`** [GH-3228] * **New resource: `aws_autoscaling_lifecycle_hook`** [GH-3351] * **New resource: `aws_placement_group`** [GH-3457] + * **New resource: `aws_glacier_vault`** [GH-3491] IMPROVEMENTS: From 95832c2fb217182c16f856edbed2d51d8e07719b Mon Sep 17 00:00:00 2001 From: Clint Date: Tue, 13 Oct 2015 12:56:53 -0500 Subject: [PATCH 231/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cfe4dd75ae..9458c1a733 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -72,6 +72,7 @@ BUG FIXES: * provider/aws: Update Security Group Rules to Version 2 [GH-3019] * provider/aws: Migrate KeyPair to version 1, fixing issue with using `file()` [GH-3470] * provider/aws: Fix force_delete on autoscaling 
groups [GH-3485] + * provider/aws: Fix crash with VPC Peering connections [GH-3490] * provider/docker: Fix issue preventing private images from being referenced [GH-2619] * provider/digitalocean: Fix issue causing unnecessary diffs based on droplet slugsize case [GH-3284] * provider/openstack: add state 'downloading' to list of expected states in From 43c7711ac89172ac45b365be60c81d757f765532 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 13 Oct 2015 18:21:21 +0200 Subject: [PATCH 232/335] docs/aws: Fix whitespacing in glacier_vault --- .../docs/providers/aws/r/glacier_vault.html.markdown | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/website/source/docs/providers/aws/r/glacier_vault.html.markdown b/website/source/docs/providers/aws/r/glacier_vault.html.markdown index 920bee4f54..523260d721 100644 --- a/website/source/docs/providers/aws/r/glacier_vault.html.markdown +++ b/website/source/docs/providers/aws/r/glacier_vault.html.markdown @@ -10,24 +10,23 @@ description: |- Provides a Glacier Vault Resource. You can refer to the [Glacier Developer Guide](http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-vaults.html) for a full explanation of the Glacier Vault functionality -~> **NOTE:** When trying to remove a Glacier Vault, the Vault must be empty. +~> **NOTE:** When trying to remove a Glacier Vault, the Vault must be empty. ## Example Usage ``` - resource "aws_sns_topic" "aws_sns_topic" { name = "glacier-sns-topic" } resource "aws_glacier_vault" "my_archive" { name = "MyArchive" - + notification { sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" events = ["ArchiveRetrievalCompleted","InventoryRetrievalCompleted"] } - + access_policy = < Date: Tue, 13 Oct 2015 22:52:11 +0200 Subject: [PATCH 233/335] docs: Make IAM policy doc canonical --- .../source/docs/providers/aws/r/glacier_vault.html.markdown | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/website/source/docs/providers/aws/r/glacier_vault.html.markdown b/website/source/docs/providers/aws/r/glacier_vault.html.markdown index 523260d721..6805338c79 100644 --- a/website/source/docs/providers/aws/r/glacier_vault.html.markdown +++ b/website/source/docs/providers/aws/r/glacier_vault.html.markdown @@ -39,9 +39,7 @@ resource "aws_glacier_vault" "my_archive" { "glacier:InitiateJob", "glacier:GetJobOutput" ], - "Resource": [ - "arn:aws:glacier:eu-west-1:432981146916:vaults/MyArchive" - ] + "Resource": "arn:aws:glacier:eu-west-1:432981146916:vaults/MyArchive" } ] } From 61948f35d25076d19c14d02ccbc09c7c8fcdf0f8 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 6 Jul 2015 23:45:47 +0100 Subject: [PATCH 234/335] provider/aws: Add docs for aws_cloudformation_stack --- .../aws/r/cloudformation_stack.html.markdown | 63 +++++++++++++++++++ website/source/layouts/aws.erb | 8 +++ 2 files changed, 71 insertions(+) create mode 100644 website/source/docs/providers/aws/r/cloudformation_stack.html.markdown diff --git a/website/source/docs/providers/aws/r/cloudformation_stack.html.markdown b/website/source/docs/providers/aws/r/cloudformation_stack.html.markdown new file mode 100644 index 0000000000..6a13520a20 --- /dev/null +++ b/website/source/docs/providers/aws/r/cloudformation_stack.html.markdown @@ -0,0 +1,63 @@ +--- +layout: "aws" +page_title: "AWS: aws_cloudformation_stack" +sidebar_current: "docs-aws-resource-cloudformation-stack" +description: |- + Provides a CloudFormation Stack resource. +--- + +# aws\_cloudformation\_stack + +Provides a CloudFormation Stack resource. 
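+
+A stack can be defined either inline through `template_body` or from a
+template stored remotely via `template_url`. As a minimal, illustrative
+sketch (the stack name and template below are placeholders):
+
+```
+resource "aws_cloudformation_stack" "example" {
+  name = "example-stack"
+
+  template_body = <<STACK
+{
+  "Resources": {
+    "ExampleTopic": {
+      "Type": "AWS::SNS::Topic"
+    }
+  }
+}
+STACK
+}
+```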
+ +## Example Usage + +``` +resource "aws_cloudformation_stack" "network" { + name = "networking-stack" + template_body = <AWS Provider + > + CloudFormation Resources + + > CloudWatch Resources From 4dfbbe307490d28c2039ed7da8113818e82026f3 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 7 Jul 2015 08:00:05 +0100 Subject: [PATCH 235/335] provider/aws: Add implementation for aws_cloudformation_stack --- builtin/providers/aws/config.go | 5 + builtin/providers/aws/provider.go | 1 + .../aws/resource_aws_cloudformation_stack.go | 451 ++++++++++++++++++ builtin/providers/aws/structure.go | 55 +++ 4 files changed, 512 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_cloudformation_stack.go diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index f8f443b73d..bbbad7eeaf 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/aws/aws-sdk-go/service/directoryservice" @@ -47,6 +48,7 @@ type Config struct { } type AWSClient struct { + cfconn *cloudformation.CloudFormation cloudwatchconn *cloudwatch.CloudWatch cloudwatchlogsconn *cloudwatchlogs.CloudWatchLogs dsconn *directoryservice.DirectoryService @@ -175,6 +177,9 @@ func (c *Config) Client() (interface{}, error) { log.Println("[INFO] Initializing Lambda Connection") client.lambdaconn = lambda.New(awsConfig) + log.Println("[INFO] Initializing Cloudformation Connection") + client.cfconn = cloudformation.New(awsConfig) + log.Println("[INFO] Initializing CloudWatch SDK connection") client.cloudwatchconn = cloudwatch.New(awsConfig) diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index f73580d0f7..547f9617ac 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -163,6 +163,7 @@ func Provider() terraform.ResourceProvider { "aws_autoscaling_group": resourceAwsAutoscalingGroup(), "aws_autoscaling_notification": resourceAwsAutoscalingNotification(), "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(), + "aws_cloudformation_stack": resourceAwsCloudFormationStack(), "aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(), "aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(), "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), diff --git a/builtin/providers/aws/resource_aws_cloudformation_stack.go b/builtin/providers/aws/resource_aws_cloudformation_stack.go new file mode 100644 index 0000000000..1846a31054 --- /dev/null +++ b/builtin/providers/aws/resource_aws_cloudformation_stack.go @@ -0,0 +1,451 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/cloudformation" +) + +func resourceAwsCloudFormationStack() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudFormationStackCreate, + Read: resourceAwsCloudFormationStackRead, + Update: resourceAwsCloudFormationStackUpdate, + Delete: resourceAwsCloudFormationStackDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + 
Required: true, + ForceNew: true, + }, + "template_body": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + StateFunc: normalizeJson, + }, + "template_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "capabilities": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "disable_rollback": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "notification_arns": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "on_failure": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "parameters": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Computed: true, + }, + "outputs": &schema.Schema{ + Type: schema.TypeMap, + Computed: true, + }, + "policy_body": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + StateFunc: normalizeJson, + }, + "policy_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "timeout_in_minutes": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "tags": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsCloudFormationStackCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cfconn + + input := cloudformation.CreateStackInput{ + StackName: aws.String(d.Get("name").(string)), + } + if v, ok := d.GetOk("template_body"); ok { + input.TemplateBody = aws.String(normalizeJson(v.(string))) + } + if v, ok := d.GetOk("template_url"); ok { + input.TemplateURL = aws.String(v.(string)) + } + if v, ok := d.GetOk("capabilities"); ok { + input.Capabilities = expandStringList(v.(*schema.Set).List()) + } + if v, ok := d.GetOk("disable_rollback"); ok { + input.DisableRollback = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("notification_arns"); ok { + input.NotificationARNs = expandStringList(v.(*schema.Set).List()) + } + if v, ok := d.GetOk("on_failure"); ok { + input.OnFailure = aws.String(v.(string)) + } + if v, ok := d.GetOk("parameters"); ok { + input.Parameters = expandCloudFormationParameters(v.(map[string]interface{})) + } + if v, ok := d.GetOk("policy_body"); ok { + input.StackPolicyBody = aws.String(normalizeJson(v.(string))) + } + if v, ok := d.GetOk("policy_url"); ok { + input.StackPolicyURL = aws.String(v.(string)) + } + if v, ok := d.GetOk("tags"); ok { + input.Tags = expandCloudFormationTags(v.(map[string]interface{})) + } + if v, ok := d.GetOk("timeout_in_minutes"); ok { + input.TimeoutInMinutes = aws.Int64(int64(v.(int))) + } + + log.Printf("[DEBUG] Creating CloudFormation Stack: %s", input) + resp, err := conn.CreateStack(&input) + if err != nil { + return fmt.Errorf("Creating CloudFormation stack failed: %s", err.Error()) + } + + d.SetId(*resp.StackId) + + wait := resource.StateChangeConf{ + Pending: []string{"CREATE_IN_PROGRESS", "ROLLBACK_IN_PROGRESS", "ROLLBACK_COMPLETE"}, + Target: "CREATE_COMPLETE", + Timeout: 30 * time.Minute, + MinTimeout: 5 * time.Second, + Refresh: func() (interface{}, string, error) { + resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{ + StackName: aws.String(d.Get("name").(string)), + }) + status := *resp.Stacks[0].StackStatus + log.Printf("[DEBUG] Current CloudFormation stack status: %q", status) + + if status == "ROLLBACK_COMPLETE" { + stack := 
resp.Stacks[0] + failures, err := getCloudFormationFailures(stack.StackName, *stack.CreationTime, conn) + if err != nil { + return resp, "", fmt.Errorf( + "Failed getting details about rollback: %q", err.Error()) + } + + return resp, "", fmt.Errorf("ROLLBACK_COMPLETE:\n%q", failures) + } + return resp, status, err + }, + } + + _, err = wait.WaitForState() + if err != nil { + return err + } + + log.Printf("[INFO] CloudFormation Stack %q created", d.Get("name").(string)) + + return resourceAwsCloudFormationStackRead(d, meta) +} + +func resourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cfconn + stackName := d.Get("name").(string) + + input := &cloudformation.DescribeStacksInput{ + StackName: aws.String(stackName), + } + resp, err := conn.DescribeStacks(input) + if err != nil { + return err + } + + stacks := resp.Stacks + if len(stacks) < 1 { + return nil + } + + tInput := cloudformation.GetTemplateInput{ + StackName: aws.String(stackName), + } + out, err := conn.GetTemplate(&tInput) + if err != nil { + return err + } + + d.Set("template_body", normalizeJson(*out.TemplateBody)) + + stack := stacks[0] + log.Printf("[DEBUG] Received CloudFormation stack: %s", stack) + + d.Set("name", stack.StackName) + d.Set("arn", stack.StackId) + + if stack.TimeoutInMinutes != nil { + d.Set("timeout_in_minutes", int(*stack.TimeoutInMinutes)) + } + if stack.Description != nil { + d.Set("description", stack.Description) + } + if stack.DisableRollback != nil { + d.Set("disable_rollback", stack.DisableRollback) + } + if len(stack.NotificationARNs) > 0 { + err = d.Set("notification_arns", schema.NewSet(schema.HashString, flattenStringList(stack.NotificationARNs))) + if err != nil { + return err + } + } + + originalParams := d.Get("parameters").(map[string]interface{}) + err = d.Set("parameters", flattenCloudFormationParameters(stack.Parameters, originalParams)) + if err != nil { + return err + } + + err = d.Set("tags", flattenCloudFormationTags(stack.Tags)) + if err != nil { + return err + } + + err = d.Set("outputs", flattenCloudFormationOutputs(stack.Outputs)) + if err != nil { + return err + } + + if len(stack.Capabilities) > 0 { + err = d.Set("capabilities", schema.NewSet(schema.HashString, flattenStringList(stack.Capabilities))) + if err != nil { + return err + } + } + + return nil +} + +func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cfconn + + input := &cloudformation.UpdateStackInput{ + StackName: aws.String(d.Get("name").(string)), + } + + if d.HasChange("template_body") { + input.TemplateBody = aws.String(normalizeJson(d.Get("template_body").(string))) + } + if d.HasChange("template_url") { + input.TemplateURL = aws.String(d.Get("template_url").(string)) + } + if d.HasChange("capabilities") { + input.Capabilities = expandStringList(d.Get("capabilities").(*schema.Set).List()) + } + if d.HasChange("notification_arns") { + input.NotificationARNs = expandStringList(d.Get("notification_arns").(*schema.Set).List()) + } + if d.HasChange("parameters") { + input.Parameters = expandCloudFormationParameters(d.Get("parameters").(map[string]interface{})) + } + if d.HasChange("policy_body") { + input.StackPolicyBody = aws.String(normalizeJson(d.Get("policy_body").(string))) + } + if d.HasChange("policy_url") { + input.StackPolicyURL = aws.String(d.Get("policy_url").(string)) + } + + log.Printf("[DEBUG] Updating CloudFormation stack: %s", input) + stack, err := conn.UpdateStack(input) + 
if err != nil { + return err + } + + lastUpdatedTime, err := getLastCfEventTimestamp(d.Get("name").(string), conn) + if err != nil { + return err + } + + wait := resource.StateChangeConf{ + Pending: []string{ + "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", + "UPDATE_IN_PROGRESS", + "UPDATE_ROLLBACK_IN_PROGRESS", + "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", + "UPDATE_ROLLBACK_COMPLETE", + }, + Target: "UPDATE_COMPLETE", + Timeout: 15 * time.Minute, + MinTimeout: 5 * time.Second, + Refresh: func() (interface{}, string, error) { + resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{ + StackName: aws.String(d.Get("name").(string)), + }) + stack := resp.Stacks[0] + status := *stack.StackStatus + log.Printf("[DEBUG] Current CloudFormation stack status: %q", status) + + if status == "UPDATE_ROLLBACK_COMPLETE" { + failures, err := getCloudFormationFailures(stack.StackName, *lastUpdatedTime, conn) + if err != nil { + return resp, "", fmt.Errorf( + "Failed getting details about rollback: %q", err.Error()) + } + + return resp, "", fmt.Errorf( + "UPDATE_ROLLBACK_COMPLETE:\n%q", failures) + } + + return resp, status, err + }, + } + + _, err = wait.WaitForState() + if err != nil { + return err + } + + log.Printf("[DEBUG] CloudFormation stack %q has been updated", *stack.StackId) + + return resourceAwsCloudFormationStackRead(d, meta) +} + +func resourceAwsCloudFormationStackDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cfconn + + input := &cloudformation.DeleteStackInput{ + StackName: aws.String(d.Get("name").(string)), + } + log.Printf("[DEBUG] Deleting CloudFormation stack %s", input) + _, err := conn.DeleteStack(input) + if err != nil { + awsErr, ok := err.(awserr.Error) + if !ok { + return err + } + + if awsErr.Code() == "ValidationError" { + // Ignore stack which has been already deleted + return nil + } + return err + } + + wait := resource.StateChangeConf{ + Pending: []string{"DELETE_IN_PROGRESS", "ROLLBACK_IN_PROGRESS"}, + Target: "DELETE_COMPLETE", + Timeout: 30 * time.Minute, + MinTimeout: 5 * time.Second, + Refresh: func() (interface{}, string, error) { + resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{ + StackName: aws.String(d.Get("name").(string)), + }) + + if err != nil { + awsErr, ok := err.(awserr.Error) + if !ok { + return resp, "DELETE_FAILED", err + } + + log.Printf("[DEBUG] Error when deleting CloudFormation stack: %s: %s", + awsErr.Code(), awsErr.Message()) + + if awsErr.Code() == "ValidationError" { + return resp, "DELETE_COMPLETE", nil + } + } + + if len(resp.Stacks) == 0 { + log.Printf("[DEBUG] CloudFormation stack %q is already gone", d.Get("name")) + return resp, "DELETE_COMPLETE", nil + } + + status := *resp.Stacks[0].StackStatus + log.Printf("[DEBUG] Current CloudFormation stack status: %q", status) + + return resp, status, err + }, + } + + _, err = wait.WaitForState() + if err != nil { + return err + } + + log.Printf("[DEBUG] CloudFormation stack %q has been deleted", d.Id()) + + d.SetId("") + + return nil +} + +// getLastCfEventTimestamp takes the first event in a list +// of events ordered from the newest to the oldest +// and extracts timestamp from it +// LastUpdatedTime only provides last >successful< updated time +func getLastCfEventTimestamp(stackName string, conn *cloudformation.CloudFormation) ( + *time.Time, error) { + output, err := conn.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{ + StackName: aws.String(stackName), + }) + if err != nil { + return nil, err + } + + return 
output.StackEvents[0].Timestamp, nil +} + +// getCloudFormationFailures returns ResourceStatusReason(s) +// of events that should be failures based on regexp match of status +func getCloudFormationFailures(stackName *string, afterTime time.Time, + conn *cloudformation.CloudFormation) ([]string, error) { + var failures []string + // Only catching failures from last 100 events + // Some extra iteration logic via NextToken could be added + // but in reality it's nearly impossible to generate >100 + // events by a single stack update + events, err := conn.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{ + StackName: stackName, + }) + + if err != nil { + return nil, err + } + + failRe := regexp.MustCompile("_FAILED$") + rollbackRe := regexp.MustCompile("^ROLLBACK_") + + for _, e := range events.StackEvents { + if (failRe.MatchString(*e.ResourceStatus) || rollbackRe.MatchString(*e.ResourceStatus)) && + e.Timestamp.After(afterTime) && e.ResourceStatusReason != nil { + failures = append(failures, *e.ResourceStatusReason) + } + } + + return failures, nil +} diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go index 5976a8ff0e..fd581c84a7 100644 --- a/builtin/providers/aws/structure.go +++ b/builtin/providers/aws/structure.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/directoryservice" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ecs" @@ -601,3 +602,57 @@ func flattenDSVpcSettings( return []map[string]interface{}{settings} } + +func expandCloudFormationParameters(params map[string]interface{}) []*cloudformation.Parameter { + var cfParams []*cloudformation.Parameter + for k, v := range params { + cfParams = append(cfParams, &cloudformation.Parameter{ + ParameterKey: aws.String(k), + ParameterValue: aws.String(v.(string)), + }) + } + + return cfParams +} + +// flattenCloudFormationParameters is flattening list of +// *cloudformation.Parameters and only returning existing +// parameters to avoid clash with default values +func flattenCloudFormationParameters(cfParams []*cloudformation.Parameter, + originalParams map[string]interface{}) map[string]interface{} { + params := make(map[string]interface{}, len(cfParams)) + for _, p := range cfParams { + _, isConfigured := originalParams[*p.ParameterKey] + if isConfigured { + params[*p.ParameterKey] = *p.ParameterValue + } + } + return params +} + +func expandCloudFormationTags(tags map[string]interface{}) []*cloudformation.Tag { + var cfTags []*cloudformation.Tag + for k, v := range tags { + cfTags = append(cfTags, &cloudformation.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + }) + } + return cfTags +} + +func flattenCloudFormationTags(cfTags []*cloudformation.Tag) map[string]string { + tags := make(map[string]string, len(cfTags)) + for _, t := range cfTags { + tags[*t.Key] = *t.Value + } + return tags +} + +func flattenCloudFormationOutputs(cfOutputs []*cloudformation.Output) map[string]string { + outputs := make(map[string]string, len(cfOutputs)) + for _, o := range cfOutputs { + outputs[*o.OutputKey] = *o.OutputValue + } + return outputs +} From 7088a0096e920501726c5769b941dfa85d4a38d6 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 13 Jul 2015 07:51:32 +0100 Subject: [PATCH 236/335] provider/aws: Add acceptance tests for aws_cloudformation_stack --- .../resource_aws_cloudformation_stack_test.go | 228 ++++++++++++++++++ 1 file changed, 228 
insertions(+)
 create mode 100644 builtin/providers/aws/resource_aws_cloudformation_stack_test.go

diff --git a/builtin/providers/aws/resource_aws_cloudformation_stack_test.go b/builtin/providers/aws/resource_aws_cloudformation_stack_test.go
new file mode 100644
index 0000000000..7ad24be344
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_cloudformation_stack_test.go
@@ -0,0 +1,228 @@
+package aws
+
+import (
+  "fmt"
+  "testing"
+
+  "github.com/aws/aws-sdk-go/aws"
+  "github.com/aws/aws-sdk-go/service/cloudformation"
+  "github.com/hashicorp/terraform/helper/resource"
+  "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSCloudFormation_basic(t *testing.T) {
+  var stack cloudformation.Stack
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSCloudFormationDestroy,
+    Steps: []resource.TestStep{
+      resource.TestStep{
+        Config: testAccAWSCloudFormationConfig,
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckCloudFormationStackExists("aws_cloudformation_stack.network", &stack),
+        ),
+      },
+    },
+  })
+}
+
+func TestAccAWSCloudFormation_defaultParams(t *testing.T) {
+  var stack cloudformation.Stack
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSCloudFormationDestroy,
+    Steps: []resource.TestStep{
+      resource.TestStep{
+        Config: testAccAWSCloudFormationConfig_defaultParams,
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckCloudFormationStackExists("aws_cloudformation_stack.asg-demo", &stack),
+        ),
+      },
+    },
+  })
+}
+
+func TestAccAWSCloudFormation_allAttributes(t *testing.T) {
+  var stack cloudformation.Stack
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSCloudFormationDestroy,
+    Steps: []resource.TestStep{
+      resource.TestStep{
+        Config: testAccAWSCloudFormationConfig_allAttributes,
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckCloudFormationStackExists("aws_cloudformation_stack.full", &stack),
+        ),
+      },
+    },
+  })
+}
+
+func testAccCheckCloudFormationStackExists(n string, stack *cloudformation.Stack) resource.TestCheckFunc {
+  return func(s *terraform.State) error {
+    rs, ok := s.RootModule().Resources[n]
+    if !ok {
+      return fmt.Errorf("Not found: %s", n)
+    }
+
+    conn := testAccProvider.Meta().(*AWSClient).cfconn
+    params := &cloudformation.DescribeStacksInput{
+      StackName: aws.String(rs.Primary.ID),
+    }
+    resp, err := conn.DescribeStacks(params)
+    if err != nil {
+      return err
+    }
+    if len(resp.Stacks) == 0 {
+      return fmt.Errorf("CloudFormation stack not found")
+    }
+
+    return nil
+  }
+}
+
+func testAccCheckAWSCloudFormationDestroy(s *terraform.State) error {
+  conn := testAccProvider.Meta().(*AWSClient).cfconn
+
+  for _, rs := range s.RootModule().Resources {
+    if rs.Type != "aws_cloudformation_stack" {
+      continue
+    }
+
+    params := cloudformation.DescribeStacksInput{
+      StackName: aws.String(rs.Primary.ID),
+    }
+
+    resp, err := conn.DescribeStacks(&params)
+
+    if err == nil {
+      if len(resp.Stacks) != 0 &&
+        *resp.Stacks[0].StackId == rs.Primary.ID {
+        return fmt.Errorf("CloudFormation stack still exists: %q", rs.Primary.ID)
+      }
+    }
+  }
+
+  return nil
+}
+
+var testAccAWSCloudFormationConfig = `
+resource "aws_cloudformation_stack" "network" {
+  name = "tf-networking-stack"
+  template_body = < Date: Tue, 22 Sep 2015 14:39:49 -0700
Subject: [PATCH 237/335] provider/aws: fix bug with 
reading GSIs from dynamodb --- .../providers/aws/resource_aws_dynamodb_table.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go index df043ffe08..4193ddcb0d 100644 --- a/builtin/providers/aws/resource_aws_dynamodb_table.go +++ b/builtin/providers/aws/resource_aws_dynamodb_table.go @@ -571,14 +571,23 @@ func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) erro } } - gsi["projection_type"] = *gsiObject.Projection.ProjectionType - gsi["non_key_attributes"] = gsiObject.Projection.NonKeyAttributes + gsi["projection_type"] = *(gsiObject.Projection.ProjectionType) + + nonKeyAttrs := make([]string, 0, len(gsiObject.Projection.NonKeyAttributes)) + for _, nonKeyAttr := range gsiObject.Projection.NonKeyAttributes { + nonKeyAttrs = append(nonKeyAttrs, *nonKeyAttr) + } + gsi["non_key_attributes"] = nonKeyAttrs gsiList = append(gsiList, gsi) log.Printf("[DEBUG] Added GSI: %s - Read: %d / Write: %d", gsi["name"], gsi["read_capacity"], gsi["write_capacity"]) } - d.Set("global_secondary_index", gsiList) + err = d.Set("global_secondary_index", gsiList) + if err != nil { + return err + } + d.Set("arn", table.TableArn) return nil From ef5b6e93a9d399b1b54d6d6207d7ed44e13f44f0 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 13 Oct 2015 16:57:11 -0500 Subject: [PATCH 238/335] provider/azure: fix issues loading config from homedir Issues were: * `settings_file` `ValidateFunc` needs to expand homedir just like the `configure` does, otherwise ~-based paths fail validation * `isFile` was being called before ~-expand so configure was failing as well * `Config` was swallowing error so provider was ending up with `nil`, resulting in crash To fix: * Consolidate settings_file path/contents handling into a single helper called from both `validate` and `configure` funcs * Return err from `Config` To cover: * Added test case to validate w/ tilde-path * Added configure test w/ tilde-path --- builtin/providers/azure/config.go | 2 +- builtin/providers/azure/provider.go | 58 ++++++++-------- builtin/providers/azure/provider_test.go | 85 +++++++++++++++++++++++- 3 files changed, 111 insertions(+), 34 deletions(-) diff --git a/builtin/providers/azure/config.go b/builtin/providers/azure/config.go index cbb23d58b5..b096a10c4b 100644 --- a/builtin/providers/azure/config.go +++ b/builtin/providers/azure/config.go @@ -98,7 +98,7 @@ func (c Client) getStorageServiceQueueClient(serviceName string) (storage.QueueS func (c *Config) NewClientFromSettingsData() (*Client, error) { mc, err := management.ClientFromPublishSettingsData(c.Settings, c.SubscriptionID) if err != nil { - return nil, nil + return nil, err } return &Client{ diff --git a/builtin/providers/azure/provider.go b/builtin/providers/azure/provider.go index fe100be35f..975a93b001 100644 --- a/builtin/providers/azure/provider.go +++ b/builtin/providers/azure/provider.go @@ -64,22 +64,12 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { Certificate: []byte(d.Get("certificate").(string)), } - settings := d.Get("settings_file").(string) - - if settings != "" { - if ok, _ := isFile(settings); ok { - settingsFile, err := homedir.Expand(settings) - if err != nil { - return nil, fmt.Errorf("Error expanding the settings file path: %s", err) - } - publishSettingsContent, err := ioutil.ReadFile(settingsFile) - if err != nil { - return nil, fmt.Errorf("Error reading settings file: %s", err) - } - 
config.Settings = publishSettingsContent - } else { - config.Settings = []byte(settings) - } + settingsFile := d.Get("settings_file").(string) + if settingsFile != "" { + // any errors from readSettings would have been caught at the validate + // step, so we can avoid handling them now + settings, _, _ := readSettings(settingsFile) + config.Settings = settings return config.NewClientFromSettingsData() } @@ -92,31 +82,39 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { "or both a 'subscription_id' and 'certificate'.") } -func validateSettingsFile(v interface{}, k string) (warnings []string, errors []error) { +func validateSettingsFile(v interface{}, k string) ([]string, []error) { value := v.(string) - if value == "" { - return + return nil, nil } - var settings settingsData - if err := xml.Unmarshal([]byte(value), &settings); err != nil { - warnings = append(warnings, ` + _, warnings, errors := readSettings(value) + return warnings, errors +} + +const settingsPathWarnMsg = ` settings_file is not valid XML, so we are assuming it is a file path. This support will be removed in the future. Please update your configuration to use -${file("filename.publishsettings")} instead.`) - } else { +${file("filename.publishsettings")} instead.` + +func readSettings(pathOrContents string) (s []byte, ws []string, es []error) { + var settings settingsData + if err := xml.Unmarshal([]byte(pathOrContents), &settings); err == nil { + s = []byte(pathOrContents) return } - if ok, err := isFile(value); !ok { - errors = append(errors, - fmt.Errorf( - "account_file path could not be read from '%s': %s", - value, - err)) + ws = append(ws, settingsPathWarnMsg) + path, err := homedir.Expand(pathOrContents) + if err != nil { + es = append(es, fmt.Errorf("Error expanding path: %s", err)) + return } + s, err = ioutil.ReadFile(path) + if err != nil { + es = append(es, fmt.Errorf("Could not read file '%s': %s", path, err)) + } return } diff --git a/builtin/providers/azure/provider_test.go b/builtin/providers/azure/provider_test.go index 5c720640fb..b3feb83925 100644 --- a/builtin/providers/azure/provider_test.go +++ b/builtin/providers/azure/provider_test.go @@ -3,12 +3,14 @@ package azure import ( "io" "io/ioutil" - "log" "os" + "strings" "testing" + "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/go-homedir" ) var testAccProviders map[string]terraform.ResourceProvider @@ -67,20 +69,33 @@ func TestAzure_validateSettingsFile(t *testing.T) { if err != nil { t.Fatalf("Error creating temporary file in TestAzure_validateSettingsFile: %s", err) } + defer os.Remove(f.Name()) fx, err := ioutil.TempFile("", "tf-test-xml") if err != nil { t.Fatalf("Error creating temporary file with XML in TestAzure_validateSettingsFile: %s", err) } + defer os.Remove(fx.Name()) + + home, err := homedir.Dir() + if err != nil { + t.Fatalf("Error fetching homedir: %s", err) + } + fh, err := ioutil.TempFile(home, "tf-test-home") + if err != nil { + t.Fatalf("Error creating homedir-based temporary file: %s", err) + } + defer os.Remove(fh.Name()) _, err = io.WriteString(fx, "") if err != nil { t.Fatalf("Error writing XML File: %s", err) } - - log.Printf("fx name: %s", fx.Name()) fx.Close() + r := strings.NewReplacer(home, "~") + homePath := r.Replace(fh.Name()) + cases := []struct { Input string // String of XML or a path to an XML file W int // expected count of warnings @@ -89,6 +104,7 @@ func 
TestAzure_validateSettingsFile(t *testing.T) { {"test", 1, 1}, {f.Name(), 1, 0}, {fx.Name(), 1, 0}, + {homePath, 1, 0}, {"", 0, 0}, } @@ -104,6 +120,53 @@ func TestAzure_validateSettingsFile(t *testing.T) { } } +func TestAzure_providerConfigure(t *testing.T) { + home, err := homedir.Dir() + if err != nil { + t.Fatalf("Error fetching homedir: %s", err) + } + fh, err := ioutil.TempFile(home, "tf-test-home") + if err != nil { + t.Fatalf("Error creating homedir-based temporary file: %s", err) + } + defer os.Remove(fh.Name()) + + _, err = io.WriteString(fh, testAzurePublishSettingsStr) + if err != nil { + t.Fatalf("err: %s", err) + } + fh.Close() + + r := strings.NewReplacer(home, "~") + homePath := r.Replace(fh.Name()) + + cases := []struct { + SettingsFile string // String of XML or a path to an XML file + NilMeta bool // whether meta is expected to be nil + }{ + {testAzurePublishSettingsStr, false}, + {homePath, false}, + } + + for _, tc := range cases { + rp := Provider() + raw := map[string]interface{}{ + "settings_file": tc.SettingsFile, + } + + rawConfig, err := config.NewRawConfig(raw) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = rp.Configure(terraform.NewResourceConfig(rawConfig)) + meta := rp.(*schema.Provider).Meta() + if (meta == nil) != tc.NilMeta { + t.Fatalf("expected NilMeta: %t, got meta: %#v", tc.NilMeta, meta) + } + } +} + func TestAzure_isFile(t *testing.T) { f, err := ioutil.TempFile("", "tf-test-file") if err != nil { @@ -129,3 +192,19 @@ func TestAzure_isFile(t *testing.T) { } } } + +// testAzurePublishSettingsStr is a revoked publishsettings file +const testAzurePublishSettingsStr = ` + + + + + + +` From 8d017be63724fb6bc9b237de5196b9b88b3c4523 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 14 Oct 2015 10:35:40 -0500 Subject: [PATCH 239/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9458c1a733..6dfefc4193 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ IMPROVEMENTS: * provider/aws: Add validation for `db_parameter_group.name` [GH-3279] * provider/aws: `aws_s3_bucket_object` allows interpolated content to be set with new `content` attribute. [GH-3200] * provider/aws: Allow tags for `aws_kinesis_stream` resource. 
[GH-3397] + * provider/aws: Configurable capacity waiting duration for ASGs [GH-3191] * provider/cloudstack: Add `project` parameter to `cloudstack_vpc`, `cloudstack_network`, `cloudstack_ipaddress` and `cloudstack_disk` [GH-3035] * provider/openstack: add functionality to attach FloatingIP to Port [GH-1788] * provider/google: Can now do multi-region deployments without using multiple providers [GH-3258] From 6d2fee9c28831251aace76ea8f5653ba0c5510b1 Mon Sep 17 00:00:00 2001 From: stack72 Date: Wed, 14 Oct 2015 18:06:09 +0100 Subject: [PATCH 240/335] After the DynamoDB table is created, the ARN wasn't being set --- builtin/providers/aws/resource_aws_dynamodb_table.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go index df043ffe08..b322ad8977 100644 --- a/builtin/providers/aws/resource_aws_dynamodb_table.go +++ b/builtin/providers/aws/resource_aws_dynamodb_table.go @@ -287,6 +287,10 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er } else { // No error, set ID and return d.SetId(*output.TableDescription.TableName) + if err := d.Set("arn", *output.TableDescription.TableArn); err != nil { + return err + } + return nil } } From 12625997c1124f5bc410d1d311d6b49909ead03f Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Fri, 25 Sep 2015 17:48:08 -0400 Subject: [PATCH 241/335] Added global address & tests --- builtin/providers/google/provider.go | 1 + .../google/resource_compute_global_address.go | 100 ++++++++++++++++++ .../resource_compute_global_address_test.go | 81 ++++++++++++++ .../r/compute_global_address.html.markdown | 37 +++++++ 4 files changed, 219 insertions(+) create mode 100644 builtin/providers/google/resource_compute_global_address.go create mode 100644 builtin/providers/google/resource_compute_global_address_test.go create mode 100644 website/source/docs/providers/google/r/compute_global_address.html.markdown diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go index 7c9587219b..87a299d81b 100644 --- a/builtin/providers/google/provider.go +++ b/builtin/providers/google/provider.go @@ -40,6 +40,7 @@ func Provider() terraform.ResourceProvider { "google_compute_disk": resourceComputeDisk(), "google_compute_firewall": resourceComputeFirewall(), "google_compute_forwarding_rule": resourceComputeForwardingRule(), + "google_compute_global_address": resourceComputeGlobalAddress(), "google_compute_http_health_check": resourceComputeHttpHealthCheck(), "google_compute_instance": resourceComputeInstance(), "google_compute_instance_template": resourceComputeInstanceTemplate(), diff --git a/builtin/providers/google/resource_compute_global_address.go b/builtin/providers/google/resource_compute_global_address.go new file mode 100644 index 0000000000..0d19bdfcf6 --- /dev/null +++ b/builtin/providers/google/resource_compute_global_address.go @@ -0,0 +1,100 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeGlobalAddress() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeGlobalAddressCreate, + Read: resourceComputeGlobalAddressRead, + Delete: resourceComputeGlobalAddressDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "address": &schema.Schema{ + Type: 
schema.TypeString,
+				Computed: true,
+			},
+
+			"self_link": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	// Build the address parameter
+	addr := &compute.Address{Name: d.Get("name").(string)}
+	op, err := config.clientCompute.GlobalAddresses.Insert(
+		config.Project, addr).Do()
+	if err != nil {
+		return fmt.Errorf("Error creating address: %s", err)
+	}
+
+	// It probably maybe worked, so store the ID now
+	d.SetId(addr.Name)
+
+	err = resourceOperationWaitGlobal(config, op, "Creating Global Address")
+	if err != nil {
+		return err
+	}
+
+	return resourceComputeGlobalAddressRead(d, meta)
+}
+
+func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	addr, err := config.clientCompute.GlobalAddresses.Get(
+		config.Project, d.Id()).Do()
+	if err != nil {
+		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+			// The resource doesn't exist anymore
+			d.SetId("")
+
+			return nil
+		}
+
+		return fmt.Errorf("Error reading address: %s", err)
+	}
+
+	d.Set("address", addr.Address)
+	d.Set("self_link", addr.SelfLink)
+
+	return nil
+}
+
+func resourceComputeGlobalAddressDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	// Delete the address
+	log.Printf("[DEBUG] address delete request")
+	op, err := config.clientCompute.GlobalAddresses.Delete(
+		config.Project, d.Id()).Do()
+	if err != nil {
+		return fmt.Errorf("Error deleting address: %s", err)
+	}
+
+	err = resourceOperationWaitGlobal(config, op, "Deleting Global Address")
+	if err != nil {
+		return err
+	}
+
+	d.SetId("")
+	return nil
+}
diff --git a/builtin/providers/google/resource_compute_global_address_test.go b/builtin/providers/google/resource_compute_global_address_test.go
new file mode 100644
index 0000000000..2ef7b97ea7
--- /dev/null
+++ b/builtin/providers/google/resource_compute_global_address_test.go
@@ -0,0 +1,81 @@
+package google
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+	"google.golang.org/api/compute/v1"
+)
+
+func TestAccComputeGlobalAddress_basic(t *testing.T) {
+	var addr compute.Address
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckComputeGlobalAddressDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccComputeGlobalAddress_basic,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckComputeGlobalAddressExists(
+						"google_compute_global_address.foobar", &addr),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckComputeGlobalAddressDestroy(s *terraform.State) error {
+	config := testAccProvider.Meta().(*Config)
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "google_compute_global_address" {
+			continue
+		}
+
+		_, err := config.clientCompute.GlobalAddresses.Get(
+			config.Project, rs.Primary.ID).Do()
+		if err == nil {
+			return fmt.Errorf("Address still exists")
+		}
+	}
+
+	return nil
+}
+
+func testAccCheckComputeGlobalAddressExists(n string, addr *compute.Address) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No ID is set")
+		}
+
+		config := testAccProvider.Meta().(*Config)
+
+		found, err := config.clientCompute.GlobalAddresses.Get(
+			config.Project, rs.Primary.ID).Do()
+		if err != nil {
+			return err
+		}
+
+		if found.Name != rs.Primary.ID {
+			return fmt.Errorf("Addr not found")
+		}
+
+		*addr = *found
+
+		return nil
+	}
+}
+
+const testAccComputeGlobalAddress_basic = `
+resource "google_compute_global_address" "foobar" {
+	name = "terraform-test"
+}`
diff --git a/website/source/docs/providers/google/r/compute_global_address.html.markdown b/website/source/docs/providers/google/r/compute_global_address.html.markdown
new file mode 100644
index 0000000000..1fdb24e6dd
--- /dev/null
+++ b/website/source/docs/providers/google/r/compute_global_address.html.markdown
@@ -0,0 +1,37 @@
+---
+layout: "google"
+page_title: "Google: google_compute_global_address"
+sidebar_current: "docs-google-resource-global-address"
+description: |-
+  Creates a static global IP address resource for a Google Compute Engine project.
+---
+
+# google\_compute\_global\_address
+
+Creates a static IP address resource global to a Google Compute Engine project. For more information see
+[the official documentation](https://cloud.google.com/compute/docs/instances-and-network) and
+[API](https://cloud.google.com/compute/docs/reference/latest/globalAddresses).
+
+
+## Example Usage
+
+```
+resource "google_compute_global_address" "default" {
+	name = "test-address"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) A unique name for the resource, required by GCE.
+    Changing this forces a new resource to be created.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `name` - The name of the resource.
+* `address` - The IP address that was allocated.
+* `self_link` - The URI of the created resource.
From b7f7c7a7315a2e6c355f45c330c9d59ebc9c0e36 Mon Sep 17 00:00:00 2001
From: Lars Wander
Date: Wed, 14 Oct 2015 13:17:08 -0400
Subject: [PATCH 242/335] Provider GCE, fixed metadata state update bug

---
 builtin/providers/google/provider.go          |   1 -
 .../google/resource_compute_global_address.go | 100 ------------------
 .../resource_compute_global_address_test.go   |  81 --------------
 .../google/resource_compute_instance.go       |  18 +++-
 .../google/resource_compute_instance_test.go  |   2 +-
 .../r/compute_global_address.html.markdown    |  37 -------
 6 files changed, 18 insertions(+), 221 deletions(-)
 delete mode 100644 builtin/providers/google/resource_compute_global_address.go
 delete mode 100644 builtin/providers/google/resource_compute_global_address_test.go
 delete mode 100644 website/source/docs/providers/google/r/compute_global_address.html.markdown

diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go
index 87a299d81b..7c9587219b 100644
--- a/builtin/providers/google/provider.go
+++ b/builtin/providers/google/provider.go
@@ -40,7 +40,6 @@ func Provider() terraform.ResourceProvider {
 			"google_compute_disk":              resourceComputeDisk(),
 			"google_compute_firewall":          resourceComputeFirewall(),
 			"google_compute_forwarding_rule":   resourceComputeForwardingRule(),
-			"google_compute_global_address":    resourceComputeGlobalAddress(),
 			"google_compute_http_health_check": resourceComputeHttpHealthCheck(),
 			"google_compute_instance":          resourceComputeInstance(),
 			"google_compute_instance_template": resourceComputeInstanceTemplate(),
diff --git a/builtin/providers/google/resource_compute_global_address.go b/builtin/providers/google/resource_compute_global_address.go
deleted file mode 100644
index 0d19bdfcf6..0000000000
--- a/builtin/providers/google/resource_compute_global_address.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package google
-
-import (
-	"fmt"
-	"log"
-
-	"github.com/hashicorp/terraform/helper/schema"
-	"google.golang.org/api/compute/v1"
-	"google.golang.org/api/googleapi"
-)
-
-func resourceComputeGlobalAddress() *schema.Resource {
-	return &schema.Resource{
-		Create: resourceComputeGlobalAddressCreate,
-		Read:   resourceComputeGlobalAddressRead,
-		Delete: resourceComputeGlobalAddressDelete,
-
-		Schema: map[string]*schema.Schema{
-			"name": &schema.Schema{
-				Type:     schema.TypeString,
-				Required: true,
-				ForceNew: true,
-			},
-
-			"address": &schema.Schema{
-				Type:     schema.TypeString,
-				Computed: true,
-			},
-
-			"self_link": &schema.Schema{
-				Type:     schema.TypeString,
-				Computed: true,
-			},
-		},
-	}
-}
-
-func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{}) error {
-	config := meta.(*Config)
-
-	// Build the address parameter
-	addr := &compute.Address{Name: d.Get("name").(string)}
-	op, err := config.clientCompute.GlobalAddresses.Insert(
-		config.Project, addr).Do()
-	if err != nil {
-		return fmt.Errorf("Error creating address: %s", err)
-	}
-
-	// It probably maybe worked, so store the ID now
-	d.SetId(addr.Name)
-
-	err = resourceOperationWaitGlobal(config, op, "Creating Global Address")
-	if err != nil {
-		return err
-	}
-
-	return resourceComputeGlobalAddressRead(d, meta)
-}
-
-func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) error {
-	config := meta.(*Config)
-
-	addr, err := config.clientCompute.GlobalAddresses.Get(
-		config.Project, d.Id()).Do()
-	if err != nil {
-		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
-			// The resource doesn't exist anymore
-			d.SetId("")
-
-			return nil
-		}
-
-		return fmt.Errorf("Error reading address: %s", err)
-	}
-
-	d.Set("address", addr.Address)
-	d.Set("self_link", addr.SelfLink)
-
-	return nil
-}
-
-func resourceComputeGlobalAddressDelete(d *schema.ResourceData, meta interface{}) error {
-	config := meta.(*Config)
-
-	// Delete the address
-	log.Printf("[DEBUG] address delete request")
-	op, err := config.clientCompute.GlobalAddresses.Delete(
-		config.Project, d.Id()).Do()
-	if err != nil {
-		return fmt.Errorf("Error deleting address: %s", err)
-	}
-
-	err = resourceOperationWaitGlobal(config, op, "Deleting Global Address")
-	if err != nil {
-		return err
-	}
-
-	d.SetId("")
-	return nil
-}
diff --git a/builtin/providers/google/resource_compute_global_address_test.go b/builtin/providers/google/resource_compute_global_address_test.go
deleted file mode 100644
index 2ef7b97ea7..0000000000
--- a/builtin/providers/google/resource_compute_global_address_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package google
-
-import (
-	"fmt"
-	"testing"
-
-	"github.com/hashicorp/terraform/helper/resource"
-	"github.com/hashicorp/terraform/terraform"
-	"google.golang.org/api/compute/v1"
-)
-
-func TestAccComputeGlobalAddress_basic(t *testing.T) {
-	var addr compute.Address
-
-	resource.Test(t, resource.TestCase{
-		PreCheck:     func() { testAccPreCheck(t) },
-		Providers:    testAccProviders,
-		CheckDestroy: testAccCheckComputeGlobalAddressDestroy,
-		Steps: []resource.TestStep{
-			resource.TestStep{
-				Config: testAccComputeGlobalAddress_basic,
-				Check: resource.ComposeTestCheckFunc(
-					testAccCheckComputeGlobalAddressExists(
-						"google_compute_global_address.foobar", &addr),
-				),
-			},
-		},
-	})
-}
-
-func testAccCheckComputeGlobalAddressDestroy(s *terraform.State) error {
-	config := testAccProvider.Meta().(*Config)
-
-	for _,
rs := range s.RootModule().Resources { - if rs.Type != "google_compute_global_address" { - continue - } - - _, err := config.clientCompute.GlobalAddresses.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("Address still exists") - } - } - - return nil -} - -func testAccCheckComputeGlobalAddressExists(n string, addr *compute.Address) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.GlobalAddresses.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("Addr not found") - } - - *addr = *found - - return nil - } -} - -const testAccComputeGlobalAddress_basic = ` -resource "google_compute_global_address" "foobar" { - name = "terraform-test" -}` diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go index 52575767e5..229d1b05e3 100644 --- a/builtin/providers/google/resource_compute_instance.go +++ b/builtin/providers/google/resource_compute_instance.go @@ -515,10 +515,17 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error // Synch metadata md := instance.Metadata - if err = d.Set("metadata", MetadataFormatSchema(md)); err != nil { + _md := MetadataFormatSchema(md) + if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { + d.Set("metadata_startup_script", script) + delete(_md, "startup-script") + } + + if err = d.Set("metadata", _md); err != nil { return fmt.Errorf("Error setting metadata: %s", err) } + d.Set("can_ip_forward", instance.CanIpForward) // Set the service accounts @@ -635,6 +642,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error } d.Set("self_link", instance.SelfLink) + d.SetId(instance.Name) return nil } @@ -655,6 +663,14 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err // If the Metadata has changed, then update that. 
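	// A startup script managed through metadata_startup_script is merged into
	// the desired metadata map below; declaring a startup-script key under
	// metadata as well is rejected, since the two sources would conflict.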
 	if d.HasChange("metadata") {
 		o, n := d.GetChange("metadata")
+		if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists {
+			if _, ok := n.(map[string]interface{})["startup-script"]; ok {
+				return fmt.Errorf("Only one of metadata.startup-script and metadata_startup_script may be defined")
+			}
+
+			n.(map[string]interface{})["startup-script"] = script
+		}
+
 		updateMD := func() error {
 			// Reload the instance in the case of a fingerprint mismatch
diff --git a/builtin/providers/google/resource_compute_instance_test.go b/builtin/providers/google/resource_compute_instance_test.go
index 61c4906a25..f59da73ef9 100644
--- a/builtin/providers/google/resource_compute_instance_test.go
+++ b/builtin/providers/google/resource_compute_instance_test.go
@@ -32,7 +32,7 @@ func TestAccComputeInstance_basic_deprecated_network(t *testing.T) {
 	})
 }
 
-func TestAccComputeInstance_basic(t *testing.T) {
+func TestAccComputeInstance_basic1(t *testing.T) {
 	var instance compute.Instance
 
 	resource.Test(t, resource.TestCase{
diff --git a/website/source/docs/providers/google/r/compute_global_address.html.markdown b/website/source/docs/providers/google/r/compute_global_address.html.markdown
deleted file mode 100644
index 1fdb24e6dd..0000000000
--- a/website/source/docs/providers/google/r/compute_global_address.html.markdown
+++ /dev/null
@@ -1,37 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_global_address"
-sidebar_current: "docs-google-resource-global-address"
-description: |-
-  Creates a static global IP address resource for a Google Compute Engine project.
----
-
-# google\_compute\_global\_address
-
-Creates a static IP address resource global to a Google Compute Engine project. For more information see
-[the official documentation](https://cloud.google.com/compute/docs/instances-and-network) and
-[API](https://cloud.google.com/compute/docs/reference/latest/globalAddresses).
-
-
-## Example Usage
-
-```
-resource "google_compute_global_address" "default" {
-	name = "test-address"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE.
-    Changing this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `name` - The name of the resource.
-* `address` - The IP address that was allocated.
-* `self_link` - The URI of the created resource.
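At this point in the series, the GCE instance resource reconciles a startup script through the dedicated `metadata_startup_script` attribute, and an update that defines `startup-script` under `metadata` while that attribute is set is rejected. A minimal configuration sketch of the intended usage — the resource name, zone, machine type, and script are invented for illustration, and the required disk and network blocks are elided:

```
resource "google_compute_instance" "example" {
  name         = "example-instance"
  zone         = "us-central1-a"
  machine_type = "n1-standard-1"

  # Managed via the dedicated attribute so the provider can track it
  # against the "startup-script" key GCE stores in instance metadata.
  metadata_startup_script = "echo booted > /tmp/booted"

  metadata {
    # Other keys are fine here, but adding "startup-script" as well
    # would conflict with the attribute above and fail on update.
    role = "web"
  }

  # (disk and network_interface blocks omitted for brevity)
}
```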
From b1d731bd6f24a1aab2ede549a46bf2b9c93be68e Mon Sep 17 00:00:00 2001
From: Joel Moss
Date: Wed, 14 Oct 2015 19:05:38 +0100
Subject: [PATCH 243/335] [chef provisioning] When use_policyfile is given, the run list is not used, so don't require it

---
 builtin/provisioners/chef/resource_provisioner.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builtin/provisioners/chef/resource_provisioner.go b/builtin/provisioners/chef/resource_provisioner.go
index 7b94486d29..50b5666ee1 100644
--- a/builtin/provisioners/chef/resource_provisioner.go
+++ b/builtin/provisioners/chef/resource_provisioner.go
@@ -180,7 +180,7 @@ func (r *ResourceProvisioner) Validate(c *terraform.ResourceConfig) (ws []string
 	if p.NodeName == "" {
 		es = append(es, fmt.Errorf("Key not found: node_name"))
 	}
-	if p.RunList == nil {
+	if !p.UsePolicyfile && p.RunList == nil {
 		es = append(es, fmt.Errorf("Key not found: run_list"))
 	}
 	if p.ServerURL == "" {

From 7af484c8f6e9aca7792877a55655c68351fb5910 Mon Sep 17 00:00:00 2001
From: stack72
Date: Wed, 14 Oct 2015 19:16:58 +0100
Subject: [PATCH 244/335] Changing the DynamoDB Create to do a Read at the end

---
 builtin/providers/aws/resource_aws_dynamodb_table.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go
index b322ad8977..c88f50d8aa 100644
--- a/builtin/providers/aws/resource_aws_dynamodb_table.go
+++ b/builtin/providers/aws/resource_aws_dynamodb_table.go
@@ -291,7 +291,7 @@ func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) er
 			return err
 		}
 
-		return nil
+		return resourceAwsDynamoDbTableRead(d, meta)
 	}
 }

From 4fb7ae6600ceef8ca5cdb554f9ae7057d412b92f Mon Sep 17 00:00:00 2001
From: Clint Shryock
Date: Wed, 14 Oct 2015 13:55:19 -0500
Subject: [PATCH 245/335] rename test so it can be run in isolation

---
 builtin/providers/aws/resource_aws_s3_bucket_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builtin/providers/aws/resource_aws_s3_bucket_test.go b/builtin/providers/aws/resource_aws_s3_bucket_test.go
index e494816b3e..1ce05583c9 100644
--- a/builtin/providers/aws/resource_aws_s3_bucket_test.go
+++ b/builtin/providers/aws/resource_aws_s3_bucket_test.go
@@ -64,7 +64,7 @@ func TestAccAWSS3Bucket_Policy(t *testing.T) {
 	})
 }
 
-func TestAccAWSS3Bucket_Website(t *testing.T) {
+func TestAccAWSS3Bucket_Website_Simple(t *testing.T) {
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheck(t) },
 		Providers:    testAccProviders,

From f9c577aa2ad1646e1a529d3cb23355e4ce8c2c0f Mon Sep 17 00:00:00 2001
From: Clint Shryock
Date: Wed, 14 Oct 2015 13:55:37 -0500
Subject: [PATCH 246/335] update requirement for peer test

---
 .../providers/aws/resource_aws_vpc_peering_connection_test.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go b/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go
index 8f73602505..ca92ce66a6 100644
--- a/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go
+++ b/builtin/providers/aws/resource_aws_vpc_peering_connection_test.go
@@ -36,6 +36,7 @@ func TestAccAWSVPCPeeringConnection_basic(t *testing.T) {
 
 func TestAccAWSVPCPeeringConnection_tags(t *testing.T) {
 	var connection ec2.VpcPeeringConnection
+	peerId := os.Getenv("TF_PEER_ID")
 
 	resource.Test(t, resource.TestCase{
 		PreCheck: func() { testAccPreCheck(t) },
@@ -43,7 +44,7 @@ func TestAccAWSVPCPeeringConnection_tags(t
*testing.T) { CheckDestroy: testAccCheckVpcDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccVpcPeeringConfigTags, + Config: fmt.Sprintf(testAccVpcPeeringConfigTags, peerId), Check: resource.ComposeTestCheckFunc( testAccCheckAWSVpcPeeringConnectionExists("aws_vpc_peering_connection.foo", &connection), testAccCheckTags(&connection.Tags, "foo", "bar"), @@ -133,6 +134,7 @@ resource "aws_vpc" "bar" { resource "aws_vpc_peering_connection" "foo" { vpc_id = "${aws_vpc.foo.id}" peer_vpc_id = "${aws_vpc.bar.id}" + peer_owner_id = "%s" tags { foo = "bar" } From 6ab339b62dad8fd56ebc4bad2136bd062c0ab138 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Wed, 14 Oct 2015 14:49:33 -0500 Subject: [PATCH 247/335] unset website_endpoint, website_domain if website part is removed --- builtin/providers/aws/resource_aws_s3_bucket.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/builtin/providers/aws/resource_aws_s3_bucket.go b/builtin/providers/aws/resource_aws_s3_bucket.go index a329d4ff6d..b45f69cc47 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket.go +++ b/builtin/providers/aws/resource_aws_s3_bucket.go @@ -464,6 +464,9 @@ func resourceAwsS3BucketWebsiteDelete(s3conn *s3.S3, d *schema.ResourceData) err return fmt.Errorf("Error deleting S3 website: %s", err) } + d.Set("website_endpoint", "") + d.Set("website_domain", "") + return nil } From 2a179d10657d58cc8ea63f7700f3a493a1c2e1a2 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 14 Oct 2015 13:44:28 -0500 Subject: [PATCH 248/335] helper/schema: ValidateFunc support for maps --- helper/schema/schema.go | 15 +++++++++++++-- helper/schema/schema_test.go | 2 +- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/helper/schema/schema.go b/helper/schema/schema.go index 34145b1367..f4d8609957 100644 --- a/helper/schema/schema.go +++ b/helper/schema/schema.go @@ -540,8 +540,8 @@ func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { if v.ValidateFunc != nil { switch v.Type { - case TypeList, TypeSet, TypeMap: - return fmt.Errorf("ValidateFunc is only supported on primitives.") + case TypeList, TypeSet: + return fmt.Errorf("ValidateFunc is not yet supported on lists or sets.") } } } @@ -1118,6 +1118,17 @@ func (m schemaMap) validateMap( } } + if schema.ValidateFunc != nil { + validatableMap := make(map[string]interface{}) + for _, raw := range raws { + for k, v := range raw.(map[string]interface{}) { + validatableMap[k] = v + } + } + + return schema.ValidateFunc(validatableMap, k) + } + return nil, nil } diff --git a/helper/schema/schema_test.go b/helper/schema/schema_test.go index faf703b0f8..09eeef119e 100644 --- a/helper/schema/schema_test.go +++ b/helper/schema/schema_test.go @@ -2903,7 +2903,7 @@ func TestSchemaMap_InternalValidate(t *testing.T) { { map[string]*Schema{ "foo": &Schema{ - Type: TypeMap, + Type: TypeSet, Required: true, ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { return From ab0534a356208ade7b5cba5096c215e0ac84f4c4 Mon Sep 17 00:00:00 2001 From: Clint Date: Wed, 14 Oct 2015 16:27:05 -0500 Subject: [PATCH 249/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6dfefc4193..61f9785a09 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ IMPROVEMENTS: * provider/aws: Add `configuation_endpoint` to `aws_elasticache_cluster` [GH-3250] * provider/aws: Add validation for `app_cookie_stickiness_policy.name` [GH-3277] * provider/aws: Add validation for `db_parameter_group.name` 
[GH-3279] + * provider/aws: Set DynamoDB Table ARN after creation [GH-3500] * provider/aws: `aws_s3_bucket_object` allows interpolated content to be set with new `content` attribute. [GH-3200] * provider/aws: Allow tags for `aws_kinesis_stream` resource. [GH-3397] * provider/aws: Configurable capacity waiting duration for ASGs [GH-3191] From a1939e70f7fc806532e977adfdf2356c525c8c2a Mon Sep 17 00:00:00 2001 From: Rob Zienert Date: Sun, 9 Aug 2015 03:02:28 -0500 Subject: [PATCH 250/335] Adding ignore_changes lifecycle meta property --- .gitignore | 1 + config/config.go | 5 +- config/loader_test.go | 57 +++++++++++++++++++ config/test-fixtures/ignore-changes.tf | 17 ++++++ terraform/context_plan_test.go | 46 +++++++++++++++ terraform/eval_ignore_changes.go | 32 +++++++++++ terraform/terraform_test.go | 13 +++++ .../test-fixtures/plan-ignore-changes/main.tf | 9 +++ terraform/transform_resource.go | 4 ++ .../docs/configuration/resources.html.md | 11 ++++ 10 files changed, 193 insertions(+), 2 deletions(-) create mode 100644 config/test-fixtures/ignore-changes.tf create mode 100644 terraform/eval_ignore_changes.go create mode 100644 terraform/test-fixtures/plan-ignore-changes/main.tf diff --git a/.gitignore b/.gitignore index 314611940e..66ea31701f 100644 --- a/.gitignore +++ b/.gitignore @@ -18,3 +18,4 @@ website/node_modules *.bak *~ .*.swp +.idea diff --git a/config/config.go b/config/config.go index c088414dab..d31777f6e8 100644 --- a/config/config.go +++ b/config/config.go @@ -84,8 +84,9 @@ type Resource struct { // ResourceLifecycle is used to store the lifecycle tuning parameters // to allow customized behavior type ResourceLifecycle struct { - CreateBeforeDestroy bool `mapstructure:"create_before_destroy"` - PreventDestroy bool `mapstructure:"prevent_destroy"` + CreateBeforeDestroy bool `mapstructure:"create_before_destroy"` + PreventDestroy bool `mapstructure:"prevent_destroy"` + IgnoreChanges []string `mapstructure:"ignore_changes"` } // Provisioner is a configured provisioner step on a resource. 
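With the `IgnoreChanges` field added to `ResourceLifecycle` above, a resource's `lifecycle` block can carry an `ignore_changes` list alongside the existing flags. A sketch of the configuration shape this enables — the AMI value and resource names are placeholders, not part of the patch:

```
resource "aws_instance" "web" {
  # The AMI may be rotated out-of-band (e.g. by an image pipeline),
  # so drift on it should not force a diff on every plan.
  ami           = "ami-12345678"
  instance_type = "m1.small"

  lifecycle {
    create_before_destroy = true

    # Diffs on the listed attributes are discarded instead of applied.
    ignore_changes = ["ami"]
  }
}
```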
diff --git a/config/loader_test.go b/config/loader_test.go index d239bd0b9a..eaf4f10aaa 100644 --- a/config/loader_test.go +++ b/config/loader_test.go @@ -440,6 +440,54 @@ func TestLoadFile_createBeforeDestroy(t *testing.T) { } } +func TestLoadFile_ignoreChanges(t *testing.T) { + c, err := LoadFile(filepath.Join(fixtureDir, "ignore-changes.tf")) + if err != nil { + t.Fatalf("err: %s", err) + } + + if c == nil { + t.Fatal("config should not be nil") + } + + actual := resourcesStr(c.Resources) + print(actual) + if actual != strings.TrimSpace(ignoreChangesResourcesStr) { + t.Fatalf("bad:\n%s", actual) + } + + // Check for the flag value + r := c.Resources[0] + if r.Name != "web" && r.Type != "aws_instance" { + t.Fatalf("Bad: %#v", r) + } + + // Should populate ignore changes + if len(r.Lifecycle.IgnoreChanges) == 0 { + t.Fatalf("Bad: %#v", r) + } + + r = c.Resources[1] + if r.Name != "bar" && r.Type != "aws_instance" { + t.Fatalf("Bad: %#v", r) + } + + // Should not populate ignore changes + if len(r.Lifecycle.IgnoreChanges) > 0 { + t.Fatalf("Bad: %#v", r) + } + + r = c.Resources[2] + if r.Name != "baz" && r.Type != "aws_instance" { + t.Fatalf("Bad: %#v", r) + } + + // Should not populate ignore changes + if len(r.Lifecycle.IgnoreChanges) > 0 { + t.Fatalf("Bad: %#v", r) + } +} + func TestLoad_preventDestroyString(t *testing.T) { c, err := LoadFile(filepath.Join(fixtureDir, "prevent-destroy-string.tf")) if err != nil { @@ -676,3 +724,12 @@ aws_instance[bar] (x1) aws_instance[web] (x1) ami ` + +const ignoreChangesResourcesStr = ` +aws_instance[bar] (x1) + ami +aws_instance[baz] (x1) + ami +aws_instance[web] (x1) + ami +` diff --git a/config/test-fixtures/ignore-changes.tf b/config/test-fixtures/ignore-changes.tf new file mode 100644 index 0000000000..765a057983 --- /dev/null +++ b/config/test-fixtures/ignore-changes.tf @@ -0,0 +1,17 @@ +resource "aws_instance" "web" { + ami = "foo" + lifecycle { + ignore_changes = ["ami"] + } +} + +resource "aws_instance" "bar" { + ami = "foo" + lifecycle { + ignore_changes = [] + } +} + +resource "aws_instance" "baz" { + ami = "foo" +} diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go index 50f2bb4716..db6f245772 100644 --- a/terraform/context_plan_test.go +++ b/terraform/context_plan_test.go @@ -1672,3 +1672,49 @@ func TestContext2Plan_varListErr(t *testing.T) { t.Fatal("should error") } } + +func TestContext2Plan_ignoreChanges(t *testing.T) { + m := testModule(t, "plan-ignore-changes") + p := testProvider("aws") + p.DiffFn = testDiffFn + s := &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "aws_instance.foo": &ResourceState{ + Primary: &InstanceState{ + ID: "bar", + Attributes: map[string]string{"ami": "ami-abcd1234"}, + }, + }, + }, + }, + }, + } + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + Variables: map[string]string{ + "foo": "ami-1234abcd", + }, + State: s, + }) + + plan, err := ctx.Plan() + if err != nil { + t.Fatalf("err: %s", err) + } + + if len(plan.Diff.RootModule().Resources) < 1 { + t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources) + } + + actual := strings.TrimSpace(plan.String()) + expected := strings.TrimSpace(testTerraformPlanIgnoreChangesStr) + if actual != expected { + t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected) + } +} diff --git a/terraform/eval_ignore_changes.go b/terraform/eval_ignore_changes.go new file mode 100644 index 
0000000000..1a44089a98 --- /dev/null +++ b/terraform/eval_ignore_changes.go @@ -0,0 +1,32 @@ +package terraform +import ( + "github.com/hashicorp/terraform/config" + "strings" +) + +// EvalIgnoreChanges is an EvalNode implementation that removes diff +// attributes if their name matches names provided by the resource's +// IgnoreChanges lifecycle. +type EvalIgnoreChanges struct { + Resource *config.Resource + Diff **InstanceDiff +} + +func (n *EvalIgnoreChanges) Eval(ctx EvalContext) (interface{}, error) { + if n.Diff == nil || *n.Diff == nil || n.Resource == nil || n.Resource.Id() == "" { + return nil, nil + } + + diff := *n.Diff + ignoreChanges := n.Resource.Lifecycle.IgnoreChanges + + for _, ignoredName := range ignoreChanges { + for name := range diff.Attributes { + if strings.HasPrefix(name, ignoredName) { + delete(diff.Attributes, name) + } + } + } + + return nil, nil +} diff --git a/terraform/terraform_test.go b/terraform/terraform_test.go index c84e9803cf..02d4de2a28 100644 --- a/terraform/terraform_test.go +++ b/terraform/terraform_test.go @@ -1286,3 +1286,16 @@ STATE: ` + +const testTerraformPlanIgnoreChangesStr = ` +DIFF: + +UPDATE: aws_instance.foo + type: "" => "aws_instance" + +STATE: + +aws_instance.foo: + ID = bar + ami = ami-abcd1234 +` diff --git a/terraform/test-fixtures/plan-ignore-changes/main.tf b/terraform/test-fixtures/plan-ignore-changes/main.tf new file mode 100644 index 0000000000..056256a1d8 --- /dev/null +++ b/terraform/test-fixtures/plan-ignore-changes/main.tf @@ -0,0 +1,9 @@ +variable "foo" {} + +resource "aws_instance" "foo" { + ami = "${var.foo}" + + lifecycle { + ignore_changes = ["ami"] + } +} diff --git a/terraform/transform_resource.go b/terraform/transform_resource.go index a52b3ba724..81ff158d9c 100644 --- a/terraform/transform_resource.go +++ b/terraform/transform_resource.go @@ -318,6 +318,10 @@ func (n *graphNodeExpandedResource) EvalTree() EvalNode { Resource: n.Resource, Diff: &diff, }, + &EvalIgnoreChanges{ + Resource: n.Resource, + Diff: &diff, + }, &EvalWriteState{ Name: n.stateId(), ResourceType: n.Resource.Type, diff --git a/website/source/docs/configuration/resources.html.md b/website/source/docs/configuration/resources.html.md index f099c5f252..d5e087fec4 100644 --- a/website/source/docs/configuration/resources.html.md +++ b/website/source/docs/configuration/resources.html.md @@ -68,11 +68,20 @@ The `lifecycle` block allows the following keys to be set: destruction of a given resource. When this is set to `true`, any plan that includes a destroy of this resource will return an error message. + * `ignore_changes` (list of strings) - Customizes how diffs are evaluated for + resources, allowing individual attributes to be ignored through changes. + As an example, this can be used to ignore dynamic changes to the + resource from external resources. Other meta-parameters cannot be ignored. + ~> **NOTE on create\_before\_destroy and dependencies:** Resources that utilize the `create_before_destroy` key can only depend on other resources that also include `create_before_destroy`. Referencing a resource that does not include `create_before_destroy` will result in a dependency graph cycle. +~> **NOTE on ignore\_changes:** Ignored attribute names can be matched by their +name, not state ID. For example, if an `aws_route_table` has two routes defined +and the `ignore_changes` list contains "route", both routes will be ignored. + ------------- Within a resource, you can optionally have a **connection block**. 
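Because `EvalIgnoreChanges` drops any diff attribute whose flattened name merely begins with an ignored entry, one list item can silence several attributes at once. A sketch of the route-table case the NOTE above warns about — the VPC references and CIDR blocks are invented:

```
resource "aws_route_table" "example" {
  vpc_id = "${aws_vpc.main.id}"

  route {
    cidr_block = "10.0.1.0/24"
    gateway_id = "${aws_internet_gateway.gw.id}"
  }

  route {
    cidr_block  = "10.0.2.0/24"
    instance_id = "${aws_instance.nat.id}"
  }

  lifecycle {
    # "route" is a prefix of both route.0.* and route.1.* in the diff,
    # so changes to BOTH routes are ignored, not just one of them.
    ignore_changes = ["route"]
  }
}
```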
@@ -191,6 +200,8 @@ where `LIFECYCLE` is: ``` lifecycle { [create_before_destroy = true|false] + [prevent_destroy = true|false] + [ignore_changes = [ATTRIBUTE NAME, ...]] } ``` From 4f4c572aa4394d0bd5ff509e09c4cdff5f0d7626 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 14 Oct 2015 18:23:14 -0500 Subject: [PATCH 251/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 61f9785a09..0a4437768b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ FEATURES: * **New resource: `aws_autoscaling_lifecycle_hook`** [GH-3351] * **New resource: `aws_placement_group`** [GH-3457] * **New resource: `aws_glacier_vault`** [GH-3491] + * **New lifecycle flag: `ignore_changes`** [GH-2525] IMPROVEMENTS: From 4f400a1944186ab2c7f057343d7a45b94b13dd71 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 14 Oct 2015 13:17:44 -0500 Subject: [PATCH 252/335] provider/google: one more fix to GCE metadata In #3501 @lwander got us almost all the way there, but we still had tests failing. This seemed to be because GCE sets `metadata.startup-script` to a blank string on instance creation, and if a user specifies any `metadata` in their config this is seen as the desired full contents of metadata, so we get a diff trying to remove `startup-script`. Here, to address this, we just proactively remove the "startup-script" key from `Read`, and then we enforce that "metadata_startup_script" is the only way to configure startup scripts on instances. --- .../google/resource_compute_instance.go | 30 +++++++++++-------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go index 229d1b05e3..68b8aed357 100644 --- a/builtin/providers/google/resource_compute_instance.go +++ b/builtin/providers/google/resource_compute_instance.go @@ -197,9 +197,10 @@ func resourceComputeInstance() *schema.Resource { }, "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: schema.TypeString, + Type: schema.TypeMap, + Optional: true, + Elem: schema.TypeString, + ValidateFunc: validateInstanceMetadata, }, "service_account": &schema.Schema{ @@ -516,16 +517,16 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error md := instance.Metadata _md := MetadataFormatSchema(md) + delete(_md, "startup-script") + if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { d.Set("metadata_startup_script", script) - delete(_md, "startup-script") } if err = d.Set("metadata", _md); err != nil { return fmt.Errorf("Error setting metadata: %s", err) } - d.Set("can_ip_forward", instance.CanIpForward) // Set the service accounts @@ -671,7 +672,6 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err n.(map[string]interface{})["startup-script"] = script } - updateMD := func() error { // Reload the instance in the case of a fingerprint mismatch instance, err = getInstance(config, d) @@ -810,13 +810,8 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err func resourceInstanceMetadata(d *schema.ResourceData) (*compute.Metadata, error) { m := &compute.Metadata{} mdMap := d.Get("metadata").(map[string]interface{}) - _, mapScriptExists := mdMap["startup-script"] - dScript, dScriptExists := d.GetOk("metadata_startup_script") - if mapScriptExists && dScriptExists { - return nil, fmt.Errorf("Not allowed to have both metadata_startup_script and 
metadata.startup-script") - } - if dScriptExists { - mdMap["startup-script"] = dScript + if v, ok := d.GetOk("metadata_startup_script"); ok && v.(string) != "" { + mdMap["startup-script"] = v } if len(mdMap) > 0 { m.Items = make([]*compute.MetadataItems, 0, len(mdMap)) @@ -852,3 +847,12 @@ func resourceInstanceTags(d *schema.ResourceData) *compute.Tags { return tags } + +func validateInstanceMetadata(v interface{}, k string) (ws []string, es []error) { + mdMap := v.(map[string]interface{}) + if _, ok := mdMap["startup-script"]; ok { + es = append(es, fmt.Errorf( + "Use metadata_startup_script instead of a startup-script key in %q.", k)) + } + return +} From beff2ff4600d205cf2b549a4537f5dbec9ff62ea Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 14 Oct 2015 19:35:20 -0700 Subject: [PATCH 253/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a4437768b..a29895acaa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,7 @@ IMPROVEMENTS: * provider/aws: `aws_s3_bucket_object` allows interpolated content to be set with new `content` attribute. [GH-3200] * provider/aws: Allow tags for `aws_kinesis_stream` resource. [GH-3397] * provider/aws: Configurable capacity waiting duration for ASGs [GH-3191] + * provider/aws: Allow non-persistent Spot Requests [GH-3311] * provider/cloudstack: Add `project` parameter to `cloudstack_vpc`, `cloudstack_network`, `cloudstack_ipaddress` and `cloudstack_disk` [GH-3035] * provider/openstack: add functionality to attach FloatingIP to Port [GH-1788] * provider/google: Can now do multi-region deployments without using multiple providers [GH-3258] From 2e3b3cfad210b07cf77ae38d9495d2bb9bf8c1c7 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 14 Oct 2015 19:37:33 -0700 Subject: [PATCH 254/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a29895acaa..9a7d913180 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ IMPROVEMENTS: * provider/aws: Allow tags for `aws_kinesis_stream` resource. 
[GH-3397]
 * provider/aws: Configurable capacity waiting duration for ASGs [GH-3191]
 * provider/aws: Allow non-persistent Spot Requests [GH-3311]
+ * provider/aws: Support tags for AWS DB subnet group [GH-3138]
 * provider/cloudstack: Add `project` parameter to `cloudstack_vpc`, `cloudstack_network`, `cloudstack_ipaddress` and `cloudstack_disk` [GH-3035]
 * provider/openstack: add functionality to attach FloatingIP to Port [GH-1788]
 * provider/google: Can now do multi-region deployments without using multiple providers [GH-3258]

From b2b41192acbf8db54b8aef86be9191d57374f717 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 14 Oct 2015 22:16:58 -0500
Subject: [PATCH 255/335] provider/google: container test needed bigger instance to pass

---
 builtin/providers/google/resource_container_cluster_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builtin/providers/google/resource_container_cluster_test.go b/builtin/providers/google/resource_container_cluster_test.go
index 72f398a07c..ea4a5a597b 100644
--- a/builtin/providers/google/resource_container_cluster_test.go
+++ b/builtin/providers/google/resource_container_cluster_test.go
@@ -113,7 +113,7 @@ resource "google_container_cluster" "with_node_config" {
 	}
 
 	node_config {
-		machine_type = "f1-micro"
+		machine_type = "g1-small"
 		disk_size_gb = 15
 		oauth_scopes = [
 			"https://www.googleapis.com/auth/compute",

From 0efffc67f0015e4e7c6a51135b496bc0b5063e2c Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 14 Oct 2015 22:17:34 -0500
Subject: [PATCH 256/335] provider/google: storage bucket tests shouldn't check predefined_acl

it was deprecated in https://github.com/hashicorp/terraform/pull/3272

---
 builtin/providers/google/resource_storage_bucket_test.go | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/builtin/providers/google/resource_storage_bucket_test.go b/builtin/providers/google/resource_storage_bucket_test.go
index a7b59c61a9..3860fc9a6f 100644
--- a/builtin/providers/google/resource_storage_bucket_test.go
+++ b/builtin/providers/google/resource_storage_bucket_test.go
@@ -52,8 +52,6 @@ func TestAccStorageCustomAttributes(t *testing.T) {
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckCloudStorageBucketExists(
 						"google_storage_bucket.bucket", &bucketName),
-					resource.TestCheckResourceAttr(
-						"google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"),
 					resource.TestCheckResourceAttr(
 						"google_storage_bucket.bucket", "location", "EU"),
 					resource.TestCheckResourceAttr(
@@ -77,8 +75,6 @@ func TestAccStorageBucketUpdate(t *testing.T) {
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckCloudStorageBucketExists(
 						"google_storage_bucket.bucket", &bucketName),
-					resource.TestCheckResourceAttr(
-						"google_storage_bucket.bucket", "predefined_acl", "projectPrivate"),
 					resource.TestCheckResourceAttr(
 						"google_storage_bucket.bucket", "location", "US"),
 					resource.TestCheckResourceAttr(

From f6e525e5310db681078b096ee8d7aa74b66b4820 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 14 Oct 2015 22:36:01 -0500
Subject: [PATCH 257/335] provider/google: one more test that should skip predefined_acl

it was deprecated in https://github.com/hashicorp/terraform/pull/3272

---
 builtin/providers/google/resource_storage_bucket_test.go | 2 --
 1 file changed, 2 deletions(-)

diff --git a/builtin/providers/google/resource_storage_bucket_test.go b/builtin/providers/google/resource_storage_bucket_test.go
index 3860fc9a6f..8e83300500 100644
--- a/builtin/providers/google/resource_storage_bucket_test.go
+++
b/builtin/providers/google/resource_storage_bucket_test.go @@ -27,8 +27,6 @@ func TestAccStorage_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( "google_storage_bucket.bucket", &bucketName), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "predefined_acl", "projectPrivate"), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "location", "US"), resource.TestCheckResourceAttr( From 3fbeb326cd53f75e01350df06eae89e681b54314 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 14 Oct 2015 21:34:07 -0500 Subject: [PATCH 258/335] provider/azure: acc tests fixes * avoid name collisions * update image names --- builtin/providers/azure/provider_test.go | 6 + .../azure/resource_azure_data_disk_test.go | 138 +++++++++--------- .../azure/resource_azure_instance_test.go | 4 +- 3 files changed, 81 insertions(+), 67 deletions(-) diff --git a/builtin/providers/azure/provider_test.go b/builtin/providers/azure/provider_test.go index b3feb83925..ca4017aae0 100644 --- a/builtin/providers/azure/provider_test.go +++ b/builtin/providers/azure/provider_test.go @@ -3,9 +3,11 @@ package azure import ( "io" "io/ioutil" + "math/rand" "os" "strings" "testing" + "time" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/helper/schema" @@ -193,6 +195,10 @@ func TestAzure_isFile(t *testing.T) { } } +func genRandInt() int { + return rand.New(rand.NewSource(time.Now().UnixNano())).Int() % 100000 +} + // testAzurePublishSettingsStr is a revoked publishsettings file const testAzurePublishSettingsStr = ` diff --git a/builtin/providers/azure/resource_azure_data_disk_test.go b/builtin/providers/azure/resource_azure_data_disk_test.go index dfad26b5ef..2c6660f66d 100644 --- a/builtin/providers/azure/resource_azure_data_disk_test.go +++ b/builtin/providers/azure/resource_azure_data_disk_test.go @@ -13,6 +13,7 @@ import ( func TestAccAzureDataDisk_basic(t *testing.T) { var disk virtualmachinedisk.DataDiskResponse + name := fmt.Sprintf("terraform-test%d", genRandInt()) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -20,13 +21,13 @@ func TestAccAzureDataDisk_basic(t *testing.T) { CheckDestroy: testAccCheckAzureDataDiskDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAzureDataDisk_basic, + Config: testAccAzureDataDisk_basic(name), Check: resource.ComposeTestCheckFunc( testAccCheckAzureDataDiskExists( "azure_data_disk.foo", &disk), testAccCheckAzureDataDiskAttributes(&disk), resource.TestCheckResourceAttr( - "azure_data_disk.foo", "label", "terraform-test-0"), + "azure_data_disk.foo", "label", fmt.Sprintf("%s-0", name)), resource.TestCheckResourceAttr( "azure_data_disk.foo", "size", "10"), ), @@ -37,6 +38,7 @@ func TestAccAzureDataDisk_basic(t *testing.T) { func TestAccAzureDataDisk_update(t *testing.T) { var disk virtualmachinedisk.DataDiskResponse + name := fmt.Sprintf("terraform-test%d", genRandInt()) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -44,12 +46,12 @@ func TestAccAzureDataDisk_update(t *testing.T) { CheckDestroy: testAccCheckAzureDataDiskDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAzureDataDisk_advanced, + Config: testAccAzureDataDisk_advanced(name), Check: resource.ComposeTestCheckFunc( testAccCheckAzureDataDiskExists( "azure_data_disk.foo", &disk), resource.TestCheckResourceAttr( - "azure_data_disk.foo", "label", "terraform-test1-1"), + "azure_data_disk.foo", "label", fmt.Sprintf("%s-1", name)), 
resource.TestCheckResourceAttr( "azure_data_disk.foo", "lun", "1"), resource.TestCheckResourceAttr( @@ -57,17 +59,17 @@ func TestAccAzureDataDisk_update(t *testing.T) { resource.TestCheckResourceAttr( "azure_data_disk.foo", "caching", "ReadOnly"), resource.TestCheckResourceAttr( - "azure_data_disk.foo", "virtual_machine", "terraform-test1"), + "azure_data_disk.foo", "virtual_machine", name), ), }, resource.TestStep{ - Config: testAccAzureDataDisk_update, + Config: testAccAzureDataDisk_update(name), Check: resource.ComposeTestCheckFunc( testAccCheckAzureDataDiskExists( "azure_data_disk.foo", &disk), resource.TestCheckResourceAttr( - "azure_data_disk.foo", "label", "terraform-test1-1"), + "azure_data_disk.foo", "label", fmt.Sprintf("%s-1", name)), resource.TestCheckResourceAttr( "azure_data_disk.foo", "lun", "2"), resource.TestCheckResourceAttr( @@ -168,68 +170,74 @@ func testAccCheckAzureDataDiskDestroy(s *terraform.State) error { return nil } -var testAccAzureDataDisk_basic = fmt.Sprintf(` -resource "azure_instance" "foo" { - name = "terraform-test" - image = "Ubuntu Server 14.04 LTS" - size = "Basic_A1" - storage_service_name = "%s" - location = "West US" - username = "terraform" - password = "Pass!admin123" +func testAccAzureDataDisk_basic(name string) string { + return fmt.Sprintf(` + resource "azure_instance" "foo" { + name = "%s" + image = "Ubuntu Server 14.04 LTS" + size = "Basic_A1" + storage_service_name = "%s" + location = "West US" + username = "terraform" + password = "Pass!admin123" + } + + resource "azure_data_disk" "foo" { + lun = 0 + size = 10 + storage_service_name = "${azure_instance.foo.storage_service_name}" + virtual_machine = "${azure_instance.foo.id}" + }`, name, testAccStorageServiceName) } -resource "azure_data_disk" "foo" { - lun = 0 - size = 10 - storage_service_name = "${azure_instance.foo.storage_service_name}" - virtual_machine = "${azure_instance.foo.id}" -}`, testAccStorageServiceName) +func testAccAzureDataDisk_advanced(name string) string { + return fmt.Sprintf(` + resource "azure_instance" "foo" { + name = "%s" + image = "Ubuntu Server 14.04 LTS" + size = "Basic_A1" + storage_service_name = "%s" + location = "West US" + username = "terraform" + password = "Pass!admin123" + } -var testAccAzureDataDisk_advanced = fmt.Sprintf(` -resource "azure_instance" "foo" { - name = "terraform-test1" - image = "Ubuntu Server 14.04 LTS" - size = "Basic_A1" - storage_service_name = "%s" - location = "West US" - username = "terraform" - password = "Pass!admin123" + resource "azure_data_disk" "foo" { + lun = 1 + size = 10 + caching = "ReadOnly" + storage_service_name = "${azure_instance.foo.storage_service_name}" + virtual_machine = "${azure_instance.foo.id}" + }`, name, testAccStorageServiceName) } -resource "azure_data_disk" "foo" { - lun = 1 - size = 10 - caching = "ReadOnly" - storage_service_name = "${azure_instance.foo.storage_service_name}" - virtual_machine = "${azure_instance.foo.id}" -}`, testAccStorageServiceName) +func testAccAzureDataDisk_update(name string) string { + return fmt.Sprintf(` + resource "azure_instance" "foo" { + name = "%s" + image = "Ubuntu Server 14.04 LTS" + size = "Basic_A1" + storage_service_name = "%s" + location = "West US" + username = "terraform" + password = "Pass!admin123" + } -var testAccAzureDataDisk_update = fmt.Sprintf(` -resource "azure_instance" "foo" { - name = "terraform-test1" - image = "Ubuntu Server 14.04 LTS" - size = "Basic_A1" - storage_service_name = "%s" - location = "West US" - username = "terraform" - password = 
"Pass!admin123" + resource "azure_instance" "bar" { + name = "terraform-test2" + image = "Ubuntu Server 14.04 LTS" + size = "Basic_A1" + storage_service_name = "${azure_instance.foo.storage_service_name}" + location = "West US" + username = "terraform" + password = "Pass!admin123" + } + + resource "azure_data_disk" "foo" { + lun = 2 + size = 20 + caching = "ReadWrite" + storage_service_name = "${azure_instance.bar.storage_service_name}" + virtual_machine = "${azure_instance.bar.id}" + }`, name, testAccStorageServiceName) } - -resource "azure_instance" "bar" { - name = "terraform-test2" - image = "Ubuntu Server 14.04 LTS" - size = "Basic_A1" - storage_service_name = "${azure_instance.foo.storage_service_name}" - location = "West US" - username = "terraform" - password = "Pass!admin123" -} - -resource "azure_data_disk" "foo" { - lun = 2 - size = 20 - caching = "ReadWrite" - storage_service_name = "${azure_instance.bar.storage_service_name}" - virtual_machine = "${azure_instance.bar.id}" -}`, testAccStorageServiceName) diff --git a/builtin/providers/azure/resource_azure_instance_test.go b/builtin/providers/azure/resource_azure_instance_test.go index 79e7121540..7e63486c3f 100644 --- a/builtin/providers/azure/resource_azure_instance_test.go +++ b/builtin/providers/azure/resource_azure_instance_test.go @@ -446,7 +446,7 @@ resource "azure_security_group_rule" "foo" { resource "azure_instance" "foo" { name = "terraform-test1" - image = "Windows Server 2012 R2 Datacenter, April 2015" + image = "Windows Server 2012 R2 Datacenter, September 2015" size = "Basic_A1" storage_service_name = "%s" location = "West US" @@ -520,7 +520,7 @@ resource "azure_security_group_rule" "bar" { resource "azure_instance" "foo" { name = "terraform-test1" - image = "Windows Server 2012 R2 Datacenter, April 2015" + image = "Windows Server 2012 R2 Datacenter, September 2015" size = "Basic_A2" storage_service_name = "%s" location = "West US" From 05d6c5b509e814a12903ca770d6ee24b790c812f Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 12 Oct 2015 11:42:27 -0500 Subject: [PATCH 259/335] vsphere docs; first draft I'm not familiar with vSphere so I had to skip over details in some places, but this at least gets the basic structure in for the docs. 
--- website/source/assets/stylesheets/_docs.scss | 1 + .../providers/vsphere/index.html.markdown | 56 +++++++++++++++ .../vsphere/r/virtual_machine.html.markdown | 69 +++++++++++++++++++ website/source/layouts/docs.erb | 4 ++ website/source/layouts/vsphere.erb | 26 +++++++ 5 files changed, 156 insertions(+) create mode 100644 website/source/docs/providers/vsphere/index.html.markdown create mode 100644 website/source/docs/providers/vsphere/r/virtual_machine.html.markdown create mode 100644 website/source/layouts/vsphere.erb diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss index 6849f91069..0defd251a5 100755 --- a/website/source/assets/stylesheets/_docs.scss +++ b/website/source/assets/stylesheets/_docs.scss @@ -23,6 +23,7 @@ body.layout-openstack, body.layout-packet, body.layout-rundeck, body.layout-template, +body.layout-vsphere, body.layout-docs, body.layout-downloads, body.layout-inner, diff --git a/website/source/docs/providers/vsphere/index.html.markdown b/website/source/docs/providers/vsphere/index.html.markdown new file mode 100644 index 0000000000..3930519a1f --- /dev/null +++ b/website/source/docs/providers/vsphere/index.html.markdown @@ -0,0 +1,56 @@ +--- +layout: "vsphere" +page_title: "Provider: vSphere" +sidebar_current: "docs-vsphere-index" +description: |- + The vSphere provider is used to interact with the resources supported by + vSphere. The provider needs to be configured with the proper credentials before + it can be used. +--- + +# vSphere Provider + +The vSphere provider is used to interact with the resources supported by vSphere. +The provider needs to be configured with the proper credentials before it can be used. + +Use the navigation to the left to read about the available resources. + +## Example Usage + +``` +# Configure the vSphere Provider +provider "vsphere" { + user = "${var.vsphere_user}" + password = "${var.vsphere_password}" + vcenter_server = "${var.vsphere_vcenter_server}" +} + +# Create a virtual machine +resource "vsphere_virtual_machine" "web" { + name = "terraform_web" + vcpu = 2 + memory = 4096 + + network_interface { + label = "VM Network" + } + + disk { + size = 1 + iops = 500 + } +} +``` + +## Argument Reference + +The following arguments are used to configure the vSphere Provider: + +* `user` - (Required) This is the username for vSphere API operations. Can also + be specified with the `VSPHERE_USER` environment variable. +* `password` - (Required) This is the password for vSphere API operations. Can + also be specified with the `VSPHERE_PASSWORD` environment variable. +* `vcenter_server` - (Required) This is the vCenter server name for vSphere API + operations. Can also be specified with the `VSPHERE_VCENTER` environment + variable. + diff --git a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown new file mode 100644 index 0000000000..6ce012d65c --- /dev/null +++ b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown @@ -0,0 +1,69 @@ +--- +layout: "vsphere" +page_title: "vSphere: vsphere_virtual_machine" +sidebar_current: "docs-vsphere-resource-virtual-machine" +description: |- + Provides a vSphere virtual machine resource. This can be used to create, modify, and delete virtual machines. +--- + +# vsphere\_virtual\_machine + +Provides a vSphere virtual machine resource. This can be used to create, +modify, and delete virtual machines. 
+ +## Example Usage + +``` +resource "vsphere_virtual_machine" "web" { + name = "terraform_web" + vcpu = 2 + memory = 4096 + + network_interface { + label = "VM Network" + } + + disk { + size = 1 + iops = 500 + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The virtual machine name +* `vcpu` - (Required) The number of virtual CPUs to allocate to the virtual machine +* `memory` - (Required) The amount of RAM (in MB) to allocate to the virtual machine +* `datacenter` - (Optional) The name of a Datacenter in which to launch the virtual machine +* `cluster` - (Optional) Name of a Cluster in which to launch the virtual machine +* `resource_pool` (Optional) The name of a Resource Pool in which to launch the virtual machine +* `gateway` - (Optional) Gateway IP address to use for all network interfaces +* `domain` - (Optional) A FQDN for the virtual machine; defaults to "vsphere.local" +* `time_zone` - (Optional) The [time zone](https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/timezone.html) to set on the virtual machine. Defaults to "Etc/UTC" +* `dns_suffixes` - (Optional) List of name resolution suffixes for the virtual network adapter +* `dns_servers` - (Optional) List of DNS servers for the virtual network adapter; defaults to 8.8.8.8, 8.8.4.4 +* `network_interface` - (Required) Configures virtual network interfaces; see [Network Interfaces](#network-interfaces) below for details. +* `disk` - (Required) Configures virtual disks; see [Disks](#disks) below for details +* `boot_delay` - (Optional) Time in seconds to wait for machine network to be ready. + + +## Network Interfaces + +Network interfaces support the following attributes: + +* `label` - (Required) Label to assign to this network interface +* `ip_address` - (Optional) Static IP to assign to this network interface. Interface will use DHCP if this is left blank. +* `subnet_mask` - (Optional) Subnet mask to use when statically assigning an IP. + + +## Disks + +Disks support the following attributes: + +* `template` - (Required if size not provided) Template for this disk. +* `datastore` - (Optional) Datastore for this disk +* `size` - (Required if template not provided) Size of this disk (in GB). +* `iops` - (Optional) Number of virtual iops to allocate for this disk. diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index af96b52c1e..937c120de4 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -188,6 +188,10 @@ > Template + + > + vSphere + diff --git a/website/source/layouts/vsphere.erb b/website/source/layouts/vsphere.erb new file mode 100644 index 0000000000..49e58c0578 --- /dev/null +++ b/website/source/layouts/vsphere.erb @@ -0,0 +1,26 @@ +<% wrap_layout :inner do %> + <% content_for :sidebar do %> + + <% end %> + + <%= yield %> +<% end %> From 9d41e6f3d1a3c926850cc984236b3f53d2499127 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Thu, 15 Oct 2015 09:35:06 -0500 Subject: [PATCH 260/335] vsphere docs: add warning about possible changes Since we merged this so that the community could collaborate on improvements, I thought it would be prudent to inform potential users of the status of the provider so they know what to expect. 
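The argument reference introduced in the previous patch describes two shapes that its Example Usage does not exercise: a statically addressed `network_interface` (via `ip_address` and `subnet_mask`) and a `template`-based `disk`. A minimal sketch combining them follows; the datacenter, template, datastore, and address values are illustrative assumptions, not anything the docs prescribe:

```
resource "vsphere_virtual_machine" "db" {
  name       = "terraform_db"
  vcpu       = 2
  memory     = 4096
  datacenter = "dc01"       # assumed inventory name
  gateway    = "10.0.0.1"

  network_interface {
    label       = "VM Network"
    ip_address  = "10.0.0.10"      # omit to fall back to DHCP
    subnet_mask = "255.255.255.0"
  }

  disk {
    # `template` and `size` are alternatives: per the argument
    # reference, each disk needs one or the other.
    template  = "centos-7-base"    # assumed template name
    datastore = "datastore1"       # assumed datastore
  }
}
```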
--- website/source/docs/providers/vsphere/index.html.markdown | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/source/docs/providers/vsphere/index.html.markdown b/website/source/docs/providers/vsphere/index.html.markdown index 3930519a1f..17448b024f 100644 --- a/website/source/docs/providers/vsphere/index.html.markdown +++ b/website/source/docs/providers/vsphere/index.html.markdown @@ -15,6 +15,9 @@ The provider needs to be configured with the proper credentials before it can be Use the navigation to the left to read about the available resources. +~> **NOTE:** The vSphere Provider currently represents _initial support_ and +therefore may undergo significant changes as the community improves it. + ## Example Usage ``` From d918d775f392ecd6b4ae8fb8323cc650d78051f2 Mon Sep 17 00:00:00 2001 From: Clint Date: Thu, 15 Oct 2015 10:04:55 -0500 Subject: [PATCH 261/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a7d913180..1e30d90e7f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -78,6 +78,7 @@ BUG FIXES: * provider/aws: Migrate KeyPair to version 1, fixing issue with using `file()` [GH-3470] * provider/aws: Fix force_delete on autoscaling groups [GH-3485] * provider/aws: Fix crash with VPC Peering connections [GH-3490] + * provider/aws: fix bug with reading GSIs from dynamodb [GH-3300] * provider/docker: Fix issue preventing private images from being referenced [GH-2619] * provider/digitalocean: Fix issue causing unnecessary diffs based on droplet slugsize case [GH-3284] * provider/openstack: add state 'downloading' to list of expected states in From 562a793430d804f4b1691bd0e00484360cbc2994 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Thu, 15 Oct 2015 10:21:20 -0500 Subject: [PATCH 262/335] style: ran go fmt --- terraform/eval_ignore_changes.go | 5 +++-- terraform/transform_resource.go | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/terraform/eval_ignore_changes.go b/terraform/eval_ignore_changes.go index 1a44089a98..2eb2d9bb18 100644 --- a/terraform/eval_ignore_changes.go +++ b/terraform/eval_ignore_changes.go @@ -1,4 +1,5 @@ package terraform + import ( "github.com/hashicorp/terraform/config" "strings" @@ -9,7 +10,7 @@ import ( // IgnoreChanges lifecycle. 
type EvalIgnoreChanges struct { Resource *config.Resource - Diff **InstanceDiff + Diff **InstanceDiff } func (n *EvalIgnoreChanges) Eval(ctx EvalContext) (interface{}, error) { @@ -20,7 +21,7 @@ func (n *EvalIgnoreChanges) Eval(ctx EvalContext) (interface{}, error) { diff := *n.Diff ignoreChanges := n.Resource.Lifecycle.IgnoreChanges - for _, ignoredName := range ignoreChanges { + for _, ignoredName := range ignoreChanges { for name := range diff.Attributes { if strings.HasPrefix(name, ignoredName) { delete(diff.Attributes, name) diff --git a/terraform/transform_resource.go b/terraform/transform_resource.go index 81ff158d9c..5091f29c98 100644 --- a/terraform/transform_resource.go +++ b/terraform/transform_resource.go @@ -320,7 +320,7 @@ func (n *graphNodeExpandedResource) EvalTree() EvalNode { }, &EvalIgnoreChanges{ Resource: n.Resource, - Diff: &diff, + Diff: &diff, }, &EvalWriteState{ Name: n.stateId(), From 49396ba3e03461d9b2bc486e52c45385adb0f094 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Thu, 15 Oct 2015 15:51:15 +0000 Subject: [PATCH 263/335] v0.6.4 --- CHANGELOG.md | 2 +- deps/v0-6-4.json | 440 +++++++++++++++++++++++++++++++++++++++++++ terraform/version.go | 2 +- 3 files changed, 442 insertions(+), 2 deletions(-) create mode 100644 deps/v0-6-4.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e30d90e7f..0b2132d9cd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.6.4 (unreleased) +## 0.6.4 (October 15, 2015) FEATURES: diff --git a/deps/v0-6-4.json b/deps/v0-6-4.json new file mode 100644 index 0000000000..e0d17b58fc --- /dev/null +++ b/deps/v0-6-4.json @@ -0,0 +1,440 @@ +{ + "ImportPath": "github.com/hashicorp/terraform", + "GoVersion": "go1.4.2", + "Packages": [ + "./..." + ], + "Deps": [ + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/core/http", + "Comment": "v1.2-261-g3dcabb6", + "Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/core/tls", + "Comment": "v1.2-261-g3dcabb6", + "Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/management", + "Comment": "v1.2-261-g3dcabb6", + "Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", + "Comment": "v1.2-261-g3dcabb6", + "Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311" + }, + { + "ImportPath": "github.com/apparentlymart/go-rundeck-api/rundeck", + "Comment": "v0.0.1", + "Rev": "cddcfbabbe903e9c8df35ff9569dbb8d67789200" + }, + { + "ImportPath": "github.com/armon/circbuf", + "Rev": "bbbad097214e2918d8543d5201d12bfd7bca254d" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/endpoints", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/ec2query", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/jsonrpc", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/query", + "Comment": 
"v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/rest", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restjson", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restxml", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/signer/v4", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/autoscaling", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatch", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/directoryservice", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/dynamodb", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/ec2", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/ecs", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/efs", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/elasticache", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/elasticsearchservice", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/elb", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/glacier", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/iam", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/kinesis", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/lambda", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/opsworks", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/rds", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": 
"308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/route53", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/s3", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/sns", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/sqs", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/awslabs/aws-sdk-go/aws", + "Comment": "v0.9.14-3-g308eaa6", + "Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1" + }, + { + "ImportPath": "github.com/cyberdelia/heroku-go/v3", + "Rev": "8344c6a3e281a99a693f5b71186249a8620eeb6b" + }, + { + "ImportPath": "github.com/dylanmei/iso8601", + "Rev": "2075bf119b58e5576c6ed9f867b8f3d17f2e54d4" + }, + { + "ImportPath": "github.com/dylanmei/winrmtest", + "Rev": "3e9661c52c45dab9a8528966a23d421922fca9b9" + }, + { + "ImportPath": "github.com/fsouza/go-dockerclient", + "Rev": "09604abc82243886001c3f56fd709d4ba603cead" + }, + { + "ImportPath": "github.com/hashicorp/atlas-go/archive", + "Comment": "20141209094003-77-g85a782d", + "Rev": "85a782d724b87fcd19db1c4aef9d5337a9bb7a0f" + }, + { + "ImportPath": "github.com/hashicorp/atlas-go/v1", + "Comment": "20141209094003-77-g85a782d", + "Rev": "85a782d724b87fcd19db1c4aef9d5337a9bb7a0f" + }, + { + "ImportPath": "github.com/hashicorp/consul/api", + "Comment": "v0.5.2-325-g5d9530d", + "Rev": "5d9530d7def3be989ba141382f1b9d82583418f4" + }, + { + "ImportPath": "github.com/hashicorp/errwrap", + "Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55" + }, + { + "ImportPath": "github.com/hashicorp/go-checkpoint", + "Rev": "528ab62f37fa83d4360e8ab2b2c425d6692ef533" + }, + { + "ImportPath": "github.com/hashicorp/go-multierror", + "Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5" + }, + { + "ImportPath": "github.com/hashicorp/go-version", + "Rev": "2b9865f60ce11e527bd1255ba82036d465570aa3" + }, + { + "ImportPath": "github.com/hashicorp/hcl", + "Rev": "4de51957ef8d4aba6e285ddfc587633bbfc7c0e8" + }, + { + "ImportPath": "github.com/hashicorp/logutils", + "Rev": "0dc08b1671f34c4250ce212759ebd880f743d883" + }, + { + "ImportPath": "github.com/hashicorp/yamux", + "Rev": "ddcd0a6ec7c55e29f235e27935bf98d302281bd3" + }, + { + "ImportPath": "github.com/imdario/mergo", + "Comment": "0.2.0-5-g61a5285", + "Rev": "61a52852277811e93e06d28e0d0c396284a7730b" + }, + { + "ImportPath": "github.com/masterzen/simplexml/dom", + "Rev": "95ba30457eb1121fa27753627c774c7cd4e90083" + }, + { + "ImportPath": "github.com/masterzen/winrm/soap", + "Rev": "b280be362a0c6af26fbaaa055924fb9c4830b006" + }, + { + "ImportPath": "github.com/masterzen/winrm/winrm", + "Rev": "b280be362a0c6af26fbaaa055924fb9c4830b006" + }, + { + "ImportPath": "github.com/masterzen/xmlpath", + "Rev": "13f4951698adc0fa9c1dda3e275d489a24201161" + }, + { + "ImportPath": "github.com/mitchellh/cli", + "Rev": "8102d0ed5ea2709ade1243798785888175f6e415" + }, + { + "ImportPath": "github.com/mitchellh/colorstring", + "Rev": "8631ce90f28644f54aeedcb3e389a85174e067d1" + }, + { + "ImportPath": "github.com/mitchellh/copystructure", + "Rev": "6fc66267e9da7d155a9d3bd489e00dad02666dc6" + }, + { + "ImportPath": "github.com/mitchellh/go-homedir", + "Rev": "df55a15e5ce646808815381b3db47a8c66ea62f4" + }, + { + "ImportPath": 
"github.com/mitchellh/go-linereader", + "Rev": "07bab5fdd9580500aea6ada0e09df4aa28e68abd" + }, + { + "ImportPath": "github.com/mitchellh/mapstructure", + "Rev": "281073eb9eb092240d33ef253c404f1cca550309" + }, + { + "ImportPath": "github.com/mitchellh/osext", + "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702" + }, + { + "ImportPath": "github.com/mitchellh/packer/common/uuid", + "Comment": "v0.8.6-76-g88386bc", + "Rev": "88386bc9db1c850306e5c3737f14bef3a2c4050d" + }, + { + "ImportPath": "github.com/mitchellh/panicwrap", + "Rev": "1655d88c8ff7495ae9d2c19fd8f445f4657e22b0" + }, + { + "ImportPath": "github.com/mitchellh/prefixedio", + "Rev": "89d9b535996bf0a185f85b59578f2e245f9e1724" + }, + { + "ImportPath": "github.com/mitchellh/reflectwalk", + "Rev": "eecf4c70c626c7cfbb95c90195bc34d386c74ac6" + }, + { + "ImportPath": "github.com/nu7hatch/gouuid", + "Rev": "179d4d0c4d8d407a32af483c2354df1d2c91e6c3" + }, + { + "ImportPath": "github.com/packer-community/winrmcp/winrmcp", + "Rev": "743b1afe5ee3f6d5ba71a0d50673fa0ba2123d6b" + }, + { + "ImportPath": "github.com/packethost/packngo", + "Rev": "496f5c8895c06505fae527830a9e554dc65325f4" + }, + { + "ImportPath": "github.com/pborman/uuid", + "Rev": "cccd189d45f7ac3368a0d127efb7f4d08ae0b655" + }, + { + "ImportPath": "github.com/pearkes/cloudflare", + "Rev": "19e280b056f3742e535ea12ae92a37ea7767ea82" + }, + { + "ImportPath": "github.com/pearkes/digitalocean", + "Rev": "e966f00c2d9de5743e87697ab77c7278f5998914" + }, + { + "ImportPath": "github.com/pearkes/dnsimple", + "Rev": "2a807d118c9e52e94819f414a6ec0293b45cad01" + }, + { + "ImportPath": "github.com/pearkes/mailgun", + "Rev": "5b02e7e9ffee9869f81393e80db138f6ff726260" + }, + { + "ImportPath": "github.com/rackspace/gophercloud", + "Comment": "v1.0.0-681-g8d032cb", + "Rev": "8d032cb1e835a0018269de3d6b53bb24fc77a8c0" + }, + { + "ImportPath": "github.com/satori/go.uuid", + "Rev": "08f0718b61e95ddba0ade3346725fe0e4bf28ca6" + }, + { + "ImportPath": "github.com/soniah/dnsmadeeasy", + "Comment": "v1.1-2-g5578a8c", + "Rev": "5578a8c15e33958c61cf7db720b6181af65f4a9e" + }, + { + "ImportPath": "github.com/vaughan0/go-ini", + "Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1" + }, + { + "ImportPath": "github.com/vmware/govmomi", + "Comment": "v0.2.0-28-g6037863", + "Rev": "603786323c18c13dd8b3da3d4f86b1dce4adf126" + }, + { + "ImportPath": "github.com/xanzy/go-cloudstack/cloudstack", + "Comment": "v1.2.0-48-g0e6e56f", + "Rev": "0e6e56fc0db3f48f060273f2e2ffe5d8d41b0112" + }, + { + "ImportPath": "golang.org/x/crypto/curve25519", + "Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784" + }, + { + "ImportPath": "golang.org/x/crypto/pkcs12", + "Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784" + }, + { + "ImportPath": "golang.org/x/crypto/ssh", + "Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784" + }, + { + "ImportPath": "golang.org/x/net/context", + "Rev": "21c3935a8fc0f954d03e6b8a560c9600ffee38d2" + }, + { + "ImportPath": "golang.org/x/oauth2", + "Rev": "ef4eca6b097fad7cec79afcc278d213a6de1c960" + }, + { + "ImportPath": "google.golang.org/api/compute/v1", + "Rev": "e2903ca9e33d6cbaedda541d96996219056e8214" + }, + { + "ImportPath": "google.golang.org/api/container/v1", + "Rev": "e2903ca9e33d6cbaedda541d96996219056e8214" + }, + { + "ImportPath": "google.golang.org/api/dns/v1", + "Rev": "e2903ca9e33d6cbaedda541d96996219056e8214" + }, + { + "ImportPath": "google.golang.org/api/googleapi", + "Rev": "e2903ca9e33d6cbaedda541d96996219056e8214" + }, + { + "ImportPath": "google.golang.org/api/internal", + "Rev": 
"e2903ca9e33d6cbaedda541d96996219056e8214" + }, + { + "ImportPath": "google.golang.org/api/storage/v1", + "Rev": "e2903ca9e33d6cbaedda541d96996219056e8214" + }, + { + "ImportPath": "google.golang.org/cloud/compute/metadata", + "Rev": "4bea1598a0936d6d116506b59a8e1aa962b585c3" + }, + { + "ImportPath": "google.golang.org/cloud/internal", + "Rev": "4bea1598a0936d6d116506b59a8e1aa962b585c3" + } + ] +} diff --git a/terraform/version.go b/terraform/version.go index 741766330b..a07a344c14 100644 --- a/terraform/version.go +++ b/terraform/version.go @@ -6,4 +6,4 @@ const Version = "0.6.4" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "dev" +const VersionPrerelease = "" From 1bfd4b0f7175688d00e344f0ddb6ea6295d11468 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Thu, 15 Oct 2015 17:50:20 +0000 Subject: [PATCH 264/335] Reset CHANGELOG/version for 0.6.5 release --- CHANGELOG.md | 2 ++ terraform/version.go | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b2132d9cd..81316bf545 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,5 @@ +## 0.6.5 (Unreleased) + ## 0.6.4 (October 15, 2015) FEATURES: diff --git a/terraform/version.go b/terraform/version.go index a07a344c14..badbcd92ee 100644 --- a/terraform/version.go +++ b/terraform/version.go @@ -1,9 +1,9 @@ package terraform // The main version number that is being run at the moment. -const Version = "0.6.4" +const Version = "0.6.5" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "" +const VersionPrerelease = "dev" From 26bc27594b1136f45be0844ba9a8bd071f2d9c14 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 15 Oct 2015 20:57:42 +0200 Subject: [PATCH 265/335] docs: Fix EFS documentation --- .../docs/providers/aws/r/efs_mount_target.html.markdown | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/source/docs/providers/aws/r/efs_mount_target.html.markdown b/website/source/docs/providers/aws/r/efs_mount_target.html.markdown index 59bd3bee22..c29b1b742c 100644 --- a/website/source/docs/providers/aws/r/efs_mount_target.html.markdown +++ b/website/source/docs/providers/aws/r/efs_mount_target.html.markdown @@ -6,10 +6,10 @@ description: |- Provides an EFS mount target. --- -# aws\_efs\_file\_system +# aws\_efs\_mount\_target -Provides an EFS file system. Per [documentation](http://docs.aws.amazon.com/efs/latest/ug/limits.html) -the limit is 1 mount target per AZ. +Provides an EFS mount target. Per [documentation](http://docs.aws.amazon.com/efs/latest/ug/limits.html) +the limit is 1 mount target per AZ for a single EFS file system. 
## Example Usage From 4017a611c34edf49554a8622bf65ac5122a5b679 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 15 Oct 2015 20:59:58 +0200 Subject: [PATCH 266/335] docs: Glacier Vault - add title + make note more brief --- .../source/docs/providers/aws/r/glacier_vault.html.markdown | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/glacier_vault.html.markdown b/website/source/docs/providers/aws/r/glacier_vault.html.markdown index 6805338c79..d783c02263 100644 --- a/website/source/docs/providers/aws/r/glacier_vault.html.markdown +++ b/website/source/docs/providers/aws/r/glacier_vault.html.markdown @@ -10,7 +10,7 @@ description: |- Provides a Glacier Vault Resource. You can refer to the [Glacier Developer Guide](http://docs.aws.amazon.com/amazonglacier/latest/dev/working-with-vaults.html) for a full explanation of the Glacier Vault functionality -~> **NOTE:** When trying to remove a Glacier Vault, the Vault must be empty. +~> **NOTE:** When removing a Glacier Vault, the Vault must be empty. ## Example Usage @@ -66,6 +66,8 @@ The following arguments are supported: * `events` - (Required) You can configure a vault to publish a notification for `ArchiveRetrievalCompleted` and `InventoryRetrievalCompleted` events. * `sns_topic` - (Required) The SNS Topic ARN. +## Attributes Reference + The following attributes are exported: * `location` - The URI of the vault that was created. From 06f4ac8166595b47638c2ae39e68a6b0e6f549bf Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 15 Oct 2015 13:36:58 -0700 Subject: [PATCH 267/335] config/module: use go-getter --- .gitignore | 1 + config/module/copy_dir.go | 76 --------- config/module/detect.go | 92 ----------- config/module/detect_bitbucket.go | 66 -------- config/module/detect_bitbucket_test.go | 67 -------- config/module/detect_file.go | 60 ------- config/module/detect_file_test.go | 88 ---------- config/module/detect_github.go | 73 --------- config/module/detect_github_test.go | 55 ------- config/module/detect_test.go | 51 ------ config/module/folder_storage.go | 65 -------- config/module/folder_storage_test.go | 48 ------ config/module/get.go | 217 +++---------------------- config/module/get_file.go | 46 ------ config/module/get_file_test.go | 104 ------------ config/module/get_git.go | 74 --------- config/module/get_git_test.go | 143 ---------------- config/module/get_hg.go | 89 ---------- config/module/get_hg_test.go | 81 --------- config/module/get_http.go | 173 -------------------- config/module/get_http_test.go | 155 ------------------ config/module/get_test.go | 128 --------------- config/module/module_test.go | 25 +-- config/module/storage.go | 25 --- config/module/tree.go | 28 +--- 25 files changed, 32 insertions(+), 1998 deletions(-) delete mode 100644 config/module/copy_dir.go delete mode 100644 config/module/detect.go delete mode 100644 config/module/detect_bitbucket.go delete mode 100644 config/module/detect_bitbucket_test.go delete mode 100644 config/module/detect_file.go delete mode 100644 config/module/detect_file_test.go delete mode 100644 config/module/detect_github.go delete mode 100644 config/module/detect_github_test.go delete mode 100644 config/module/detect_test.go delete mode 100644 config/module/folder_storage.go delete mode 100644 config/module/folder_storage_test.go delete mode 100644 config/module/get_file.go delete mode 100644 config/module/get_file_test.go delete mode 100644 config/module/get_git.go delete mode 100644 config/module/get_git_test.go 
delete mode 100644 config/module/get_hg.go delete mode 100644 config/module/get_hg_test.go delete mode 100644 config/module/get_http.go delete mode 100644 config/module/get_http_test.go delete mode 100644 config/module/get_test.go delete mode 100644 config/module/storage.go diff --git a/.gitignore b/.gitignore index 66ea31701f..5a230d5ca2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ *.dll *.exe +.DS_Store example.tf terraform.tfplan terraform.tfstate diff --git a/config/module/copy_dir.go b/config/module/copy_dir.go deleted file mode 100644 index f2ae63b77b..0000000000 --- a/config/module/copy_dir.go +++ /dev/null @@ -1,76 +0,0 @@ -package module - -import ( - "io" - "os" - "path/filepath" - "strings" -) - -// copyDir copies the src directory contents into dst. Both directories -// should already exist. -func copyDir(dst, src string) error { - src, err := filepath.EvalSymlinks(src) - if err != nil { - return err - } - - walkFn := func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if path == src { - return nil - } - - if strings.HasPrefix(filepath.Base(path), ".") { - // Skip any dot files - if info.IsDir() { - return filepath.SkipDir - } else { - return nil - } - } - - // The "path" has the src prefixed to it. We need to join our - // destination with the path without the src on it. - dstPath := filepath.Join(dst, path[len(src):]) - - // If we have a directory, make that subdirectory, then continue - // the walk. - if info.IsDir() { - if path == filepath.Join(src, dst) { - // dst is in src; don't walk it. - return nil - } - - if err := os.MkdirAll(dstPath, 0755); err != nil { - return err - } - - return nil - } - - // If we have a file, copy the contents. - srcF, err := os.Open(path) - if err != nil { - return err - } - defer srcF.Close() - - dstF, err := os.Create(dstPath) - if err != nil { - return err - } - defer dstF.Close() - - if _, err := io.Copy(dstF, srcF); err != nil { - return err - } - - // Chmod it - return os.Chmod(dstPath, info.Mode()) - } - - return filepath.Walk(src, walkFn) -} diff --git a/config/module/detect.go b/config/module/detect.go deleted file mode 100644 index 51e07f725b..0000000000 --- a/config/module/detect.go +++ /dev/null @@ -1,92 +0,0 @@ -package module - -import ( - "fmt" - "path/filepath" - - "github.com/hashicorp/terraform/helper/url" -) - -// Detector defines the interface that an invalid URL or a URL with a blank -// scheme is passed through in order to determine if its shorthand for -// something else well-known. -type Detector interface { - // Detect will detect whether the string matches a known pattern to - // turn it into a proper URL. - Detect(string, string) (string, bool, error) -} - -// Detectors is the list of detectors that are tried on an invalid URL. -// This is also the order they're tried (index 0 is first). -var Detectors []Detector - -func init() { - Detectors = []Detector{ - new(GitHubDetector), - new(BitBucketDetector), - new(FileDetector), - } -} - -// Detect turns a source string into another source string if it is -// detected to be of a known pattern. -// -// This is safe to be called with an already valid source string: Detect -// will just return it. 
-func Detect(src string, pwd string) (string, error) { - getForce, getSrc := getForcedGetter(src) - - // Separate out the subdir if there is one, we don't pass that to detect - getSrc, subDir := getDirSubdir(getSrc) - - u, err := url.Parse(getSrc) - if err == nil && u.Scheme != "" { - // Valid URL - return src, nil - } - - for _, d := range Detectors { - result, ok, err := d.Detect(getSrc, pwd) - if err != nil { - return "", err - } - if !ok { - continue - } - - var detectForce string - detectForce, result = getForcedGetter(result) - result, detectSubdir := getDirSubdir(result) - - // If we have a subdir from the detection, then prepend it to our - // requested subdir. - if detectSubdir != "" { - if subDir != "" { - subDir = filepath.Join(detectSubdir, subDir) - } else { - subDir = detectSubdir - } - } - if subDir != "" { - u, err := url.Parse(result) - if err != nil { - return "", fmt.Errorf("Error parsing URL: %s", err) - } - u.Path += "//" + subDir - result = u.String() - } - - // Preserve the forced getter if it exists. We try to use the - // original set force first, followed by any force set by the - // detector. - if getForce != "" { - result = fmt.Sprintf("%s::%s", getForce, result) - } else if detectForce != "" { - result = fmt.Sprintf("%s::%s", detectForce, result) - } - - return result, nil - } - - return "", fmt.Errorf("invalid source string: %s", src) -} diff --git a/config/module/detect_bitbucket.go b/config/module/detect_bitbucket.go deleted file mode 100644 index 657637c099..0000000000 --- a/config/module/detect_bitbucket.go +++ /dev/null @@ -1,66 +0,0 @@ -package module - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" -) - -// BitBucketDetector implements Detector to detect BitBucket URLs and turn -// them into URLs that the Git or Hg Getter can understand. -type BitBucketDetector struct{} - -func (d *BitBucketDetector) Detect(src, _ string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if strings.HasPrefix(src, "bitbucket.org/") { - return d.detectHTTP(src) - } - - return "", false, nil -} - -func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) { - u, err := url.Parse("https://" + src) - if err != nil { - return "", true, fmt.Errorf("error parsing BitBucket URL: %s", err) - } - - // We need to get info on this BitBucket repository to determine whether - // it is Git or Hg. 
- var info struct { - SCM string `json:"scm"` - } - infoUrl := "https://api.bitbucket.org/1.0/repositories" + u.Path - resp, err := http.Get(infoUrl) - if err != nil { - return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err) - } - if resp.StatusCode == 403 { - // A private repo - return "", true, fmt.Errorf( - "shorthand BitBucket URL can't be used for private repos, " + - "please use a full URL") - } - dec := json.NewDecoder(resp.Body) - if err := dec.Decode(&info); err != nil { - return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err) - } - - switch info.SCM { - case "git": - if !strings.HasSuffix(u.Path, ".git") { - u.Path += ".git" - } - - return "git::" + u.String(), true, nil - case "hg": - return "hg::" + u.String(), true, nil - default: - return "", true, fmt.Errorf("unknown BitBucket SCM type: %s", info.SCM) - } -} diff --git a/config/module/detect_bitbucket_test.go b/config/module/detect_bitbucket_test.go deleted file mode 100644 index b05fd5999c..0000000000 --- a/config/module/detect_bitbucket_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package module - -import ( - "net/http" - "strings" - "testing" -) - -const testBBUrl = "https://bitbucket.org/hashicorp/tf-test-git" - -func TestBitBucketDetector(t *testing.T) { - t.Parallel() - - if _, err := http.Get(testBBUrl); err != nil { - t.Log("internet may not be working, skipping BB tests") - t.Skip() - } - - cases := []struct { - Input string - Output string - }{ - // HTTP - { - "bitbucket.org/hashicorp/tf-test-git", - "git::https://bitbucket.org/hashicorp/tf-test-git.git", - }, - { - "bitbucket.org/hashicorp/tf-test-git.git", - "git::https://bitbucket.org/hashicorp/tf-test-git.git", - }, - { - "bitbucket.org/hashicorp/tf-test-hg", - "hg::https://bitbucket.org/hashicorp/tf-test-hg", - }, - } - - pwd := "/pwd" - f := new(BitBucketDetector) - for i, tc := range cases { - var err error - for i := 0; i < 3; i++ { - var output string - var ok bool - output, ok, err = f.Detect(tc.Input, pwd) - if err != nil { - if strings.Contains(err.Error(), "invalid character") { - continue - } - - t.Fatalf("err: %s", err) - } - if !ok { - t.Fatal("not ok") - } - - if output != tc.Output { - t.Fatalf("%d: bad: %#v", i, output) - } - - break - } - if i >= 3 { - t.Fatalf("failure from bitbucket: %s", err) - } - } -} diff --git a/config/module/detect_file.go b/config/module/detect_file.go deleted file mode 100644 index 859739f954..0000000000 --- a/config/module/detect_file.go +++ /dev/null @@ -1,60 +0,0 @@ -package module - -import ( - "fmt" - "os" - "path/filepath" - "runtime" -) - -// FileDetector implements Detector to detect file paths. -type FileDetector struct{} - -func (d *FileDetector) Detect(src, pwd string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if !filepath.IsAbs(src) { - if pwd == "" { - return "", true, fmt.Errorf( - "relative paths require a module with a pwd") - } - - // Stat the pwd to determine if its a symbolic link. If it is, - // then the pwd becomes the original directory. Otherwise, - // `filepath.Join` below does some weird stuff. - // - // We just ignore if the pwd doesn't exist. That error will be - // caught later when we try to use the URL. 
- if fi, err := os.Lstat(pwd); !os.IsNotExist(err) { - if err != nil { - return "", true, err - } - if fi.Mode()&os.ModeSymlink != 0 { - pwd, err = os.Readlink(pwd) - if err != nil { - return "", true, err - } - } - } - - src = filepath.Join(pwd, src) - } - - return fmtFileURL(src), true, nil -} - -func fmtFileURL(path string) string { - if runtime.GOOS == "windows" { - // Make sure we're using "/" on Windows. URLs are "/"-based. - path = filepath.ToSlash(path) - return fmt.Sprintf("file://%s", path) - } - - // Make sure that we don't start with "/" since we add that below. - if path[0] == '/' { - path = path[1:] - } - return fmt.Sprintf("file:///%s", path) -} diff --git a/config/module/detect_file_test.go b/config/module/detect_file_test.go deleted file mode 100644 index 3e9db8bba2..0000000000 --- a/config/module/detect_file_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package module - -import ( - "runtime" - "testing" -) - -type fileTest struct { - in, pwd, out string - err bool -} - -var fileTests = []fileTest{ - {"./foo", "/pwd", "file:///pwd/foo", false}, - {"./foo?foo=bar", "/pwd", "file:///pwd/foo?foo=bar", false}, - {"foo", "/pwd", "file:///pwd/foo", false}, -} - -var unixFileTests = []fileTest{ - {"/foo", "/pwd", "file:///foo", false}, - {"/foo?bar=baz", "/pwd", "file:///foo?bar=baz", false}, -} - -var winFileTests = []fileTest{ - {"/foo", "/pwd", "file:///pwd/foo", false}, - {`C:\`, `/pwd`, `file://C:/`, false}, - {`C:\?bar=baz`, `/pwd`, `file://C:/?bar=baz`, false}, -} - -func TestFileDetector(t *testing.T) { - if runtime.GOOS == "windows" { - fileTests = append(fileTests, winFileTests...) - } else { - fileTests = append(fileTests, unixFileTests...) - } - - f := new(FileDetector) - for i, tc := range fileTests { - out, ok, err := f.Detect(tc.in, tc.pwd) - if err != nil { - t.Fatalf("err: %s", err) - } - if !ok { - t.Fatal("not ok") - } - - if out != tc.out { - t.Fatalf("%d: bad: %#v", i, out) - } - } -} - -var noPwdFileTests = []fileTest{ - {in: "./foo", pwd: "", out: "", err: true}, - {in: "foo", pwd: "", out: "", err: true}, -} - -var noPwdUnixFileTests = []fileTest{ - {in: "/foo", pwd: "", out: "file:///foo", err: false}, -} - -var noPwdWinFileTests = []fileTest{ - {in: "/foo", pwd: "", out: "", err: true}, - {in: `C:\`, pwd: ``, out: `file://C:/`, err: false}, -} - -func TestFileDetector_noPwd(t *testing.T) { - if runtime.GOOS == "windows" { - noPwdFileTests = append(noPwdFileTests, noPwdWinFileTests...) - } else { - noPwdFileTests = append(noPwdFileTests, noPwdUnixFileTests...) - } - - f := new(FileDetector) - for i, tc := range noPwdFileTests { - out, ok, err := f.Detect(tc.in, tc.pwd) - if err != nil != tc.err { - t.Fatalf("%d: err: %s", i, err) - } - if !ok { - t.Fatal("not ok") - } - - if out != tc.out { - t.Fatalf("%d: bad: %#v", i, out) - } - } -} diff --git a/config/module/detect_github.go b/config/module/detect_github.go deleted file mode 100644 index c4a4e89f0b..0000000000 --- a/config/module/detect_github.go +++ /dev/null @@ -1,73 +0,0 @@ -package module - -import ( - "fmt" - "net/url" - "strings" -) - -// GitHubDetector implements Detector to detect GitHub URLs and turn -// them into URLs that the Git Getter can understand. 
-type GitHubDetector struct{} - -func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) { - if len(src) == 0 { - return "", false, nil - } - - if strings.HasPrefix(src, "github.com/") { - return d.detectHTTP(src) - } else if strings.HasPrefix(src, "git@github.com:") { - return d.detectSSH(src) - } - - return "", false, nil -} - -func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) { - parts := strings.Split(src, "/") - if len(parts) < 3 { - return "", false, fmt.Errorf( - "GitHub URLs should be github.com/username/repo") - } - - urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/")) - url, err := url.Parse(urlStr) - if err != nil { - return "", true, fmt.Errorf("error parsing GitHub URL: %s", err) - } - - if !strings.HasSuffix(url.Path, ".git") { - url.Path += ".git" - } - - if len(parts) > 3 { - url.Path += "//" + strings.Join(parts[3:], "/") - } - - return "git::" + url.String(), true, nil -} - -func (d *GitHubDetector) detectSSH(src string) (string, bool, error) { - idx := strings.Index(src, ":") - qidx := strings.Index(src, "?") - if qidx == -1 { - qidx = len(src) - } - - var u url.URL - u.Scheme = "ssh" - u.User = url.User("git") - u.Host = "github.com" - u.Path = src[idx+1 : qidx] - if qidx < len(src) { - q, err := url.ParseQuery(src[qidx+1:]) - if err != nil { - return "", true, fmt.Errorf("error parsing GitHub SSH URL: %s", err) - } - - u.RawQuery = q.Encode() - } - - return "git::" + u.String(), true, nil -} diff --git a/config/module/detect_github_test.go b/config/module/detect_github_test.go deleted file mode 100644 index 822e1806d3..0000000000 --- a/config/module/detect_github_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package module - -import ( - "testing" -) - -func TestGitHubDetector(t *testing.T) { - cases := []struct { - Input string - Output string - }{ - // HTTP - {"github.com/hashicorp/foo", "git::https://github.com/hashicorp/foo.git"}, - {"github.com/hashicorp/foo.git", "git::https://github.com/hashicorp/foo.git"}, - { - "github.com/hashicorp/foo/bar", - "git::https://github.com/hashicorp/foo.git//bar", - }, - { - "github.com/hashicorp/foo?foo=bar", - "git::https://github.com/hashicorp/foo.git?foo=bar", - }, - { - "github.com/hashicorp/foo.git?foo=bar", - "git::https://github.com/hashicorp/foo.git?foo=bar", - }, - - // SSH - {"git@github.com:hashicorp/foo.git", "git::ssh://git@github.com/hashicorp/foo.git"}, - { - "git@github.com:hashicorp/foo.git//bar", - "git::ssh://git@github.com/hashicorp/foo.git//bar", - }, - { - "git@github.com:hashicorp/foo.git?foo=bar", - "git::ssh://git@github.com/hashicorp/foo.git?foo=bar", - }, - } - - pwd := "/pwd" - f := new(GitHubDetector) - for i, tc := range cases { - output, ok, err := f.Detect(tc.Input, pwd) - if err != nil { - t.Fatalf("err: %s", err) - } - if !ok { - t.Fatal("not ok") - } - - if output != tc.Output { - t.Fatalf("%d: bad: %#v", i, output) - } - } -} diff --git a/config/module/detect_test.go b/config/module/detect_test.go deleted file mode 100644 index d2ee8ea1af..0000000000 --- a/config/module/detect_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package module - -import ( - "testing" -) - -func TestDetect(t *testing.T) { - cases := []struct { - Input string - Pwd string - Output string - Err bool - }{ - {"./foo", "/foo", "file:///foo/foo", false}, - {"git::./foo", "/foo", "git::file:///foo/foo", false}, - { - "git::github.com/hashicorp/foo", - "", - "git::https://github.com/hashicorp/foo.git", - false, - }, - { - "./foo//bar", - "/foo", - "file:///foo/foo//bar", - false, - }, - { - 
"git::github.com/hashicorp/foo//bar", - "", - "git::https://github.com/hashicorp/foo.git//bar", - false, - }, - { - "git::https://github.com/hashicorp/consul.git", - "", - "git::https://github.com/hashicorp/consul.git", - false, - }, - } - - for i, tc := range cases { - output, err := Detect(tc.Input, tc.Pwd) - if err != nil != tc.Err { - t.Fatalf("%d: bad err: %s", i, err) - } - if output != tc.Output { - t.Fatalf("%d: bad output: %s\nexpected: %s", i, output, tc.Output) - } - } -} diff --git a/config/module/folder_storage.go b/config/module/folder_storage.go deleted file mode 100644 index 81c9a2ac19..0000000000 --- a/config/module/folder_storage.go +++ /dev/null @@ -1,65 +0,0 @@ -package module - -import ( - "crypto/md5" - "encoding/hex" - "fmt" - "os" - "path/filepath" -) - -// FolderStorage is an implementation of the Storage interface that manages -// modules on the disk. -type FolderStorage struct { - // StorageDir is the directory where the modules will be stored. - StorageDir string -} - -// Dir implements Storage.Dir -func (s *FolderStorage) Dir(key string) (d string, e bool, err error) { - d = s.dir(key) - _, err = os.Stat(d) - if err == nil { - // Directory exists - e = true - return - } - if os.IsNotExist(err) { - // Directory doesn't exist - d = "" - e = false - err = nil - return - } - - // An error - d = "" - e = false - return -} - -// Get implements Storage.Get -func (s *FolderStorage) Get(key string, source string, update bool) error { - dir := s.dir(key) - if !update { - if _, err := os.Stat(dir); err == nil { - // If the directory already exists, then we're done since - // we're not updating. - return nil - } else if !os.IsNotExist(err) { - // If the error we got wasn't a file-not-exist error, then - // something went wrong and we should report it. - return fmt.Errorf("Error reading module directory: %s", err) - } - } - - // Get the source. This always forces an update. - return Get(dir, source) -} - -// dir returns the directory name internally that we'll use to map to -// internally. -func (s *FolderStorage) dir(key string) string { - sum := md5.Sum([]byte(key)) - return filepath.Join(s.StorageDir, hex.EncodeToString(sum[:])) -} diff --git a/config/module/folder_storage_test.go b/config/module/folder_storage_test.go deleted file mode 100644 index 7fda6b21a4..0000000000 --- a/config/module/folder_storage_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package module - -import ( - "os" - "path/filepath" - "testing" -) - -func TestFolderStorage_impl(t *testing.T) { - var _ Storage = new(FolderStorage) -} - -func TestFolderStorage(t *testing.T) { - s := &FolderStorage{StorageDir: tempDir(t)} - - module := testModule("basic") - - // A module shouldn't exist at first... 
- _, ok, err := s.Dir(module) - if err != nil { - t.Fatalf("err: %s", err) - } - if ok { - t.Fatal("should not exist") - } - - key := "foo" - - // We can get it - err = s.Get(key, module, false) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Now the module exists - dir, ok, err := s.Dir(key) - if err != nil { - t.Fatalf("err: %s", err) - } - if !ok { - t.Fatal("should exist") - } - - mainPath := filepath.Join(dir, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} diff --git a/config/module/get.go b/config/module/get.go index 627d395a9d..3820e65f29 100644 --- a/config/module/get.go +++ b/config/module/get.go @@ -1,207 +1,36 @@ package module import ( - "bytes" - "fmt" - "io/ioutil" - "net/url" - "os" - "os/exec" - "path/filepath" - "regexp" - "strings" - "syscall" - - urlhelper "github.com/hashicorp/terraform/helper/url" + "github.com/hashicorp/go-getter" ) -// Getter defines the interface that schemes must implement to download -// and update modules. -type Getter interface { - // Get downloads the given URL into the given directory. This always - // assumes that we're updating and gets the latest version that it can. - // - // The directory may already exist (if we're updating). If it is in a - // format that isn't understood, an error should be returned. Get shouldn't - // simply nuke the directory. - Get(string, *url.URL) error -} - -// Getters is the mapping of scheme to the Getter implementation that will -// be used to get a dependency. -var Getters map[string]Getter - -// forcedRegexp is the regular expression that finds forced getters. This -// syntax is schema::url, example: git::https://foo.com -var forcedRegexp = regexp.MustCompile(`^([A-Za-z]+)::(.+)$`) - -func init() { - httpGetter := new(HttpGetter) - - Getters = map[string]Getter{ - "file": new(FileGetter), - "git": new(GitGetter), - "hg": new(HgGetter), - "http": httpGetter, - "https": httpGetter, - } -} - -// Get downloads the module specified by src into the folder specified by -// dst. If dst already exists, Get will attempt to update it. +// GetMode is an enum that describes how modules are loaded. // -// src is a URL, whereas dst is always just a file path to a folder. This -// folder doesn't need to exist. It will be created if it doesn't exist. -func Get(dst, src string) error { - var force string - force, src = getForcedGetter(src) - - // If there is a subdir component, then we download the root separately - // and then copy over the proper subdir. - var realDst string - src, subDir := getDirSubdir(src) - if subDir != "" { - tmpDir, err := ioutil.TempDir("", "tf") - if err != nil { - return err - } - if err := os.RemoveAll(tmpDir); err != nil { - return err - } - defer os.RemoveAll(tmpDir) - - realDst = dst - dst = tmpDir - } - - u, err := urlhelper.Parse(src) - if err != nil { - return err - } - if force == "" { - force = u.Scheme - } - - g, ok := Getters[force] - if !ok { - return fmt.Errorf( - "module download not supported for scheme '%s'", force) - } - - err = g.Get(dst, u) - if err != nil { - err = fmt.Errorf("error downloading module '%s': %s", src, err) - return err - } - - // If we have a subdir, copy that over - if subDir != "" { - if err := os.RemoveAll(realDst); err != nil { - return err - } - if err := os.MkdirAll(realDst, 0755); err != nil { - return err - } - - return copyDir(realDst, filepath.Join(dst, subDir)) - } - - return nil -} - -// GetCopy is the same as Get except that it downloads a copy of the -// module represented by source. 
+// GetModeLoad says that modules will not be downloaded or updated, they will +// only be loaded from the storage. // -// This copy will omit and dot-prefixed files (such as .git/, .hg/) and -// can't be updated on its own. -func GetCopy(dst, src string) error { - // Create the temporary directory to do the real Get to - tmpDir, err := ioutil.TempDir("", "tf") - if err != nil { - return err - } - if err := os.RemoveAll(tmpDir); err != nil { - return err - } - defer os.RemoveAll(tmpDir) +// GetModeGet says that modules can be initially downloaded if they don't +// exist, but otherwise to just load from the current version in storage. +// +// GetModeUpdate says that modules should be checked for updates and +// downloaded prior to loading. If there are no updates, we load the version +// from disk, otherwise we download first and then load. +type GetMode byte - // Get to that temporary dir - if err := Get(tmpDir, src); err != nil { - return err - } +const ( + GetModeNone GetMode = iota + GetModeGet + GetModeUpdate +) - // Make sure the destination exists - if err := os.MkdirAll(dst, 0755); err != nil { - return err - } - - // Copy to the final location - return copyDir(dst, tmpDir) -} - -// getRunCommand is a helper that will run a command and capture the output -// in the case an error happens. -func getRunCommand(cmd *exec.Cmd) error { - var buf bytes.Buffer - cmd.Stdout = &buf - cmd.Stderr = &buf - err := cmd.Run() - if err == nil { - return nil - } - if exiterr, ok := err.(*exec.ExitError); ok { - // The program has exited with an exit code != 0 - if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return fmt.Errorf( - "%s exited with %d: %s", - cmd.Path, - status.ExitStatus(), - buf.String()) +func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) { + // Get the module with the level specified if we were told to. + if mode > GetModeNone { + if err := s.Get(key, src, mode == GetModeUpdate); err != nil { + return "", false, err } } - return fmt.Errorf("error running %s: %s", cmd.Path, buf.String()) -} - -// getDirSubdir takes a source and returns a tuple of the URL without -// the subdir and the URL with the subdir. -func getDirSubdir(src string) (string, string) { - // Calcaulate an offset to avoid accidentally marking the scheme - // as the dir. - var offset int - if idx := strings.Index(src, "://"); idx > -1 { - offset = idx + 3 - } - - // First see if we even have an explicit subdir - idx := strings.Index(src[offset:], "//") - if idx == -1 { - return src, "" - } - - idx += offset - subdir := src[idx+2:] - src = src[:idx] - - // Next, check if we have query parameters and push them onto the - // URL. - if idx = strings.Index(subdir, "?"); idx > -1 { - query := subdir[idx:] - subdir = subdir[:idx] - src += query - } - - return src, subdir -} - -// getForcedGetter takes a source and returns the tuple of the forced -// getter and the raw URL (without the force syntax). -func getForcedGetter(src string) (string, string) { - var forced string - if ms := forcedRegexp.FindStringSubmatch(src); ms != nil { - forced = ms[1] - src = ms[2] - } - - return forced, src + // Get the directory where the module is. 
+ return s.Dir(key) } diff --git a/config/module/get_file.go b/config/module/get_file.go deleted file mode 100644 index 73cb858341..0000000000 --- a/config/module/get_file.go +++ /dev/null @@ -1,46 +0,0 @@ -package module - -import ( - "fmt" - "net/url" - "os" - "path/filepath" -) - -// FileGetter is a Getter implementation that will download a module from -// a file scheme. -type FileGetter struct{} - -func (g *FileGetter) Get(dst string, u *url.URL) error { - // The source path must exist and be a directory to be usable. - if fi, err := os.Stat(u.Path); err != nil { - return fmt.Errorf("source path error: %s", err) - } else if !fi.IsDir() { - return fmt.Errorf("source path must be a directory") - } - - fi, err := os.Lstat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - - // If the destination already exists, it must be a symlink - if err == nil { - mode := fi.Mode() - if mode&os.ModeSymlink == 0 { - return fmt.Errorf("destination exists and is not a symlink") - } - - // Remove the destination - if err := os.Remove(dst); err != nil { - return err - } - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { - return err - } - - return os.Symlink(u.Path, dst) -} diff --git a/config/module/get_file_test.go b/config/module/get_file_test.go deleted file mode 100644 index 4c9f6126a7..0000000000 --- a/config/module/get_file_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package module - -import ( - "os" - "path/filepath" - "testing" -) - -func TestFileGetter_impl(t *testing.T) { - var _ Getter = new(FileGetter) -} - -func TestFileGetter(t *testing.T) { - g := new(FileGetter) - dst := tempDir(t) - - // With a dir that doesn't exist - if err := g.Get(dst, testModuleURL("basic")); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the destination folder is a symlink - fi, err := os.Lstat(dst) - if err != nil { - t.Fatalf("err: %s", err) - } - if fi.Mode()&os.ModeSymlink == 0 { - t.Fatal("destination is not a symlink") - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestFileGetter_sourceFile(t *testing.T) { - g := new(FileGetter) - dst := tempDir(t) - - // With a source URL that is a path to a file - u := testModuleURL("basic") - u.Path += "/main.tf" - if err := g.Get(dst, u); err == nil { - t.Fatal("should error") - } -} - -func TestFileGetter_sourceNoExist(t *testing.T) { - g := new(FileGetter) - dst := tempDir(t) - - // With a source URL that doesn't exist - u := testModuleURL("basic") - u.Path += "/main" - if err := g.Get(dst, u); err == nil { - t.Fatal("should error") - } -} - -func TestFileGetter_dir(t *testing.T) { - g := new(FileGetter) - dst := tempDir(t) - - if err := os.MkdirAll(dst, 0755); err != nil { - t.Fatalf("err: %s", err) - } - - // With a dir that exists that isn't a symlink - if err := g.Get(dst, testModuleURL("basic")); err == nil { - t.Fatal("should error") - } -} - -func TestFileGetter_dirSymlink(t *testing.T) { - g := new(FileGetter) - dst := tempDir(t) - dst2 := tempDir(t) - - // Make parents - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { - t.Fatalf("err: %s", err) - } - if err := os.MkdirAll(dst2, 0755); err != nil { - t.Fatalf("err: %s", err) - } - - // Make a symlink - if err := os.Symlink(dst2, dst); err != nil { - t.Fatalf("err: %s", err) - } - - // With a dir that exists that isn't a symlink - if err := g.Get(dst, testModuleURL("basic")); err != nil { - 
t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} diff --git a/config/module/get_git.go b/config/module/get_git.go deleted file mode 100644 index 5ab27ba0be..0000000000 --- a/config/module/get_git.go +++ /dev/null @@ -1,74 +0,0 @@ -package module - -import ( - "fmt" - "net/url" - "os" - "os/exec" -) - -// GitGetter is a Getter implementation that will download a module from -// a git repository. -type GitGetter struct{} - -func (g *GitGetter) Get(dst string, u *url.URL) error { - if _, err := exec.LookPath("git"); err != nil { - return fmt.Errorf("git must be available and on the PATH") - } - - // Extract some query parameters we use - var ref string - q := u.Query() - if len(q) > 0 { - ref = q.Get("ref") - q.Del("ref") - - // Copy the URL - var newU url.URL = *u - u = &newU - u.RawQuery = q.Encode() - } - - // First: clone or update the repository - _, err := os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - err = g.update(dst, u) - } else { - err = g.clone(dst, u) - } - if err != nil { - return err - } - - // Next: check out the proper tag/branch if it is specified, and checkout - if ref == "" { - return nil - } - - return g.checkout(dst, ref) -} - -func (g *GitGetter) checkout(dst string, ref string) error { - cmd := exec.Command("git", "checkout", ref) - cmd.Dir = dst - return getRunCommand(cmd) -} - -func (g *GitGetter) clone(dst string, u *url.URL) error { - cmd := exec.Command("git", "clone", u.String(), dst) - return getRunCommand(cmd) -} - -func (g *GitGetter) update(dst string, u *url.URL) error { - // We have to be on a branch to pull - if err := g.checkout(dst, "master"); err != nil { - return err - } - - cmd := exec.Command("git", "pull", "--ff-only") - cmd.Dir = dst - return getRunCommand(cmd) -} diff --git a/config/module/get_git_test.go b/config/module/get_git_test.go deleted file mode 100644 index 3885ff8e79..0000000000 --- a/config/module/get_git_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package module - -import ( - "os" - "os/exec" - "path/filepath" - "testing" -) - -var testHasGit bool - -func init() { - if _, err := exec.LookPath("git"); err == nil { - testHasGit = true - } -} - -func TestGitGetter_impl(t *testing.T) { - var _ Getter = new(GitGetter) -} - -func TestGitGetter(t *testing.T) { - if !testHasGit { - t.Log("git not found, skipping") - t.Skip() - } - - g := new(GitGetter) - dst := tempDir(t) - - // Git doesn't allow nested ".git" directories so we do some hackiness - // here to get around that... - moduleDir := filepath.Join(fixtureDir, "basic-git") - oldName := filepath.Join(moduleDir, "DOTgit") - newName := filepath.Join(moduleDir, ".git") - if err := os.Rename(oldName, newName); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Rename(newName, oldName) - - // With a dir that doesn't exist - if err := g.Get(dst, testModuleURL("basic-git")); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestGitGetter_branch(t *testing.T) { - if !testHasGit { - t.Log("git not found, skipping") - t.Skip() - } - - g := new(GitGetter) - dst := tempDir(t) - - // Git doesn't allow nested ".git" directories so we do some hackiness - // here to get around that... 
- moduleDir := filepath.Join(fixtureDir, "basic-git") - oldName := filepath.Join(moduleDir, "DOTgit") - newName := filepath.Join(moduleDir, ".git") - if err := os.Rename(oldName, newName); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Rename(newName, oldName) - - url := testModuleURL("basic-git") - q := url.Query() - q.Add("ref", "test-branch") - url.RawQuery = q.Encode() - - if err := g.Get(dst, url); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main_branch.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } - - // Get again should work - if err := g.Get(dst, url); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath = filepath.Join(dst, "main_branch.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestGitGetter_tag(t *testing.T) { - if !testHasGit { - t.Log("git not found, skipping") - t.Skip() - } - - g := new(GitGetter) - dst := tempDir(t) - - // Git doesn't allow nested ".git" directories so we do some hackiness - // here to get around that... - moduleDir := filepath.Join(fixtureDir, "basic-git") - oldName := filepath.Join(moduleDir, "DOTgit") - newName := filepath.Join(moduleDir, ".git") - if err := os.Rename(oldName, newName); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Rename(newName, oldName) - - url := testModuleURL("basic-git") - q := url.Query() - q.Add("ref", "v1.0") - url.RawQuery = q.Encode() - - if err := g.Get(dst, url); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main_tag1.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } - - // Get again should work - if err := g.Get(dst, url); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath = filepath.Join(dst, "main_tag1.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} diff --git a/config/module/get_hg.go b/config/module/get_hg.go deleted file mode 100644 index f74c140932..0000000000 --- a/config/module/get_hg.go +++ /dev/null @@ -1,89 +0,0 @@ -package module - -import ( - "fmt" - "net/url" - "os" - "os/exec" - "runtime" - - urlhelper "github.com/hashicorp/terraform/helper/url" -) - -// HgGetter is a Getter implementation that will download a module from -// a Mercurial repository. 
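Before the HgGetter implementation, a short sketch of the revision selection it performs: the "rev" query parameter is stripped from the URL and later passed to "hg update" (repository path and revision below are hypothetical):

    package main

    import (
    	"fmt"
    	"net/url"
    )

    func main() {
    	u, err := url.Parse("file:///tmp/modules/basic-hg?rev=test-branch")
    	if err != nil {
    		panic(err)
    	}
    	q := u.Query()
    	rev := q.Get("rev") // revision handed to "hg update"
    	q.Del("rev")
    	u.RawQuery = q.Encode()
    	fmt.Println(rev, u.String()) // test-branch file:///tmp/modules/basic-hg
    }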
-type HgGetter struct{} - -func (g *HgGetter) Get(dst string, u *url.URL) error { - if _, err := exec.LookPath("hg"); err != nil { - return fmt.Errorf("hg must be available and on the PATH") - } - - newURL, err := urlhelper.Parse(u.String()) - if err != nil { - return err - } - if fixWindowsDrivePath(newURL) { - // See valid file path form on http://www.selenic.com/hg/help/urls - newURL.Path = fmt.Sprintf("/%s", newURL.Path) - } - - // Extract some query parameters we use - var rev string - q := newURL.Query() - if len(q) > 0 { - rev = q.Get("rev") - q.Del("rev") - - newURL.RawQuery = q.Encode() - } - - _, err = os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - if err != nil { - if err := g.clone(dst, newURL); err != nil { - return err - } - } - - if err := g.pull(dst, newURL); err != nil { - return err - } - - return g.update(dst, newURL, rev) -} - -func (g *HgGetter) clone(dst string, u *url.URL) error { - cmd := exec.Command("hg", "clone", "-U", u.String(), dst) - return getRunCommand(cmd) -} - -func (g *HgGetter) pull(dst string, u *url.URL) error { - cmd := exec.Command("hg", "pull") - cmd.Dir = dst - return getRunCommand(cmd) -} - -func (g *HgGetter) update(dst string, u *url.URL, rev string) error { - args := []string{"update"} - if rev != "" { - args = append(args, rev) - } - - cmd := exec.Command("hg", args...) - cmd.Dir = dst - return getRunCommand(cmd) -} - -func fixWindowsDrivePath(u *url.URL) bool { - // hg assumes a file:/// prefix for Windows drive letter file paths. - // (e.g. file:///c:/foo/bar) - // If the URL Path does not begin with a '/' character, the resulting URL - // path will have a file:// prefix. (e.g. file://c:/foo/bar) - // See http://www.selenic.com/hg/help/urls and the examples listed in - // http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936 - return runtime.GOOS == "windows" && u.Scheme == "file" && - len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':' -} diff --git a/config/module/get_hg_test.go b/config/module/get_hg_test.go deleted file mode 100644 index d7125bde21..0000000000 --- a/config/module/get_hg_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package module - -import ( - "os" - "os/exec" - "path/filepath" - "testing" -) - -var testHasHg bool - -func init() { - if _, err := exec.LookPath("hg"); err == nil { - testHasHg = true - } -} - -func TestHgGetter_impl(t *testing.T) { - var _ Getter = new(HgGetter) -} - -func TestHgGetter(t *testing.T) { - t.Parallel() - - if !testHasHg { - t.Log("hg not found, skipping") - t.Skip() - } - - g := new(HgGetter) - dst := tempDir(t) - - // With a dir that doesn't exist - if err := g.Get(dst, testModuleURL("basic-hg")); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestHgGetter_branch(t *testing.T) { - t.Parallel() - - if !testHasHg { - t.Log("hg not found, skipping") - t.Skip() - } - - g := new(HgGetter) - dst := tempDir(t) - - url := testModuleURL("basic-hg") - q := url.Query() - q.Add("rev", "test-branch") - url.RawQuery = q.Encode() - - if err := g.Get(dst, url); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main_branch.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } - - // Get again should work - if err := g.Get(dst, url); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists 
- mainPath = filepath.Join(dst, "main_branch.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} diff --git a/config/module/get_http.go b/config/module/get_http.go deleted file mode 100644 index be65d921a8..0000000000 --- a/config/module/get_http.go +++ /dev/null @@ -1,173 +0,0 @@ -package module - -import ( - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" -) - -// HttpGetter is a Getter implementation that will download a module from -// an HTTP endpoint. The protocol for downloading a module from an HTTP -// endpoing is as follows: -// -// An HTTP GET request is made to the URL with the additional GET parameter -// "terraform-get=1". This lets you handle that scenario specially if you -// wish. The response must be a 2xx. -// -// First, a header is looked for "X-Terraform-Get" which should contain -// a source URL to download. -// -// If the header is not present, then a meta tag is searched for named -// "terraform-get" and the content should be a source URL. -// -// The source URL, whether from the header or meta tag, must be a fully -// formed URL. The shorthand syntax of "github.com/foo/bar" or relative -// paths are not allowed. -type HttpGetter struct{} - -func (g *HttpGetter) Get(dst string, u *url.URL) error { - // Copy the URL so we can modify it - var newU url.URL = *u - u = &newU - - // Add terraform-get to the parameter. - q := u.Query() - q.Add("terraform-get", "1") - u.RawQuery = q.Encode() - - // Get the URL - resp, err := http.Get(u.String()) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return fmt.Errorf("bad response code: %d", resp.StatusCode) - } - - // Extract the source URL - var source string - if v := resp.Header.Get("X-Terraform-Get"); v != "" { - source = v - } else { - source, err = g.parseMeta(resp.Body) - if err != nil { - return err - } - } - if source == "" { - return fmt.Errorf("no source URL was returned") - } - - // If there is a subdir component, then we download the root separately - // into a temporary directory, then copy over the proper subdir. - source, subDir := getDirSubdir(source) - if subDir == "" { - return Get(dst, source) - } - - // We have a subdir, time to jump some hoops - return g.getSubdir(dst, source, subDir) -} - -// getSubdir downloads the source into the destination, but with -// the proper subdir. -func (g *HttpGetter) getSubdir(dst, source, subDir string) error { - // Create a temporary directory to store the full source - td, err := ioutil.TempDir("", "tf") - if err != nil { - return err - } - defer os.RemoveAll(td) - - // Download that into the given directory - if err := Get(td, source); err != nil { - return err - } - - // Make sure the subdir path actually exists - sourcePath := filepath.Join(td, subDir) - if _, err := os.Stat(sourcePath); err != nil { - return fmt.Errorf( - "Error downloading %s: %s", source, err) - } - - // Copy the subdirectory into our actual destination. - if err := os.RemoveAll(dst); err != nil { - return err - } - - // Make the final destination - if err := os.MkdirAll(dst, 0755); err != nil { - return err - } - - return copyDir(dst, sourcePath) -} - -// parseMeta looks for the first meta tag in the given reader that -// will give us the source URL. 
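A sketch of the server side of the protocol described above: answering the terraform-get request with an X-Terraform-Get header that points at the real source (the route, address, and source URL below are hypothetical):

    package main

    import (
    	"fmt"
    	"net/http"
    )

    func main() {
    	http.HandleFunc("/module", func(w http.ResponseWriter, r *http.Request) {
    		if r.URL.Query().Get("terraform-get") == "1" {
    			// Must be a fully formed URL; shorthand sources are rejected.
    			w.Header().Set("X-Terraform-Get", "file:///opt/modules/app")
    		}
    		w.WriteHeader(http.StatusOK)
    	})
    	fmt.Println(http.ListenAndServe("127.0.0.1:8080", nil))
    }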
-func (g *HttpGetter) parseMeta(r io.Reader) (string, error) { - d := xml.NewDecoder(r) - d.CharsetReader = charsetReader - d.Strict = false - var err error - var t xml.Token - for { - t, err = d.Token() - if err != nil { - if err == io.EOF { - err = nil - } - return "", err - } - if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") { - return "", nil - } - if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") { - return "", nil - } - e, ok := t.(xml.StartElement) - if !ok || !strings.EqualFold(e.Name.Local, "meta") { - continue - } - if attrValue(e.Attr, "name") != "terraform-get" { - continue - } - if f := attrValue(e.Attr, "content"); f != "" { - return f, nil - } - } -} - -// attrValue returns the attribute value for the case-insensitive key -// `name', or the empty string if nothing is found. -func attrValue(attrs []xml.Attr, name string) string { - for _, a := range attrs { - if strings.EqualFold(a.Name.Local, name) { - return a.Value - } - } - return "" -} - -// charsetReader returns a reader for the given charset. Currently -// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful -// error which is printed by go get, so the user can find why the package -// wasn't downloaded if the encoding is not supported. Note that, in -// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters -// greater than 0x7f are not rejected). -func charsetReader(charset string, input io.Reader) (io.Reader, error) { - switch strings.ToLower(charset) { - case "ascii": - return input, nil - default: - return nil, fmt.Errorf("can't decode XML document using charset %q", charset) - } -} diff --git a/config/module/get_http_test.go b/config/module/get_http_test.go deleted file mode 100644 index 5f2590f481..0000000000 --- a/config/module/get_http_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package module - -import ( - "fmt" - "net" - "net/http" - "net/url" - "os" - "path/filepath" - "testing" -) - -func TestHttpGetter_impl(t *testing.T) { - var _ Getter = new(HttpGetter) -} - -func TestHttpGetter_header(t *testing.T) { - ln := testHttpServer(t) - defer ln.Close() - - g := new(HttpGetter) - dst := tempDir(t) - - var u url.URL - u.Scheme = "http" - u.Host = ln.Addr().String() - u.Path = "/header" - - // Get it! - if err := g.Get(dst, &u); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestHttpGetter_meta(t *testing.T) { - ln := testHttpServer(t) - defer ln.Close() - - g := new(HttpGetter) - dst := tempDir(t) - - var u url.URL - u.Scheme = "http" - u.Host = ln.Addr().String() - u.Path = "/meta" - - // Get it! - if err := g.Get(dst, &u); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify the main file exists - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestHttpGetter_metaSubdir(t *testing.T) { - ln := testHttpServer(t) - defer ln.Close() - - g := new(HttpGetter) - dst := tempDir(t) - - var u url.URL - u.Scheme = "http" - u.Host = ln.Addr().String() - u.Path = "/meta-subdir" - - // Get it! 
- if err := g.Get(dst, &u); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Verify the main file exists
- mainPath := filepath.Join(dst, "sub.tf")
- if _, err := os.Stat(mainPath); err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestHttpGetter_none(t *testing.T) {
- ln := testHttpServer(t)
- defer ln.Close()
-
- g := new(HttpGetter)
- dst := tempDir(t)
-
- var u url.URL
- u.Scheme = "http"
- u.Host = ln.Addr().String()
- u.Path = "/none"
-
- // Get it!
- if err := g.Get(dst, &u); err == nil {
- t.Fatal("should error")
- }
-}
-
-func testHttpServer(t *testing.T) net.Listener {
- ln, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- mux := http.NewServeMux()
- mux.HandleFunc("/header", testHttpHandlerHeader)
- mux.HandleFunc("/meta", testHttpHandlerMeta)
- mux.HandleFunc("/meta-subdir", testHttpHandlerMetaSubdir)
-
- var server http.Server
- server.Handler = mux
- go server.Serve(ln)
-
- return ln
-}
-
-func testHttpHandlerHeader(w http.ResponseWriter, r *http.Request) {
- w.Header().Add("X-Terraform-Get", testModuleURL("basic").String())
- w.WriteHeader(200)
-}
-
-func testHttpHandlerMeta(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte(fmt.Sprintf(testHttpMetaStr, testModuleURL("basic").String())))
-}
-
-func testHttpHandlerMetaSubdir(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte(fmt.Sprintf(testHttpMetaStr, testModuleURL("basic//subdir").String())))
-}
-
-func testHttpHandlerNone(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte(testHttpNoneStr))
-}
-
-const testHttpMetaStr = `
-<html>
-<head>
-<meta name="terraform-get" content="%s">
-</head>
-</html>
-`
-
-const testHttpNoneStr = `
-<html>
-<head>
-</head>
-</html>
-`
diff --git a/config/module/get_test.go b/config/module/get_test.go
deleted file mode 100644
index b403c835ce..0000000000
--- a/config/module/get_test.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package module
-
-import (
- "os"
- "path/filepath"
- "strings"
- "testing"
-)
-
-func TestGet_badSchema(t *testing.T) {
- dst := tempDir(t)
- u := testModule("basic")
- u = strings.Replace(u, "file", "nope", -1)
-
- if err := Get(dst, u); err == nil {
- t.Fatal("should error")
- }
-}
-
-func TestGet_file(t *testing.T) {
- dst := tempDir(t)
- u := testModule("basic")
-
- if err := Get(dst, u); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- mainPath := filepath.Join(dst, "main.tf")
- if _, err := os.Stat(mainPath); err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestGet_fileForced(t *testing.T) {
- dst := tempDir(t)
- u := testModule("basic")
- u = "file::" + u
-
- if err := Get(dst, u); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- mainPath := filepath.Join(dst, "main.tf")
- if _, err := os.Stat(mainPath); err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestGet_fileSubdir(t *testing.T) {
- dst := tempDir(t)
- u := testModule("basic//subdir")
-
- if err := Get(dst, u); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- mainPath := filepath.Join(dst, "sub.tf")
- if _, err := os.Stat(mainPath); err != nil {
- t.Fatalf("err: %s", err)
- }
-}
-
-func TestGetCopy_dot(t *testing.T) {
- dst := tempDir(t)
- u := testModule("basic-dot")
-
- if err := GetCopy(dst, u); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- mainPath := filepath.Join(dst, "main.tf")
- if _, err := os.Stat(mainPath); err != nil {
- t.Fatalf("err: %s", err)
- }
-
- mainPath = filepath.Join(dst, "foo.tf")
- if _, err := os.Stat(mainPath); err == nil {
- t.Fatal("should not have foo.tf")
- }
-}
-
-func TestGetCopy_file(t *testing.T) {
- dst := tempDir(t)
- u := testModule("basic")
-
- if err := GetCopy(dst, u); err != nil {
- t.Fatalf("err: %s", err) - } - - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestGetDirSubdir(t *testing.T) { - cases := []struct { - Input string - Dir, Sub string - }{ - { - "hashicorp.com", - "hashicorp.com", "", - }, - { - "hashicorp.com//foo", - "hashicorp.com", "foo", - }, - { - "hashicorp.com//foo?bar=baz", - "hashicorp.com?bar=baz", "foo", - }, - { - "file://foo//bar", - "file://foo", "bar", - }, - } - - for i, tc := range cases { - adir, asub := getDirSubdir(tc.Input) - if adir != tc.Dir { - t.Fatalf("%d: bad dir: %#v", i, adir) - } - if asub != tc.Sub { - t.Fatalf("%d: bad sub: %#v", i, asub) - } - } -} diff --git a/config/module/module_test.go b/config/module/module_test.go index f1517e4801..89fee6ec56 100644 --- a/config/module/module_test.go +++ b/config/module/module_test.go @@ -2,13 +2,12 @@ package module import ( "io/ioutil" - "net/url" "os" "path/filepath" "testing" + "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config" - urlhelper "github.com/hashicorp/terraform/helper/url" ) const fixtureDir = "./test-fixtures" @@ -34,24 +33,6 @@ func testConfig(t *testing.T, n string) *config.Config { return c } -func testModule(n string) string { - p := filepath.Join(fixtureDir, n) - p, err := filepath.Abs(p) - if err != nil { - panic(err) - } - return fmtFileURL(p) -} - -func testModuleURL(n string) *url.URL { - u, err := urlhelper.Parse(testModule(n)) - if err != nil { - panic(err) - } - - return u -} - -func testStorage(t *testing.T) Storage { - return &FolderStorage{StorageDir: tempDir(t)} +func testStorage(t *testing.T) getter.Storage { + return &getter.FolderStorage{StorageDir: tempDir(t)} } diff --git a/config/module/storage.go b/config/module/storage.go deleted file mode 100644 index 9c752f6309..0000000000 --- a/config/module/storage.go +++ /dev/null @@ -1,25 +0,0 @@ -package module - -// Storage is an interface that knows how to lookup downloaded modules -// as well as download and update modules from their sources into the -// proper location. -type Storage interface { - // Dir returns the directory on local disk where the modulue source - // can be loaded from. - Dir(string) (string, bool, error) - - // Get will download and optionally update the given module. - Get(string, string, bool) error -} - -func getStorage(s Storage, key string, src string, mode GetMode) (string, bool, error) { - // Get the module with the level specified if we were told to. - if mode > GetModeNone { - if err := s.Get(key, src, mode == GetModeUpdate); err != nil { - return "", false, err - } - } - - // Get the directory where the module is. - return s.Dir(key) -} diff --git a/config/module/tree.go b/config/module/tree.go index d7b3ac9661..6a75c19c23 100644 --- a/config/module/tree.go +++ b/config/module/tree.go @@ -8,6 +8,7 @@ import ( "strings" "sync" + "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config" ) @@ -27,25 +28,6 @@ type Tree struct { lock sync.RWMutex } -// GetMode is an enum that describes how modules are loaded. -// -// GetModeLoad says that modules will not be downloaded or updated, they will -// only be loaded from the storage. -// -// GetModeGet says that modules can be initially downloaded if they don't -// exist, but otherwise to just load from the current version in storage. -// -// GetModeUpdate says that modules should be checked for updates and -// downloaded prior to loading. 
If there are no updates, we load the version -// from disk, otherwise we download first and then load. -type GetMode byte - -const ( - GetModeNone GetMode = iota - GetModeGet - GetModeUpdate -) - // NewTree returns a new Tree for the given config structure. func NewTree(name string, c *config.Config) *Tree { return &Tree{config: c, name: name} @@ -136,7 +118,7 @@ func (t *Tree) Name() string { // module trees inherently require the configuration to be in a reasonably // sane state: no circular dependencies, proper module sources, etc. A full // suite of validations can be done by running Validate (after loading). -func (t *Tree) Load(s Storage, mode GetMode) error { +func (t *Tree) Load(s getter.Storage, mode GetMode) error { t.lock.Lock() defer t.lock.Unlock() @@ -159,15 +141,15 @@ func (t *Tree) Load(s Storage, mode GetMode) error { path = append(path, m.Name) // Split out the subdir if we have one - source, subDir := getDirSubdir(m.Source) + source, subDir := getter.SourceDirSubdir(m.Source) - source, err := Detect(source, t.config.Dir) + source, err := getter.Detect(source, t.config.Dir, getter.Detectors) if err != nil { return fmt.Errorf("module %s: %s", m.Name, err) } // Check if the detector introduced something new. - source, subDir2 := getDirSubdir(source) + source, subDir2 := getter.SourceDirSubdir(source) if subDir2 != "" { subDir = filepath.Join(subDir2, subDir) } From 344e7c26b5f116842932d0e6b6ad2f1a250526f4 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 15 Oct 2015 13:48:58 -0700 Subject: [PATCH 268/335] fix a bunch of tests from go-getter import --- command/apply.go | 4 +- command/command_test.go | 3 +- command/init.go | 3 +- command/meta.go | 5 +- command/module_storage.go | 4 +- command/module_storage_test.go | 4 +- config/lang/y.go | 272 ++++++++++++++++++++++++--------- config/module/copy_dir.go | 76 +++++++++ config/module/get.go | 33 ++++ helper/resource/testing.go | 3 +- 10 files changed, 328 insertions(+), 79 deletions(-) create mode 100644 config/module/copy_dir.go diff --git a/command/apply.go b/command/apply.go index 8001cfe077..0687116a8a 100644 --- a/command/apply.go +++ b/command/apply.go @@ -7,8 +7,8 @@ import ( "sort" "strings" + "github.com/hashicorp/go-getter" "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/terraform" ) @@ -76,7 +76,7 @@ func (c *ApplyCommand) Run(args []string) int { if !c.Destroy && maybeInit { // Do a detect to determine if we need to do an init + apply. 
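A sketch of what the go-getter detection used in this change does with a shorthand module source (the source string and working directory below are hypothetical; exact output depends on the registered detectors):

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/go-getter"
    )

    func main() {
    	// Expand a shorthand source the same way the apply path now does.
    	src, err := getter.Detect("github.com/hashicorp/example", "/tmp/pwd", getter.Detectors)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(src) // e.g. git::https://github.com/hashicorp/example.git
    }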
- if detected, err := module.Detect(configPath, pwd); err != nil { + if detected, err := getter.Detect(configPath, pwd, getter.Detectors); err != nil { c.Ui.Error(fmt.Sprintf( "Invalid path: %s", err)) return 1 diff --git a/command/command_test.go b/command/command_test.go index 2b9f93dd1d..954579c3dd 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/terraform" ) @@ -73,7 +74,7 @@ func testModule(t *testing.T, name string) *module.Tree { t.Fatalf("err: %s", err) } - s := &module.FolderStorage{StorageDir: tempDir(t)} + s := &getter.FolderStorage{StorageDir: tempDir(t)} if err := mod.Load(s, module.GetModeGet); err != nil { t.Fatalf("err: %s", err) } diff --git a/command/init.go b/command/init.go index fb842d08de..1b92c0806c 100644 --- a/command/init.go +++ b/command/init.go @@ -6,6 +6,7 @@ import ( "os" "strings" + "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/terraform" @@ -75,7 +76,7 @@ func (c *InitCommand) Run(args []string) int { } // Detect - source, err = module.Detect(source, pwd) + source, err = getter.Detect(source, pwd, getter.Detectors) if err != nil { c.Ui.Error(fmt.Sprintf( "Error with module source: %s", err)) diff --git a/command/meta.go b/command/meta.go index af4a523028..3a12de02f7 100644 --- a/command/meta.go +++ b/command/meta.go @@ -9,6 +9,7 @@ import ( "path/filepath" "strconv" + "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/state" "github.com/hashicorp/terraform/terraform" @@ -330,9 +331,9 @@ func (m *Meta) flagSet(n string) *flag.FlagSet { // moduleStorage returns the module.Storage implementation used to store // modules for commands. -func (m *Meta) moduleStorage(root string) module.Storage { +func (m *Meta) moduleStorage(root string) getter.Storage { return &uiModuleStorage{ - Storage: &module.FolderStorage{ + Storage: &getter.FolderStorage{ StorageDir: filepath.Join(root, "modules"), }, Ui: m.Ui, diff --git a/command/module_storage.go b/command/module_storage.go index e17786a807..5bb832897d 100644 --- a/command/module_storage.go +++ b/command/module_storage.go @@ -3,14 +3,14 @@ package command import ( "fmt" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/go-getter" "github.com/mitchellh/cli" ) // uiModuleStorage implements module.Storage and is just a proxy to output // to the UI any Get operations. 
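The proxy's method bodies are not part of this diff; a sketch of how its Get half might look, assuming the uiModuleStorage type defined just below and a fmt import:

    func (s *uiModuleStorage) Get(key string, source string, update bool) error {
    	// Surface the fetch to the user, then delegate to the real storage.
    	s.Ui.Output(fmt.Sprintf("Get: %s", source))
    	return s.Storage.Get(key, source, update)
    }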
type uiModuleStorage struct { - Storage module.Storage + Storage getter.Storage Ui cli.Ui } diff --git a/command/module_storage_test.go b/command/module_storage_test.go index b77c2b5f78..97a5ed7ae3 100644 --- a/command/module_storage_test.go +++ b/command/module_storage_test.go @@ -3,9 +3,9 @@ package command import ( "testing" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/go-getter" ) func TestUiModuleStorage_impl(t *testing.T) { - var _ module.Storage = new(uiModuleStorage) + var _ getter.Storage = new(uiModuleStorage) } diff --git a/config/lang/y.go b/config/lang/y.go index e7dd185ae1..fd0693f151 100644 --- a/config/lang/y.go +++ b/config/lang/y.go @@ -30,7 +30,10 @@ const INTEGER = 57355 const FLOAT = 57356 const STRING = 57357 -var parserToknames = []string{ +var parserToknames = [...]string{ + "$end", + "error", + "$unk", "PROGRAM_BRACKET_LEFT", "PROGRAM_BRACKET_RIGHT", "PROGRAM_STRING_START", @@ -44,7 +47,7 @@ var parserToknames = []string{ "FLOAT", "STRING", } -var parserStatenames = []string{} +var parserStatenames = [...]string{} const parserEofCode = 1 const parserErrCode = 2 @@ -53,7 +56,7 @@ const parserMaxDepth = 200 //line lang.y:165 //line yacctab:1 -var parserExca = []int{ +var parserExca = [...]int{ -1, 1, 1, -1, -2, 0, @@ -67,75 +70,103 @@ var parserStates []string const parserLast = 30 -var parserAct = []int{ +var parserAct = [...]int{ 9, 20, 16, 16, 7, 7, 3, 18, 10, 8, 1, 17, 14, 12, 13, 6, 6, 19, 8, 22, 15, 23, 24, 11, 2, 25, 16, 21, 4, 5, } -var parserPact = []int{ +var parserPact = [...]int{ 1, -1000, 1, -1000, -1000, -1000, -1000, 0, -1000, 15, 0, 1, -1000, -1000, -1, -1000, 0, -8, 0, -1000, -1000, 12, -9, -1000, 0, -9, } -var parserPgo = []int{ +var parserPgo = [...]int{ 0, 0, 29, 28, 23, 6, 27, 10, } -var parserR1 = []int{ +var parserR1 = [...]int{ 0, 7, 7, 4, 4, 5, 5, 2, 1, 1, 1, 1, 1, 1, 1, 6, 6, 6, 3, } -var parserR2 = []int{ +var parserR2 = [...]int{ 0, 0, 1, 1, 2, 1, 1, 3, 3, 1, 1, 1, 3, 1, 4, 0, 3, 1, 1, } -var parserChk = []int{ +var parserChk = [...]int{ -1000, -7, -4, -5, -3, -2, 15, 4, -5, -1, 8, -4, 13, 14, 12, 5, 11, -1, 8, -1, 9, -6, -1, 9, 10, -1, } -var parserDef = []int{ +var parserDef = [...]int{ 1, -2, 2, 3, 5, 6, 18, 0, 4, 0, 0, 9, 10, 11, 13, 7, 0, 0, 15, 12, 8, 0, 17, 14, 0, 16, } -var parserTok1 = []int{ +var parserTok1 = [...]int{ 1, } -var parserTok2 = []int{ +var parserTok2 = [...]int{ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, } -var parserTok3 = []int{ +var parserTok3 = [...]int{ 0, } +var parserErrorMessages = [...]struct { + state int + token int + msg string +}{} + //line yaccpar:1 /* parser for yacc output */ -var parserDebug = 0 +var ( + parserDebug = 0 + parserErrorVerbose = false +) type parserLexer interface { Lex(lval *parserSymType) int Error(s string) } +type parserParser interface { + Parse(parserLexer) int + Lookahead() int +} + +type parserParserImpl struct { + lookahead func() int +} + +func (p *parserParserImpl) Lookahead() int { + return p.lookahead() +} + +func parserNewParser() parserParser { + p := &parserParserImpl{ + lookahead: func() int { return -1 }, + } + return p +} + const parserFlag = -1000 func parserTokname(c int) string { - // 4 is TOKSTART above - if c >= 4 && c-4 < len(parserToknames) { - if parserToknames[c-4] != "" { - return parserToknames[c-4] + if c >= 1 && c-1 < len(parserToknames) { + if parserToknames[c-1] != "" { + return parserToknames[c-1] } } return __yyfmt__.Sprintf("tok-%v", c) @@ -150,51 +181,129 @@ func parserStatname(s int) string { return 
__yyfmt__.Sprintf("state-%v", s) } -func parserlex1(lex parserLexer, lval *parserSymType) int { - c := 0 - char := lex.Lex(lval) +func parserErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !parserErrorVerbose { + return "syntax error" + } + + for _, e := range parserErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + parserTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. + base := parserPact[state] + for tok := TOKSTART; tok-1 < len(parserToknames); tok++ { + if n := base + tok; n >= 0 && n < parserLast && parserChk[parserAct[n]] == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if parserDef[state] == -2 { + i := 0 + for parserExca[i] != -1 || parserExca[i+1] != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; parserExca[i] >= 0; i += 2 { + tok := parserExca[i] + if tok < TOKSTART || parserExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. + if parserExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += parserTokname(tok) + } + return res +} + +func parserlex1(lex parserLexer, lval *parserSymType) (char, token int) { + token = 0 + char = lex.Lex(lval) if char <= 0 { - c = parserTok1[0] + token = parserTok1[0] goto out } if char < len(parserTok1) { - c = parserTok1[char] + token = parserTok1[char] goto out } if char >= parserPrivate { if char < parserPrivate+len(parserTok2) { - c = parserTok2[char-parserPrivate] + token = parserTok2[char-parserPrivate] goto out } } for i := 0; i < len(parserTok3); i += 2 { - c = parserTok3[i+0] - if c == char { - c = parserTok3[i+1] + token = parserTok3[i+0] + if token == char { + token = parserTok3[i+1] goto out } } out: - if c == 0 { - c = parserTok2[1] /* unknown char */ + if token == 0 { + token = parserTok2[1] /* unknown char */ } if parserDebug >= 3 { - __yyfmt__.Printf("lex %s(%d)\n", parserTokname(c), uint(char)) + __yyfmt__.Printf("lex %s(%d)\n", parserTokname(token), uint(char)) } - return c + return char, token } func parserParse(parserlex parserLexer) int { + return parserNewParser().Parse(parserlex) +} + +func (parserrcvr *parserParserImpl) Parse(parserlex parserLexer) int { var parsern int var parserlval parserSymType var parserVAL parserSymType + var parserDollar []parserSymType + _ = parserDollar // silence set and not used parserS := make([]parserSymType, parserMaxDepth) Nerrs := 0 /* number of errors */ Errflag := 0 /* error recovery flag */ parserstate := 0 parserchar := -1 + parsertoken := -1 // parserchar translated into internal numbering + parserrcvr.lookahead = func() int { return parserchar } + defer func() { + // Make sure we report no lookahead when not parsing. 
+ parserstate = -1 + parserchar = -1 + parsertoken = -1 + }() parserp := -1 goto parserstack @@ -207,7 +316,7 @@ ret1: parserstack: /* put a state and value onto the stack */ if parserDebug >= 4 { - __yyfmt__.Printf("char %v in %v\n", parserTokname(parserchar), parserStatname(parserstate)) + __yyfmt__.Printf("char %v in %v\n", parserTokname(parsertoken), parserStatname(parserstate)) } parserp++ @@ -225,15 +334,16 @@ parsernewstate: goto parserdefault /* simple state */ } if parserchar < 0 { - parserchar = parserlex1(parserlex, &parserlval) + parserchar, parsertoken = parserlex1(parserlex, &parserlval) } - parsern += parserchar + parsern += parsertoken if parsern < 0 || parsern >= parserLast { goto parserdefault } parsern = parserAct[parsern] - if parserChk[parsern] == parserchar { /* valid shift */ + if parserChk[parsern] == parsertoken { /* valid shift */ parserchar = -1 + parsertoken = -1 parserVAL = parserlval parserstate = parsern if Errflag > 0 { @@ -247,7 +357,7 @@ parserdefault: parsern = parserDef[parserstate] if parsern == -2 { if parserchar < 0 { - parserchar = parserlex1(parserlex, &parserlval) + parserchar, parsertoken = parserlex1(parserlex, &parserlval) } /* look through exception table */ @@ -260,7 +370,7 @@ parserdefault: } for xi += 2; ; xi += 2 { parsern = parserExca[xi+0] - if parsern < 0 || parsern == parserchar { + if parsern < 0 || parsern == parsertoken { break } } @@ -273,11 +383,11 @@ parserdefault: /* error ... attempt to resume parsing */ switch Errflag { case 0: /* brand new error */ - parserlex.Error("syntax error") + parserlex.Error(parserErrorMessage(parserstate, parsertoken)) Nerrs++ if parserDebug >= 1 { __yyfmt__.Printf("%s", parserStatname(parserstate)) - __yyfmt__.Printf(" saw %s\n", parserTokname(parserchar)) + __yyfmt__.Printf(" saw %s\n", parserTokname(parsertoken)) } fallthrough @@ -305,12 +415,13 @@ parserdefault: case 3: /* no shift yet; clobber input char */ if parserDebug >= 2 { - __yyfmt__.Printf("error recovery discards %s\n", parserTokname(parserchar)) + __yyfmt__.Printf("error recovery discards %s\n", parserTokname(parsertoken)) } - if parserchar == parserEofCode { + if parsertoken == parserEofCode { goto ret1 } parserchar = -1 + parsertoken = -1 goto parsernewstate /* try again in the same state */ } } @@ -325,6 +436,13 @@ parserdefault: _ = parserpt // guard against "declared and not used" parserp -= parserR2[parsern] + // parserp is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. + if parserp+1 >= len(parserS) { + nyys := make([]parserSymType, len(parserS)*2) + copy(nyys, parserS) + parserS = nyys + } parserVAL = parserS[parserp+1] /* consult goto table to find next state */ @@ -344,6 +462,7 @@ parserdefault: switch parsernt { case 1: + parserDollar = parserS[parserpt-0 : parserpt+1] //line lang.y:35 { parserResult = &ast.LiteralNode{ @@ -353,9 +472,10 @@ parserdefault: } } case 2: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:43 { - parserResult = parserS[parserpt-0].node + parserResult = parserDollar[1].node // We want to make sure that the top value is always a Concat // so that the return value is always a string type from an @@ -365,28 +485,30 @@ parserdefault: // because functionally the AST is the same, but we do that because // it makes for an easy literal check later (to check if a string // has any interpolations). 
- if _, ok := parserS[parserpt-0].node.(*ast.Concat); !ok { - if n, ok := parserS[parserpt-0].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString { + if _, ok := parserDollar[1].node.(*ast.Concat); !ok { + if n, ok := parserDollar[1].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString { parserResult = &ast.Concat{ - Exprs: []ast.Node{parserS[parserpt-0].node}, - Posx: parserS[parserpt-0].node.Pos(), + Exprs: []ast.Node{parserDollar[1].node}, + Posx: parserDollar[1].node.Pos(), } } } } case 3: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:66 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = parserDollar[1].node } case 4: + parserDollar = parserS[parserpt-2 : parserpt+1] //line lang.y:70 { var result []ast.Node - if c, ok := parserS[parserpt-1].node.(*ast.Concat); ok { - result = append(c.Exprs, parserS[parserpt-0].node) + if c, ok := parserDollar[1].node.(*ast.Concat); ok { + result = append(c.Exprs, parserDollar[2].node) } else { - result = []ast.Node{parserS[parserpt-1].node, parserS[parserpt-0].node} + result = []ast.Node{parserDollar[1].node, parserDollar[2].node} } parserVAL.node = &ast.Concat{ @@ -395,89 +517,103 @@ parserdefault: } } case 5: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:86 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = parserDollar[1].node } case 6: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:90 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = parserDollar[1].node } case 7: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:96 { - parserVAL.node = parserS[parserpt-1].node + parserVAL.node = parserDollar[2].node } case 8: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:102 { - parserVAL.node = parserS[parserpt-1].node + parserVAL.node = parserDollar[2].node } case 9: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:106 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = parserDollar[1].node } case 10: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:110 { parserVAL.node = &ast.LiteralNode{ - Value: parserS[parserpt-0].token.Value.(int), + Value: parserDollar[1].token.Value.(int), Typex: ast.TypeInt, - Posx: parserS[parserpt-0].token.Pos, + Posx: parserDollar[1].token.Pos, } } case 11: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:118 { parserVAL.node = &ast.LiteralNode{ - Value: parserS[parserpt-0].token.Value.(float64), + Value: parserDollar[1].token.Value.(float64), Typex: ast.TypeFloat, - Posx: parserS[parserpt-0].token.Pos, + Posx: parserDollar[1].token.Pos, } } case 12: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:126 { parserVAL.node = &ast.Arithmetic{ - Op: parserS[parserpt-1].token.Value.(ast.ArithmeticOp), - Exprs: []ast.Node{parserS[parserpt-2].node, parserS[parserpt-0].node}, - Posx: parserS[parserpt-2].node.Pos(), + Op: parserDollar[2].token.Value.(ast.ArithmeticOp), + Exprs: []ast.Node{parserDollar[1].node, parserDollar[3].node}, + Posx: parserDollar[1].node.Pos(), } } case 13: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:134 { - parserVAL.node = &ast.VariableAccess{Name: parserS[parserpt-0].token.Value.(string), Posx: parserS[parserpt-0].token.Pos} + parserVAL.node = &ast.VariableAccess{Name: parserDollar[1].token.Value.(string), Posx: parserDollar[1].token.Pos} } case 14: + parserDollar = parserS[parserpt-4 : parserpt+1] //line lang.y:138 { - parserVAL.node = &ast.Call{Func: 
parserS[parserpt-3].token.Value.(string), Args: parserS[parserpt-1].nodeList, Posx: parserS[parserpt-3].token.Pos} + parserVAL.node = &ast.Call{Func: parserDollar[1].token.Value.(string), Args: parserDollar[3].nodeList, Posx: parserDollar[1].token.Pos} } case 15: + parserDollar = parserS[parserpt-0 : parserpt+1] //line lang.y:143 { parserVAL.nodeList = nil } case 16: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:147 { - parserVAL.nodeList = append(parserS[parserpt-2].nodeList, parserS[parserpt-0].node) + parserVAL.nodeList = append(parserDollar[1].nodeList, parserDollar[3].node) } case 17: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:151 { - parserVAL.nodeList = append(parserVAL.nodeList, parserS[parserpt-0].node) + parserVAL.nodeList = append(parserVAL.nodeList, parserDollar[1].node) } case 18: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:157 { parserVAL.node = &ast.LiteralNode{ - Value: parserS[parserpt-0].token.Value.(string), + Value: parserDollar[1].token.Value.(string), Typex: ast.TypeString, - Posx: parserS[parserpt-0].token.Pos, + Posx: parserDollar[1].token.Pos, } } } diff --git a/config/module/copy_dir.go b/config/module/copy_dir.go new file mode 100644 index 0000000000..f2ae63b77b --- /dev/null +++ b/config/module/copy_dir.go @@ -0,0 +1,76 @@ +package module + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// copyDir copies the src directory contents into dst. Both directories +// should already exist. +func copyDir(dst, src string) error { + src, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == src { + return nil + } + + if strings.HasPrefix(filepath.Base(path), ".") { + // Skip any dot files + if info.IsDir() { + return filepath.SkipDir + } else { + return nil + } + } + + // The "path" has the src prefixed to it. We need to join our + // destination with the path without the src on it. + dstPath := filepath.Join(dst, path[len(src):]) + + // If we have a directory, make that subdirectory, then continue + // the walk. + if info.IsDir() { + if path == filepath.Join(src, dst) { + // dst is in src; don't walk it. + return nil + } + + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + + return nil + } + + // If we have a file, copy the contents. + srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dstPath) + if err != nil { + return err + } + defer dstF.Close() + + if _, err := io.Copy(dstF, srcF); err != nil { + return err + } + + // Chmod it + return os.Chmod(dstPath, info.Mode()) + } + + return filepath.Walk(src, walkFn) +} diff --git a/config/module/get.go b/config/module/get.go index 3820e65f29..cba15277fd 100644 --- a/config/module/get.go +++ b/config/module/get.go @@ -1,6 +1,9 @@ package module import ( + "io/ioutil" + "os" + "github.com/hashicorp/go-getter" ) @@ -23,6 +26,36 @@ const ( GetModeUpdate ) +// GetCopy is the same as Get except that it downloads a copy of the +// module represented by source. +// +// This copy will omit and dot-prefixed files (such as .git/, .hg/) and +// can't be updated on its own. 
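A usage sketch for the GetCopy helper defined just below (destination and source paths are hypothetical):

    package main

    import "github.com/hashicorp/terraform/config/module"

    func main() {
    	// Fetch a detached copy: dot-prefixed entries such as .git/ are
    	// skipped, so the result cannot be updated in place.
    	if err := module.GetCopy("/tmp/module-copy", "file:///opt/modules/app"); err != nil {
    		panic(err)
    	}
    }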
+func GetCopy(dst, src string) error { + // Create the temporary directory to do the real Get to + tmpDir, err := ioutil.TempDir("", "tf") + if err != nil { + return err + } + if err := os.RemoveAll(tmpDir); err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + // Get to that temporary dir + if err := getter.Get(tmpDir, src); err != nil { + return err + } + + // Make sure the destination exists + if err := os.MkdirAll(dst, 0755); err != nil { + return err + } + + // Copy to the final location + return copyDir(dst, tmpDir) +} + func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) { // Get the module with the level specified if we were told to. if mode > GetModeNone { diff --git a/helper/resource/testing.go b/helper/resource/testing.go index eaa0cbf710..0b53c3c615 100644 --- a/helper/resource/testing.go +++ b/helper/resource/testing.go @@ -11,6 +11,7 @@ import ( "strings" "testing" + "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/terraform" ) @@ -198,7 +199,7 @@ func testStep( } // Load the modules - modStorage := &module.FolderStorage{ + modStorage := &getter.FolderStorage{ StorageDir: filepath.Join(cfgPath, ".tfmodules"), } err = mod.Load(modStorage, module.GetModeGet) From 263cc1b8553545682316894ea8382c7d724a81ee Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 15 Oct 2015 13:52:27 -0700 Subject: [PATCH 269/335] terraform: final failing test --- terraform/terraform_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/terraform/terraform_test.go b/terraform/terraform_test.go index 02d4de2a28..d17726acb4 100644 --- a/terraform/terraform_test.go +++ b/terraform/terraform_test.go @@ -13,6 +13,7 @@ import ( "sync" "testing" + "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" ) @@ -70,7 +71,7 @@ func testModule(t *testing.T, name string) *module.Tree { t.Fatalf("err: %s", err) } - s := &module.FolderStorage{StorageDir: tempDir(t)} + s := &getter.FolderStorage{StorageDir: tempDir(t)} if err := mod.Load(s, module.GetModeGet); err != nil { t.Fatalf("err: %s", err) } From 05007bed38db92072c492c057a2b4613d59022f9 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 16 Oct 2015 09:11:39 -0500 Subject: [PATCH 270/335] Update CHANGELOG.md --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 81316bf545..a615847855 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 0.6.5 (Unreleased) +INTERNAL IMPROVEMENTS: + + * provider/digitalocean: use official Go client [GH-3333] + ## 0.6.4 (October 15, 2015) FEATURES: From 347f9c0bea68722a85a1e453c69e6a756043f6b8 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Fri, 16 Oct 2015 14:00:23 -0500 Subject: [PATCH 271/335] vagrantfile: update base image name to Bento, from Chef --- Vagrantfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Vagrantfile b/Vagrantfile index 8a936e04cf..59709339d7 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -42,7 +42,7 @@ source /etc/profile.d/gopath.sh SCRIPT Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - config.vm.box = "chef/ubuntu-12.04" + config.vm.box = "bento/ubuntu-12.04" config.vm.hostname = "terraform" config.vm.provision "shell", inline: $script, privileged: false From c2fdb7171e4d84cc3f37b8e2163b9d43aa6306e5 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 17 Oct 2015 17:33:45 -0700 Subject: [PATCH 
272/335] use upstream osext, which fixes some bugs --- config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.go b/config.go index 6482238889..c9b2a7f754 100644 --- a/config.go +++ b/config.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/hcl" "github.com/hashicorp/terraform/plugin" "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/osext" + "github.com/kardianos/osext" ) // Config is the structure of the configuration for the Terraform CLI. From 593077161589a9ba920324450d0634d93834b20e Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Sun, 18 Oct 2015 13:21:41 -0400 Subject: [PATCH 273/335] Update compute_instance.html.markdown Make it clear that you can't have two networks --- .../docs/providers/google/r/compute_instance.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/google/r/compute_instance.html.markdown b/website/source/docs/providers/google/r/compute_instance.html.markdown index bf8add9e62..938bc71dff 100644 --- a/website/source/docs/providers/google/r/compute_instance.html.markdown +++ b/website/source/docs/providers/google/r/compute_instance.html.markdown @@ -82,8 +82,8 @@ The following arguments are supported: are not allowed to be used simultaneously. * `network_interface` - (Required) Networks to attach to the instance. This can be - specified multiple times for multiple networks. Structure is documented - below. + specified multiple times for multiple networks, but GCE is currently limited + to just 1. Structure is documented below. * `network` - (DEPRECATED, Required) Networks to attach to the instance. This can be specified multiple times for multiple networks. Structure is documented From bb51882f337e80ca85b75069155ed03c52636dd3 Mon Sep 17 00:00:00 2001 From: Nathan Zadoks Date: Mon, 12 Oct 2015 17:04:58 -0400 Subject: [PATCH 274/335] Etcd remote state backend --- state/remote/etcd.go | 78 +++++++++++++++++++++++++++++++++++++++ state/remote/etcd_test.go | 38 +++++++++++++++++++ state/remote/remote.go | 1 + 3 files changed, 117 insertions(+) create mode 100644 state/remote/etcd.go create mode 100644 state/remote/etcd_test.go diff --git a/state/remote/etcd.go b/state/remote/etcd.go new file mode 100644 index 0000000000..f596a8492c --- /dev/null +++ b/state/remote/etcd.go @@ -0,0 +1,78 @@ +package remote + +import ( + "crypto/md5" + "fmt" + "strings" + + "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" + etcdapi "github.com/coreos/etcd/client" +) + +func etcdFactory(conf map[string]string) (Client, error) { + path, ok := conf["path"] + if !ok { + return nil, fmt.Errorf("missing 'path' configuration") + } + + endpoints, ok := conf["endpoints"] + if !ok || endpoints == "" { + return nil, fmt.Errorf("missing 'endpoints' configuration") + } + + config := etcdapi.Config{ + Endpoints: strings.Split(endpoints, " "), + } + if username, ok := conf["username"]; ok && username != "" { + config.Username = username + } + if password, ok := conf["password"]; ok && password != "" { + config.Password = password + } + + client, err := etcdapi.New(config) + if err != nil { + return nil, err + } + + return &EtcdClient{ + Client: client, + Path: path, + }, nil +} + +// EtcdClient is a remote client that stores data in etcd. 
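A sketch of wiring up the factory above, as if from within package remote (endpoints and path below are hypothetical):

    package remote

    func exampleEtcdClient() (Client, error) {
    	// "endpoints" may name several servers, separated by spaces;
    	// "path" is the etcd key that holds the state payload.
    	return etcdFactory(map[string]string{
    		"path":      "tf/state",
    		"endpoints": "http://127.0.0.1:2379 http://127.0.0.1:4001",
    	})
    }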
+type EtcdClient struct { + Client etcdapi.Client + Path string +} + +func (c *EtcdClient) Get() (*Payload, error) { + resp, err := etcdapi.NewKeysAPI(c.Client).Get(context.Background(), c.Path, &etcdapi.GetOptions{Quorum: true}) + if err != nil { + if err, ok := err.(etcdapi.Error); ok && err.Code == etcdapi.ErrorCodeKeyNotFound { + return nil, nil + } + return nil, err + } + if resp.Node.Dir { + return nil, fmt.Errorf("path is a directory") + } + + data := []byte(resp.Node.Value) + md5 := md5.Sum(data) + return &Payload{ + Data: data, + MD5: md5[:], + }, nil +} + +func (c *EtcdClient) Put(data []byte) error { + _, err := etcdapi.NewKeysAPI(c.Client).Set(context.Background(), c.Path, string(data), nil) + return err +} + +func (c *EtcdClient) Delete() error { + _, err := etcdapi.NewKeysAPI(c.Client).Delete(context.Background(), c.Path, nil) + return err +} diff --git a/state/remote/etcd_test.go b/state/remote/etcd_test.go new file mode 100644 index 0000000000..6d06d801b2 --- /dev/null +++ b/state/remote/etcd_test.go @@ -0,0 +1,38 @@ +package remote + +import ( + "fmt" + "os" + "testing" + "time" +) + +func TestEtcdClient_impl(t *testing.T) { + var _ Client = new(EtcdClient) +} + +func TestEtcdClient(t *testing.T) { + endpoint := os.Getenv("ETCD_ENDPOINT") + if endpoint == "" { + t.Skipf("skipping; ETCD_ENDPOINT must be set") + } + + config := map[string]string{ + "endpoints": endpoint, + "path": fmt.Sprintf("tf-unit/%s", time.Now().String()), + } + + if username := os.Getenv("ETCD_USERNAME"); username != "" { + config["username"] = username + } + if password := os.Getenv("ETCD_PASSWORD"); password != "" { + config["password"] = password + } + + client, err := etcdFactory(config) + if err != nil { + t.Fatalf("Error for valid config: %s", err) + } + + testClient(t, client) +} diff --git a/state/remote/remote.go b/state/remote/remote.go index 7ebea32229..5337ad7b7b 100644 --- a/state/remote/remote.go +++ b/state/remote/remote.go @@ -38,6 +38,7 @@ func NewClient(t string, conf map[string]string) (Client, error) { var BuiltinClients = map[string]Factory{ "atlas": atlasFactory, "consul": consulFactory, + "etcd": etcdFactory, "http": httpFactory, "s3": s3Factory, "swift": swiftFactory, From 362a2035c0bb709162ab75d47ef6db2b23bcef56 Mon Sep 17 00:00:00 2001 From: Nathan Zadoks Date: Thu, 15 Oct 2015 22:32:59 -0400 Subject: [PATCH 275/335] Document the etcd remote state backend --- website/source/docs/commands/remote-config.html.markdown | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/website/source/docs/commands/remote-config.html.markdown b/website/source/docs/commands/remote-config.html.markdown index 73a06f8211..aaa4148fcb 100644 --- a/website/source/docs/commands/remote-config.html.markdown +++ b/website/source/docs/commands/remote-config.html.markdown @@ -50,6 +50,11 @@ The following backends are supported: variables can optionally be provided. Address is assumed to be the local agent if not provided. +* Etcd - Stores the state in etcd at a given path. + Requires the `path` and `endpoints` variables. The `username` and `password` + variables can optionally be provided. `endpoints` is assumed to be a + space-separated list of etcd endpoints. + * S3 - Stores the state as a given key in a given bucket on Amazon S3. Requires the `bucket` and `key` variables. 
Supports and honors the standard AWS environment variables `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` From 3021069207bf24c8c66f2877fd5d6eafea0d2c02 Mon Sep 17 00:00:00 2001 From: David Adams Date: Sun, 18 Oct 2015 18:23:13 -0500 Subject: [PATCH 276/335] Update init and remote config command docs * Update init docs to be correct, and provide an example. * Update remote config docs to provide more details about the Consul backend and to provide another example. --- .../source/docs/commands/init.html.markdown | 37 ++++++++++++++----- .../docs/commands/remote-config.html.markdown | 16 ++++++-- 2 files changed, 39 insertions(+), 14 deletions(-) diff --git a/website/source/docs/commands/init.html.markdown b/website/source/docs/commands/init.html.markdown index ee4286c278..803d937d75 100644 --- a/website/source/docs/commands/init.html.markdown +++ b/website/source/docs/commands/init.html.markdown @@ -31,17 +31,34 @@ a remote state configuration if provided. The command-line flags are all optional. The list of available flags are: -* `-address=url` - URL of the remote storage server. Required for HTTP backend, - optional for Atlas and Consul. - -* `-access-token=token` - Authentication token for state storage server. - Required for Atlas backend, optional for Consul. - * `-backend=atlas` - Specifies the type of remote backend. Must be one - of Atlas, Consul, or HTTP. Defaults to atlas. + of Atlas, Consul, S3, or HTTP. Defaults to Atlas. -* `-name=name` - Name of the state file in the state storage server. - Required for Atlas backend. +* `-backend-config="k=v"` - Specify a configuration variable for a backend. This is how you set the required variables for the selected backend (as detailed in the [remote command documentation](/docs/command/remote.html). -* `-path=path` - Path of the remote state in Consul. Required for the Consul backend. +## Example: Consul + +This example will initialize the current directory and configure Consul remote storage: + +``` +$ terraform init \ + -backend=consul \ + -backend-config="address=your.consul.endpoint:443" \ + -backend-config="scheme=https" \ + -backend-config="path=tf/path/for/project" \ + /path/to/source/module +``` + +## Example: S3 + +This example will initialize the current directory and configure S3 remote storage: + +``` +$ terraform init \ + -backend=s3 \ + -backend-config="bucket=your-s3-bucket" \ + -backend-config="key=tf/path/for/project.json" \ + -backend-config="acl=bucket-owner-full-control" \ + /path/to/source/module +``` diff --git a/website/source/docs/commands/remote-config.html.markdown b/website/source/docs/commands/remote-config.html.markdown index 73a06f8211..cd6f8f9d3a 100644 --- a/website/source/docs/commands/remote-config.html.markdown +++ b/website/source/docs/commands/remote-config.html.markdown @@ -45,10 +45,18 @@ The following backends are supported: * Atlas - Stores the state in Atlas. Requires the `name` and `access_token` variables. The `address` variable can optionally be provided. -* Consul - Stores the state in the KV store at a given path. - Requires the `path` variable. The `address` and `access_token` - variables can optionally be provided. Address is assumed to be the - local agent if not provided. +* Consul - Stores the state in the KV store at a given path. Requires the + `path` variable. 
Supports the `CONSUL_HTTP_TOKEN` environment variable + for specifying access credentials, or the `access_token` variable may + be provided, but this is not recommended since it would be included in + cleartext inside the persisted, shard state. Other supported parameters + include: + * `address` - DNS name and port of your Consul endpoint specified in the + format `dnsname:port`. Defaults to the local agent HTTP listener. This + may also be specified using the `CONSUL_HTTP_ADDR` environment variable. + * `scheme` - Specifies what protocol to use when talking to the given + `address`, either `http` or `https`. SSL support can also be triggered + by setting then environment variable `CONSUL_HTTP_SSL` to `true`. * S3 - Stores the state as a given key in a given bucket on Amazon S3. Requires the `bucket` and `key` variables. Supports and honors the standard From 3c0ed11922f17bb48ac14c76e3242070d3b82f2e Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 16 Oct 2015 17:17:35 -0400 Subject: [PATCH 277/335] Remove usage of http.DefaultClient --- state/remote/atlas.go | 9 ++++++--- state/remote/http_test.go | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/state/remote/atlas.go b/state/remote/atlas.go index f52d834a2c..2c2c48895a 100644 --- a/state/remote/atlas.go +++ b/state/remote/atlas.go @@ -83,7 +83,8 @@ func (c *AtlasClient) Get() (*Payload, error) { } // Request the url - resp, err := http.DefaultClient.Do(req) + client := &http.Client{} + resp, err := client.Do(req) if err != nil { return nil, err } @@ -161,7 +162,8 @@ func (c *AtlasClient) Put(state []byte) error { req.ContentLength = int64(len(state)) // Make the request - resp, err := http.DefaultClient.Do(req) + client := &http.Client{} + resp, err := client.Do(req) if err != nil { return fmt.Errorf("Failed to upload state: %v", err) } @@ -186,7 +188,8 @@ func (c *AtlasClient) Delete() error { } // Make the request - resp, err := http.DefaultClient.Do(req) + client := &http.Client{} + resp, err := client.Do(req) if err != nil { return fmt.Errorf("Failed to delete state: %v", err) } diff --git a/state/remote/http_test.go b/state/remote/http_test.go index e6e7297c19..74ed1755a2 100644 --- a/state/remote/http_test.go +++ b/state/remote/http_test.go @@ -24,7 +24,7 @@ func TestHTTPClient(t *testing.T) { t.Fatalf("err: %s", err) } - client := &HTTPClient{URL: url, Client: http.DefaultClient} + client := &HTTPClient{URL: url, Client: &http.Client{}} testClient(t, client) } From b0ceffc322efabc3ad2ff4bf41090eab25053bbe Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Mon, 19 Oct 2015 12:04:10 -0400 Subject: [PATCH 278/335] Remove usage from dependencies as well. Other dependencies need upstream merging to completely solve this. 
--- builtin/providers/aws/config.go | 3 +++ builtin/providers/dme/config.go | 6 +++++- state/remote/s3.go | 2 ++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index f8f443b73d..8b9428fbc2 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "net/http" "strings" "github.com/hashicorp/go-multierror" @@ -98,6 +99,7 @@ func (c *Config) Client() (interface{}, error) { Credentials: creds, Region: aws.String(c.Region), MaxRetries: aws.Int(c.MaxRetries), + HTTPClient: &http.Client{}, } log.Println("[INFO] Initializing IAM Connection") @@ -123,6 +125,7 @@ func (c *Config) Client() (interface{}, error) { Credentials: creds, Region: aws.String("us-east-1"), MaxRetries: aws.Int(c.MaxRetries), + HTTPClient: &http.Client{}, } log.Println("[INFO] Initializing DynamoDB connection") diff --git a/builtin/providers/dme/config.go b/builtin/providers/dme/config.go index 514df0d101..2d387673fe 100644 --- a/builtin/providers/dme/config.go +++ b/builtin/providers/dme/config.go @@ -2,8 +2,10 @@ package dme import ( "fmt" - "github.com/soniah/dnsmadeeasy" "log" + "net/http" + + "github.com/soniah/dnsmadeeasy" ) // Config contains DNSMadeEasy provider settings @@ -20,6 +22,8 @@ func (c *Config) Client() (*dnsmadeeasy.Client, error) { return nil, fmt.Errorf("Error setting up client: %s", err) } + client.HTTP = &http.Client{} + if c.UseSandbox { client.URL = dnsmadeeasy.SandboxURL } diff --git a/state/remote/s3.go b/state/remote/s3.go index bdc6a63cf9..f6cfdfbde2 100644 --- a/state/remote/s3.go +++ b/state/remote/s3.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "log" + "net/http" "os" "strconv" @@ -75,6 +76,7 @@ func s3Factory(conf map[string]string) (Client, error) { awsConfig := &aws.Config{ Credentials: credentialsProvider, Region: aws.String(regionName), + HTTPClient: &http.Client{}, } nativeClient := s3.New(awsConfig) From 5fa5c4bc535c7798b0ec792e02dda4495e0854bc Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Mon, 19 Oct 2015 13:03:28 -0400 Subject: [PATCH 279/335] Use new packngo API allowing passing in a custom http.Client --- builtin/providers/packet/config.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/builtin/providers/packet/config.go b/builtin/providers/packet/config.go index 659ee9ebc8..b7d408c626 100644 --- a/builtin/providers/packet/config.go +++ b/builtin/providers/packet/config.go @@ -1,6 +1,8 @@ package packet import ( + "net/http" + "github.com/packethost/packngo" ) @@ -14,5 +16,5 @@ type Config struct { // Client() returns a new client for accessing packet. 
func (c *Config) Client() *packngo.Client { - return packngo.NewClient(consumerToken, c.AuthToken) + return packngo.NewClient(consumerToken, c.AuthToken, &http.Client{}) } From 7a24da8c94733b474955fb8acd79ef8fc56f92f8 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 19 Oct 2015 18:43:49 -0700 Subject: [PATCH 280/335] Update CHANGELOG.md --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a615847855..007dedfcd6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 0.6.5 (Unreleased) +FEATURES: + + * New remote state backend: `etcd` [GH-3487] + INTERNAL IMPROVEMENTS: * provider/digitalocean: use official Go client [GH-3333] From fca44bdec3a1510a413813124b074cfd2ea08829 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 20 Oct 2015 12:28:12 -0500 Subject: [PATCH 281/335] core: state metadata difference should bump serial Remote state includes MD5-based checksumming to protect against State conflicts. This can generate improper conflicts with states that differ only in their Schema version. We began to see this issue with https://github.com/hashicorp/terraform/pull/3470 which changes the "schema_version" of aws_key_pairs. --- terraform/state.go | 15 +++++++++ terraform/state_test.go | 72 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 87 insertions(+) diff --git a/terraform/state.go b/terraform/state.go index 21b2c04de3..e97e0c27ce 100644 --- a/terraform/state.go +++ b/terraform/state.go @@ -965,6 +965,21 @@ func (s *InstanceState) Equal(other *InstanceState) bool { } } + // Meta must be equal + if len(s.Meta) != len(other.Meta) { + return false + } + for k, v := range s.Meta { + otherV, ok := other.Meta[k] + if !ok { + return false + } + + if v != otherV { + return false + } + } + return true } diff --git a/terraform/state_test.go b/terraform/state_test.go index eeb974d0b5..cc7b91bbc8 100644 --- a/terraform/state_test.go +++ b/terraform/state_test.go @@ -188,6 +188,43 @@ func TestStateEqual(t *testing.T) { }, }, }, + + // Meta differs + { + false, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]string{ + "schema_version": "1", + }, + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]string{ + "schema_version": "2", + }, + }, + }, + }, + }, + }, + }, + }, } for i, tc := range cases { @@ -224,6 +261,41 @@ func TestStateIncrementSerialMaybe(t *testing.T) { }, 1, }, + "S2 is different, but only via Instance Metadata": { + &State{ + Serial: 3, + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]string{}, + }, + }, + }, + }, + }, + }, + &State{ + Serial: 3, + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]string{ + "schema_version": "1", + }, + }, + }, + }, + }, + }, + }, + 4, + }, "S1 serial is higher": { &State{Serial: 5}, &State{ From d4f7cdc877721880c46c6d8bff726613532522b2 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Mon, 19 Oct 2015 15:38:23 -0400 Subject: [PATCH 282/335] GCP UserAgent now shows 
accurate Terraform version --- builtin/providers/google/config.go | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/builtin/providers/google/config.go b/builtin/providers/google/config.go index 6bfa3553d4..1198a7c0ae 100644 --- a/builtin/providers/google/config.go +++ b/builtin/providers/google/config.go @@ -10,8 +10,7 @@ import ( "runtime" "strings" - // TODO(dcunnin): Use version code from version.go - // "github.com/hashicorp/terraform" + "github.com/hashicorp/terraform/terraform" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" @@ -117,13 +116,11 @@ func (c *Config) loadAndValidate() error { } - // Build UserAgent - versionString := "0.0.0" - // TODO(dcunnin): Use Terraform's version code from version.go - // versionString := main.Version - // if main.VersionPrerelease != "" { - // versionString = fmt.Sprintf("%s-%s", versionString, main.VersionPrerelease) - // } + versionString := terraform.Version + prerelease := terraform.VersionPrerelease + if len(prerelease) > 0 { + versionString = fmt.Sprintf("%s-%s", versionString, prerelease) + } userAgent := fmt.Sprintf( "(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString) From bba2c3221d4b16e1aa8fe26e4c6cba2b4e318380 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Mon, 19 Oct 2015 15:27:41 -0400 Subject: [PATCH 283/335] Added oauth2 support for GCP --- builtin/providers/google/config.go | 28 ++++++++++++---------------- builtin/providers/google/provider.go | 6 +++++- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/builtin/providers/google/config.go b/builtin/providers/google/config.go index 6bfa3553d4..120c578e1d 100644 --- a/builtin/providers/google/config.go +++ b/builtin/providers/google/config.go @@ -36,6 +36,13 @@ type Config struct { func (c *Config) loadAndValidate() error { var account accountFile + clientScopes := []string{ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite", + "https://www.googleapis.com/auth/devstorage.full_control", + } + if c.AccountFile == "" { c.AccountFile = os.Getenv("GOOGLE_ACCOUNT_FILE") @@ -79,13 +86,6 @@ func (c *Config) loadAndValidate() error { } } - clientScopes := []string{ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite", - "https://www.googleapis.com/auth/devstorage.full_control", - } - // Get the token for use in our requests log.Printf("[INFO] Requesting Google token...") log.Printf("[INFO] -- Email: %s", account.ClientEmail) @@ -105,16 +105,12 @@ func (c *Config) loadAndValidate() error { client = conf.Client(oauth2.NoContext) } else { - log.Printf("[INFO] Requesting Google token via GCE Service Role...") - client = &http.Client{ - Transport: &oauth2.Transport{ - // Fetch from Google Compute Engine's metadata server to retrieve - // an access token for the provided account. - // If no account is specified, "default" is used. - Source: google.ComputeTokenSource(""), - }, + log.Printf("[INFO] Authenticating using DefaultClient"); + err := error(nil) + client, err = google.DefaultClient(oauth2.NoContext, clientScopes...) 
+ if err != nil { + return err } - } // Build UserAgent diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go index 7c9587219b..acafd851c4 100644 --- a/builtin/providers/google/provider.go +++ b/builtin/providers/google/provider.go @@ -15,7 +15,7 @@ func Provider() terraform.ResourceProvider { Schema: map[string]*schema.Schema{ "account_file": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), ValidateFunc: validateAccountFile, }, @@ -78,6 +78,10 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { } func validateAccountFile(v interface{}, k string) (warnings []string, errors []error) { + if v == nil { + return + } + value := v.(string) if value == "" { From 05c0998d2d84d7b06743761e9858a17470935cdb Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 20 Oct 2015 14:33:28 -0500 Subject: [PATCH 284/335] core: store deeply nested modules in a consistent order in the state We were only comparing the last element of the module, which meant that deeply nested modules with the same name but different ancestry had an undefined sort order, which could cause inconsistencies in state storage and potentially break remote state MD5 checksumming. --- terraform/state.go | 5 ++--- terraform/state_test.go | 17 +++++++++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/terraform/state.go b/terraform/state.go index e97e0c27ce..8734cfc178 100644 --- a/terraform/state.go +++ b/terraform/state.go @@ -1207,9 +1207,8 @@ func (s moduleStateSort) Less(i, j int) bool { return len(a.Path) < len(b.Path) } - // Otherwise, compare by last path element - idx := len(a.Path) - 1 - return a.Path[idx] < b.Path[idx] + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") } func (s moduleStateSort) Swap(i, j int) { diff --git a/terraform/state_test.go b/terraform/state_test.go index cc7b91bbc8..8d24a8e75c 100644 --- a/terraform/state_test.go +++ b/terraform/state_test.go @@ -40,6 +40,23 @@ func TestStateAddModule(t *testing.T) { []string{"root", "foo", "bar"}, }, }, + // Same last element, different middle element + { + [][]string{ + []string{"root", "foo", "bar"}, // This one should sort after... + []string{"root", "foo"}, + []string{"root"}, + []string{"root", "bar", "bar"}, // ...this one. + []string{"root", "bar"}, + }, + [][]string{ + []string{"root"}, + []string{"root", "bar"}, + []string{"root", "foo"}, + []string{"root", "bar", "bar"}, + []string{"root", "foo", "bar"}, + }, + }, } for _, tc := range cases { From e59fb4e6ca2e6c184acbdc3c7e14d07f0b2e0a83 Mon Sep 17 00:00:00 2001 From: Christopher Tiwald Date: Sun, 19 Jul 2015 00:09:00 -0400 Subject: [PATCH 285/335] aws: Add support for "aws_codedeploy_app" resources. 
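
For context, a rough standalone sketch of the composite ID scheme this
resource uses (hypothetical helper names and values; not code from the diff
below): CodeDeploy keys most API calls on the application *name* rather than
the ID it assigns, so the resource stores `"<applicationId>:<applicationName>"`
in state and splits it back apart on read:

```go
package main

import (
	"fmt"
	"strings"
)

// buildID composes the state ID as "<applicationId>:<applicationName>".
// Keeping both parts lets the provider detect config renames as well as
// apps deleted out-of-band, while still calling the API by name.
func buildID(appID, appName string) string {
	return fmt.Sprintf("%s:%s", appID, appName)
}

// parseID recovers both parts; SplitN with n=2 splits only at the first
// colon, so the name side stays intact even if it contains colons.
func parseID(id string) (appID, appName string) {
	parts := strings.SplitN(id, ":", 2)
	return parts[0], parts[1]
}

func main() {
	id := buildID("a1b2c3d4", "foo") // hypothetical values
	appID, appName := parseID(id)
	fmt.Println(appID, appName) // a1b2c3d4 foo
}
```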
--- builtin/providers/aws/config.go | 5 + builtin/providers/aws/provider.go | 1 + .../aws/resource_aws_codedeploy_app.go | 127 ++++++++++++++++++ 3 files changed, 133 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_codedeploy_app.go diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index 8b9428fbc2..dfd8b1b2ec 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go/service/codedeploy" "github.com/aws/aws-sdk-go/service/directoryservice" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/ec2" @@ -70,6 +71,7 @@ type AWSClient struct { lambdaconn *lambda.Lambda opsworksconn *opsworks.OpsWorks glacierconn *glacier.Glacier + codedeployconn *codedeploy.CodeDeploy } // Client configures and returns a fully initialized AWSClient @@ -192,6 +194,9 @@ func (c *Config) Client() (interface{}, error) { log.Println("[INFO] Initializing Glacier connection") client.glacierconn = glacier.New(awsConfig) + + log.Println("[INFO] Initializing CodeDeploy Connection") + client.codedeployconn = codedeploy.New(awsConfig) } if len(errs) > 0 { diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index f73580d0f7..132fa4678a 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -166,6 +166,7 @@ func Provider() terraform.ResourceProvider { "aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(), "aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(), "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), + "aws_codedeploy_app": resourceAwsCodeDeployApp(), "aws_customer_gateway": resourceAwsCustomerGateway(), "aws_db_instance": resourceAwsDbInstance(), "aws_db_parameter_group": resourceAwsDbParameterGroup(), diff --git a/builtin/providers/aws/resource_aws_codedeploy_app.go b/builtin/providers/aws/resource_aws_codedeploy_app.go new file mode 100644 index 0000000000..ccf07a82d8 --- /dev/null +++ b/builtin/providers/aws/resource_aws_codedeploy_app.go @@ -0,0 +1,127 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/codedeploy" +) + +func resourceAwsCodeDeployApp() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCodeDeployAppCreate, + Read: resourceAwsCodeDeployAppRead, + Update: resourceAwsCodeDeployUpdate, + Delete: resourceAwsCodeDeployAppDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + // The unique ID is set by AWS on create. 
+ "unique_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceAwsCodeDeployAppCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + application := d.Get("name").(string) + log.Printf("[DEBUG] Creating CodeDeploy application %s", application) + + resp, err := conn.CreateApplication(&codedeploy.CreateApplicationInput{ + ApplicationName: aws.String(application), + }) + if err != nil { + return err + } + log.Printf("[DEBUG] CodeDeploy application %s created", *resp.ApplicationId) + + // Despite giving the application a unique ID, AWS doesn't actually use + // it in API calls. Use it and the app name to identify the resource in + // the state file. This allows us to reliably detect both when the TF + // config file changes and when the user deletes the app without removing + // it first from the TF config. + d.SetId(fmt.Sprintf("%s:%s", *resp.ApplicationId, application)) + + return resourceAwsCodeDeployAppRead(d, meta) +} + +func resourceAwsCodeDeployAppRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + _, application := resourceAwsCodeDeployAppParseId(d.Id()) + log.Printf("[DEBUG] Reading CodeDeploy application %s", application) + resp, err := conn.GetApplication(&codedeploy.GetApplicationInput{ + ApplicationName: aws.String(application), + }) + if err != nil { + if codedeployerr, ok := err.(awserr.Error); ok && codedeployerr.Code() == "ApplicationDoesNotExistException" { + d.SetId("") + return nil + } else { + log.Printf("[ERROR] Error finding CodeDeploy application: %s", err) + return err + } + } + + d.Set("name", *resp.Application.ApplicationName) + + return nil +} + +func resourceAwsCodeDeployUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + o, n := d.GetChange("name") + + _, err := conn.UpdateApplication(&codedeploy.UpdateApplicationInput{ + ApplicationName: aws.String(o.(string)), + NewApplicationName: aws.String(n.(string)), + }) + if err != nil { + return err + } + log.Printf("[DEBUG] CodeDeploy application %s updated", n) + + d.Set("name", n) + + return nil +} + +func resourceAwsCodeDeployAppDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + _, err := conn.DeleteApplication(&codedeploy.DeleteApplicationInput{ + ApplicationName: aws.String(d.Get("name").(string)), + }) + if err != nil { + if cderr, ok := err.(awserr.Error); ok && cderr.Code() == "InvalidApplicationNameException" { + d.SetId("") + return nil + } else { + log.Printf("[ERROR] Error deleting CodeDeploy application: %s", err) + return err + } + } + + return nil +} + +func resourceAwsCodeDeployAppParseId(id string) (string, string) { + parts := strings.SplitN(id, ":", 2) + return parts[0], parts[1] +} From 42c077700a26b4cf65999db199030ac88bc68d14 Mon Sep 17 00:00:00 2001 From: Christopher Tiwald Date: Sun, 19 Jul 2015 00:09:25 -0400 Subject: [PATCH 286/335] aws: Add acceptance tests for "aws_codedeploy_app" resources. 
---
 .../aws/resource_aws_codedeploy_app_test.go   | 78 +++++++++++++++++++
 1 file changed, 78 insertions(+)
 create mode 100644 builtin/providers/aws/resource_aws_codedeploy_app_test.go

diff --git a/builtin/providers/aws/resource_aws_codedeploy_app_test.go b/builtin/providers/aws/resource_aws_codedeploy_app_test.go
new file mode 100644
index 0000000000..9c016f1842
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_codedeploy_app_test.go
@@ -0,0 +1,78 @@
+package aws
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/codedeploy"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSCodeDeployApp_basic(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSCodeDeployAppDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccAWSCodeDeployApp,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSCodeDeployAppExists("aws_codedeploy_app.foo"),
+				),
+			},
+			resource.TestStep{
+				Config: testAccAWSCodeDeployAppModifier,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSCodeDeployAppExists("aws_codedeploy_app.foo"),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckAWSCodeDeployAppDestroy(s *terraform.State) error {
+	conn := testAccProvider.Meta().(*AWSClient).codedeployconn
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "aws_codedeploy_app" {
+			continue
+		}
+
+		resp, err := conn.GetApplication(&codedeploy.GetApplicationInput{
+			ApplicationName: aws.String(rs.Primary.ID),
+		})
+
+		if err == nil {
+			if resp.Application != nil {
+				return fmt.Errorf("CodeDeploy app still exists:\n%#v", *resp.Application.ApplicationId)
+			}
+		}
+
+		return err
+	}
+
+	return nil
+}
+
+func testAccCheckAWSCodeDeployAppExists(name string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		_, ok := s.RootModule().Resources[name]
+		if !ok {
+			return fmt.Errorf("Not found: %s", name)
+		}
+
+		return nil
+	}
+}
+
+var testAccAWSCodeDeployApp = `
+resource "aws_codedeploy_app" "foo" {
+    name = "foo"
+}`
+
+var testAccAWSCodeDeployAppModifier = `
+resource "aws_codedeploy_app" "foo" {
+    name = "bar"
+}`

From fa3dfd1420dea811e8b0992092a43afaee703e8e Mon Sep 17 00:00:00 2001
From: Christopher Tiwald
Date: Sun, 19 Jul 2015 00:07:43 -0400
Subject: [PATCH 287/335] aws: Add documentation for "aws_codedeploy_app"
 resources.

---
 .../aws/r/codedeploy_app.html.markdown        | 32 +++++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100644 website/source/docs/providers/aws/r/codedeploy_app.html.markdown

diff --git a/website/source/docs/providers/aws/r/codedeploy_app.html.markdown b/website/source/docs/providers/aws/r/codedeploy_app.html.markdown
new file mode 100644
index 0000000000..054fd1eda9
--- /dev/null
+++ b/website/source/docs/providers/aws/r/codedeploy_app.html.markdown
@@ -0,0 +1,32 @@
+---
+layout: "aws"
+page_title: "AWS: aws_codedeploy_app"
+sidebar_current: "docs-aws-resource-codedeploy-app"
+description: |-
+  Provides a CodeDeploy application.
+---
+
+# aws\_codedeploy\_app
+
+Provides a CodeDeploy application to be used as a basis for deployments.
+
+## Example Usage
+
+```
+resource "aws_codedeploy_app" "foo" {
+    name = "foo"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the application.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - Amazon's assigned ID for the application.
+* `name` - The application's name.

From a546a12c2dbbeb6215c4c16ce5e6dbf3a00554dc Mon Sep 17 00:00:00 2001
From: Christopher Tiwald
Date: Tue, 20 Oct 2015 18:03:57 -0400
Subject: [PATCH 288/335] aws: Add support for aws_codedeploy_deployment_group
 resources

---
 builtin/providers/aws/provider.go             |   1 +
 ...esource_aws_codedeploy_deployment_group.go | 375 ++++++++++++++++++
 2 files changed, 376 insertions(+)
 create mode 100644 builtin/providers/aws/resource_aws_codedeploy_deployment_group.go

diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index 132fa4678a..fed004741d 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -167,6 +167,7 @@ func Provider() terraform.ResourceProvider {
 			"aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(),
 			"aws_cloudwatch_metric_alarm":    resourceAwsCloudWatchMetricAlarm(),
 			"aws_codedeploy_app":             resourceAwsCodeDeployApp(),
+			"aws_codedeploy_deployment_group": resourceAwsCodeDeployDeploymentGroup(),
 			"aws_customer_gateway":           resourceAwsCustomerGateway(),
 			"aws_db_instance":                resourceAwsDbInstance(),
 			"aws_db_parameter_group":         resourceAwsDbParameterGroup(),
diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go
new file mode 100644
index 0000000000..a9f3acb078
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go
@@ -0,0 +1,375 @@
+package aws
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/hashicorp/terraform/helper/hashcode"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/codedeploy"
+)
+
+func resourceAwsCodeDeployDeploymentGroup() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsCodeDeployDeploymentGroupCreate,
+		Read:   resourceAwsCodeDeployDeploymentGroupRead,
+		Update: resourceAwsCodeDeployDeploymentGroupUpdate,
+		Delete: resourceAwsCodeDeployDeploymentGroupDelete,
+
+		Schema: map[string]*schema.Schema{
+			"application_name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+					value := v.(string)
+					if len(value) > 100 {
+						errors = append(errors, fmt.Errorf(
+							"%q cannot exceed 100 characters", k))
+					}
+					return
+				},
+			},
+
+			"deployment_group_name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+					value := v.(string)
+					if len(value) > 100 {
+						errors = append(errors, fmt.Errorf(
+							"%q cannot exceed 100 characters", k))
+					}
+					return
+				},
+			},
+
+			"service_role_arn": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"autoscaling_groups": &schema.Schema{
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+			},
+
+			"deployment_config_name": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Default:  "CodeDeployDefault.OneAtATime",
+				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
+					value := v.(string)
+					if len(value) > 100 {
+						errors = append(errors, fmt.Errorf(
+							"%q cannot exceed 100 characters", k))
+					}
+					return
+				},
+			},
+
"ec2_tag_filter": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateTagFilters, + }, + + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Set: resourceAwsCodeDeployTagFilterHash, + }, + + "on_premises_instance_tag_filter": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateTagFilters, + }, + + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Set: resourceAwsCodeDeployTagFilterHash, + }, + }, + } +} + +func resourceAwsCodeDeployDeploymentGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + application := d.Get("application_name").(string) + deploymentGroup := d.Get("deployment_group_name").(string) + + input := codedeploy.CreateDeploymentGroupInput{ + ApplicationName: aws.String(application), + DeploymentGroupName: aws.String(deploymentGroup), + ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), + } + if attr, ok := d.GetOk("deployment_config_name"); ok { + input.DeploymentConfigName = aws.String(attr.(string)) + } + if attr, ok := d.GetOk("autoscaling_groups"); ok { + input.AutoScalingGroups = expandStringList(attr.(*schema.Set).List()) + } + if attr, ok := d.GetOk("on_premises_instance_tag_filters"); ok { + onPremFilters := buildOnPremTagFilters(attr.(*schema.Set).List()) + input.OnPremisesInstanceTagFilters = onPremFilters + } + if attr, ok := d.GetOk("ec2_tag_filter"); ok { + ec2TagFilters := buildEC2TagFilters(attr.(*schema.Set).List()) + input.Ec2TagFilters = ec2TagFilters + } + + // Retry to handle IAM role eventual consistency. 
+ var resp *codedeploy.CreateDeploymentGroupOutput + var err error + err = resource.Retry(2*time.Minute, func() error { + resp, err = conn.CreateDeploymentGroup(&input) + if err != nil { + codedeployErr, ok := err.(awserr.Error) + if !ok { + return &resource.RetryError{Err: err} + } + if codedeployErr.Code() == "InvalidRoleException" { + log.Printf("[DEBUG] Trying to create deployment group again: %q", + codedeployErr.Message()) + return err + } + + return &resource.RetryError{Err: err} + } + return nil + }) + if err != nil { + return err + } + + d.SetId(*resp.DeploymentGroupId) + + return resourceAwsCodeDeployDeploymentGroupRead(d, meta) +} + +func resourceAwsCodeDeployDeploymentGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + log.Printf("[DEBUG] Reading CodeDeploy DeploymentGroup %s", d.Id()) + resp, err := conn.GetDeploymentGroup(&codedeploy.GetDeploymentGroupInput{ + ApplicationName: aws.String(d.Get("application_name").(string)), + DeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), + }) + if err != nil { + return err + } + + d.Set("application_name", *resp.DeploymentGroupInfo.ApplicationName) + d.Set("autoscaling_groups", resp.DeploymentGroupInfo.AutoScalingGroups) + d.Set("deployment_config_name", *resp.DeploymentGroupInfo.DeploymentConfigName) + d.Set("deployment_group_name", *resp.DeploymentGroupInfo.DeploymentGroupName) + d.Set("service_role_arn", *resp.DeploymentGroupInfo.ServiceRoleArn) + if err := d.Set("ec2_tag_filter", ec2TagFiltersToMap(resp.DeploymentGroupInfo.Ec2TagFilters)); err != nil { + return err + } + if err := d.Set("on_premises_instance_tag_filter", onPremisesTagFiltersToMap(resp.DeploymentGroupInfo.OnPremisesInstanceTagFilters)); err != nil { + return err + } + + return nil +} + +func resourceAwsCodeDeployDeploymentGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + input := codedeploy.UpdateDeploymentGroupInput{ + ApplicationName: aws.String(d.Get("application_name").(string)), + CurrentDeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), + } + + if d.HasChange("autoscaling_groups") { + _, n := d.GetChange("autoscaling_groups") + input.AutoScalingGroups = expandStringList(n.(*schema.Set).List()) + } + if d.HasChange("deployment_config_name") { + _, n := d.GetChange("deployment_config_name") + input.DeploymentConfigName = aws.String(n.(string)) + } + if d.HasChange("deployment_group_name") { + _, n := d.GetChange("deployment_group_name") + input.NewDeploymentGroupName = aws.String(n.(string)) + } + + // TagFilters aren't like tags. They don't append. They simply replace. 
+ if d.HasChange("on_premises_instance_tag_filter") { + _, n := d.GetChange("on_premises_instance_tag_filter") + onPremFilters := buildOnPremTagFilters(n.(*schema.Set).List()) + input.OnPremisesInstanceTagFilters = onPremFilters + } + if d.HasChange("ec2_tag_filter") { + _, n := d.GetChange("ec2_tag_filter") + ec2Filters := buildEC2TagFilters(n.(*schema.Set).List()) + input.Ec2TagFilters = ec2Filters + } + + log.Printf("[DEBUG] Updating CodeDeploy DeploymentGroup %s", d.Id()) + _, err := conn.UpdateDeploymentGroup(&input) + if err != nil { + return err + } + + return resourceAwsCodeDeployDeploymentGroupRead(d, meta) +} + +func resourceAwsCodeDeployDeploymentGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codedeployconn + + log.Printf("[DEBUG] Deleting CodeDeploy DeploymentGroup %s", d.Id()) + _, err := conn.DeleteDeploymentGroup(&codedeploy.DeleteDeploymentGroupInput{ + ApplicationName: aws.String(d.Get("application_name").(string)), + DeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), + }) + if err != nil { + return err + } + + d.SetId("") + + return nil +} + +// buildOnPremTagFilters converts raw schema lists into a list of +// codedeploy.TagFilters. +func buildOnPremTagFilters(configured []interface{}) []*codedeploy.TagFilter { + filters := make([]*codedeploy.TagFilter, 0) + for _, raw := range configured { + var filter codedeploy.TagFilter + m := raw.(map[string]interface{}) + + filter.Key = aws.String(m["key"].(string)) + filter.Type = aws.String(m["type"].(string)) + filter.Value = aws.String(m["value"].(string)) + + filters = append(filters, &filter) + } + + return filters +} + +// buildEC2TagFilters converts raw schema lists into a list of +// codedeploy.EC2TagFilters. +func buildEC2TagFilters(configured []interface{}) []*codedeploy.EC2TagFilter { + filters := make([]*codedeploy.EC2TagFilter, 0) + for _, raw := range configured { + var filter codedeploy.EC2TagFilter + m := raw.(map[string]interface{}) + + filter.Key = aws.String(m["key"].(string)) + filter.Type = aws.String(m["type"].(string)) + filter.Value = aws.String(m["value"].(string)) + + filters = append(filters, &filter) + } + + return filters +} + +// ec2TagFiltersToMap converts lists of tag filters into a []map[string]string. +func ec2TagFiltersToMap(list []*codedeploy.EC2TagFilter) []map[string]string { + result := make([]map[string]string, 0, len(list)) + for _, tf := range list { + l := make(map[string]string) + if *tf.Key != "" { + l["key"] = *tf.Key + } + if *tf.Value != "" { + l["value"] = *tf.Value + } + if *tf.Type != "" { + l["type"] = *tf.Type + } + result = append(result, l) + } + return result +} + +// onPremisesTagFiltersToMap converts lists of on-prem tag filters into a []map[string]string. +func onPremisesTagFiltersToMap(list []*codedeploy.TagFilter) []map[string]string { + result := make([]map[string]string, 0, len(list)) + for _, tf := range list { + l := make(map[string]string) + if *tf.Key != "" { + l["key"] = *tf.Key + } + if *tf.Value != "" { + l["value"] = *tf.Value + } + if *tf.Type != "" { + l["type"] = *tf.Type + } + result = append(result, l) + } + return result +} + +// validateTagFilters confirms the "value" component of a tag filter is one of +// AWS's three allowed types. 
+func validateTagFilters(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "KEY_ONLY" && value != "VALUE_ONLY" && value != "KEY_AND_VALUE" { + errors = append(errors, fmt.Errorf( + "%q must be one of \"KEY_ONLY\", \"VALUE_ONLY\", or \"KEY_AND_VALUE\"", k)) + } + return +} + +func resourceAwsCodeDeployTagFilterHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + // Nothing's actually required in tag filters, so we must check the + // presence of all values before attempting a hash. + if v, ok := m["key"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["type"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["value"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + return hashcode.String(buf.String()) +} From 390f226eb51d66cf9398a882d605658d9c683d44 Mon Sep 17 00:00:00 2001 From: Christopher Tiwald Date: Tue, 20 Oct 2015 18:04:15 -0400 Subject: [PATCH 289/335] aws: Add aws_codedeploy_deployment_group tests --- ...ce_aws_codedeploy_deployment_group_test.go | 199 ++++++++++++++++++ 1 file changed, 199 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go new file mode 100644 index 0000000000..d883b26b8d --- /dev/null +++ b/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go @@ -0,0 +1,199 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/codedeploy" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSCodeDeployDeploymentGroup_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSCodeDeployDeploymentGroup, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo"), + ), + }, + resource.TestStep{ + Config: testAccAWSCodeDeployDeploymentGroupModifier, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo"), + ), + }, + }, + }) +} + +func testAccCheckAWSCodeDeployDeploymentGroupDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).codedeployconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_codedeploy_deployment_group" { + continue + } + + resp, err := conn.GetDeploymentGroup(&codedeploy.GetDeploymentGroupInput{ + ApplicationName: aws.String(rs.Primary.Attributes["application_name"]), + DeploymentGroupName: aws.String(rs.Primary.Attributes["deployment_group_name"]), + }) + + if err == nil { + if resp.DeploymentGroupInfo.DeploymentGroupName != nil { + return fmt.Errorf("CodeDeploy deployment group still exists:\n%#v", *resp.DeploymentGroupInfo.DeploymentGroupName) + } + } + + return err + } + + return nil +} + +func testAccCheckAWSCodeDeployDeploymentGroupExists(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + return nil + } +} + +var 
testAccAWSCodeDeployDeploymentGroup = ` +resource "aws_codedeploy_app" "foo_app" { + name = "foo_app" +} + +resource "aws_iam_role_policy" "foo_policy" { + name = "foo_policy" + role = "${aws_iam_role.foo_role.id}" + policy = < Date: Tue, 20 Oct 2015 18:04:39 -0400 Subject: [PATCH 290/335] aws: Add docs for aws_codedeploy_deployment_group --- .../codedeploy_deployment_group.html.markdown | 108 ++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown diff --git a/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown b/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown new file mode 100644 index 0000000000..cb2417fed6 --- /dev/null +++ b/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown @@ -0,0 +1,108 @@ +--- +layout: "aws" +page_title: "AWS: aws_codedeploy_deployment_group" +sidebar_current: "docs-aws-resource-codedeploy-deployment-group" +description: |\ + Provides a CodeDeploy deployment group. +--- + +# aws\_codedeploy\_deployment\_group + +Provides a CodeDeploy deployment group for an application + +## Example Usage + +``` +resource "aws_codedeploy_app" "foo_app" { + name = "foo_app" +} + +resource "aws_iam_role_policy" "foo_policy" { + name = "foo_policy" + role = "${aws_iam_role.foo_role.id}" + policy = < Date: Wed, 21 Oct 2015 09:49:23 -0500 Subject: [PATCH 291/335] config/lang: restore go1.4.3 generated code my theory is that @mitchellh checked in a go1.5 generated file in 344e7c26b5f116842932d0e6b6ad2f1a250526f4 --- config/lang/y.go | 272 ++++++++++++----------------------------------- 1 file changed, 68 insertions(+), 204 deletions(-) diff --git a/config/lang/y.go b/config/lang/y.go index fd0693f151..e7dd185ae1 100644 --- a/config/lang/y.go +++ b/config/lang/y.go @@ -30,10 +30,7 @@ const INTEGER = 57355 const FLOAT = 57356 const STRING = 57357 -var parserToknames = [...]string{ - "$end", - "error", - "$unk", +var parserToknames = []string{ "PROGRAM_BRACKET_LEFT", "PROGRAM_BRACKET_RIGHT", "PROGRAM_STRING_START", @@ -47,7 +44,7 @@ var parserToknames = [...]string{ "FLOAT", "STRING", } -var parserStatenames = [...]string{} +var parserStatenames = []string{} const parserEofCode = 1 const parserErrCode = 2 @@ -56,7 +53,7 @@ const parserMaxDepth = 200 //line lang.y:165 //line yacctab:1 -var parserExca = [...]int{ +var parserExca = []int{ -1, 1, 1, -1, -2, 0, @@ -70,103 +67,75 @@ var parserStates []string const parserLast = 30 -var parserAct = [...]int{ +var parserAct = []int{ 9, 20, 16, 16, 7, 7, 3, 18, 10, 8, 1, 17, 14, 12, 13, 6, 6, 19, 8, 22, 15, 23, 24, 11, 2, 25, 16, 21, 4, 5, } -var parserPact = [...]int{ +var parserPact = []int{ 1, -1000, 1, -1000, -1000, -1000, -1000, 0, -1000, 15, 0, 1, -1000, -1000, -1, -1000, 0, -8, 0, -1000, -1000, 12, -9, -1000, 0, -9, } -var parserPgo = [...]int{ +var parserPgo = []int{ 0, 0, 29, 28, 23, 6, 27, 10, } -var parserR1 = [...]int{ +var parserR1 = []int{ 0, 7, 7, 4, 4, 5, 5, 2, 1, 1, 1, 1, 1, 1, 1, 6, 6, 6, 3, } -var parserR2 = [...]int{ +var parserR2 = []int{ 0, 0, 1, 1, 2, 1, 1, 3, 3, 1, 1, 1, 3, 1, 4, 0, 3, 1, 1, } -var parserChk = [...]int{ +var parserChk = []int{ -1000, -7, -4, -5, -3, -2, 15, 4, -5, -1, 8, -4, 13, 14, 12, 5, 11, -1, 8, -1, 9, -6, -1, 9, 10, -1, } -var parserDef = [...]int{ +var parserDef = []int{ 1, -2, 2, 3, 5, 6, 18, 0, 4, 0, 0, 9, 10, 11, 13, 7, 0, 0, 15, 12, 8, 0, 17, 14, 0, 16, } -var parserTok1 = [...]int{ +var parserTok1 = []int{ 1, } -var 
parserTok2 = [...]int{ +var parserTok2 = []int{ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, } -var parserTok3 = [...]int{ +var parserTok3 = []int{ 0, } -var parserErrorMessages = [...]struct { - state int - token int - msg string -}{} - //line yaccpar:1 /* parser for yacc output */ -var ( - parserDebug = 0 - parserErrorVerbose = false -) +var parserDebug = 0 type parserLexer interface { Lex(lval *parserSymType) int Error(s string) } -type parserParser interface { - Parse(parserLexer) int - Lookahead() int -} - -type parserParserImpl struct { - lookahead func() int -} - -func (p *parserParserImpl) Lookahead() int { - return p.lookahead() -} - -func parserNewParser() parserParser { - p := &parserParserImpl{ - lookahead: func() int { return -1 }, - } - return p -} - const parserFlag = -1000 func parserTokname(c int) string { - if c >= 1 && c-1 < len(parserToknames) { - if parserToknames[c-1] != "" { - return parserToknames[c-1] + // 4 is TOKSTART above + if c >= 4 && c-4 < len(parserToknames) { + if parserToknames[c-4] != "" { + return parserToknames[c-4] } } return __yyfmt__.Sprintf("tok-%v", c) @@ -181,129 +150,51 @@ func parserStatname(s int) string { return __yyfmt__.Sprintf("state-%v", s) } -func parserErrorMessage(state, lookAhead int) string { - const TOKSTART = 4 - - if !parserErrorVerbose { - return "syntax error" - } - - for _, e := range parserErrorMessages { - if e.state == state && e.token == lookAhead { - return "syntax error: " + e.msg - } - } - - res := "syntax error: unexpected " + parserTokname(lookAhead) - - // To match Bison, suggest at most four expected tokens. - expected := make([]int, 0, 4) - - // Look for shiftable tokens. - base := parserPact[state] - for tok := TOKSTART; tok-1 < len(parserToknames); tok++ { - if n := base + tok; n >= 0 && n < parserLast && parserChk[parserAct[n]] == tok { - if len(expected) == cap(expected) { - return res - } - expected = append(expected, tok) - } - } - - if parserDef[state] == -2 { - i := 0 - for parserExca[i] != -1 || parserExca[i+1] != state { - i += 2 - } - - // Look for tokens that we accept or reduce. - for i += 2; parserExca[i] >= 0; i += 2 { - tok := parserExca[i] - if tok < TOKSTART || parserExca[i+1] == 0 { - continue - } - if len(expected) == cap(expected) { - return res - } - expected = append(expected, tok) - } - - // If the default action is to accept or reduce, give up. 
- if parserExca[i+1] != 0 { - return res - } - } - - for i, tok := range expected { - if i == 0 { - res += ", expecting " - } else { - res += " or " - } - res += parserTokname(tok) - } - return res -} - -func parserlex1(lex parserLexer, lval *parserSymType) (char, token int) { - token = 0 - char = lex.Lex(lval) +func parserlex1(lex parserLexer, lval *parserSymType) int { + c := 0 + char := lex.Lex(lval) if char <= 0 { - token = parserTok1[0] + c = parserTok1[0] goto out } if char < len(parserTok1) { - token = parserTok1[char] + c = parserTok1[char] goto out } if char >= parserPrivate { if char < parserPrivate+len(parserTok2) { - token = parserTok2[char-parserPrivate] + c = parserTok2[char-parserPrivate] goto out } } for i := 0; i < len(parserTok3); i += 2 { - token = parserTok3[i+0] - if token == char { - token = parserTok3[i+1] + c = parserTok3[i+0] + if c == char { + c = parserTok3[i+1] goto out } } out: - if token == 0 { - token = parserTok2[1] /* unknown char */ + if c == 0 { + c = parserTok2[1] /* unknown char */ } if parserDebug >= 3 { - __yyfmt__.Printf("lex %s(%d)\n", parserTokname(token), uint(char)) + __yyfmt__.Printf("lex %s(%d)\n", parserTokname(c), uint(char)) } - return char, token + return c } func parserParse(parserlex parserLexer) int { - return parserNewParser().Parse(parserlex) -} - -func (parserrcvr *parserParserImpl) Parse(parserlex parserLexer) int { var parsern int var parserlval parserSymType var parserVAL parserSymType - var parserDollar []parserSymType - _ = parserDollar // silence set and not used parserS := make([]parserSymType, parserMaxDepth) Nerrs := 0 /* number of errors */ Errflag := 0 /* error recovery flag */ parserstate := 0 parserchar := -1 - parsertoken := -1 // parserchar translated into internal numbering - parserrcvr.lookahead = func() int { return parserchar } - defer func() { - // Make sure we report no lookahead when not parsing. - parserstate = -1 - parserchar = -1 - parsertoken = -1 - }() parserp := -1 goto parserstack @@ -316,7 +207,7 @@ ret1: parserstack: /* put a state and value onto the stack */ if parserDebug >= 4 { - __yyfmt__.Printf("char %v in %v\n", parserTokname(parsertoken), parserStatname(parserstate)) + __yyfmt__.Printf("char %v in %v\n", parserTokname(parserchar), parserStatname(parserstate)) } parserp++ @@ -334,16 +225,15 @@ parsernewstate: goto parserdefault /* simple state */ } if parserchar < 0 { - parserchar, parsertoken = parserlex1(parserlex, &parserlval) + parserchar = parserlex1(parserlex, &parserlval) } - parsern += parsertoken + parsern += parserchar if parsern < 0 || parsern >= parserLast { goto parserdefault } parsern = parserAct[parsern] - if parserChk[parsern] == parsertoken { /* valid shift */ + if parserChk[parsern] == parserchar { /* valid shift */ parserchar = -1 - parsertoken = -1 parserVAL = parserlval parserstate = parsern if Errflag > 0 { @@ -357,7 +247,7 @@ parserdefault: parsern = parserDef[parserstate] if parsern == -2 { if parserchar < 0 { - parserchar, parsertoken = parserlex1(parserlex, &parserlval) + parserchar = parserlex1(parserlex, &parserlval) } /* look through exception table */ @@ -370,7 +260,7 @@ parserdefault: } for xi += 2; ; xi += 2 { parsern = parserExca[xi+0] - if parsern < 0 || parsern == parsertoken { + if parsern < 0 || parsern == parserchar { break } } @@ -383,11 +273,11 @@ parserdefault: /* error ... 
attempt to resume parsing */ switch Errflag { case 0: /* brand new error */ - parserlex.Error(parserErrorMessage(parserstate, parsertoken)) + parserlex.Error("syntax error") Nerrs++ if parserDebug >= 1 { __yyfmt__.Printf("%s", parserStatname(parserstate)) - __yyfmt__.Printf(" saw %s\n", parserTokname(parsertoken)) + __yyfmt__.Printf(" saw %s\n", parserTokname(parserchar)) } fallthrough @@ -415,13 +305,12 @@ parserdefault: case 3: /* no shift yet; clobber input char */ if parserDebug >= 2 { - __yyfmt__.Printf("error recovery discards %s\n", parserTokname(parsertoken)) + __yyfmt__.Printf("error recovery discards %s\n", parserTokname(parserchar)) } - if parsertoken == parserEofCode { + if parserchar == parserEofCode { goto ret1 } parserchar = -1 - parsertoken = -1 goto parsernewstate /* try again in the same state */ } } @@ -436,13 +325,6 @@ parserdefault: _ = parserpt // guard against "declared and not used" parserp -= parserR2[parsern] - // parserp is now the index of $0. Perform the default action. Iff the - // reduced production is ε, $1 is possibly out of range. - if parserp+1 >= len(parserS) { - nyys := make([]parserSymType, len(parserS)*2) - copy(nyys, parserS) - parserS = nyys - } parserVAL = parserS[parserp+1] /* consult goto table to find next state */ @@ -462,7 +344,6 @@ parserdefault: switch parsernt { case 1: - parserDollar = parserS[parserpt-0 : parserpt+1] //line lang.y:35 { parserResult = &ast.LiteralNode{ @@ -472,10 +353,9 @@ parserdefault: } } case 2: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:43 { - parserResult = parserDollar[1].node + parserResult = parserS[parserpt-0].node // We want to make sure that the top value is always a Concat // so that the return value is always a string type from an @@ -485,30 +365,28 @@ parserdefault: // because functionally the AST is the same, but we do that because // it makes for an easy literal check later (to check if a string // has any interpolations). 
- if _, ok := parserDollar[1].node.(*ast.Concat); !ok { - if n, ok := parserDollar[1].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString { + if _, ok := parserS[parserpt-0].node.(*ast.Concat); !ok { + if n, ok := parserS[parserpt-0].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString { parserResult = &ast.Concat{ - Exprs: []ast.Node{parserDollar[1].node}, - Posx: parserDollar[1].node.Pos(), + Exprs: []ast.Node{parserS[parserpt-0].node}, + Posx: parserS[parserpt-0].node.Pos(), } } } } case 3: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:66 { - parserVAL.node = parserDollar[1].node + parserVAL.node = parserS[parserpt-0].node } case 4: - parserDollar = parserS[parserpt-2 : parserpt+1] //line lang.y:70 { var result []ast.Node - if c, ok := parserDollar[1].node.(*ast.Concat); ok { - result = append(c.Exprs, parserDollar[2].node) + if c, ok := parserS[parserpt-1].node.(*ast.Concat); ok { + result = append(c.Exprs, parserS[parserpt-0].node) } else { - result = []ast.Node{parserDollar[1].node, parserDollar[2].node} + result = []ast.Node{parserS[parserpt-1].node, parserS[parserpt-0].node} } parserVAL.node = &ast.Concat{ @@ -517,103 +395,89 @@ parserdefault: } } case 5: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:86 { - parserVAL.node = parserDollar[1].node + parserVAL.node = parserS[parserpt-0].node } case 6: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:90 { - parserVAL.node = parserDollar[1].node + parserVAL.node = parserS[parserpt-0].node } case 7: - parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:96 { - parserVAL.node = parserDollar[2].node + parserVAL.node = parserS[parserpt-1].node } case 8: - parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:102 { - parserVAL.node = parserDollar[2].node + parserVAL.node = parserS[parserpt-1].node } case 9: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:106 { - parserVAL.node = parserDollar[1].node + parserVAL.node = parserS[parserpt-0].node } case 10: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:110 { parserVAL.node = &ast.LiteralNode{ - Value: parserDollar[1].token.Value.(int), + Value: parserS[parserpt-0].token.Value.(int), Typex: ast.TypeInt, - Posx: parserDollar[1].token.Pos, + Posx: parserS[parserpt-0].token.Pos, } } case 11: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:118 { parserVAL.node = &ast.LiteralNode{ - Value: parserDollar[1].token.Value.(float64), + Value: parserS[parserpt-0].token.Value.(float64), Typex: ast.TypeFloat, - Posx: parserDollar[1].token.Pos, + Posx: parserS[parserpt-0].token.Pos, } } case 12: - parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:126 { parserVAL.node = &ast.Arithmetic{ - Op: parserDollar[2].token.Value.(ast.ArithmeticOp), - Exprs: []ast.Node{parserDollar[1].node, parserDollar[3].node}, - Posx: parserDollar[1].node.Pos(), + Op: parserS[parserpt-1].token.Value.(ast.ArithmeticOp), + Exprs: []ast.Node{parserS[parserpt-2].node, parserS[parserpt-0].node}, + Posx: parserS[parserpt-2].node.Pos(), } } case 13: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:134 { - parserVAL.node = &ast.VariableAccess{Name: parserDollar[1].token.Value.(string), Posx: parserDollar[1].token.Pos} + parserVAL.node = &ast.VariableAccess{Name: parserS[parserpt-0].token.Value.(string), Posx: parserS[parserpt-0].token.Pos} } case 14: - parserDollar = parserS[parserpt-4 : parserpt+1] //line lang.y:138 { - parserVAL.node = &ast.Call{Func: parserDollar[1].token.Value.(string), 
Args: parserDollar[3].nodeList, Posx: parserDollar[1].token.Pos} + parserVAL.node = &ast.Call{Func: parserS[parserpt-3].token.Value.(string), Args: parserS[parserpt-1].nodeList, Posx: parserS[parserpt-3].token.Pos} } case 15: - parserDollar = parserS[parserpt-0 : parserpt+1] //line lang.y:143 { parserVAL.nodeList = nil } case 16: - parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:147 { - parserVAL.nodeList = append(parserDollar[1].nodeList, parserDollar[3].node) + parserVAL.nodeList = append(parserS[parserpt-2].nodeList, parserS[parserpt-0].node) } case 17: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:151 { - parserVAL.nodeList = append(parserVAL.nodeList, parserDollar[1].node) + parserVAL.nodeList = append(parserVAL.nodeList, parserS[parserpt-0].node) } case 18: - parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:157 { parserVAL.node = &ast.LiteralNode{ - Value: parserDollar[1].token.Value.(string), + Value: parserS[parserpt-0].token.Value.(string), Typex: ast.TypeString, - Posx: parserDollar[1].token.Pos, + Posx: parserS[parserpt-0].token.Pos, } } } From cccc5d03e393acdddacbdb8dc27cdeed602047a9 Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Tue, 20 Oct 2015 14:49:51 +1300 Subject: [PATCH 292/335] Add lower / upper interpolation functions --- config/interpolate_funcs.go | 28 +++++++++++ config/interpolate_funcs_test.go | 48 +++++++++++++++++++ .../docs/configuration/interpolation.html.md | 4 ++ 3 files changed, 80 insertions(+) diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go index 5322e46c4f..1b58ac93cd 100644 --- a/config/interpolate_funcs.go +++ b/config/interpolate_funcs.go @@ -29,10 +29,12 @@ func init() { "index": interpolationFuncIndex(), "join": interpolationFuncJoin(), "length": interpolationFuncLength(), + "lower": interpolationFuncLower(), "replace": interpolationFuncReplace(), "split": interpolationFuncSplit(), "base64encode": interpolationFuncBase64Encode(), "base64decode": interpolationFuncBase64Decode(), + "upper": interpolationFuncUpper(), } } @@ -442,3 +444,29 @@ func interpolationFuncBase64Decode() ast.Function { }, } } + +// interpolationFuncLower implements the "lower" function that does +// string lower casing. +func interpolationFuncLower() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + toLower := args[0].(string) + return strings.ToLower(toLower), nil + }, + } +} + +// interpolationFuncUpper implements the "upper" function that does +// string upper casing. 
+func interpolationFuncUpper() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			toUpper := args[0].(string)
+			return strings.ToUpper(toUpper), nil
+		},
+	}
+}
diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go
index cafdf05640..f40f56860e 100644
--- a/config/interpolate_funcs_test.go
+++ b/config/interpolate_funcs_test.go
@@ -644,6 +644,54 @@ func TestInterpolateFuncBase64Decode(t *testing.T) {
 	})
 }
 
+func TestInterpolateFuncLower(t *testing.T) {
+	testFunction(t, testFunctionConfig{
+		Cases: []testFunctionCase{
+			{
+				`${lower("HELLO")}`,
+				"hello",
+				false,
+			},
+
+			{
+				`${lower("")}`,
+				"",
+				false,
+			},
+
+			{
+				`${lower()}`,
+				nil,
+				true,
+			},
+		},
+	})
+}
+
+func TestInterpolateFuncUpper(t *testing.T) {
+	testFunction(t, testFunctionConfig{
+		Cases: []testFunctionCase{
+			{
+				`${upper("hello")}`,
+				"HELLO",
+				false,
+			},
+
+			{
+				`${upper("")}`,
+				"",
+				false,
+			},
+
+			{
+				`${upper()}`,
+				nil,
+				true,
+			},
+		},
+	})
+}
+
 type testFunctionConfig struct {
 	Cases []testFunctionCase
 	Vars  map[string]ast.Variable
diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md
index 28d03790d7..9408390760 100644
--- a/website/source/docs/configuration/interpolation.html.md
+++ b/website/source/docs/configuration/interpolation.html.md
@@ -131,6 +131,8 @@ The supported built-in functions are:
     variable. The `map` parameter should be another variable, such
     as `var.amis`.
 
+  * `lower(string)` - returns a copy of the string with all Unicode letters mapped to their lower case.
+
   * `replace(string, search, replace)` - Does a search and replace on the
     given string. All instances of `search` are replaced with the value
     of `replace`. If `search` is wrapped in forward slashes, it is treated
@@ -147,6 +149,8 @@ The supported built-in functions are:
     `a_resource_param = ["${split(",", var.CSV_STRING)}"]`.
     Example: `split(",", module.amod.server_ids)`
 
+  * `upper(string)` - returns a copy of the string with all Unicode letters mapped to their upper case.
+
 ## Templates
 
 Long strings can be managed using templates. [Templates](/docs/providers/template/index.html) are [resources](/docs/configuration/resources.html) defined by a filename and some variables to use during interpolation. They have a computed `rendered` attribute containing the result.

From 938b7e2dba9d27b7f0fab7faa201231a06d9e42a Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Wed, 21 Oct 2015 08:19:51 -0700
Subject: [PATCH 293/335] Update CHANGELOG.md

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 007dedfcd6..48ceb0cd7c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,7 @@
 FEATURES:
 
   * New remote state backend: `etcd` [GH-3487]
+  * New interpolation functions: `upper` and `lower` [GH-3558]
 
 INTERNAL IMPROVEMENTS:
 

From dd56b39e0ce8a0567eb6f154a5e73f681bfa615e Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Wed, 21 Oct 2015 08:34:46 -0700
Subject: [PATCH 294/335] CodeDeploy deployment group app_name instead of
 application_name.

The corresponding resource is called aws_codedeploy_app, so for consistency
we'll name the attribute app_name instead of application_name.
--- .../aws/resource_aws_codedeploy_deployment_group.go | 12 ++++++------ .../resource_aws_codedeploy_deployment_group_test.go | 6 +++--- .../aws/r/codedeploy_deployment_group.html.markdown | 6 +++--- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go index a9f3acb078..ee81f1cf3c 100644 --- a/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go +++ b/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go @@ -23,7 +23,7 @@ func resourceAwsCodeDeployDeploymentGroup() *schema.Resource { Delete: resourceAwsCodeDeployDeploymentGroupDelete, Schema: map[string]*schema.Schema{ - "application_name": &schema.Schema{ + "app_name": &schema.Schema{ Type: schema.TypeString, Required: true, ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { @@ -132,7 +132,7 @@ func resourceAwsCodeDeployDeploymentGroup() *schema.Resource { func resourceAwsCodeDeployDeploymentGroupCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).codedeployconn - application := d.Get("application_name").(string) + application := d.Get("app_name").(string) deploymentGroup := d.Get("deployment_group_name").(string) input := codedeploy.CreateDeploymentGroupInput{ @@ -189,14 +189,14 @@ func resourceAwsCodeDeployDeploymentGroupRead(d *schema.ResourceData, meta inter log.Printf("[DEBUG] Reading CodeDeploy DeploymentGroup %s", d.Id()) resp, err := conn.GetDeploymentGroup(&codedeploy.GetDeploymentGroupInput{ - ApplicationName: aws.String(d.Get("application_name").(string)), + ApplicationName: aws.String(d.Get("app_name").(string)), DeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), }) if err != nil { return err } - d.Set("application_name", *resp.DeploymentGroupInfo.ApplicationName) + d.Set("app_name", *resp.DeploymentGroupInfo.ApplicationName) d.Set("autoscaling_groups", resp.DeploymentGroupInfo.AutoScalingGroups) d.Set("deployment_config_name", *resp.DeploymentGroupInfo.DeploymentConfigName) d.Set("deployment_group_name", *resp.DeploymentGroupInfo.DeploymentGroupName) @@ -215,7 +215,7 @@ func resourceAwsCodeDeployDeploymentGroupUpdate(d *schema.ResourceData, meta int conn := meta.(*AWSClient).codedeployconn input := codedeploy.UpdateDeploymentGroupInput{ - ApplicationName: aws.String(d.Get("application_name").(string)), + ApplicationName: aws.String(d.Get("app_name").(string)), CurrentDeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), } @@ -258,7 +258,7 @@ func resourceAwsCodeDeployDeploymentGroupDelete(d *schema.ResourceData, meta int log.Printf("[DEBUG] Deleting CodeDeploy DeploymentGroup %s", d.Id()) _, err := conn.DeleteDeploymentGroup(&codedeploy.DeleteDeploymentGroupInput{ - ApplicationName: aws.String(d.Get("application_name").(string)), + ApplicationName: aws.String(d.Get("app_name").(string)), DeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), }) if err != nil { diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go index d883b26b8d..7608b1f585 100644 --- a/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go +++ b/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go @@ -41,7 +41,7 @@ func testAccCheckAWSCodeDeployDeploymentGroupDestroy(s *terraform.State) error { } resp, err := 
conn.GetDeploymentGroup(&codedeploy.GetDeploymentGroupInput{ - ApplicationName: aws.String(rs.Primary.Attributes["application_name"]), + ApplicationName: aws.String(rs.Primary.Attributes["app_name"]), DeploymentGroupName: aws.String(rs.Primary.Attributes["deployment_group_name"]), }) @@ -123,7 +123,7 @@ EOF } resource "aws_codedeploy_deployment_group" "foo" { - application_name = "${aws_codedeploy_app.foo_app.name}" + app_name = "${aws_codedeploy_app.foo_app.name}" deployment_group_name = "foo" service_role_arn = "${aws_iam_role.foo_role.arn}" ec2_tag_filter { @@ -188,7 +188,7 @@ EOF } resource "aws_codedeploy_deployment_group" "foo" { - application_name = "${aws_codedeploy_app.foo_app.name}" + app_name = "${aws_codedeploy_app.foo_app.name}" deployment_group_name = "bar" service_role_arn = "${aws_iam_role.foo_role.arn}" ec2_tag_filter { diff --git a/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown b/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown index cb2417fed6..ae0c3b6455 100644 --- a/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown +++ b/website/source/docs/providers/aws/r/codedeploy_deployment_group.html.markdown @@ -67,7 +67,7 @@ EOF } resource "aws_codedeploy_deployment_group" "foo" { - application_name = "${aws_codedeploy_app.foo_app.name}" + app_name = "${aws_codedeploy_app.foo_app.name}" deployment_group_name = "bar" service_role_arn = "${aws_iam_role.foo_role.arn}" ec2_tag_filter { @@ -82,7 +82,7 @@ resource "aws_codedeploy_deployment_group" "foo" { The following arguments are supported: -* `application_name` - (Required) The name of the application. +* `app_name` - (Required) The name of the application. * `deployment_group_name` - (Required) The name of the deployment group. * `service_role_arn` - (Required) The service role ARN that allows deployments. * `autoscaling_groups` - (Optional) Autoscaling groups associated with the deployment group. @@ -101,7 +101,7 @@ Both ec2_tag_filter and on_premises_tag_filter blocks support the following: The following attributes are exported: * `id` - The deployment group's ID. -* `application_name` - The group's assigned application. +* `app_name` - The group's assigned application. * `deployment_group_name` - The group's name. * `service_role_arn` - The group's service role ARN. * `autoscaling_groups` - The autoscaling groups associated with the deployment group. 
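Stepping back briefly to the `upper` and `lower` interpolation functions from patch 292 above: both callbacks are thin wrappers over Go's standard library, whose case mapping is Unicode-aware, hence the documentation's wording about "Unicode letters" rather than ASCII. A minimal standalone sketch of what the callbacks delegate to (an illustration, not part of any patch):

```go
package main

import (
	"fmt"
	"strings"
)

// The new interpolation callbacks reduce to these two standard-library
// calls; the mapping covers all Unicode letters, not just A-Z and a-z.
func main() {
	fmt.Println(strings.ToUpper("hello")) // HELLO
	fmt.Println(strings.ToLower("HÉLLO")) // héllo: the accented É is lowered too
	fmt.Println(strings.ToUpper(""))      // "" (the empty-string test cases fall out of this)
}
```

The zero-argument `${upper()}` and `${lower()}` cases in the new tests error out before either callback runs, since the fixed single-argument `ArgTypes` signature fails interpolation type checking.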
From 305db7341b7d710e35c6b4273ea292fc43435a13 Mon Sep 17 00:00:00 2001
From: Paul Hinze
Date: Wed, 21 Oct 2015 10:37:18 -0500
Subject: [PATCH 295/335] Update CHANGELOG.md

---
 CHANGELOG.md | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 48ceb0cd7c..a68b335178 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,9 +5,16 @@ FEATURES:
   * New remote state backend: `etcd` [GH-3487]
   * New interpolation functions: `upper` and `lower` [GH-3558]

+BUG FIXES:
+
+  * core: Fix remote state conflicts caused by ambiguity in ordering of deeply nested modules [GH-3573]
+  * core: Fix remote state conflicts caused by state metadata differences [GH-3569]
+  * core: Avoid using http.DefaultClient [GH-3532]
+
 INTERNAL IMPROVEMENTS:

-  * provider/digitalocean: use official Go client [GH-3333]
+  * provider/digitalocean: use official Go client [GH-3333]
+  * core: extract module fetching to external library [GH-3516]

 ## 0.6.4 (October 15, 2015)

From 8a60219c0455de66a30372734f961e30542f1f81 Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Wed, 21 Oct 2015 08:40:26 -0700
Subject: [PATCH 296/335] Update CHANGELOG.md

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a68b335178..ea969533ad 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@ FEATURES:

+  * **New resources: `aws_codedeploy_app` and `aws_codedeploy_deployment_group`** [GH-2783]
   * New remote state backend: `etcd` [GH-3487]
   * New interpolation functions: `upper` and `lower` [GH-3558]

From f790309634ba59f87712635cdf751b6d653762b3 Mon Sep 17 00:00:00 2001
From: Martin Atkins
Date: Wed, 21 Oct 2015 08:44:19 -0700
Subject: [PATCH 297/335] CodeDeploy links to the AWS provider sidebar.

Missed these when merging #2783.
--- website/source/layouts/aws.erb | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 4b34da23a4..f6efd23775 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -26,6 +26,21 @@ + > + CodeDeploy Resources + + + > Directory Service Resources From ed951639847c310357778e9316b6ae1c51af54aa Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 27 Oct 2015 16:17:19 -0500 Subject: [PATCH 329/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f36e70dbee..885774c90a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ IMPROVEMENTS: * provider/google: Accurate Terraform Version [GH-3554] * provider/google: Simplified auth (DefaultClient support) [GH-3553] * provider/google: automatic_restart, preemptible, on_host_maintenance options [GH-3643] + * null_resource: enhance and document [GH-3244, GH-3659] BUG FIXES: From 122790d32bd23fbe0c91e9bb1dd0bce19f1ec8b5 Mon Sep 17 00:00:00 2001 From: Kazunori Kojima Date: Wed, 28 Oct 2015 09:19:37 +0900 Subject: [PATCH 330/335] Add check errors on reading CORS rules --- builtin/providers/aws/resource_aws_s3_bucket.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_s3_bucket.go b/builtin/providers/aws/resource_aws_s3_bucket.go index 93105ec510..3c284370f5 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket.go +++ b/builtin/providers/aws/resource_aws_s3_bucket.go @@ -276,7 +276,9 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { rule["max_age_seconds"] = ruleObject.MaxAgeSeconds rules = append(rules, rule) } - d.Set("cors_rule", rules) + if err := d.Set("cors_rule", rules); err != nil { + return fmt.Errorf("error reading S3 bucket \"%s\" CORS rules: %s", d.Id(), err) + } } // Read the website configuration From 89fb16ada0ca5d9f8fc368c46799881d753ede9b Mon Sep 17 00:00:00 2001 From: Clint Date: Wed, 28 Oct 2015 10:10:06 -0500 Subject: [PATCH 331/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 885774c90a..47a446aa9d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ IMPROVEMENTS: * provider/google: Simplified auth (DefaultClient support) [GH-3553] * provider/google: automatic_restart, preemptible, on_host_maintenance options [GH-3643] * null_resource: enhance and document [GH-3244, GH-3659] + * provider/aws: Add CORS settings to S3 bucket [GH-3387] BUG FIXES: From 784aadd5056ac05b53cec1583b5f2e4beeb4106a Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 28 Oct 2015 14:54:53 +0000 Subject: [PATCH 332/335] Allow cluster name, not only ARN for aws_ecs_service --- .../providers/aws/resource_aws_ecs_service.go | 18 +++++-- .../aws/resource_aws_ecs_service_test.go | 48 +++++++++++++++++++ 2 files changed, 61 insertions(+), 5 deletions(-) diff --git a/builtin/providers/aws/resource_aws_ecs_service.go b/builtin/providers/aws/resource_aws_ecs_service.go index 9d3a36ab2d..ab8562acb9 100644 --- a/builtin/providers/aws/resource_aws_ecs_service.go +++ b/builtin/providers/aws/resource_aws_ecs_service.go @@ -137,7 +137,6 @@ func resourceAwsEcsServiceCreate(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] ECS service created: %s", *service.ServiceArn) d.SetId(*service.ServiceArn) - d.Set("cluster", *service.ClusterArn) return resourceAwsEcsServiceUpdate(d, meta) } @@ -175,14 +174,21 @@ 
func resourceAwsEcsServiceRead(d *schema.ResourceData, meta interface{}) error { } d.Set("desired_count", *service.DesiredCount) - d.Set("cluster", *service.ClusterArn) + + // Save cluster in the same format + if strings.HasPrefix(d.Get("cluster").(string), "arn:aws:ecs:") { + d.Set("cluster", *service.ClusterArn) + } else { + clusterARN := getNameFromARN(*service.ClusterArn) + d.Set("cluster", clusterARN) + } // Save IAM role in the same format if service.RoleArn != nil { if strings.HasPrefix(d.Get("iam_role").(string), "arn:aws:iam:") { d.Set("iam_role", *service.RoleArn) } else { - roleARN := buildIamRoleNameFromARN(*service.RoleArn) + roleARN := getNameFromARN(*service.RoleArn) d.Set("iam_role", roleARN) } } @@ -306,8 +312,10 @@ func buildFamilyAndRevisionFromARN(arn string) string { return strings.Split(arn, "/")[1] } -func buildIamRoleNameFromARN(arn string) string { - // arn:aws:iam::0123456789:role/EcsService +// Expects the following ARNs: +// arn:aws:iam::0123456789:role/EcsService +// arn:aws:ecs:us-west-2:0123456789:cluster/radek-cluster +func getNameFromARN(arn string) string { return strings.Split(arn, "/")[1] } diff --git a/builtin/providers/aws/resource_aws_ecs_service_test.go b/builtin/providers/aws/resource_aws_ecs_service_test.go index 2f9b8fedbf..7f88f1536d 100644 --- a/builtin/providers/aws/resource_aws_ecs_service_test.go +++ b/builtin/providers/aws/resource_aws_ecs_service_test.go @@ -178,6 +178,26 @@ func TestAccAWSEcsService_withIamRole(t *testing.T) { }) } +// Regression for https://github.com/hashicorp/terraform/issues/3361 +func TestAccAWSEcsService_withEcsClusterName(t *testing.T) { + clusterName := regexp.MustCompile("^terraformecstestcluster$") + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSEcsServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSEcsServiceWithEcsClusterName, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSEcsServiceExists("aws_ecs_service.jenkins"), + resource.TestMatchResourceAttr( + "aws_ecs_service.jenkins", "cluster", clusterName), + ), + }, + }, + }) +} + func testAccCheckAWSEcsServiceDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).ecsconn @@ -471,3 +491,31 @@ resource "aws_ecs_service" "ghost" { desired_count = 1 } ` + +var testAccAWSEcsServiceWithEcsClusterName = ` +resource "aws_ecs_cluster" "default" { + name = "terraformecstestcluster" +} + +resource "aws_ecs_task_definition" "jenkins" { + family = "jenkins" + container_definitions = < Date: Wed, 28 Oct 2015 16:17:20 +0000 Subject: [PATCH 333/335] Update CHANGELOG.md --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 47a446aa9d..bf27fb1c33 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 0.6.7 (Unreleased) +FEATURES: + + * **New resources: `aws_cloudformation_stack`** [GH-2636] + IMPROVEMENTS: * provider/google: Accurate Terraform Version [GH-3554] From af04321723200d4cb4158e7e9a6b1f8b9f6aaf1b Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 28 Oct 2015 12:17:14 -0400 Subject: [PATCH 334/335] config: return to the go1.5 generated lang/y.go It has improvements to error messaging that we want. We'll use this occasion begin developing / building with Go 1.5 from here on out. Build times will be slower, but we have core development plans that will help mitigate that. 
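One concrete piece of that improved error messaging is visible in the regenerated code below: the token-name table now carries the `$end`, `error`, and `$unk` pseudo-tokens up front, and a new `parserErrorMessage` helper can suggest expected tokens Bison-style. A reduced sketch of the new name lookup, assuming a toy token table rather than the real generated one:

```go
package main

import "fmt"

// Sketch of the go1.5 goyacc lookup: with the pseudo-tokens included, the
// table is indexed at c-1 instead of the old c-4, so every token code,
// including end-of-input, resolves to a printable name.
var toknames = [...]string{
	"$end", "error", "$unk",
	"PROGRAM_BRACKET_LEFT", "PROGRAM_BRACKET_RIGHT",
}

func tokname(c int) string {
	if c >= 1 && c-1 < len(toknames) && toknames[c-1] != "" {
		return toknames[c-1]
	}
	return fmt.Sprintf("tok-%v", c)
}

func main() {
	fmt.Println(tokname(1))  // $end
	fmt.Println(tokname(4))  // PROGRAM_BRACKET_LEFT
	fmt.Println(tokname(99)) // tok-99
}
```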
/cc @hashicorp/terraform-committers --- config/lang/y.go | 272 +++++++++++++++++++++++++++++++++++------------ 1 file changed, 204 insertions(+), 68 deletions(-) diff --git a/config/lang/y.go b/config/lang/y.go index e7dd185ae1..fd0693f151 100644 --- a/config/lang/y.go +++ b/config/lang/y.go @@ -30,7 +30,10 @@ const INTEGER = 57355 const FLOAT = 57356 const STRING = 57357 -var parserToknames = []string{ +var parserToknames = [...]string{ + "$end", + "error", + "$unk", "PROGRAM_BRACKET_LEFT", "PROGRAM_BRACKET_RIGHT", "PROGRAM_STRING_START", @@ -44,7 +47,7 @@ var parserToknames = []string{ "FLOAT", "STRING", } -var parserStatenames = []string{} +var parserStatenames = [...]string{} const parserEofCode = 1 const parserErrCode = 2 @@ -53,7 +56,7 @@ const parserMaxDepth = 200 //line lang.y:165 //line yacctab:1 -var parserExca = []int{ +var parserExca = [...]int{ -1, 1, 1, -1, -2, 0, @@ -67,75 +70,103 @@ var parserStates []string const parserLast = 30 -var parserAct = []int{ +var parserAct = [...]int{ 9, 20, 16, 16, 7, 7, 3, 18, 10, 8, 1, 17, 14, 12, 13, 6, 6, 19, 8, 22, 15, 23, 24, 11, 2, 25, 16, 21, 4, 5, } -var parserPact = []int{ +var parserPact = [...]int{ 1, -1000, 1, -1000, -1000, -1000, -1000, 0, -1000, 15, 0, 1, -1000, -1000, -1, -1000, 0, -8, 0, -1000, -1000, 12, -9, -1000, 0, -9, } -var parserPgo = []int{ +var parserPgo = [...]int{ 0, 0, 29, 28, 23, 6, 27, 10, } -var parserR1 = []int{ +var parserR1 = [...]int{ 0, 7, 7, 4, 4, 5, 5, 2, 1, 1, 1, 1, 1, 1, 1, 6, 6, 6, 3, } -var parserR2 = []int{ +var parserR2 = [...]int{ 0, 0, 1, 1, 2, 1, 1, 3, 3, 1, 1, 1, 3, 1, 4, 0, 3, 1, 1, } -var parserChk = []int{ +var parserChk = [...]int{ -1000, -7, -4, -5, -3, -2, 15, 4, -5, -1, 8, -4, 13, 14, 12, 5, 11, -1, 8, -1, 9, -6, -1, 9, 10, -1, } -var parserDef = []int{ +var parserDef = [...]int{ 1, -2, 2, 3, 5, 6, 18, 0, 4, 0, 0, 9, 10, 11, 13, 7, 0, 0, 15, 12, 8, 0, 17, 14, 0, 16, } -var parserTok1 = []int{ +var parserTok1 = [...]int{ 1, } -var parserTok2 = []int{ +var parserTok2 = [...]int{ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, } -var parserTok3 = []int{ +var parserTok3 = [...]int{ 0, } +var parserErrorMessages = [...]struct { + state int + token int + msg string +}{} + //line yaccpar:1 /* parser for yacc output */ -var parserDebug = 0 +var ( + parserDebug = 0 + parserErrorVerbose = false +) type parserLexer interface { Lex(lval *parserSymType) int Error(s string) } +type parserParser interface { + Parse(parserLexer) int + Lookahead() int +} + +type parserParserImpl struct { + lookahead func() int +} + +func (p *parserParserImpl) Lookahead() int { + return p.lookahead() +} + +func parserNewParser() parserParser { + p := &parserParserImpl{ + lookahead: func() int { return -1 }, + } + return p +} + const parserFlag = -1000 func parserTokname(c int) string { - // 4 is TOKSTART above - if c >= 4 && c-4 < len(parserToknames) { - if parserToknames[c-4] != "" { - return parserToknames[c-4] + if c >= 1 && c-1 < len(parserToknames) { + if parserToknames[c-1] != "" { + return parserToknames[c-1] } } return __yyfmt__.Sprintf("tok-%v", c) @@ -150,51 +181,129 @@ func parserStatname(s int) string { return __yyfmt__.Sprintf("state-%v", s) } -func parserlex1(lex parserLexer, lval *parserSymType) int { - c := 0 - char := lex.Lex(lval) +func parserErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !parserErrorVerbose { + return "syntax error" + } + + for _, e := range parserErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := 
"syntax error: unexpected " + parserTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. + base := parserPact[state] + for tok := TOKSTART; tok-1 < len(parserToknames); tok++ { + if n := base + tok; n >= 0 && n < parserLast && parserChk[parserAct[n]] == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if parserDef[state] == -2 { + i := 0 + for parserExca[i] != -1 || parserExca[i+1] != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; parserExca[i] >= 0; i += 2 { + tok := parserExca[i] + if tok < TOKSTART || parserExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. + if parserExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += parserTokname(tok) + } + return res +} + +func parserlex1(lex parserLexer, lval *parserSymType) (char, token int) { + token = 0 + char = lex.Lex(lval) if char <= 0 { - c = parserTok1[0] + token = parserTok1[0] goto out } if char < len(parserTok1) { - c = parserTok1[char] + token = parserTok1[char] goto out } if char >= parserPrivate { if char < parserPrivate+len(parserTok2) { - c = parserTok2[char-parserPrivate] + token = parserTok2[char-parserPrivate] goto out } } for i := 0; i < len(parserTok3); i += 2 { - c = parserTok3[i+0] - if c == char { - c = parserTok3[i+1] + token = parserTok3[i+0] + if token == char { + token = parserTok3[i+1] goto out } } out: - if c == 0 { - c = parserTok2[1] /* unknown char */ + if token == 0 { + token = parserTok2[1] /* unknown char */ } if parserDebug >= 3 { - __yyfmt__.Printf("lex %s(%d)\n", parserTokname(c), uint(char)) + __yyfmt__.Printf("lex %s(%d)\n", parserTokname(token), uint(char)) } - return c + return char, token } func parserParse(parserlex parserLexer) int { + return parserNewParser().Parse(parserlex) +} + +func (parserrcvr *parserParserImpl) Parse(parserlex parserLexer) int { var parsern int var parserlval parserSymType var parserVAL parserSymType + var parserDollar []parserSymType + _ = parserDollar // silence set and not used parserS := make([]parserSymType, parserMaxDepth) Nerrs := 0 /* number of errors */ Errflag := 0 /* error recovery flag */ parserstate := 0 parserchar := -1 + parsertoken := -1 // parserchar translated into internal numbering + parserrcvr.lookahead = func() int { return parserchar } + defer func() { + // Make sure we report no lookahead when not parsing. 
+ parserstate = -1 + parserchar = -1 + parsertoken = -1 + }() parserp := -1 goto parserstack @@ -207,7 +316,7 @@ ret1: parserstack: /* put a state and value onto the stack */ if parserDebug >= 4 { - __yyfmt__.Printf("char %v in %v\n", parserTokname(parserchar), parserStatname(parserstate)) + __yyfmt__.Printf("char %v in %v\n", parserTokname(parsertoken), parserStatname(parserstate)) } parserp++ @@ -225,15 +334,16 @@ parsernewstate: goto parserdefault /* simple state */ } if parserchar < 0 { - parserchar = parserlex1(parserlex, &parserlval) + parserchar, parsertoken = parserlex1(parserlex, &parserlval) } - parsern += parserchar + parsern += parsertoken if parsern < 0 || parsern >= parserLast { goto parserdefault } parsern = parserAct[parsern] - if parserChk[parsern] == parserchar { /* valid shift */ + if parserChk[parsern] == parsertoken { /* valid shift */ parserchar = -1 + parsertoken = -1 parserVAL = parserlval parserstate = parsern if Errflag > 0 { @@ -247,7 +357,7 @@ parserdefault: parsern = parserDef[parserstate] if parsern == -2 { if parserchar < 0 { - parserchar = parserlex1(parserlex, &parserlval) + parserchar, parsertoken = parserlex1(parserlex, &parserlval) } /* look through exception table */ @@ -260,7 +370,7 @@ parserdefault: } for xi += 2; ; xi += 2 { parsern = parserExca[xi+0] - if parsern < 0 || parsern == parserchar { + if parsern < 0 || parsern == parsertoken { break } } @@ -273,11 +383,11 @@ parserdefault: /* error ... attempt to resume parsing */ switch Errflag { case 0: /* brand new error */ - parserlex.Error("syntax error") + parserlex.Error(parserErrorMessage(parserstate, parsertoken)) Nerrs++ if parserDebug >= 1 { __yyfmt__.Printf("%s", parserStatname(parserstate)) - __yyfmt__.Printf(" saw %s\n", parserTokname(parserchar)) + __yyfmt__.Printf(" saw %s\n", parserTokname(parsertoken)) } fallthrough @@ -305,12 +415,13 @@ parserdefault: case 3: /* no shift yet; clobber input char */ if parserDebug >= 2 { - __yyfmt__.Printf("error recovery discards %s\n", parserTokname(parserchar)) + __yyfmt__.Printf("error recovery discards %s\n", parserTokname(parsertoken)) } - if parserchar == parserEofCode { + if parsertoken == parserEofCode { goto ret1 } parserchar = -1 + parsertoken = -1 goto parsernewstate /* try again in the same state */ } } @@ -325,6 +436,13 @@ parserdefault: _ = parserpt // guard against "declared and not used" parserp -= parserR2[parsern] + // parserp is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. + if parserp+1 >= len(parserS) { + nyys := make([]parserSymType, len(parserS)*2) + copy(nyys, parserS) + parserS = nyys + } parserVAL = parserS[parserp+1] /* consult goto table to find next state */ @@ -344,6 +462,7 @@ parserdefault: switch parsernt { case 1: + parserDollar = parserS[parserpt-0 : parserpt+1] //line lang.y:35 { parserResult = &ast.LiteralNode{ @@ -353,9 +472,10 @@ parserdefault: } } case 2: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:43 { - parserResult = parserS[parserpt-0].node + parserResult = parserDollar[1].node // We want to make sure that the top value is always a Concat // so that the return value is always a string type from an @@ -365,28 +485,30 @@ parserdefault: // because functionally the AST is the same, but we do that because // it makes for an easy literal check later (to check if a string // has any interpolations). 
- if _, ok := parserS[parserpt-0].node.(*ast.Concat); !ok { - if n, ok := parserS[parserpt-0].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString { + if _, ok := parserDollar[1].node.(*ast.Concat); !ok { + if n, ok := parserDollar[1].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString { parserResult = &ast.Concat{ - Exprs: []ast.Node{parserS[parserpt-0].node}, - Posx: parserS[parserpt-0].node.Pos(), + Exprs: []ast.Node{parserDollar[1].node}, + Posx: parserDollar[1].node.Pos(), } } } } case 3: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:66 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = parserDollar[1].node } case 4: + parserDollar = parserS[parserpt-2 : parserpt+1] //line lang.y:70 { var result []ast.Node - if c, ok := parserS[parserpt-1].node.(*ast.Concat); ok { - result = append(c.Exprs, parserS[parserpt-0].node) + if c, ok := parserDollar[1].node.(*ast.Concat); ok { + result = append(c.Exprs, parserDollar[2].node) } else { - result = []ast.Node{parserS[parserpt-1].node, parserS[parserpt-0].node} + result = []ast.Node{parserDollar[1].node, parserDollar[2].node} } parserVAL.node = &ast.Concat{ @@ -395,89 +517,103 @@ parserdefault: } } case 5: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:86 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = parserDollar[1].node } case 6: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:90 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = parserDollar[1].node } case 7: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:96 { - parserVAL.node = parserS[parserpt-1].node + parserVAL.node = parserDollar[2].node } case 8: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:102 { - parserVAL.node = parserS[parserpt-1].node + parserVAL.node = parserDollar[2].node } case 9: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:106 { - parserVAL.node = parserS[parserpt-0].node + parserVAL.node = parserDollar[1].node } case 10: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:110 { parserVAL.node = &ast.LiteralNode{ - Value: parserS[parserpt-0].token.Value.(int), + Value: parserDollar[1].token.Value.(int), Typex: ast.TypeInt, - Posx: parserS[parserpt-0].token.Pos, + Posx: parserDollar[1].token.Pos, } } case 11: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:118 { parserVAL.node = &ast.LiteralNode{ - Value: parserS[parserpt-0].token.Value.(float64), + Value: parserDollar[1].token.Value.(float64), Typex: ast.TypeFloat, - Posx: parserS[parserpt-0].token.Pos, + Posx: parserDollar[1].token.Pos, } } case 12: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:126 { parserVAL.node = &ast.Arithmetic{ - Op: parserS[parserpt-1].token.Value.(ast.ArithmeticOp), - Exprs: []ast.Node{parserS[parserpt-2].node, parserS[parserpt-0].node}, - Posx: parserS[parserpt-2].node.Pos(), + Op: parserDollar[2].token.Value.(ast.ArithmeticOp), + Exprs: []ast.Node{parserDollar[1].node, parserDollar[3].node}, + Posx: parserDollar[1].node.Pos(), } } case 13: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:134 { - parserVAL.node = &ast.VariableAccess{Name: parserS[parserpt-0].token.Value.(string), Posx: parserS[parserpt-0].token.Pos} + parserVAL.node = &ast.VariableAccess{Name: parserDollar[1].token.Value.(string), Posx: parserDollar[1].token.Pos} } case 14: + parserDollar = parserS[parserpt-4 : parserpt+1] //line lang.y:138 { - parserVAL.node = &ast.Call{Func: 
parserS[parserpt-3].token.Value.(string), Args: parserS[parserpt-1].nodeList, Posx: parserS[parserpt-3].token.Pos} + parserVAL.node = &ast.Call{Func: parserDollar[1].token.Value.(string), Args: parserDollar[3].nodeList, Posx: parserDollar[1].token.Pos} } case 15: + parserDollar = parserS[parserpt-0 : parserpt+1] //line lang.y:143 { parserVAL.nodeList = nil } case 16: + parserDollar = parserS[parserpt-3 : parserpt+1] //line lang.y:147 { - parserVAL.nodeList = append(parserS[parserpt-2].nodeList, parserS[parserpt-0].node) + parserVAL.nodeList = append(parserDollar[1].nodeList, parserDollar[3].node) } case 17: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:151 { - parserVAL.nodeList = append(parserVAL.nodeList, parserS[parserpt-0].node) + parserVAL.nodeList = append(parserVAL.nodeList, parserDollar[1].node) } case 18: + parserDollar = parserS[parserpt-1 : parserpt+1] //line lang.y:157 { parserVAL.node = &ast.LiteralNode{ - Value: parserS[parserpt-0].token.Value.(string), + Value: parserDollar[1].token.Value.(string), Typex: ast.TypeString, - Posx: parserS[parserpt-0].token.Pos, + Posx: parserDollar[1].token.Pos, } } } From 22ec52396adef10449fa55c81f3aada69fa008cb Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 28 Oct 2015 16:30:03 +0000 Subject: [PATCH 335/335] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bf27fb1c33..075cb0f442 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ BUG FIXES: * provider/google: Timeout when deleting large instance_group_manager [GH-3591] * provider/aws: Fix issue with order of Termincation Policies in AutoScaling Groups. This will introduce plans on upgrade to this version, in order to correct the ordering [GH-2890] + * provider/aws: Allow cluster name, not only ARN for `aws_ecs_service` [GH-3668] ## 0.6.6 (October 23, 2015)
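That final `aws_ecs_service` entry lands the change from patch 332: `cluster` may now be a bare name, and the provider normalizes whatever the API returns back into the user's chosen format via the generalized `getNameFromARN` helper. Reproduced here standalone for clarity; as the comment in the patch implies, it assumes the ARN contains a `/`, and a string without one would panic on the index.

```go
package main

import (
	"fmt"
	"strings"
)

// getNameFromARN, extracted from patch 332: it returns the segment after
// the "/" in an ARN, which fits both documented shapes (IAM role and ECS
// cluster). It assumes a "/" is present; otherwise the [1] index panics.
func getNameFromARN(arn string) string {
	return strings.Split(arn, "/")[1]
}

func main() {
	fmt.Println(getNameFromARN("arn:aws:iam::0123456789:role/EcsService"))                // EcsService
	fmt.Println(getNameFromARN("arn:aws:ecs:us-west-2:0123456789:cluster/radek-cluster")) // radek-cluster
}
```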