From 87cde8834ec176e5b1a86bec65853a345bd56890 Mon Sep 17 00:00:00 2001 From: Jean Mertz Date: Sun, 3 May 2015 16:00:00 +0200 Subject: [PATCH 001/190] OpenStack: add functionality to attach FloatingIP to Port --- ...urce_openstack_networking_floatingip_v2.go | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go b/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go index 1b81c6a96e..37f1ca7cfe 100644 --- a/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go +++ b/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go @@ -14,6 +14,7 @@ func resourceNetworkingFloatingIPV2() *schema.Resource { return &schema.Resource{ Create: resourceNetworkFloatingIPV2Create, Read: resourceNetworkFloatingIPV2Read, + Update: resourceNetworkFloatingIPV2Update, Delete: resourceNetworkFloatingIPV2Delete, Schema: map[string]*schema.Schema{ @@ -33,6 +34,11 @@ func resourceNetworkingFloatingIPV2() *schema.Resource { ForceNew: true, DefaultFunc: envDefaultFunc("OS_POOL_NAME"), }, + "port_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, }, } } @@ -53,6 +59,7 @@ func resourceNetworkFloatingIPV2Create(d *schema.ResourceData, meta interface{}) } createOpts := floatingips.CreateOpts{ FloatingNetworkID: poolID, + PortID: d.Get("port_id").(string), } log.Printf("[DEBUG] Create Options: %#v", createOpts) floatingIP, err := floatingips.Create(networkClient, createOpts).Extract() @@ -78,6 +85,7 @@ func resourceNetworkFloatingIPV2Read(d *schema.ResourceData, meta interface{}) e } d.Set("address", floatingIP.FloatingIP) + d.Set("port_id", floatingIP.PortID) poolName, err := getNetworkName(d, meta, floatingIP.FloatingNetworkID) if err != nil { return fmt.Errorf("Error retrieving floating IP pool name: %s", err) @@ -87,6 +95,29 @@ func resourceNetworkFloatingIPV2Read(d *schema.ResourceData, meta interface{}) e 
return nil } +func resourceNetworkFloatingIPV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + var updateOpts floatingips.UpdateOpts + + if d.HasChange("port_id") { + updateOpts.PortID = d.Get("port_id").(string) + } + + log.Printf("[DEBUG] Update Options: %#v", updateOpts) + + _, err = floatingips.Update(networkClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating floating IP: %s", err) + } + + return resourceNetworkFloatingIPV2Read(d, meta) +} + func resourceNetworkFloatingIPV2Delete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) networkClient, err := config.networkingV2Client(d.Get("region").(string)) From 739a411b4d867bf7036a38f3fd4feeca867cd939 Mon Sep 17 00:00:00 2001 From: Alexander Dupuy Date: Mon, 18 May 2015 21:45:50 +0200 Subject: [PATCH 002/190] debug security group ids --- .../resource_openstack_compute_instance_v2.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go index 44fca923f6..066fa637ff 100644 --- a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go +++ b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go @@ -557,13 +557,6 @@ func resourceComputeInstanceV2Update(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Security groups to remove: %v", secgroupsToRemove) - for _, g := range secgroupsToAdd.List() { - err := secgroups.AddServerToGroup(computeClient, d.Id(), g.(string)).ExtractErr() - if err != nil { - return fmt.Errorf("Error adding security group to OpenStack server (%s): %s", d.Id(), err) - } - log.Printf("[DEBUG] Added security group (%s) to 
instance (%s)", g.(string), d.Id()) - } for _, g := range secgroupsToRemove.List() { err := secgroups.RemoveServerFromGroup(computeClient, d.Id(), g.(string)).ExtractErr() @@ -581,6 +574,14 @@ func resourceComputeInstanceV2Update(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Removed security group (%s) from instance (%s)", g.(string), d.Id()) } } + for _, g := range secgroupsToAdd.List() { + err := secgroups.AddServerToGroup(computeClient, d.Id(), g.(string)).ExtractErr() + if err != nil { + return fmt.Errorf("Error adding security group to OpenStack server (%s): %s", d.Id(), err) + } + log.Printf("[DEBUG] Added security group (%s) to instance (%s)", g.(string), d.Id()) + } + } if d.HasChange("admin_pass") { From 07ad320960e3fd61e80f00b919dc1a5217d0d321 Mon Sep 17 00:00:00 2001 From: Aaron Welch Date: Sun, 31 May 2015 15:58:14 +0100 Subject: [PATCH 003/190] Packet bare metal cloud hosting platform provider --- builtin/bins/provider-packet/main.go | 12 + builtin/providers/packet/config.go | 18 ++ builtin/providers/packet/provider.go | 36 +++ builtin/providers/packet/provider_test.go | 35 ++ .../packet/resource_packet_device.go | 302 ++++++++++++++++++ .../packet/resource_packet_project.go | 123 +++++++ .../packet/resource_packet_project_test.go | 95 ++++++ .../packet/resource_packet_ssh_key.go | 128 ++++++++ .../packet/resource_packet_ssh_key_test.go | 104 ++++++ .../docs/providers/packet/index.html.markdown | 47 +++ .../providers/packet/r/device.html.markdown | 55 ++++ .../providers/packet/r/project.html.markdown | 40 +++ .../providers/packet/r/ssh_key.html.markdown | 43 +++ 13 files changed, 1038 insertions(+) create mode 100644 builtin/bins/provider-packet/main.go create mode 100644 builtin/providers/packet/config.go create mode 100644 builtin/providers/packet/provider.go create mode 100644 builtin/providers/packet/provider_test.go create mode 100644 builtin/providers/packet/resource_packet_device.go create mode 100644 
builtin/providers/packet/resource_packet_project.go create mode 100644 builtin/providers/packet/resource_packet_project_test.go create mode 100644 builtin/providers/packet/resource_packet_ssh_key.go create mode 100644 builtin/providers/packet/resource_packet_ssh_key_test.go create mode 100644 website/source/docs/providers/packet/index.html.markdown create mode 100644 website/source/docs/providers/packet/r/device.html.markdown create mode 100644 website/source/docs/providers/packet/r/project.html.markdown create mode 100644 website/source/docs/providers/packet/r/ssh_key.html.markdown diff --git a/builtin/bins/provider-packet/main.go b/builtin/bins/provider-packet/main.go new file mode 100644 index 0000000000..6d8198ef2b --- /dev/null +++ b/builtin/bins/provider-packet/main.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/hashicorp/terraform/builtin/providers/packet" + "github.com/hashicorp/terraform/plugin" +) + +func main() { + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: packet.Provider, + }) +} diff --git a/builtin/providers/packet/config.go b/builtin/providers/packet/config.go new file mode 100644 index 0000000000..659ee9ebc8 --- /dev/null +++ b/builtin/providers/packet/config.go @@ -0,0 +1,18 @@ +package packet + +import ( + "github.com/packethost/packngo" +) + +const ( + consumerToken = "aZ9GmqHTPtxevvFq9SK3Pi2yr9YCbRzduCSXF2SNem5sjB91mDq7Th3ZwTtRqMWZ" +) + +type Config struct { + AuthToken string +} + +// Client() returns a new client for accessing packet. +func (c *Config) Client() *packngo.Client { + return packngo.NewClient(consumerToken, c.AuthToken) +} diff --git a/builtin/providers/packet/provider.go b/builtin/providers/packet/provider.go new file mode 100644 index 0000000000..c1efd6e838 --- /dev/null +++ b/builtin/providers/packet/provider.go @@ -0,0 +1,36 @@ +package packet + +import ( + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +// Provider returns a schema.Provider for Packet. 
+func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "auth_token": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("PACKET_AUTH_TOKEN", nil), + Description: "The API auth key for API operations.", + }, + }, + + ResourcesMap: map[string]*schema.Resource{ + "packet_device": resourcePacketDevice(), + "packet_ssh_key": resourcePacketSSHKey(), + "packet_project": resourcePacketProject(), + }, + + ConfigureFunc: providerConfigure, + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + AuthToken: d.Get("auth_token").(string), + } + + return config.Client(), nil +} diff --git a/builtin/providers/packet/provider_test.go b/builtin/providers/packet/provider_test.go new file mode 100644 index 0000000000..5483c4fb08 --- /dev/null +++ b/builtin/providers/packet/provider_test.go @@ -0,0 +1,35 @@ +package packet + +import ( + "os" + "testing" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +var testAccProviders map[string]terraform.ResourceProvider +var testAccProvider *schema.Provider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "packet": testAccProvider, + } +} + +func TestProvider(t *testing.T) { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + if v := os.Getenv("PACKET_AUTH_TOKEN"); v == "" { + t.Fatal("PACKET_AUTH_TOKEN must be set for acceptance tests") + } +} diff --git a/builtin/providers/packet/resource_packet_device.go b/builtin/providers/packet/resource_packet_device.go new file mode 100644 index 0000000000..56fc7afe55 --- /dev/null +++ b/builtin/providers/packet/resource_packet_device.go @@ 
-0,0 +1,302 @@ +package packet + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketDevice() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketDeviceCreate, + Read: resourcePacketDeviceRead, + Update: resourcePacketDeviceUpdate, + Delete: resourcePacketDeviceDelete, + + Schema: map[string]*schema.Schema{ + "project_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "hostname": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "operating_system": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "facility": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "plan": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "billing_cycle": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "locked": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + }, + + "network": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "gateway": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "family": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + + "cidr": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + + "public": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + + "created": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "updated": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "user_data": &schema.Schema{ + Type: 
schema.TypeString, + Optional: true, + }, + + "tags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourcePacketDeviceCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + createRequest := &packngo.DeviceCreateRequest{ + HostName: d.Get("hostname").(string), + Plan: d.Get("plan").(string), + Facility: d.Get("facility").(string), + OS: d.Get("operating_system").(string), + BillingCycle: d.Get("billing_cycle").(string), + ProjectID: d.Get("project_id").(string), + } + + if attr, ok := d.GetOk("user_data"); ok { + createRequest.UserData = attr.(string) + } + + tags := d.Get("tags.#").(int) + if tags > 0 { + createRequest.Tags = make([]string, 0, tags) + for i := 0; i < tags; i++ { + key := fmt.Sprintf("tags.%d", i) + createRequest.Tags = append(createRequest.Tags, d.Get(key).(string)) + } + } + + log.Printf("[DEBUG] Device create configuration: %#v", createRequest) + + newDevice, _, err := client.Devices.Create(createRequest) + if err != nil { + return fmt.Errorf("Error creating device: %s", err) + } + + // Assign the device id + d.SetId(newDevice.ID) + + log.Printf("[INFO] Device ID: %s", d.Id()) + + _, err = WaitForDeviceAttribute(d, "active", []string{"provisioning"}, "state", meta) + if err != nil { + return fmt.Errorf( + "Error waiting for device (%s) to become ready: %s", d.Id(), err) + } + + return resourcePacketDeviceRead(d, meta) +} + +func resourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + // Retrieve the device properties for updating the state + device, _, err := client.Devices.Get(d.Id()) + if err != nil { + return fmt.Errorf("Error retrieving device: %s", err) + } + + d.Set("name", device.Hostname) + d.Set("plan", device.Plan.Slug) + d.Set("facility", device.Facility.Code) + d.Set("operating_system", device.OS.Slug) + d.Set("state", device.State) + 
d.Set("billing_cycle", device.BillingCycle) + d.Set("locked", device.Locked) + d.Set("created", device.Created) + d.Set("udpated", device.Updated) + + tags := make([]string, 0) + for _, tag := range device.Tags { + tags = append(tags, tag) + } + d.Set("tags", tags) + + networks := make([]map[string]interface{}, 0, 1) + for _, ip := range device.Network { + network := make(map[string]interface{}) + network["address"] = ip.Address + network["gateway"] = ip.Gateway + network["family"] = ip.Family + network["cidr"] = ip.Cidr + network["public"] = ip.Public + networks = append(networks, network) + } + d.Set("network", networks) + + return nil +} + +func resourcePacketDeviceUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + if d.HasChange("locked") && d.Get("locked").(bool) { + _, err := client.Devices.Lock(d.Id()) + + if err != nil { + return fmt.Errorf( + "Error locking device (%s): %s", d.Id(), err) + } + } else if d.HasChange("locked") { + _, err := client.Devices.Unlock(d.Id()) + + if err != nil { + return fmt.Errorf( + "Error unlocking device (%s): %s", d.Id(), err) + } + } + + return resourcePacketDeviceRead(d, meta) +} + +func resourcePacketDeviceDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + log.Printf("[INFO] Deleting device: %s", d.Id()) + if _, err := client.Devices.Delete(d.Id()); err != nil { + return fmt.Errorf("Error deleting device: %s", err) + } + + return nil +} + +func WaitForDeviceAttribute( + d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) { + // Wait for the device so we can get the networking attributes + // that show up after a while + log.Printf( + "[INFO] Waiting for device (%s) to have %s of %s", + d.Id(), attribute, target) + + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: target, + Refresh: newDeviceStateRefreshFunc(d, attribute, meta), + Timeout: 60 * 
time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + return stateConf.WaitForState() +} + +func newDeviceStateRefreshFunc( + d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc { + client := meta.(*packngo.Client) + return func() (interface{}, string, error) { + err := resourcePacketDeviceRead(d, meta) + if err != nil { + return nil, "", err + } + + // See if we can access our attribute + if attr, ok := d.GetOk(attribute); ok { + // Retrieve the device properties + device, _, err := client.Devices.Get(d.Id()) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving device: %s", err) + } + + return &device, attr.(string), nil + } + + return nil, "", nil + } +} + +// Powers on the device and waits for it to be active +func powerOnAndWait(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + _, err := client.Devices.PowerOn(d.Id()) + if err != nil { + return err + } + + // Wait for power on + _, err = WaitForDeviceAttribute(d, "active", []string{"off"}, "state", client) + if err != nil { + return err + } + + return nil +} diff --git a/builtin/providers/packet/resource_packet_project.go b/builtin/providers/packet/resource_packet_project.go new file mode 100644 index 0000000000..e41ef1381a --- /dev/null +++ b/builtin/providers/packet/resource_packet_project.go @@ -0,0 +1,123 @@ +package packet + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketProject() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketProjectCreate, + Read: resourcePacketProjectRead, + Update: resourcePacketProjectUpdate, + Delete: resourcePacketProjectDelete, + + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "payment_method": &schema.Schema{ + 
Type: schema.TypeString, + Optional: true, + }, + + "created": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "updated": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourcePacketProjectCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + createRequest := &packngo.ProjectCreateRequest{ + Name: d.Get("name").(string), + PaymentMethod: d.Get("payment_method").(string), + } + + log.Printf("[DEBUG] Project create configuration: %#v", createRequest) + project, _, err := client.Projects.Create(createRequest) + if err != nil { + return fmt.Errorf("Error creating Project: %s", err) + } + + d.SetId(project.ID) + log.Printf("[INFO] Project created: %s", project.ID) + + return resourcePacketProjectRead(d, meta) +} + +func resourcePacketProjectRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + key, _, err := client.Projects.Get(d.Id()) + if err != nil { + // If the project somehow already destroyed, mark as + // succesfully gone + if strings.Contains(err.Error(), "404") { + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving Project: %s", err) + } + + d.Set("id", key.ID) + d.Set("name", key.Name) + d.Set("created", key.Created) + d.Set("updated", key.Updated) + + return nil +} + +func resourcePacketProjectUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + updateRequest := &packngo.ProjectUpdateRequest{ + ID: d.Get("id").(string), + Name: d.Get("name").(string), + } + + if attr, ok := d.GetOk("payment_method"); ok { + updateRequest.PaymentMethod = attr.(string) + } + + log.Printf("[DEBUG] Project update: %#v", d.Get("id")) + _, _, err := client.Projects.Update(updateRequest) + if err != nil { + return fmt.Errorf("Failed to update Project: %s", err) + } + + return resourcePacketProjectRead(d, meta) +} + +func resourcePacketProjectDelete(d *schema.ResourceData, meta 
interface{}) error { + client := meta.(*packngo.Client) + + log.Printf("[INFO] Deleting Project: %s", d.Id()) + _, err := client.Projects.Delete(d.Id()) + if err != nil { + return fmt.Errorf("Error deleting SSH key: %s", err) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/packet/resource_packet_project_test.go b/builtin/providers/packet/resource_packet_project_test.go new file mode 100644 index 0000000000..b0179cfbec --- /dev/null +++ b/builtin/providers/packet/resource_packet_project_test.go @@ -0,0 +1,95 @@ +package packet + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/packethost/packngo" +) + +func TestAccPacketProject_Basic(t *testing.T) { + var project packngo.Project + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckPacketProjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckPacketProjectConfig_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckPacketProjectExists("packet_project.foobar", &project), + testAccCheckPacketProjectAttributes(&project), + resource.TestCheckResourceAttr( + "packet_project.foobar", "name", "foobar"), + ), + }, + }, + }) +} + +func testAccCheckPacketProjectDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*packngo.Client) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "packet_project" { + continue + } + + _, _, err := client.Projects.Get(rs.Primary.ID) + + if err == nil { + fmt.Errorf("Project cstill exists") + } + } + + return nil +} + +func testAccCheckPacketProjectAttributes(project *packngo.Project) resource.TestCheckFunc { + return func(s *terraform.State) error { + + if project.Name != "foobar" { + return fmt.Errorf("Bad name: %s", project.Name) + } + + return nil + } +} + +func testAccCheckPacketProjectExists(n string, project 
*packngo.Project) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Record ID is set") + } + + client := testAccProvider.Meta().(*packngo.Client) + + foundProject, _, err := client.Projects.Get(rs.Primary.ID) + + if err != nil { + return err + } + + if foundProject.ID != rs.Primary.ID { + return fmt.Errorf("Record not found: %v - %v", rs.Primary.ID, foundProject) + } + + *project = *foundProject + + return nil + } +} + +var testAccCheckPacketProjectConfig_basic = fmt.Sprintf(` +resource "packet_project" "foobar" { + name = "foobar" +}`) diff --git a/builtin/providers/packet/resource_packet_ssh_key.go b/builtin/providers/packet/resource_packet_ssh_key.go new file mode 100644 index 0000000000..95e04bd8ca --- /dev/null +++ b/builtin/providers/packet/resource_packet_ssh_key.go @@ -0,0 +1,128 @@ +package packet + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/packethost/packngo" +) + +func resourcePacketSSHKey() *schema.Resource { + return &schema.Resource{ + Create: resourcePacketSSHKeyCreate, + Read: resourcePacketSSHKeyRead, + Update: resourcePacketSSHKeyUpdate, + Delete: resourcePacketSSHKeyDelete, + + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "public_key": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "created": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "updated": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourcePacketSSHKeyCreate(d *schema.ResourceData, meta interface{}) error { + client := 
meta.(*packngo.Client) + + createRequest := &packngo.SSHKeyCreateRequest{ + Label: d.Get("name").(string), + Key: d.Get("public_key").(string), + } + + log.Printf("[DEBUG] SSH Key create configuration: %#v", createRequest) + key, _, err := client.SSHKeys.Create(createRequest) + if err != nil { + return fmt.Errorf("Error creating SSH Key: %s", err) + } + + d.SetId(key.ID) + log.Printf("[INFO] SSH Key: %s", key.ID) + + return resourcePacketSSHKeyRead(d, meta) +} + +func resourcePacketSSHKeyRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + key, _, err := client.SSHKeys.Get(d.Id()) + if err != nil { + // If the key is somehow already destroyed, mark as + // succesfully gone + if strings.Contains(err.Error(), "404") { + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving SSH key: %s", err) + } + + d.Set("id", key.ID) + d.Set("name", key.Label) + d.Set("public_key", key.Key) + d.Set("fingerprint", key.FingerPrint) + d.Set("created", key.Created) + d.Set("updated", key.Updated) + + return nil +} + +func resourcePacketSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + updateRequest := &packngo.SSHKeyUpdateRequest{ + ID: d.Get("id").(string), + Label: d.Get("name").(string), + Key: d.Get("public_key").(string), + } + + log.Printf("[DEBUG] SSH key update: %#v", d.Get("id")) + _, _, err := client.SSHKeys.Update(updateRequest) + if err != nil { + return fmt.Errorf("Failed to update SSH key: %s", err) + } + + return resourcePacketSSHKeyRead(d, meta) +} + +func resourcePacketSSHKeyDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*packngo.Client) + + log.Printf("[INFO] Deleting SSH key: %s", d.Id()) + _, err := client.SSHKeys.Delete(d.Id()) + if err != nil { + return fmt.Errorf("Error deleting SSH key: %s", err) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/packet/resource_packet_ssh_key_test.go 
b/builtin/providers/packet/resource_packet_ssh_key_test.go new file mode 100644 index 0000000000..765086d4fa --- /dev/null +++ b/builtin/providers/packet/resource_packet_ssh_key_test.go @@ -0,0 +1,104 @@ +package packet + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/packethost/packngo" +) + +func TestAccPacketSSHKey_Basic(t *testing.T) { + var key packngo.SSHKey + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckPacketSSHKeyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckPacketSSHKeyConfig_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckPacketSSHKeyExists("packet_ssh_key.foobar", &key), + testAccCheckPacketSSHKeyAttributes(&key), + resource.TestCheckResourceAttr( + "packet_ssh_key.foobar", "name", "foobar"), + resource.TestCheckResourceAttr( + "packet_ssh_key.foobar", "public_key", testAccValidPublicKey), + ), + }, + }, + }) +} + +func testAccCheckPacketSSHKeyDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*packngo.Client) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "packet_ssh_key" { + continue + } + + _, _, err := client.SSHKeys.Get(rs.Primary.ID) + + if err == nil { + fmt.Errorf("SSH key still exists") + } + } + + return nil +} + +func testAccCheckPacketSSHKeyAttributes(key *packngo.SSHKey) resource.TestCheckFunc { + return func(s *terraform.State) error { + + if key.Label != "foobar" { + return fmt.Errorf("Bad name: %s", key.Label) + } + + return nil + } +} + +func testAccCheckPacketSSHKeyExists(n string, key *packngo.SSHKey) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Record ID is set") + } + + client 
:= testAccProvider.Meta().(*packngo.Client) + + foundKey, _, err := client.SSHKeys.Get(rs.Primary.ID) + + if err != nil { + return err + } + + if foundKey.ID != rs.Primary.ID { + return fmt.Errorf("SSh Key not found: %v - %v", rs.Primary.ID, foundKey) + } + + *key = *foundKey + + fmt.Printf("key: %v", key) + return nil + } +} + +var testAccCheckPacketSSHKeyConfig_basic = fmt.Sprintf(` +resource "packet_ssh_key" "foobar" { + name = "foobar" + public_key = "%s" +}`, testAccValidPublicKey) + +var testAccValidPublicKey = strings.TrimSpace(` +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVERRN7/9484SOBJ3HSKxxNG5JN8owAjy5f9yYwcUg+JaUVuytn5Pv3aeYROHGGg+5G346xaq3DAwX6Y5ykr2fvjObgncQBnuU5KHWCECO/4h8uWuwh/kfniXPVjFToc+gnkqA+3RKpAecZhFXwfalQ9mMuYGFxn+fwn8cYEApsJbsEmb0iJwPiZ5hjFC8wREuiTlhPHDgkBLOiycd20op2nXzDbHfCHInquEe/gYxEitALONxm0swBOwJZwlTDOB7C6y2dzlrtxr1L59m7pCkWI4EtTRLvleehBoj3u7jB4usR +`) diff --git a/website/source/docs/providers/packet/index.html.markdown b/website/source/docs/providers/packet/index.html.markdown new file mode 100644 index 0000000000..bbe9f5d1ea --- /dev/null +++ b/website/source/docs/providers/packet/index.html.markdown @@ -0,0 +1,47 @@ +--- +layout: "packet" +page_title: "Provider: Packet" +sidebar_current: "docs-packet-index" +description: |- + The Packet provider is used to interact with the resources supported by Packet. The provider needs to be configured with the proper credentials before it can be used. +--- + +# Packet Provider + +The Packet provider is used to interact with the resources supported by Packet. +The provider needs to be configured with the proper credentials before it can be used. + +Use the navigation to the left to read about the available resources. 
+ +## Example Usage + +``` +# Configure the Packet Provider +provider "packet" { + auth_token = "${var.auth_token}" +} + +# Create a project +resource "packet_project" "tf_project_1" { + name = "My First Terraform Project" + payment_method = "PAYMENT_METHOD_ID" +} + +# Create a device and add it to tf_project_1 +resource "packet_device" "web1" { + hostname = "tf.coreos2" + plan = "baremetal_1" + facility = "ewr1" + operating_system = "coreos_stable" + billing_cycle = "hourly" + project_id = "${packet_project.tf_project_1.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `auth_token` - (Required) This is your Packet API Auth token. This can also be specified + with the `PACKET_AUTH_TOKEN` shell environment variable. + diff --git a/website/source/docs/providers/packet/r/device.html.markdown b/website/source/docs/providers/packet/r/device.html.markdown new file mode 100644 index 0000000000..6d57dcbb51 --- /dev/null +++ b/website/source/docs/providers/packet/r/device.html.markdown @@ -0,0 +1,55 @@ +--- +layout: "packet" +page_title: "Packet: packet_device" +sidebar_current: "docs-packet-resource-device" +description: |- + Provides a Packet device resource. This can be used to create, modify, and delete devices. +--- + +# packet\_device + +Provides a Packet device resource. This can be used to create, +modify, and delete devices. 
+ +## Example Usage + +``` +# Create a device and add it to tf_project_1 +resource "packet_device" "web1" { + hostname = "tf.coreos2" + plan = "baremetal_1" + facility = "ewr1" + operating_system = "coreos_stable" + billing_cycle = "hourly" + project_id = "${packet_project.tf_project_1.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `hostname` - (Required) The device name +* `project_id` - (Required) The id of the project in which to create the device +* `operating_system` - (Required) The operating system slug +* `facility` - (Required) The facility in which to create the device +* `plan` - (Required) The config type slug +* `billing_cycle` - (Required) monthly or hourly +* `user_data` (Optional) - A string of the desired User Data for the device. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the device +* `hostname`- The hostname of the device +* `project_id`- The Id of the project the device belonds to +* `facility` - The facility the device is in +* `plan` - The config type of the device +* `network` - The private and public v4 and v6 IPs assigned to the device +* `locked` - Is the device locked +* `billing_cycle` - The billing cycle of the device (monthly or hourly) +* `operating_system` - The operating system running on the device +* `status` - The status of the device +* `created` - The timestamp for when the device was created +* `updated` - The timestamp for the last time the device was udpated diff --git a/website/source/docs/providers/packet/r/project.html.markdown b/website/source/docs/providers/packet/r/project.html.markdown new file mode 100644 index 0000000000..d17190eec5 --- /dev/null +++ b/website/source/docs/providers/packet/r/project.html.markdown @@ -0,0 +1,40 @@ +--- +layout: "packet" +page_title: "Packet: packet_ssh_key" +sidebar_current: "docs-packet-resource-project" +description: |- + Provides a Packet Project resource. 
+--- + +# packet\_project + +Provides a Packet Project resource to allow you to manage devices +in your projects. + +## Example Usage + +``` +# Create a new Project +resource "packet_project" "tf_project_1" { + name = "Terraform Fun" + payment_method = "payment-method-id" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the project +* `payment_method` - (Required) The id of the payment method on file to use for services created +on this project. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The unique ID of the project +* `payment_method` - The id of the payment method on file to use for services created +on this project. +* `created` - The timestamp for when the project was created +* `updated` - The timestamp for the last time the project was updated diff --git a/website/source/docs/providers/packet/r/ssh_key.html.markdown b/website/source/docs/providers/packet/r/ssh_key.html.markdown new file mode 100644 index 0000000000..cb27aaa774 --- /dev/null +++ b/website/source/docs/providers/packet/r/ssh_key.html.markdown @@ -0,0 +1,43 @@ +--- +layout: "packet" +page_title: "Packet: packet_ssh_key" +sidebar_current: "docs-packet-resource-ssh-key" +description: |- + Provides a Packet SSH key resource. +--- + +# packet\_ssh_key + +Provides a Packet SSH key resource to allow you to manage SSH +keys on your account. All ssh keys on your account are loaded on +all new devices, they do not have to be explicitly declared on +device creation. + +## Example Usage + +``` +# Create a new SSH key +resource "packet_ssh_key" "key1" { + name = "terraform-1" + public_key = "${file("/home/terraform/.ssh/id_rsa.pub")}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the SSH key for identification +* `public_key` - (Required) The public key. 
If this is a file, it +can be read using the file interpolation function. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The unique ID of the key +* `name` - The name of the SSH key +* `public_key` - The text of the public key +* `fingerprint` - The fingerprint of the SSH key +* `created` - The timestamp for when the SSH key was created +* `updated` - The timestamp for the last time the SSH key was updated From 09e336a80a6afac5a8c998a704c59db54d87259f Mon Sep 17 00:00:00 2001 From: Matti Savolainen Date: Fri, 3 Jul 2015 12:58:05 +0300 Subject: [PATCH 004/190] Fix Repository attribute in docker client PullOptions for private registries. --- .../docker/resource_docker_image_funcs.go | 4 +-- .../docker/resource_docker_image_test.go | 28 +++++++++++++++++-- 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/builtin/providers/docker/resource_docker_image_funcs.go b/builtin/providers/docker/resource_docker_image_funcs.go index f45dd22264..454113c5fd 100644 --- a/builtin/providers/docker/resource_docker_image_funcs.go +++ b/builtin/providers/docker/resource_docker_image_funcs.go @@ -83,7 +83,7 @@ func pullImage(data *Data, client *dc.Client, image string) error { splitPortRepo := strings.Split(splitImageName[1], "/") pullOpts.Registry = splitImageName[0] + ":" + splitPortRepo[0] pullOpts.Tag = splitImageName[2] - pullOpts.Repository = strings.Join(splitPortRepo[1:], "/") + pullOpts.Repository = pullOpts.Registry + "/" + strings.Join(splitPortRepo[1:], "/") // It's either registry:port/username/repo, registry:port/repo, // or repo:tag with default registry @@ -98,7 +98,7 @@ func pullImage(data *Data, client *dc.Client, image string) error { // registry:port/username/repo or registry:port/repo default: pullOpts.Registry = splitImageName[0] + ":" + splitPortRepo[0] - pullOpts.Repository = strings.Join(splitPortRepo[1:], "/") + pullOpts.Repository = pullOpts.Registry + "/" + strings.Join(splitPortRepo[1:], "/") pullOpts.Tag = 
"latest" } diff --git a/builtin/providers/docker/resource_docker_image_test.go b/builtin/providers/docker/resource_docker_image_test.go index 14dfb29b7c..844b56329e 100644 --- a/builtin/providers/docker/resource_docker_image_test.go +++ b/builtin/providers/docker/resource_docker_image_test.go @@ -1,9 +1,8 @@ package docker import ( - "testing" - "github.com/hashicorp/terraform/helper/resource" + "testing" ) func TestAccDockerImage_basic(t *testing.T) { @@ -24,9 +23,34 @@ func TestAccDockerImage_basic(t *testing.T) { }) } +func TestAddDockerImage_private(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAddDockerPrivateImageConfig, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "docker_image.foobar", + "latest", + "2c40b0526b6358710fd09e7b8c022429268cc61703b4777e528ac9d469a07ca1"), + ), + }, + }, + }) +} + const testAccDockerImageConfig = ` resource "docker_image" "foo" { name = "ubuntu:trusty-20150320" keep_updated = true } ` + +const testAddDockerPrivateImageConfig = ` +resource "docker_image" "foobar" { + name = "gcr.io:443/google_containers/pause:0.8.0" + keep_updated = true +} +` From b928777cace13e4e1cc322e5688708f3a26d4e5b Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 11 Aug 2015 11:36:56 -0500 Subject: [PATCH 005/190] core: don't error on computed value during input walk fixes #2987 --- terraform/context_input_test.go | 59 +++++++++++++++++++ terraform/interpolate.go | 16 ++++- .../child/main.tf | 5 ++ .../input-var-partially-computed/main.tf | 7 +++ 4 files changed, 85 insertions(+), 2 deletions(-) create mode 100644 terraform/test-fixtures/input-var-partially-computed/child/main.tf create mode 100644 terraform/test-fixtures/input-var-partially-computed/main.tf diff --git a/terraform/context_input_test.go b/terraform/context_input_test.go index 155f4c72fa..404ef0ffc4 100644 --- 
a/terraform/context_input_test.go +++ b/terraform/context_input_test.go @@ -510,3 +510,62 @@ aws_instance.foo: t.Fatalf("expected: \n%s\ngot: \n%s\n", expectedStr, actualStr) } } + +func TestContext2Input_varPartiallyComputed(t *testing.T) { + input := new(MockUIInput) + m := testModule(t, "input-var-partially-computed") + p := testProvider("aws") + p.ApplyFn = testApplyFn + p.DiffFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + Variables: map[string]string{ + "foo": "foovalue", + }, + UIInput: input, + State: &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "aws_instance.foo": &ResourceState{ + Type: "aws_instance", + Primary: &InstanceState{ + ID: "i-abc123", + Attributes: map[string]string{ + "id": "i-abc123", + }, + }, + }, + }, + }, + &ModuleState{ + Path: append(rootModulePath, "child"), + Resources: map[string]*ResourceState{ + "aws_instance.mod": &ResourceState{ + Type: "aws_instance", + Primary: &InstanceState{ + ID: "i-bcd345", + Attributes: map[string]string{ + "id": "i-bcd345", + "value": "one,i-abc123", + }, + }, + }, + }, + }, + }, + }, + }) + + if err := ctx.Input(InputModeStd); err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := ctx.Plan(); err != nil { + t.Fatalf("err: %s", err) + } +} diff --git a/terraform/interpolate.go b/terraform/interpolate.go index 6d103cd802..0197c0e422 100644 --- a/terraform/interpolate.go +++ b/terraform/interpolate.go @@ -370,7 +370,13 @@ MISSING: // be unknown. Instead, we return that the value is computed so // that the graph can continue to refresh other nodes. It doesn't // matter because the config isn't interpolated anyways. - if i.Operation == walkRefresh || i.Operation == walkPlanDestroy { + // + // For a Destroy, we're also fine with computed values, since our goal is + // only to get destroy nodes for existing resources. 
+ // + // For an input walk, computed values are okay to return because we're only + // looking for missing variables to prompt the user for. + if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput { return config.UnknownVariableValue, nil } @@ -446,7 +452,13 @@ func (i *Interpolater) computeResourceMultiVariable( // be unknown. Instead, we return that the value is computed so // that the graph can continue to refresh other nodes. It doesn't // matter because the config isn't interpolated anyways. - if i.Operation == walkRefresh || i.Operation == walkPlanDestroy { + // + // For a Destroy, we're also fine with computed values, since our goal is + // only to get destroy nodes for existing resources. + // + // For an input walk, computed values are okay to return because we're only + // looking for missing variables to prompt the user for. + if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput { return config.UnknownVariableValue, nil } diff --git a/terraform/test-fixtures/input-var-partially-computed/child/main.tf b/terraform/test-fixtures/input-var-partially-computed/child/main.tf new file mode 100644 index 0000000000..a11cc5e835 --- /dev/null +++ b/terraform/test-fixtures/input-var-partially-computed/child/main.tf @@ -0,0 +1,5 @@ +variable "in" {} + +resource "aws_instance" "mod" { + value = "${var.in}" +} diff --git a/terraform/test-fixtures/input-var-partially-computed/main.tf b/terraform/test-fixtures/input-var-partially-computed/main.tf new file mode 100644 index 0000000000..ada6f0cead --- /dev/null +++ b/terraform/test-fixtures/input-var-partially-computed/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { } +resource "aws_instance" "bar" { } + +module "child" { + source = "./child" + in = "one,${aws_instance.foo.id},${aws_instance.bar.id}" +} From f6d69164e84a0e1efd396025f79e3743d28a0034 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Thu, 27 Aug 2015 17:13:59 +0200 
Subject: [PATCH 006/190] examples: add OpenStack configuration with networking --- examples/openstack-with-networking/README.md | 63 +++++++++++++++ examples/openstack-with-networking/main.tf | 79 +++++++++++++++++++ .../openstack-with-networking/openrc.sample | 7 ++ examples/openstack-with-networking/outputs.tf | 3 + .../openstack-with-networking/variables.tf | 22 ++++++ 5 files changed, 174 insertions(+) create mode 100644 examples/openstack-with-networking/README.md create mode 100644 examples/openstack-with-networking/main.tf create mode 100644 examples/openstack-with-networking/openrc.sample create mode 100644 examples/openstack-with-networking/outputs.tf create mode 100644 examples/openstack-with-networking/variables.tf diff --git a/examples/openstack-with-networking/README.md b/examples/openstack-with-networking/README.md new file mode 100644 index 0000000000..2f9d381ca3 --- /dev/null +++ b/examples/openstack-with-networking/README.md @@ -0,0 +1,63 @@ +# Basic OpenStack architecture with networking + +This provides a template for running a simple architecture on an OpenStack +cloud. + +To simplify the example, this intentionally ignores deploying and +getting your application onto the servers. However, you could do so either via +[provisioners](https://www.terraform.io/docs/provisioners/) and a configuration +management tool, or by pre-baking configured images with +[Packer](http://www.packer.io). + +After you run `terraform apply` on this configuration, it will output the +floating IP address assigned to the instance. After your instance started, +this should respond with the default nginx web page. + +First set the required environment variables for the OpenStack provider by +sourcing the [credentials file](http://docs.openstack.org/cli-reference/content/cli_openrc.html). 
+ +``` +source openrc +``` + +Afterwards run with a command like this: + +``` +terraform apply \ + -var 'external_gateway=c1901f39-f76e-498a-9547-c29ba45f64df' \ + -var 'pool=public' +``` + +To get a list of usable floating IP pools run this command: + +``` +$ nova floating-ip-pool-list ++--------+ +| name | ++--------+ +| public | ++--------+ +``` + +To get the UUID of the external gateway run this command: + +``` +$ neutron net-show FLOATING_IP_POOL ++---------------------------+--------------------------------------+ +| Field | Value | ++---------------------------+--------------------------------------+ +| admin_state_up | True | +| id | c1901f39-f76e-498a-9547-c29ba45f64df | +| mtu | 0 | +| name | public | +| port_security_enabled | True | +| provider:network_type | vxlan | +| provider:physical_network | | +| provider:segmentation_id | 1092 | +| router:external | True | +| shared | False | +| status | ACTIVE | +| subnets | 42b672ae-8d51-4a18-a028-ddae7859ec4c | +| tenant_id | 1bde0a49d2ff44ffb44e6339a8cefe3a | ++---------------------------+--------------------------------------+ +``` diff --git a/examples/openstack-with-networking/main.tf b/examples/openstack-with-networking/main.tf new file mode 100644 index 0000000000..d579252632 --- /dev/null +++ b/examples/openstack-with-networking/main.tf @@ -0,0 +1,79 @@ +resource "openstack_compute_keypair_v2" "terraform" { + name = "terraform" + public_key = "${file("${var.ssh_key_file}.pub")}" +} + +resource "openstack_networking_network_v2" "terraform" { + name = "terraform" + admin_state_up = "true" +} + +resource "openstack_networking_subnet_v2" "terraform" { + name = "terraform" + network_id = "${openstack_networking_network_v2.terraform.id}" + cidr = "10.0.0.0/24" + ip_version = 4 + dns_nameservers = ["8.8.8.8","8.8.4.4"] +} + +resource "openstack_networking_router_v2" "terraform" { + name = "terraform" + admin_state_up = "true" + external_gateway = "${var.external_gateway}" +} + +resource 
"openstack_networking_router_interface_v2" "terraform" { + router_id = "${openstack_networking_router_v2.terraform.id}" + subnet_id = "${openstack_networking_subnet_v2.terraform.id}" +} + +resource "openstack_compute_secgroup_v2" "terraform" { + name = "terraform" + description = "Security group for the Terraform example instances" + rule { + from_port = 22 + to_port = 22 + ip_protocol = "tcp" + cidr = "0.0.0.0/0" + } + rule { + from_port = 80 + to_port = 80 + ip_protocol = "tcp" + cidr = "0.0.0.0/0" + } + rule { + from_port = -1 + to_port = -1 + ip_protocol = "icmp" + cidr = "0.0.0.0/0" + } +} + +resource "openstack_compute_floatingip_v2" "terraform" { + pool = "${var.pool}" + depends_on = ["openstack_networking_router_interface_v2.terraform"] +} + +resource "openstack_compute_instance_v2" "terraform" { + name = "terraform" + image_name = "${var.image}" + flavor_name = "${var.flavor}" + key_pair = "${openstack_compute_keypair_v2.terraform.name}" + security_groups = [ "${openstack_compute_secgroup_v2.terraform.name}" ] + floating_ip = "${openstack_compute_floatingip_v2.terraform.address}" + network { + uuid = "${openstack_networking_network_v2.terraform.id}" + } + provisioner "remote-exec" { + connection { + user = "${var.ssh_user_name}" + key_file = "${var.ssh_key_file}" + } + inline = [ + "sudo apt-get -y update", + "sudo apt-get -y install nginx", + "sudo service nginx start" + ] + } +} diff --git a/examples/openstack-with-networking/openrc.sample b/examples/openstack-with-networking/openrc.sample new file mode 100644 index 0000000000..c9a38e0a13 --- /dev/null +++ b/examples/openstack-with-networking/openrc.sample @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +export OS_AUTH_URL=http://KEYSTONE.ENDPOINT.URL:5000/v2.0 +export OS_TENANT_NAME=YOUR_TENANT_NAME +export OS_USERNAME=YOUR_USERNAME +export OS_PASSWORD=YOUR_PASSWORD +export OS_REGION_NAME=YOUR_REGION_NAME diff --git a/examples/openstack-with-networking/outputs.tf b/examples/openstack-with-networking/outputs.tf 
new file mode 100644 index 0000000000..42f923fe28 --- /dev/null +++ b/examples/openstack-with-networking/outputs.tf @@ -0,0 +1,3 @@ +output "address" { + value = "${openstack_compute_floatingip_v2.terraform.address}" +} diff --git a/examples/openstack-with-networking/variables.tf b/examples/openstack-with-networking/variables.tf new file mode 100644 index 0000000000..3477cf67e9 --- /dev/null +++ b/examples/openstack-with-networking/variables.tf @@ -0,0 +1,22 @@ +variable "image" { + default = "Ubuntu 14.04" +} + +variable "flavor" { + default = "m1.small" +} + +variable "ssh_key_file" { + default = "~/.ssh/id_rsa.terraform" +} + +variable "ssh_user_name" { + default = "ubuntu" +} + +variable "external_gateway" { +} + +variable "pool" { + default = "public" +} From 5001bb078e06566d2f9e7dd438aaafa103a6c8d7 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 2 Sep 2015 14:44:12 +0100 Subject: [PATCH 007/190] provider/aws: Add new resource - aws_iam_saml_provider --- builtin/providers/aws/provider.go | 1 + .../aws/resource_aws_iam_saml_provider.go | 101 ++++++++++++++++++ 2 files changed, 102 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_iam_saml_provider.go diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index a5029b400a..9a00edffc3 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -187,6 +187,7 @@ func Provider() terraform.ResourceProvider { "aws_iam_policy_attachment": resourceAwsIamPolicyAttachment(), "aws_iam_role_policy": resourceAwsIamRolePolicy(), "aws_iam_role": resourceAwsIamRole(), + "aws_iam_saml_provider": resourceAwsIamSamlProvider(), "aws_iam_server_certificate": resourceAwsIAMServerCertificate(), "aws_iam_user_policy": resourceAwsIamUserPolicy(), "aws_iam_user": resourceAwsIamUser(), diff --git a/builtin/providers/aws/resource_aws_iam_saml_provider.go b/builtin/providers/aws/resource_aws_iam_saml_provider.go new file mode 100644 index 
0000000000..6a166d711e --- /dev/null +++ b/builtin/providers/aws/resource_aws_iam_saml_provider.go @@ -0,0 +1,101 @@ +package aws + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsIamSamlProvider() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsIamSamlProviderCreate, + Read: resourceAwsIamSamlProviderRead, + Update: resourceAwsIamSamlProviderUpdate, + Delete: resourceAwsIamSamlProviderDelete, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "valid_until": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "saml_metadata_document": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceAwsIamSamlProviderCreate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.CreateSAMLProviderInput{ + Name: aws.String(d.Get("name").(string)), + SAMLMetadataDocument: aws.String(d.Get("saml_metadata_document").(string)), + } + + out, err := iamconn.CreateSAMLProvider(input) + if err != nil { + return err + } + + d.SetId(*out.SAMLProviderArn) + + return resourceAwsIamSamlProviderRead(d, meta) +} + +func resourceAwsIamSamlProviderRead(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.GetSAMLProviderInput{ + SAMLProviderArn: aws.String(d.Id()), + } + out, err := iamconn.GetSAMLProvider(input) + if err != nil { + return err + } + + validUntil := out.ValidUntil.Format(time.RFC1123) + d.Set("valid_until", validUntil) + d.Set("saml_metadata_document", *out.SAMLMetadataDocument) + + return nil +} + +func resourceAwsIamSamlProviderUpdate(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + 
input := &iam.UpdateSAMLProviderInput{ + SAMLProviderArn: aws.String(d.Id()), + SAMLMetadataDocument: aws.String(d.Get("saml_metadata_document").(string)), + } + _, err := iamconn.UpdateSAMLProvider(input) + if err != nil { + return err + } + + return resourceAwsIamSamlProviderRead(d, meta) +} + +func resourceAwsIamSamlProviderDelete(d *schema.ResourceData, meta interface{}) error { + iamconn := meta.(*AWSClient).iamconn + + input := &iam.DeleteSAMLProviderInput{ + SAMLProviderArn: aws.String(d.Id()), + } + _, err := iamconn.DeleteSAMLProvider(input) + + return err +} From ac762e5503b1c0661329efec543614a3a0fe34a3 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 2 Sep 2015 20:01:36 +0100 Subject: [PATCH 008/190] provider/aws: Add docs for aws_iam_saml_provider --- .../aws/r/iam_saml_provider.html.markdown | 34 +++++++++++++++++++ website/source/layouts/aws.erb | 4 +++ 2 files changed, 38 insertions(+) create mode 100644 website/source/docs/providers/aws/r/iam_saml_provider.html.markdown diff --git a/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown b/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown new file mode 100644 index 0000000000..adba6d350d --- /dev/null +++ b/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown @@ -0,0 +1,34 @@ +--- +layout: "aws" +page_title: "AWS: aws_iam_saml_provider" +sidebar_current: "docs-aws-resource-iam-saml-provider" +description: |- + Provides an IAM SAML provider. +--- + +# aws\_iam\_saml\_provider + +Provides an IAM SAML provider. + +## Example Usage + +``` +resource "aws_iam_saml_provider" "default" { + name = "myprovider" + saml_metadata_document = "${file("saml-metadata.xml")}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the provider to create. +* `saml_metadata_document` - (Required) An XML document generated by an identity provider that supports SAML 2.0. 
+ +## Attributes Reference + +The following attributes are exported: + +* `arn` - The ARN assigned by AWS for this provider. +* `valid_until` - The expiration date and time for the SAML provider in RFC1123 format, e.g. `Mon, 02 Jan 2006 15:04:05 MST`. diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 2bbff22f4b..e07992b842 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -179,6 +179,10 @@ aws_iam_role_policy + > + aws_iam_saml_provider + + > aws_iam_server_certificate From 5d215c42db6aef7b7cf86bc3dee37c41fa8327a1 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 2 Sep 2015 20:02:00 +0100 Subject: [PATCH 009/190] provider/aws: Add acceptance test for aws_iam_saml_provider --- .../resource_aws_iam_saml_provider_test.go | 79 +++++++++++++++++++ .../test-fixtures/saml-metadata-modified.xml | 14 ++++ .../aws/test-fixtures/saml-metadata.xml | 14 ++++ 3 files changed, 107 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_iam_saml_provider_test.go create mode 100644 builtin/providers/aws/test-fixtures/saml-metadata-modified.xml create mode 100644 builtin/providers/aws/test-fixtures/saml-metadata.xml diff --git a/builtin/providers/aws/resource_aws_iam_saml_provider_test.go b/builtin/providers/aws/resource_aws_iam_saml_provider_test.go new file mode 100644 index 0000000000..63ed395883 --- /dev/null +++ b/builtin/providers/aws/resource_aws_iam_saml_provider_test.go @@ -0,0 +1,79 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSIAMSamlProvider_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIAMSamlProviderDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: 
testAccIAMSamlProviderConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckIAMSamlProvider("aws_iam_saml_provider.salesforce"), + ), + }, + resource.TestStep{ + Config: testAccIAMSamlProviderConfigUpdate, + Check: resource.ComposeTestCheckFunc( + testAccCheckIAMSamlProvider("aws_iam_saml_provider.salesforce"), + ), + }, + }, + }) +} + +func testAccCheckIAMSamlProviderDestroy(s *terraform.State) error { + if len(s.RootModule().Resources) > 0 { + return fmt.Errorf("Expected all resources to be gone, but found: %#v", s.RootModule().Resources) + } + + return nil +} + +func testAccCheckIAMSamlProvider(id string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[id] + if !ok { + return fmt.Errorf("Not Found: %s", id) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + iamconn := testAccProvider.Meta().(*AWSClient).iamconn + _, err := iamconn.GetSAMLProvider(&iam.GetSAMLProviderInput{ + SAMLProviderArn: aws.String(rs.Primary.ID), + }) + + if err != nil { + return err + } + + return nil + } +} + +const testAccIAMSamlProviderConfig = ` +resource "aws_iam_saml_provider" "salesforce" { + name = "tf-salesforce-test" + saml_metadata_document = "${file("./test-fixtures/saml-metadata.xml")}" +} +` + +const testAccIAMSamlProviderConfigUpdate = ` +resource "aws_iam_saml_provider" "salesforce" { + name = "tf-salesforce-test" + saml_metadata_document = "${file("./test-fixtures/saml-metadata-modified.xml")}" +} +` diff --git a/builtin/providers/aws/test-fixtures/saml-metadata-modified.xml b/builtin/providers/aws/test-fixtures/saml-metadata-modified.xml new file mode 100644 index 0000000000..aaca7afc0b --- /dev/null +++ b/builtin/providers/aws/test-fixtures/saml-metadata-modified.xml @@ -0,0 +1,14 @@ + + + + + + 
MIIErDCCA5SgAwIBAgIOAU+PT8RBAAAAAHxJXEcwDQYJKoZIhvcNAQELBQAwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0EwHhcNMTUwOTAyMTgyNjUzWhcNMTcwOTAyMTIwMDAwWjCBkDEoMCYGA1UEAwwfU2VsZlNpZ25lZENlcnRfMDJTZXAyMDE1XzE4MjY1MzEYMBYGA1UECwwPMDBEMjQwMDAwMDBwQW9BMRcwFQYDVQQKDA5TYWxlc2ZvcmNlLmNvbTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzELMAkGA1UECAwCQ0ExDDAKBgNVBAYTA1VTQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJp/wTRr9n1IWJpkRTjNpep47OKJrD2E6rGbJ18TG2RxtIz+zCn2JwH2aP3TULh0r0hhcg/pecv51RRcG7O19DBBaTQ5+KuoICQyKZy07/yDXSiZontTwkEYs06ssTwTHUcRXbcwTKv16L7omt0MjIhTTGfvtLOYiPwyvKvzAHg4eNuAcli0duVM78UIBORtdmy9C9ZcMh8yRJo5aPBq85wsE3JXU58ytyZzCHTBLH+2xFQrjYnUSEW+FOEEpI7o33MVdFBvWWg1R17HkWzcve4C30lqOHqvxBzyESZ/N1mMlmSt8gPFyB+mUXY99StJDJpnytbY8DwSzMQUo/sOVB0CAwEAAaOCAQAwgf0wHQYDVR0OBBYEFByu1EQqRQS0bYQBKS9K5qwKi+6IMA8GA1UdEwEB/wQFMAMBAf8wgcoGA1UdIwSBwjCBv4AUHK7URCpFBLRthAEpL0rmrAqL7oihgZakgZMwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0GCDgFPj0/EQQAAAAB8SVxHMA0GCSqGSIb3DQEBCwUAA4IBAQA9O5o1tC71qJnkq+ABPo4A1aFKZVT/07GcBX4/wetcbYySL4Q2nR9pMgfPYYS1j+P2E3viPsQwPIWDUBwFkNsjjX5DSGEkLAioVGKRwJshRSCSynMcsVZbQkfBUiZXqhM0wzvoa/ALvGD+aSSb1m+x7lEpDYNwQKWaUW2VYcHWv9wjujMyy7dlj8E/jqM71mw7ThNl6k4+3RQ802dMa14txm8pkF0vZgfpV3tkqhBqtjBAicVCaveqr3r3iGqjvyilBgdY+0NR8szqzm7CD/Bkb22+/IgM/mXQuL9KHD/WADlSGmYKmG3SSahmcZxznYCnzcRNN9LVuXlz5cbljmBj + + + + urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified + + + + diff --git a/builtin/providers/aws/test-fixtures/saml-metadata.xml b/builtin/providers/aws/test-fixtures/saml-metadata.xml new file mode 100644 index 0000000000..69e353b770 --- /dev/null +++ b/builtin/providers/aws/test-fixtures/saml-metadata.xml @@ -0,0 +1,14 @@ + + + + + + 
MIIErDCCA5SgAwIBAgIOAU+PT8RBAAAAAHxJXEcwDQYJKoZIhvcNAQELBQAwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0EwHhcNMTUwOTAyMTgyNjUzWhcNMTcwOTAyMTIwMDAwWjCBkDEoMCYGA1UEAwwfU2VsZlNpZ25lZENlcnRfMDJTZXAyMDE1XzE4MjY1MzEYMBYGA1UECwwPMDBEMjQwMDAwMDBwQW9BMRcwFQYDVQQKDA5TYWxlc2ZvcmNlLmNvbTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzELMAkGA1UECAwCQ0ExDDAKBgNVBAYTA1VTQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJp/wTRr9n1IWJpkRTjNpep47OKJrD2E6rGbJ18TG2RxtIz+zCn2JwH2aP3TULh0r0hhcg/pecv51RRcG7O19DBBaTQ5+KuoICQyKZy07/yDXSiZontTwkEYs06ssTwTHUcRXbcwTKv16L7omt0MjIhTTGfvtLOYiPwyvKvzAHg4eNuAcli0duVM78UIBORtdmy9C9ZcMh8yRJo5aPBq85wsE3JXU58ytyZzCHTBLH+2xFQrjYnUSEW+FOEEpI7o33MVdFBvWWg1R17HkWzcve4C30lqOHqvxBzyESZ/N1mMlmSt8gPFyB+mUXY99StJDJpnytbY8DwSzMQUo/sOVB0CAwEAAaOCAQAwgf0wHQYDVR0OBBYEFByu1EQqRQS0bYQBKS9K5qwKi+6IMA8GA1UdEwEB/wQFMAMBAf8wgcoGA1UdIwSBwjCBv4AUHK7URCpFBLRthAEpL0rmrAqL7oihgZakgZMwgZAxKDAmBgNVBAMMH1NlbGZTaWduZWRDZXJ0XzAyU2VwMjAxNV8xODI2NTMxGDAWBgNVBAsMDzAwRDI0MDAwMDAwcEFvQTEXMBUGA1UECgwOU2FsZXNmb3JjZS5jb20xFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xCzAJBgNVBAgMAkNBMQwwCgYDVQQGEwNVU0GCDgFPj0/EQQAAAAB8SVxHMA0GCSqGSIb3DQEBCwUAA4IBAQA9O5o1tC71qJnkq+ABPo4A1aFKZVT/07GcBX4/wetcbYySL4Q2nR9pMgfPYYS1j+P2E3viPsQwPIWDUBwFkNsjjX5DSGEkLAioVGKRwJshRSCSynMcsVZbQkfBUiZXqhM0wzvoa/ALvGD+aSSb1m+x7lEpDYNwQKWaUW2VYcHWv9wjujMyy7dlj8E/jqM71mw7ThNl6k4+3RQ802dMa14txm8pkF0vZgfpV3tkqhBqtjBAicVCaveqr3r3iGqjvyilBgdY+0NR8szqzm7CD/Bkb22+/IgM/mXQuL9KHD/WADlSGmYKmG3SSahmcZxznYCnzcRNN9LVuXlz5cbljmBj + + + + urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified + + + + From b06f0bbf4aa4d2e09b5715adf4fb6a455735a807 Mon Sep 17 00:00:00 2001 From: Joshua Semar Date: Thu, 3 Sep 2015 10:33:59 -0500 Subject: [PATCH 010/190] fix documentation --- .../aws/r/launch_configuration.html.markdown | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git 
a/website/source/docs/providers/aws/r/launch_configuration.html.markdown b/website/source/docs/providers/aws/r/launch_configuration.html.markdown index ea96503dc0..85d45bcb03 100644 --- a/website/source/docs/providers/aws/r/launch_configuration.html.markdown +++ b/website/source/docs/providers/aws/r/launch_configuration.html.markdown @@ -23,10 +23,10 @@ resource "aws_launch_configuration" "as_conf" { ## Using with AutoScaling Groups Launch Configurations cannot be updated after creation with the Amazon -Web Service API. In order to update a Launch Configuration, Terraform will -destroy the existing resource and create a replacement. If order to effectively -use a Launch Configuration resource with an[AutoScaling Group resource][1], -it's recommend to omit the Launch Configuration `name` attribute, and +Web Service API. In order to update a Launch Configuration, Terraform will +destroy the existing resource and create a replacement. In order to effectively +use a Launch Configuration resource with an [AutoScaling Group resource][1], +it's recommended to omit the Launch Configuration `name` attribute, and specify `create_before_destroy` in a [lifecycle][2] block, as shown: ``` @@ -69,7 +69,12 @@ The following arguments are supported: * `user_data` - (Optional) The user data to provide when launching the instance. * `enable_monitoring` - (Optional) Enables/disables detailed monitoring. This is enabled by default. * `ebs_optimized` - (Optional) If true, the launched EC2 instance will be EBS-optimized. -* `block_device_mapping` - (Optional) A list of block devices to add. Their keys are documented below. +* `root_block_device` - (Optional) Customize details about the root block + device of the instance. See [Block Devices](#block-devices) below for details. +* `ebs_block_device` - (Optional) Additional EBS block devices to attach to the + instance. See [Block Devices](#block-devices) below for details. 
+* `ephemeral_block_device` - (Optional) Customize Ephemeral (also known as + "Instance Store") volumes on the instance. See [Block Devices](#block-devices) below for details. ## Block devices From 10c96afa9b3daade66d1911b1c8575f35c8aa786 Mon Sep 17 00:00:00 2001 From: Mike Fiedler Date: Tue, 8 Sep 2015 09:10:54 -0400 Subject: [PATCH 011/190] Update aws_db_instance `db_subnet_group_name` When launching a new RDS instance in a VPC-default AWS account, trying to control which VPC the new RDS instance lands in is not apparent from the parameters available. The following works: ``` resource "aws_db_subnet_group" "foo" { name = "foo" description = "DB Subnet for foo" subnet_ids = ["${aws_subnet.foo_1a.id}", "${aws_subnet.foo_1b.id}"] } resource "aws_db_instance" "bar" { ... db_subnet_group_name = "${aws_db_subnet_group.foo.name}" ... } ``` Hopefully this doc update will help others --- website/source/docs/providers/aws/r/db_instance.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/db_instance.html.markdown b/website/source/docs/providers/aws/r/db_instance.html.markdown index c2f0063f4c..adf2dafe6b 100644 --- a/website/source/docs/providers/aws/r/db_instance.html.markdown +++ b/website/source/docs/providers/aws/r/db_instance.html.markdown @@ -65,7 +65,7 @@ The following arguments are supported: * `vpc_security_group_ids` - (Optional) List of VPC security groups to associate. * `security_group_names` - (Optional/Deprecated) List of DB Security Groups to associate. Only used for [DB Instances on the _EC2-Classic_ Platform](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html#USER_VPC.FindDefaultVPC). -* `db_subnet_group_name` - (Optional) Name of DB subnet group +* `db_subnet_group_name` - (Optional) Name of DB subnet group. DB instance will be created in the VPC associated with the DB subnet group. If unspecified, will be created in the `default` VPC, or in EC2 Classic, if available. 
* `parameter_group_name` - (Optional) Name of the DB parameter group to associate. * `storage_encrypted` - (Optional) Specifies whether the DB instance is encrypted. The default is `false` if not specified. * `apply_immediately` - (Optional) Specifies whether any database modifications From 03f94d66aef7fde266ad8e2f831c373ea37b99ad Mon Sep 17 00:00:00 2001 From: zpatrick Date: Wed, 9 Sep 2015 21:13:36 +0000 Subject: [PATCH 012/190] adding content field to s3_bucket_object --- .../aws/resource_aws_s3_bucket_object.go | 31 ++++++++++++--- .../aws/resource_aws_s3_bucket_object_test.go | 39 +++++++++++++++++-- 2 files changed, 61 insertions(+), 9 deletions(-) diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object.go b/builtin/providers/aws/resource_aws_s3_bucket_object.go index 9d46952d07..8a2e8370b0 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object.go @@ -4,6 +4,8 @@ import ( "fmt" "log" "os" + "io" + "bytes" "github.com/hashicorp/terraform/helper/schema" @@ -34,10 +36,18 @@ func resourceAwsS3BucketObject() *schema.Resource { "source": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, + ConflictsWith: []string{"content"}, }, + "content": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"source"}, + }, + "etag": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -51,19 +61,28 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro bucket := d.Get("bucket").(string) key := d.Get("key").(string) - source := d.Get("source").(string) + var body io.ReadSeeker - file, err := os.Open(source) + if v, ok := d.GetOk("source"); ok { + source := v.(string) + file, err := os.Open(source) + if err != nil { + return fmt.Errorf("Error opening S3 bucket object source (%s): %s", source, err) + } - if err != nil { - return fmt.Errorf("Error opening S3 bucket object 
source (%s): %s", source, err) + body = file + } else if v, ok := d.GetOk("content"); ok { + content := v.(string) + body = bytes.NewReader([]byte(content)) + } else { + return fmt.Errorf("Must specify \"source\" or \"content\" field") } resp, err := s3conn.PutObject( &s3.PutObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), - Body: file, + Body: body, }) if err != nil { diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go index 4f947736ae..6311dd7c32 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go @@ -15,7 +15,7 @@ import ( var tf, err = ioutil.TempFile("", "tf") -func TestAccAWSS3BucketObject_basic(t *testing.T) { +func TestAccAWSS3BucketObject_source(t *testing.T) { // first write some data to the tempfile just so it's not 0 bytes. ioutil.WriteFile(tf.Name(), []byte("{anything will do }"), 0644) resource.Test(t, resource.TestCase{ @@ -29,7 +29,26 @@ func TestAccAWSS3BucketObject_basic(t *testing.T) { CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSS3BucketObjectConfig, + Config: testAccAWSS3BucketObjectConfigSource, + Check: testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object"), + }, + }, + }) +} + +func TestAccAWSS3BucketObject_content(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if err != nil { + panic(err) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSS3BucketObjectConfigContent, Check: testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object"), }, }, @@ -86,7 +105,7 @@ func testAccCheckAWSS3BucketObjectExists(n string) resource.TestCheckFunc { } var randomBucket = randInt -var testAccAWSS3BucketObjectConfig = 
fmt.Sprintf(` +var testAccAWSS3BucketObjectConfigSource = fmt.Sprintf(` resource "aws_s3_bucket" "object_bucket" { bucket = "tf-object-test-bucket-%d" } @@ -97,3 +116,17 @@ resource "aws_s3_bucket_object" "object" { source = "%s" } `, randomBucket, tf.Name()) + + +var testAccAWSS3BucketObjectConfigContent = fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" +} + +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + content = "some_bucket_content" +} +`, randomBucket) + From 141c419cc70827ecc97211889913f6cdd1b59cb3 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 9 Sep 2015 23:17:57 -0700 Subject: [PATCH 013/190] Docs for aws_s3_bucket content argument. --- .../docs/providers/aws/r/s3_bucket_object.html.markdown | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown b/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown index 63d201b826..14286a603f 100644 --- a/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown +++ b/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown @@ -28,7 +28,11 @@ The following arguments are supported: * `bucket` - (Required) The name of the bucket to put the file in. * `key` - (Required) The name of the object once it is in the bucket. -* `source` - (Required) The path to the source file being uploaded to the bucket. +* `source` - (Required unless `content` given) The path to the source file being uploaded to the bucket. +* `content` - (Required unless `source` given) The literal content being uploaded to the bucket. + +Either `source` or `content` must be provided to specify the bucket content. +These two arguments are mutually-exclusive. 
## Attributes Reference From 5256a6df6b7677d26b63efe1c5a932e2cce884b3 Mon Sep 17 00:00:00 2001 From: zpatrick Date: Thu, 10 Sep 2015 18:37:17 +0000 Subject: [PATCH 014/190] fix formatting --- .../aws/resource_aws_s3_bucket_object.go | 21 ++++++++++--------- .../aws/resource_aws_s3_bucket_object_test.go | 11 ++++------ 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object.go b/builtin/providers/aws/resource_aws_s3_bucket_object.go index 8a2e8370b0..3a4cc4df25 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object.go @@ -1,11 +1,11 @@ package aws import ( + "bytes" "fmt" + "io" "log" "os" - "io" - "bytes" "github.com/hashicorp/terraform/helper/schema" @@ -35,18 +35,18 @@ func resourceAwsS3BucketObject() *schema.Resource { }, "source": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, ConflictsWith: []string{"content"}, }, - "content": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, + "content": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, ConflictsWith: []string{"source"}, - }, + }, "etag": &schema.Schema{ Type: schema.TypeString, @@ -138,3 +138,4 @@ func resourceAwsS3BucketObjectDelete(d *schema.ResourceData, meta interface{}) e } return nil } + diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go index 6311dd7c32..0e0651ad00 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go @@ -107,22 +107,19 @@ func testAccCheckAWSS3BucketObjectExists(n string) resource.TestCheckFunc { var randomBucket = randInt var testAccAWSS3BucketObjectConfigSource = fmt.Sprintf(` resource "aws_s3_bucket" "object_bucket" { - bucket = 
"tf-object-test-bucket-%d" + bucket = "tf-object-test-bucket-%d" } - resource "aws_s3_bucket_object" "object" { - bucket = "${aws_s3_bucket.object_bucket.bucket}" - key = "test-key" - source = "%s" + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + source = "%s" } `, randomBucket, tf.Name()) - var testAccAWSS3BucketObjectConfigContent = fmt.Sprintf(` resource "aws_s3_bucket" "object_bucket" { bucket = "tf-object-test-bucket-%d" } - resource "aws_s3_bucket_object" "object" { bucket = "${aws_s3_bucket.object_bucket.bucket}" key = "test-key" From 3d77d158f7270472446b9e1fe461487c9763c91c Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 14 Sep 2015 10:38:29 +0100 Subject: [PATCH 015/190] remote/s3: Add support for ACL --- state/remote/s3.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/state/remote/s3.go b/state/remote/s3.go index c2d897dd00..26330d1126 100644 --- a/state/remote/s3.go +++ b/state/remote/s3.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "io" + "log" "os" "strconv" @@ -45,6 +46,11 @@ func s3Factory(conf map[string]string) (Client, error) { serverSideEncryption = v } + acl := "" + if raw, ok := conf["acl"]; ok { + acl = raw + } + accessKeyId := conf["access_key"] secretAccessKey := conf["secret_key"] @@ -77,6 +83,7 @@ func s3Factory(conf map[string]string) (Client, error) { bucketName: bucketName, keyName: keyName, serverSideEncryption: serverSideEncryption, + acl: acl, }, nil } @@ -85,6 +92,7 @@ type S3Client struct { bucketName string keyName string serverSideEncryption bool + acl string } func (c *S3Client) Get() (*Payload, error) { @@ -140,6 +148,12 @@ func (c *S3Client) Put(data []byte) error { i.ServerSideEncryption = aws.String("AES256") } + if c.acl != "" { + i.ACL = aws.String(c.acl) + } + + log.Printf("[DEBUG] Uploading remote state to S3: %#v", i) + if _, err := c.nativeClient.PutObject(i); err == nil { return nil } else { From 4f7f20ba23b3b46680005a0efd4f06c80ad9a2b5 Mon Sep 17 00:00:00 2001 From: Radek 
Simko Date: Mon, 14 Sep 2015 10:36:55 +0100 Subject: [PATCH 016/190] remote/s3: Add some docs for supported parameters --- website/source/docs/commands/remote-config.html.markdown | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/website/source/docs/commands/remote-config.html.markdown b/website/source/docs/commands/remote-config.html.markdown index c7586ac0e4..73a06f8211 100644 --- a/website/source/docs/commands/remote-config.html.markdown +++ b/website/source/docs/commands/remote-config.html.markdown @@ -57,6 +57,13 @@ The following backends are supported: in the `access_key`, `secret_key` and `region` variables respectively, but passing credentials this way is not recommended since they will be included in cleartext inside the persisted state. + Other supported parameters include: + * `bucket` - the name of the S3 bucket + * `key` - path where to place/look for state file inside the bucket + * `encrypt` - whether to enable [server side encryption](http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) + of the state file + * `acl` - [Canned ACL](http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) + to be applied to the state file. * HTTP - Stores the state using a simple REST client. State will be fetched via GET, updated via POST, and purged with DELETE. Requires the `address` variable. 
From 55f3c8c76498cf181a1f3605b0e796e5cac6be07 Mon Sep 17 00:00:00 2001 From: thrashr888 Date: Mon, 14 Sep 2015 16:50:53 -0700 Subject: [PATCH 017/190] provider/aws: aws_elasticache_cluster normalizes name to lowercase --- .../aws/resource_aws_elasticache_cluster.go | 12 +++++++++++- .../aws/resource_aws_elasticache_cluster_test.go | 5 ++++- .../aws/r/elasticache_cluster.html.markdown | 4 ++-- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index 080c56ac9c..520ea13427 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -28,6 +28,12 @@ func resourceAwsElasticacheCluster() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, + StateFunc: func(val interface{}) string { + // Elasticache normalizes cluster ids to lowercase, + // so we have to do this too or else we can end up + // with non-converging diffs. + return strings.ToLower(val.(string)) + }, }, "engine": &schema.Schema{ Type: schema.TypeString, @@ -190,7 +196,11 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error creating Elasticache: %s", err) } - d.SetId(*resp.CacheCluster.CacheClusterId) + // Assign the cluster id as the resource ID + // Elasticache always retains the id in lower case, so we have to + // mimic that or else we won't be able to refresh a resource whose + // name contained uppercase characters. 
+ d.SetId(strings.ToLower(*resp.CacheCluster.CacheClusterId)) pending := []string{"creating"} stateConf := &resource.StateChangeConf{ diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go index caa14a8df7..173ca21ea7 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster_test.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster_test.go @@ -163,7 +163,10 @@ resource "aws_security_group" "bar" { } resource "aws_elasticache_cluster" "bar" { - cluster_id = "tf-test-%03d" + // Including uppercase letters in this name to ensure + // that we correctly handle the fact that the API + // normalizes names to lowercase. + cluster_id = "tf-TEST-%03d" node_type = "cache.m1.small" num_cache_nodes = 1 engine = "redis" diff --git a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown index 953b78a9c0..dc4df4c2a0 100644 --- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown @@ -27,8 +27,8 @@ resource "aws_elasticache_cluster" "bar" { The following arguments are supported: -* `cluster_id` – (Required) Group identifier. This parameter is stored as a -lowercase string +* `cluster_id` – (Required) Group identifier. Elasticache converts + this name to lowercase * `engine` – (Required) Name of the cache engine to be used for this cache cluster. 
Valid values for this parameter are `memcached` or `redis` From 4fd5c7254046bd7ef3d1b6872dcefe45f15fcbc2 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Tue, 15 Sep 2015 15:52:43 -0400 Subject: [PATCH 018/190] Fix "malformed url" bug in instance template when using network name --- .../providers/google/resource_compute_instance_template.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/builtin/providers/google/resource_compute_instance_template.go b/builtin/providers/google/resource_compute_instance_template.go index 060f4bb393..ce2c727349 100644 --- a/builtin/providers/google/resource_compute_instance_template.go +++ b/builtin/providers/google/resource_compute_instance_template.go @@ -305,11 +305,9 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) (error, []*compute. for i := 0; i < networksCount; i++ { prefix := fmt.Sprintf("network_interface.%d", i) - source := "global/networks/default" + source := "global/networks/" if v, ok := d.GetOk(prefix + ".network"); ok { - if v.(string) != "default" { - source = v.(string) - } + source += v.(string) } // Build the networkInterface From 029f1fa57236208a4cc77ddc330ea356500955bf Mon Sep 17 00:00:00 2001 From: stack72 Date: Wed, 16 Sep 2015 13:06:54 +0100 Subject: [PATCH 019/190] Adding configuration endpoint to the elasticache cluster nodes --- builtin/providers/aws/resource_aws_elasticache_cluster.go | 6 ++++++ .../docs/providers/aws/r/elasticache_cluster.html.markdown | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index 080c56ac9c..fc72f3a1d4 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -105,6 +105,10 @@ func resourceAwsElasticacheCluster() *schema.Resource { Type: schema.TypeInt, Computed: true, }, + "configuration_endpoint": &schema.Schema{ + Type: 
schema.TypeString, + Computed: true, + }, }, }, }, @@ -233,6 +237,7 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) if c.ConfigurationEndpoint != nil { d.Set("port", c.ConfigurationEndpoint.Port) } + d.Set("subnet_group_name", c.CacheSubnetGroupName) d.Set("security_group_names", c.CacheSecurityGroups) d.Set("security_group_ids", c.SecurityGroups) @@ -353,6 +358,7 @@ func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error "id": *node.CacheNodeId, "address": *node.Endpoint.Address, "port": int(*node.Endpoint.Port), + "configuration_endpoint": fmt.Sprintf("%s:%d", *node.Endpoint.Address, *node.Endpoint.Port), }) } diff --git a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown index 953b78a9c0..2e020a43da 100644 --- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown @@ -80,7 +80,7 @@ Example: `arn:aws:s3:::my_bucket/snapshot1.rdb` The following attributes are exported: -* `cache_nodes` - List of node objects including `id`, `address` and `port`. +* `cache_nodes` - List of node objects including `id`, `address`, `port` and `configuration_endpoint`. Referenceable e.g. 
as `${aws_elasticache_cluster.bar.cache_nodes.0.address}` [1]: http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheCluster.html From 68c187c01ef59fd99b8f657b71e1ed2a8b2a6b6d Mon Sep 17 00:00:00 2001 From: stack72 Date: Wed, 16 Sep 2015 17:15:31 +0100 Subject: [PATCH 020/190] Changing the ElastiCache Cluster configuration_engine to be on the cluster, not on the cache nodes --- .../providers/aws/resource_aws_elasticache_cluster.go | 10 +++++----- .../providers/aws/r/elasticache_cluster.html.markdown | 4 +++- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index fc72f3a1d4..093ea88f8c 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -29,6 +29,10 @@ func resourceAwsElasticacheCluster() *schema.Resource { Required: true, ForceNew: true, }, + "configuration_endpoint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, "engine": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -105,10 +109,6 @@ func resourceAwsElasticacheCluster() *schema.Resource { Type: schema.TypeInt, Computed: true, }, - "configuration_endpoint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, }, }, }, @@ -236,6 +236,7 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) d.Set("engine_version", c.EngineVersion) if c.ConfigurationEndpoint != nil { d.Set("port", c.ConfigurationEndpoint.Port) + d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", *c.ConfigurationEndpoint.Address, *c.ConfigurationEndpoint.Port))) } d.Set("subnet_group_name", c.CacheSubnetGroupName) @@ -358,7 +359,6 @@ func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error "id": *node.CacheNodeId, "address": *node.Endpoint.Address, "port": int(*node.Endpoint.Port), - 
"configuration_endpoint": fmt.Sprintf("%s:%d", *node.Endpoint.Address, *node.Endpoint.Port), }) } diff --git a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown index 2e020a43da..2fe6a8dcf1 100644 --- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown @@ -80,7 +80,9 @@ Example: `arn:aws:s3:::my_bucket/snapshot1.rdb` The following attributes are exported: -* `cache_nodes` - List of node objects including `id`, `address`, `port` and `configuration_endpoint`. +* `cache_nodes` - List of node objects including `id`, `address` and `port`. Referenceable e.g. as `${aws_elasticache_cluster.bar.cache_nodes.0.address}` + +* `configuration_endpoint` - (Memcached only) The configuration endpoint to allow host discovery [1]: http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheCluster.html From b224abb7a9b09247c3913c28d68870a6471efe86 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 16 Sep 2015 22:02:28 +0100 Subject: [PATCH 021/190] provider/aws: Add cloudwatch_log_group --- builtin/providers/aws/config.go | 37 +++-- builtin/providers/aws/provider.go | 1 + .../aws/resource_aws_cloudwatch_log_group.go | 146 ++++++++++++++++++ 3 files changed, 168 insertions(+), 16 deletions(-) create mode 100644 builtin/providers/aws/resource_aws_cloudwatch_log_group.go diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index a57c65c1b2..c1fc7ca92f 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -12,6 +12,7 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/ec2" 
"github.com/aws/aws-sdk-go/service/ecs" @@ -41,22 +42,23 @@ type Config struct { } type AWSClient struct { - cloudwatchconn *cloudwatch.CloudWatch - dynamodbconn *dynamodb.DynamoDB - ec2conn *ec2.EC2 - ecsconn *ecs.ECS - elbconn *elb.ELB - autoscalingconn *autoscaling.AutoScaling - s3conn *s3.S3 - sqsconn *sqs.SQS - snsconn *sns.SNS - r53conn *route53.Route53 - region string - rdsconn *rds.RDS - iamconn *iam.IAM - kinesisconn *kinesis.Kinesis - elasticacheconn *elasticache.ElastiCache - lambdaconn *lambda.Lambda + cloudwatchconn *cloudwatch.CloudWatch + cloudwatchlogsconn *cloudwatchlogs.CloudWatchLogs + dynamodbconn *dynamodb.DynamoDB + ec2conn *ec2.EC2 + ecsconn *ecs.ECS + elbconn *elb.ELB + autoscalingconn *autoscaling.AutoScaling + s3conn *s3.S3 + sqsconn *sqs.SQS + snsconn *sns.SNS + r53conn *route53.Route53 + region string + rdsconn *rds.RDS + iamconn *iam.IAM + kinesisconn *kinesis.Kinesis + elasticacheconn *elasticache.ElastiCache + lambdaconn *lambda.Lambda } // Client configures and returns a fully initialized AWSClient @@ -156,6 +158,9 @@ func (c *Config) Client() (interface{}, error) { log.Println("[INFO] Initializing CloudWatch SDK connection") client.cloudwatchconn = cloudwatch.New(awsConfig) + + log.Println("[INFO] Initializing CloudWatch Logs connection") + client.cloudwatchlogsconn = cloudwatchlogs.New(awsConfig) } if len(errs) > 0 { diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 6b2c16c7ab..16e4f3789d 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -163,6 +163,7 @@ func Provider() terraform.ResourceProvider { "aws_autoscaling_group": resourceAwsAutoscalingGroup(), "aws_autoscaling_notification": resourceAwsAutoscalingNotification(), "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(), + "aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(), "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), "aws_customer_gateway": 
resourceAwsCustomerGateway(), "aws_db_instance": resourceAwsDbInstance(), diff --git a/builtin/providers/aws/resource_aws_cloudwatch_log_group.go b/builtin/providers/aws/resource_aws_cloudwatch_log_group.go new file mode 100644 index 0000000000..e4f7236b2d --- /dev/null +++ b/builtin/providers/aws/resource_aws_cloudwatch_log_group.go @@ -0,0 +1,146 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +) + +func resourceAwsCloudWatchLogGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudWatchLogGroupCreate, + Read: resourceAwsCloudWatchLogGroupRead, + Update: resourceAwsCloudWatchLogGroupUpdate, + Delete: resourceAwsCloudWatchLogGroupDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "retention_in_days": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsCloudWatchLogGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + log.Printf("[DEBUG] Creating CloudWatch Log Group: %s", d.Get("name").(string)) + _, err := conn.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String(d.Get("name").(string)), + }) + if err != nil { + return fmt.Errorf("Creating CloudWatch Log Group failed: %s", err) + } + + d.SetId(d.Get("name").(string)) + + log.Println("[INFO] CloudWatch Log Group created") + + return resourceAwsCloudWatchLogGroupUpdate(d, meta) +} + +func resourceAwsCloudWatchLogGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + log.Printf("[DEBUG] Reading CloudWatch Log Group: %q", d.Get("name").(string)) + lg, err := lookupCloudWatchLogGroup(conn, 
d.Get("name").(string), nil) + if err != nil { + return err + } + + log.Printf("[DEBUG] Found Log Group: %#v", *lg) + + d.Set("arn", *lg.Arn) + d.Set("name", *lg.LogGroupName) + + if lg.RetentionInDays != nil { + d.Set("retention_in_days", *lg.RetentionInDays) + } + + return nil +} + +func lookupCloudWatchLogGroup(conn *cloudwatchlogs.CloudWatchLogs, + name string, nextToken *string) (*cloudwatchlogs.LogGroup, error) { + input := &cloudwatchlogs.DescribeLogGroupsInput{ + LogGroupNamePrefix: aws.String(name), + NextToken: nextToken, + } + resp, err := conn.DescribeLogGroups(input) + if err != nil { + return nil, err + } + + for _, lg := range resp.LogGroups { + if *lg.LogGroupName == name { + return lg, nil + } + } + + if resp.NextToken != nil { + return lookupCloudWatchLogGroup(conn, name, resp.NextToken) + } + + return nil, fmt.Errorf("CloudWatch Log Group %q not found", name) +} + +func resourceAwsCloudWatchLogGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + + name := d.Get("name").(string) + log.Printf("[DEBUG] Updating CloudWatch Log Group: %q", name) + + if d.HasChange("retention_in_days") { + var err error + + if v, ok := d.GetOk("retention_in_days"); ok { + input := cloudwatchlogs.PutRetentionPolicyInput{ + LogGroupName: aws.String(name), + RetentionInDays: aws.Int64(int64(v.(int))), + } + log.Printf("[DEBUG] Setting retention for CloudWatch Log Group: %q: %s", name, input) + _, err = conn.PutRetentionPolicy(&input) + } else { + log.Printf("[DEBUG] Deleting retention for CloudWatch Log Group: %q", name) + _, err = conn.DeleteRetentionPolicy(&cloudwatchlogs.DeleteRetentionPolicyInput{ + LogGroupName: aws.String(name), + }) + } + + return err + } + + return resourceAwsCloudWatchLogGroupRead(d, meta) +} + +func resourceAwsCloudWatchLogGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudwatchlogsconn + log.Printf("[INFO] Deleting CloudWatch Log Group: %s", 
d.Id()) + _, err := conn.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: aws.String(d.Get("name").(string)), + }) + if err != nil { + return fmt.Errorf("Error deleting CloudWatch Log Group: %s", err) + } + log.Println("[INFO] CloudWatch Log Group deleted") + + d.SetId("") + + return nil +} From 7b0626adb6fa59fbefbc648c8456e4a8796fe274 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 16 Sep 2015 22:02:45 +0100 Subject: [PATCH 022/190] provider/aws: Add docs for CloudWatch Log Group --- .../aws/r/cloudwatch_log_group.html.markdown | 33 +++++++++++++++++++ website/source/layouts/aws.erb | 4 +++ 2 files changed, 37 insertions(+) create mode 100644 website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown new file mode 100644 index 0000000000..e784c63893 --- /dev/null +++ b/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown @@ -0,0 +1,33 @@ +--- +layout: "aws" +page_title: "AWS: aws_cloudwatch_log_group" +sidebar_current: "docs-aws-resource-cloudwatch-log-group" +description: |- + Provides a CloudWatch Log Group resource. +--- + +# aws\_cloudwatch\_log\_group + +Provides a CloudWatch Log Group resource. + +## Example Usage + +``` +resource "aws_cloudwatch_log_group" "yada" { + name = "Yada" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the log group +* `retention_in_days` - (Optional) Specifies the number of days + you want to retain log events in the specified log group. + +## Attributes Reference + +The following attributes are exported: + +* `arn` - The Amazon Resource Name (ARN) specifying the log group. 
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 22801a5075..5c67ad58ef 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -15,6 +15,10 @@ CloudWatch Resources