provider/aws: Slight design change to aws_elasticache_replication_group
We should error-check up front on the use of `number_cache_clusters` and `cluster_mode`. This allows us to write a test to make sure it all works as expected:

```
% make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSElasticacheReplicationGroup_clusteringAndCacheNodesCausesError'
==> Checking that code complies with gofmt requirements...
go generate $(go list ./... | grep -v /terraform/vendor/)
2017/05/09 19:04:56 Generated command/internal_plugin_list.go
TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSElasticacheReplicationGroup_clusteringAndCacheNodesCausesError -timeout 120m
=== RUN   TestAccAWSElasticacheReplicationGroup_clusteringAndCacheNodesCausesError
--- PASS: TestAccAWSElasticacheReplicationGroup_clusteringAndCacheNodesCausesError (40.58s)
PASS
ok      github.com/hashicorp/terraform/builtin/providers/aws   40.603s
```
This commit is contained in:
parent 55a4ce2838
commit 930a41b7c0
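The heart of the change is visible in the first hunk below: both attributes are looked up once with `d.GetOk`, and the create function rejects a configuration unless exactly one of them is set. A minimal standalone sketch of that guard (the `validateExclusive` helper is illustrative, not part of the commit; the real check lives inline in the create function):

```go
package main

import "fmt"

// validateExclusive mirrors the up-front check this commit adds:
// exactly one of cluster_mode and number_cache_clusters may be set.
func validateExclusive(clusterModeOk, cacheClustersOk bool) error {
	if !clusterModeOk && !cacheClustersOk || clusterModeOk && cacheClustersOk {
		return fmt.Errorf("Either `number_cache_clusters` or `cluster_mode` must be set")
	}
	return nil
}

func main() {
	fmt.Println(validateExclusive(false, false)) // error: neither attribute set
	fmt.Println(validateExclusive(true, true))   // error: both attributes set
	fmt.Println(validateExclusive(true, false))  // <nil>: exactly one set
}
```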
Changes to the create function in the ElastiCache replication group resource:

```diff
@@ -165,8 +165,15 @@ func resourceAwsElasticacheReplicationGroupCreate(d *schema.ResourceData, meta i
 		params.SnapshotName = aws.String(v.(string))
 	}
 
-	if a, ok := d.GetOk("cluster_mode"); ok {
-		clusterModeAttributes := a.(*schema.Set).List()
+	clusterMode, clusterModeOk := d.GetOk("cluster_mode")
+	cacheClusters, cacheClustersOk := d.GetOk("number_cache_clusters")
+
+	if !clusterModeOk && !cacheClustersOk || clusterModeOk && cacheClustersOk {
+		return fmt.Errorf("Either `number_cache_clusters` or `cluster_mode` must be set")
+	}
+
+	if clusterModeOk {
+		clusterModeAttributes := clusterMode.(*schema.Set).List()
 		attributes := clusterModeAttributes[0].(map[string]interface{})
 
 		if v, ok := attributes["num_node_groups"]; ok {
@@ -176,10 +183,10 @@ func resourceAwsElasticacheReplicationGroupCreate(d *schema.ResourceData, meta i
 		if v, ok := attributes["replicas_per_node_group"]; ok {
 			params.ReplicasPerNodeGroup = aws.Int64(int64(v.(int)))
 		}
-	} else if v, ok := d.GetOk("number_cache_clusters"); ok {
-		params.NumCacheClusters = aws.Int64(int64(v.(int)))
-	} else {
-		return fmt.Errorf("number_cache_clusters must be set if not using cluster_mode")
 	}
 
+	if cacheClustersOk {
+		params.NumCacheClusters = aws.Int64(int64(cacheClusters.(int)))
+	}
+
 	resp, err := conn.CreateReplicationGroup(params)
```
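A note on the guard's boolean form: `!a && !b || a && b` is true precisely when `a == b`, so the condition could equivalently be written `clusterModeOk == cacheClustersOk`. A quick truth-table check (standalone sketch, not commit code):

```go
package main

import "fmt"

func main() {
	// The guard rejects a config whenever both GetOk results agree:
	// neither attribute set, or both set.
	for _, a := range []bool{false, true} {
		for _, b := range []bool{false, true} {
			guard := !a && !b || a && b
			fmt.Printf("clusterModeOk=%-5v cacheClustersOk=%-5v reject=%-5v equal=%v\n",
				a, b, guard, a == b)
		}
	}
}
```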
Changes to the acceptance tests:

```diff
@@ -2,6 +2,7 @@ package aws
 
 import (
 	"fmt"
+	"regexp"
 	"testing"
 
 	"github.com/aws/aws-sdk-go/aws"
@@ -240,14 +241,16 @@ func TestAccAWSElasticacheReplicationGroup_redisClusterInVpc2(t *testing.T) {
 
 func TestAccAWSElasticacheReplicationGroup_nativeRedisCluster(t *testing.T) {
 	var rg elasticache.ReplicationGroup
+	rInt := acctest.RandInt()
+	rName := acctest.RandString(10)
 
 	resource.Test(t, resource.TestCase{
 		PreCheck:     func() { testAccPreCheck(t) },
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy,
 		Steps: []resource.TestStep{
-			resource.TestStep{
-				Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig,
+			{
+				Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rInt, rName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg),
 					resource.TestCheckResourceAttr(
@@ -268,6 +271,23 @@ func TestAccAWSElasticacheReplicationGroup_nativeRedisCluster(t *testing.T) {
 	})
 }
 
+func TestAccAWSElasticacheReplicationGroup_clusteringAndCacheNodesCausesError(t *testing.T) {
+	rInt := acctest.RandInt()
+	rName := acctest.RandString(10)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config:      testAccAWSElasticacheReplicationGroupNativeRedisClusterErrorConfig(rInt, rName),
+				ExpectError: regexp.MustCompile("Either `number_cache_clusters` or `cluster_mode` must be set"),
+			},
+		},
+	})
+}
+
 func TestResourceAWSElastiCacheReplicationGroupIdValidation(t *testing.T) {
 	cases := []struct {
 		Value string
@@ -754,11 +774,8 @@ resource "aws_elasticache_replication_group" "bar" {
 }
 `, acctest.RandInt(), acctest.RandInt(), acctest.RandInt(), acctest.RandInt(), acctest.RandString(10))
 
-var testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig = fmt.Sprintf(`
-provider "aws" {
-	region = "us-west-2"
-}
-
+func testAccAWSElasticacheReplicationGroupNativeRedisClusterErrorConfig(rInt int, rName string) string {
+	return fmt.Sprintf(`
 resource "aws_vpc" "foo" {
 	cidr_block = "192.168.0.0/16"
 	tags {
@@ -818,5 +835,70 @@ resource "aws_elasticache_replication_group" "bar" {
 		replicas_per_node_group = 1
 		num_node_groups = 2
 	}
-}
-`, acctest.RandInt(), acctest.RandInt(), acctest.RandInt(), acctest.RandInt(), acctest.RandString(10))
+	number_cache_clusters = 3
+}`, rInt, rInt, rInt, rInt, rName)
+}
+
+func testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rInt int, rName string) string {
+	return fmt.Sprintf(`
+resource "aws_vpc" "foo" {
+	cidr_block = "192.168.0.0/16"
+	tags {
+		Name = "tf-test"
+	}
+}
+
+resource "aws_subnet" "foo" {
+	vpc_id = "${aws_vpc.foo.id}"
+	cidr_block = "192.168.0.0/20"
+	availability_zone = "us-west-2a"
+	tags {
+		Name = "tf-test-%03d"
+	}
+}
+
+resource "aws_subnet" "bar" {
+	vpc_id = "${aws_vpc.foo.id}"
+	cidr_block = "192.168.16.0/20"
+	availability_zone = "us-west-2b"
+	tags {
+		Name = "tf-test-%03d"
+	}
+}
+
+resource "aws_elasticache_subnet_group" "bar" {
+	name = "tf-test-cache-subnet-%03d"
+	description = "tf-test-cache-subnet-group-descr"
+	subnet_ids = [
+		"${aws_subnet.foo.id}",
+		"${aws_subnet.bar.id}"
+	]
+}
+
+resource "aws_security_group" "bar" {
+	name = "tf-test-security-group-%03d"
+	description = "tf-test-security-group-descr"
+	vpc_id = "${aws_vpc.foo.id}"
+	ingress {
+		from_port = -1
+		to_port = -1
+		protocol = "icmp"
+		cidr_blocks = ["0.0.0.0/0"]
+	}
+}
+
+resource "aws_elasticache_replication_group" "bar" {
+	replication_group_id = "tf-%s"
+	replication_group_description = "test description"
+	node_type = "cache.t2.micro"
+	port = 6379
+	subnet_group_name = "${aws_elasticache_subnet_group.bar.name}"
+	security_group_ids = ["${aws_security_group.bar.id}"]
+	parameter_group_name = "default.redis3.2.cluster.on"
+	automatic_failover_enabled = true
+	cluster_mode {
+		replicas_per_node_group = 1
+		num_node_groups = 2
+	}
+}`, rInt, rInt, rInt, rInt, rName)
+}
```
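The error-path test relies on `ExpectError`, which matches the failure from the apply against a regular expression rather than an exact string. A minimal check that the pattern used in the test matches the message returned by the create function (standalone sketch; the backticks in the pattern are ordinary characters, not regexp syntax):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern the acceptance test hands to ExpectError.
	re := regexp.MustCompile("Either `number_cache_clusters` or `cluster_mode` must be set")
	err := fmt.Errorf("Either `number_cache_clusters` or `cluster_mode` must be set")
	fmt.Println(re.MatchString(err.Error())) // true
}
```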