provider/aws: Modify aws_redshift_cluster now allows

`publicly_accessible` to be changed

Also updated the AWS Go SDK from 1.1.9 -> 1.1.12 as this was required to
allow the new behavior for the Redshift API
This commit is contained in:
stack72 2016-03-19 01:05:09 +05:30
parent 88a1103738
commit 12ac0761b7
31 changed files with 1288 additions and 527 deletions

228
Godeps/Godeps.json generated
View File

@ -173,288 +173,288 @@
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/awserr",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/client",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/defaults",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/request",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/session",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/endpoints",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/ec2query",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restjson",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/waiter",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/apigateway",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/autoscaling",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudformation",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudtrail",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatch",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatchevents",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatchlogs",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/codecommit",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/codedeploy",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/directoryservice",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/dynamodb",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ec2",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ecr",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ecs",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/efs",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elasticache",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elasticbeanstalk",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elasticsearchservice",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elb",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/firehose",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/glacier",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/iam",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/kinesis",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/kms",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/lambda",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/opsworks",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/rds",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/redshift",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/route53",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/sns",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/sqs",
"Comment": "v1.1.9",
"Rev": "2e7cf03d7f5c8a4b4c9f7341ddf1e13102845cf2"
"Comment": "v1.1.12",
"Rev": "4da0bec8953a0a540f391930a946917b12a95671"
},
{
"ImportPath": "github.com/bgentry/speakeasy",

View File

@ -145,7 +145,6 @@ func resourceAwsRedshiftCluster() *schema.Resource {
"publicly_accessible": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
Default: true,
},
@ -410,6 +409,10 @@ func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{})
req.AllowVersionUpgrade = aws.Bool(d.Get("allow_version_upgrade").(bool))
}
if d.HasChange("publicly_accessible") {
req.PubliclyAccessible = aws.Bool(d.Get("publicly_accessible").(bool))
}
log.Printf("[INFO] Modifying Redshift Cluster: %s", d.Id())
log.Printf("[DEBUG] Redshift Cluster Modify options: %s", req)
_, err := conn.ModifyCluster(req)
@ -418,7 +421,7 @@ func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{})
}
stateConf := &resource.StateChangeConf{
Pending: []string{"creating", "deleting", "rebooting", "resizing", "renaming"},
Pending: []string{"creating", "deleting", "rebooting", "resizing", "renaming", "modifying"},
Target: []string{"available"},
Refresh: resourceAwsRedshiftClusterStateRefreshFunc(d, meta),
Timeout: 10 * time.Minute,

View File

@ -38,11 +38,12 @@ func TestAccAWSRedshiftCluster_basic(t *testing.T) {
})
}
func TestAccAWSRedshiftCluster_notPubliclyAccessible(t *testing.T) {
func TestAccAWSRedshiftCluster_publiclyAccessible(t *testing.T) {
var v redshift.Cluster
ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
config := fmt.Sprintf(testAccAWSRedshiftClusterConfig_notPubliclyAccessible, ri)
preConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_notPubliclyAccessible, ri)
postConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_updatePubliclyAccessible, ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@ -50,13 +51,22 @@ func TestAccAWSRedshiftCluster_notPubliclyAccessible(t *testing.T) {
CheckDestroy: testAccCheckAWSRedshiftClusterDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: config,
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v),
resource.TestCheckResourceAttr(
"aws_redshift_cluster.default", "publicly_accessible", "false"),
),
},
resource.TestStep{
Config: postConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v),
resource.TestCheckResourceAttr(
"aws_redshift_cluster.default", "publicly_accessible", "true"),
),
},
},
})
}
@ -335,3 +345,65 @@ resource "aws_redshift_cluster" "default" {
cluster_subnet_group_name = "${aws_redshift_subnet_group.foo.name}"
publicly_accessible = false
}`
var testAccAWSRedshiftClusterConfig_updatePubliclyAccessible = `
provider "aws" {
region = "us-west-2"
}
resource "aws_vpc" "foo" {
cidr_block = "10.1.0.0/16"
}
resource "aws_internet_gateway" "foo" {
vpc_id = "${aws_vpc.foo.id}"
tags {
foo = "bar"
}
}
resource "aws_subnet" "foo" {
cidr_block = "10.1.1.0/24"
availability_zone = "us-west-2a"
vpc_id = "${aws_vpc.foo.id}"
tags {
Name = "tf-dbsubnet-test-1"
}
}
resource "aws_subnet" "bar" {
cidr_block = "10.1.2.0/24"
availability_zone = "us-west-2b"
vpc_id = "${aws_vpc.foo.id}"
tags {
Name = "tf-dbsubnet-test-2"
}
}
resource "aws_subnet" "foobar" {
cidr_block = "10.1.3.0/24"
availability_zone = "us-west-2c"
vpc_id = "${aws_vpc.foo.id}"
tags {
Name = "tf-dbsubnet-test-3"
}
}
resource "aws_redshift_subnet_group" "foo" {
name = "foo"
description = "foo description"
subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}", "${aws_subnet.foobar.id}"]
}
resource "aws_redshift_cluster" "default" {
cluster_identifier = "tf-redshift-cluster-%d"
availability_zone = "us-west-2a"
database_name = "mydb"
master_username = "foo"
master_password = "Mustbe8characters"
node_type = "dc1.large"
automated_snapshot_retention_period = 7
allow_version_upgrade = false
cluster_subnet_group_name = "${aws_redshift_subnet_group.foo.name}"
publicly_accessible = true
}`

View File

@ -42,9 +42,12 @@ type Error interface {
OrigErr() error
}
// BatchError is a batch of errors which also wraps lower level errors with code, message,
// and original errors. Calling Error() will only return the error that is at the end
// of the list.
// BatchError is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Deprecated: Replaced with BatchedErrors. Only defined for backwards
// compatibility.
type BatchError interface {
// Satisfy the generic error interface.
error
@ -59,17 +62,35 @@ type BatchError interface {
OrigErrs() []error
}
// BatchedErrors is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Replaces BatchError
type BatchedErrors interface {
// Satisfy the base Error interface.
Error
// Returns the original error if one was set. Nil is returned if not set.
OrigErrs() []error
}
// New returns an Error object described by the code, message, and origErr.
//
// If origErr satisfies the Error interface it will not be wrapped within a new
// Error object and will instead be returned.
func New(code, message string, origErr error) Error {
return newBaseError(code, message, origErr)
var errs []error
if origErr != nil {
errs = append(errs, origErr)
}
return newBaseError(code, message, errs)
}
// NewBatchError returns an baseError with an expectation of an array of errors
func NewBatchError(code, message string, errs []error) BatchError {
return newBaseErrors(code, message, errs)
// NewBatchError returns a BatchedErrors with a collection of errors as an
// array of errors.
func NewBatchError(code, message string, errs []error) BatchedErrors {
return newBaseError(code, message, errs)
}
// A RequestFailure is an interface to extract request failure information from

View File

@ -34,36 +34,17 @@ type baseError struct {
errs []error
}
// newBaseError returns an error object for the code, message, and err.
// newBaseError returns an error object for the code, message, and errors.
//
// code is a short no whitespace phrase depicting the classification of
// the error that is being created.
//
// message is the free flow string containing detailed information about the error.
// message is the free flow string containing detailed information about the
// error.
//
// origErr is the error object which will be nested under the new error to be returned.
func newBaseError(code, message string, origErr error) *baseError {
b := &baseError{
code: code,
message: message,
}
if origErr != nil {
b.errs = append(b.errs, origErr)
}
return b
}
// newBaseErrors returns an error object for the code, message, and errors.
//
// code is a short no whitespace phrase depicting the classification of
// the error that is being created.
//
// message is the free flow string containing detailed information about the error.
//
// origErrs is the error objects which will be nested under the new errors to be returned.
func newBaseErrors(code, message string, origErrs []error) *baseError {
// origErrs is the error objects which will be nested under the new errors to
// be returned.
func newBaseError(code, message string, origErrs []error) *baseError {
b := &baseError{
code: code,
message: message,
@ -103,19 +84,26 @@ func (b baseError) Message() string {
return b.message
}
// OrigErr returns the original error if one was set. Nil is returned if no error
// was set. This only returns the first element in the list. If the full list is
// needed, use BatchError
// OrigErr returns the original error if one was set. Nil is returned if no
// error was set. This only returns the first element in the list. If the full
// list is needed, use BatchedErrors.
func (b baseError) OrigErr() error {
if size := len(b.errs); size > 0 {
switch len(b.errs) {
case 0:
return nil
case 1:
return b.errs[0]
default:
if err, ok := b.errs[0].(Error); ok {
return NewBatchError(err.Code(), err.Message(), b.errs[1:])
}
return NewBatchError("BatchedErrors",
"multiple errors occured", b.errs)
}
return nil
}
// OrigErrs returns the original errors if one was set. An empty slice is returned if
// no error was set:w
// OrigErrs returns the original errors if one was set. An empty slice is
// returned if no error was set.
func (b baseError) OrigErrs() []error {
return b.errs
}
@ -133,8 +121,8 @@ type requestError struct {
requestID string
}
// newRequestError returns a wrapped error with additional information for request
// status code, and service requestID.
// newRequestError returns a wrapped error with additional information for
// request status code, and service requestID.
//
// Should be used to wrap all request which involve service requests. Even if
// the request failed without a service response, but had an HTTP status code
@ -173,6 +161,15 @@ func (r requestError) RequestID() string {
return r.requestID
}
// OrigErrs returns the original errors if one was set. An empty slice is
// returned if no error was set.
func (r requestError) OrigErrs() []error {
if b, ok := r.awsError.(BatchedErrors); ok {
return b.OrigErrs()
}
return []error{r.OrigErr()}
}
// An error list that satisfies the golang interface
type errorList []error

View File

@ -91,6 +91,10 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
default:
if !v.IsValid() {
fmt.Fprint(buf, "<invalid value>")
return
}
format := "%v"
switch v.Interface().(type) {
case string:

View File

@ -1,12 +1,18 @@
package ec2metadata
import (
"encoding/json"
"path"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// GetMetadata uses the path provided to request
// GetMetadata uses the path provided to request information from the EC2
// instance metadata service. The content will be returned as a string, or
// error if the request failed.
func (c *EC2Metadata) GetMetadata(p string) (string, error) {
op := &request.Operation{
Name: "GetMetadata",
@ -20,6 +26,43 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {
return output.Content, req.Send()
}
// GetDynamicData uses the path provided to request information from the EC2
// instance metadata service for dynamic data. The content will be returned
// as a string, or error if the request failed.
func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
op := &request.Operation{
Name: "GetDynamicData",
HTTPMethod: "GET",
HTTPPath: path.Join("/", "dynamic", p),
}
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
return output.Content, req.Send()
}
// GetInstanceIdentityDocument retrieves an identity document describing an
// instance. Error is returned if the request fails or is unable to parse
// the response.
func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
resp, err := c.GetDynamicData("instance-identity/document")
if err != nil {
return EC2InstanceIdentityDocument{},
awserr.New("EC2RoleRequestError",
"failed to get EC2 instance identity document", err)
}
doc := EC2InstanceIdentityDocument{}
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
return EC2InstanceIdentityDocument{},
awserr.New("SerializationError",
"failed to decode EC2 instance identity document", err)
}
return doc, nil
}
// Region returns the region the instance is running in.
func (c *EC2Metadata) Region() (string, error) {
resp, err := c.GetMetadata("placement/availability-zone")
@ -41,3 +84,22 @@ func (c *EC2Metadata) Available() bool {
return true
}
// An EC2InstanceIdentityDocument provides the shape for unmarshalling
// an instance identity document
type EC2InstanceIdentityDocument struct {
DevpayProductCodes []string `json:"devpayProductCodes"`
AvailabilityZone string `json:"availabilityZone"`
PrivateIP string `json:"privateIp"`
Version string `json:"version"`
Region string `json:"region"`
InstanceID string `json:"instanceId"`
BillingProducts []string `json:"billingProducts"`
InstanceType string `json:"instanceType"`
AccountID string `json:"accountId"`
PendingTime time.Time `json:"pendingTime"`
ImageID string `json:"imageId"`
KernelID string `json:"kernelId"`
RamdiskID string `json:"ramdiskId"`
Architecture string `json:"architecture"`
}

View File

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.1.9"
const SDKVersion = "1.1.12"

View File

@ -2,7 +2,7 @@ package query
import (
"encoding/xml"
"io"
"io/ioutil"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
@ -15,6 +15,10 @@ type xmlErrorResponse struct {
RequestID string `xml:"RequestId"`
}
type xmlServiceUnavailableResponse struct {
XMLName xml.Name `xml:"ServiceUnavailableException"`
}
// UnmarshalErrorHandler is a name request handler to unmarshal request errors
var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
@ -22,11 +26,16 @@ var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalEr
func UnmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
resp := &xmlErrorResponse{}
err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
if err != nil && err != io.EOF {
r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err)
} else {
bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
return
}
// First check for specific error
resp := xmlErrorResponse{}
decodeErr := xml.Unmarshal(bodyBytes, &resp)
if decodeErr == nil {
reqID := resp.RequestID
if reqID == "" {
reqID = r.RequestID
@ -36,5 +45,22 @@ func UnmarshalError(r *request.Request) {
r.HTTPResponse.StatusCode,
reqID,
)
return
}
// Check for unhandled error
servUnavailResp := xmlServiceUnavailableResponse{}
unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
if unavailErr == nil {
r.Error = awserr.NewRequestFailure(
awserr.New("ServiceUnavailableException", "service is unavailable", nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
// Failed to retrieve any error message from the response body
r.Error = awserr.New("SerializationError",
"failed to decode query XML error response", decodeErr)
}

View File

@ -222,8 +222,7 @@ func EscapePath(path string, encodeSep bool) string {
if noEscape[c] || (c == '/' && !encodeSep) {
buf.WriteByte(c)
} else {
buf.WriteByte('%')
buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16)))
fmt.Fprintf(&buf, "%%%02X", c)
}
}
return buf.String()

View File

@ -3,6 +3,7 @@ package rest
import (
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"net/http"
"reflect"
@ -51,6 +52,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
if payload.IsValid() {
switch payload.Interface().(type) {
case []byte:
defer r.HTTPResponse.Body.Close()
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
@ -58,6 +60,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
payload.Set(reflect.ValueOf(b))
}
case *string:
defer r.HTTPResponse.Body.Close()
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
@ -72,6 +75,8 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
case "aws.ReadSeekCloser", "io.ReadCloser":
payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
default:
io.Copy(ioutil.Discard, r.HTTPResponse.Body)
defer r.HTTPResponse.Body.Close()
r.Error = awserr.New("SerializationError",
"failed to decode REST response",
fmt.Errorf("unknown payload type %s", payload.Type()))

View File

@ -53,6 +53,7 @@ func UnmarshalMeta(r *request.Request) {
// UnmarshalError unmarshals a response error for the REST JSON protocol.
func UnmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
code := r.HTTPResponse.Header.Get("X-Amzn-Errortype")
bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {

View File

@ -995,6 +995,8 @@ type PublicKey struct {
ValidityStartTime *time.Time `type:"timestamp" timestampFormat:"unix"`
// The DER encoded public key value in PKCS#1 format.
//
// Value is automatically base64 encoded/decoded by the SDK.
Value []byte `type:"blob"`
}

File diff suppressed because it is too large Load Diff

View File

@ -11,22 +11,21 @@ import (
"github.com/aws/aws-sdk-go/private/signer/v4"
)
// Overview This is the AWS CodeDeploy API Reference. This guide provides descriptions
// of the AWS CodeDeploy APIs. For additional information, see the AWS CodeDeploy
// User Guide (http://docs.aws.amazon.com/codedeploy/latest/userguide).
// Overview This reference guide provides descriptions of the AWS CodeDeploy
// APIs. For more information about AWS CodeDeploy, see the AWS CodeDeploy User
// Guide (docs.aws.amazon.com/codedeploy/latest/userguide).
//
// Using the APIs You can use the AWS CodeDeploy APIs to work with the following
// items:
// Using the APIs You can use the AWS CodeDeploy APIs to work with the following:
//
// Applications are unique identifiers that AWS CodeDeploy uses to ensure
// that the correct combinations of revisions, deployment configurations, and
// deployment groups are being referenced during deployments.
// Applications are unique identifiers used by AWS CodeDeploy to ensure the
// correct combinations of revisions, deployment configurations, and deployment
// groups are being referenced during deployments.
//
// You can use the AWS CodeDeploy APIs to create, delete, get, list, and update
// applications.
//
// Deployment configurations are sets of deployment rules and deployment
// success and failure conditions that AWS CodeDeploy uses during deployments.
// Deployment configurations are sets of deployment rules and success and
// failure conditions used by AWS CodeDeploy during deployments.
//
// You can use the AWS CodeDeploy APIs to create, delete, get, and list deployment
// configurations.
@ -41,22 +40,22 @@ import (
// are deployed. Instances are identified by their Amazon EC2 tags or Auto Scaling
// group names. Instances belong to deployment groups.
//
// You can use the AWS CodeDeploy APIs to get and list instances.
// You can use the AWS CodeDeploy APIs to get and list instances.
//
// Deployments represent the process of deploying revisions to instances.
//
// You can use the AWS CodeDeploy APIs to create, get, list, and stop deployments.
//
// Application revisions are archive files that are stored in Amazon S3 buckets
// or GitHub repositories. These revisions contain source content (such as source
// code, web pages, executable files, any deployment scripts, and similar) along
// with an Application Specification file (AppSpec file). (The AppSpec file
// is unique to AWS CodeDeploy; it defines a series of deployment actions that
// you want AWS CodeDeploy to execute.) An application revision is uniquely
// identified by its Amazon S3 object key and its ETag, version, or both (for
// application revisions that are stored in Amazon S3 buckets) or by its repository
// name and commit ID (for applications revisions that are stored in GitHub
// repositories). Application revisions are deployed through deployment groups.
// Application revisions are archive files stored in Amazon S3 buckets or
// GitHub repositories. These revisions contain source content (such as source
// code, web pages, executable files, and deployment scripts) along with an
// application specification (AppSpec) file. (The AppSpec file is unique to
// AWS CodeDeploy; it defines the deployment actions you want AWS CodeDeploy
// to execute.) For application revisions stored in Amazon S3 buckets, an application
// revision is uniquely identified by its Amazon S3 object key and its ETag,
// version, or both. For application revisions stored in GitHub repositories,
// an application revision is uniquely identified by its repository name and
// commit ID. Application revisions are deployed through deployment groups.
//
// You can use the AWS CodeDeploy APIs to get, list, and register application
// revisions.

View File

@ -796,6 +796,8 @@ type AttributeValue struct {
_ struct{} `type:"structure"`
// A Binary data type.
//
// B is automatically base64 encoded/decoded by the SDK.
B []byte `type:"blob"`
// A Boolean data type.

View File

@ -7983,6 +7983,7 @@ func (s AvailableCapacity) GoString() string {
// BlobAttributeValue holds a binary attribute value, serialized under the
// wire name "value" (see the locationName tag below).
type BlobAttributeValue struct {
	_ struct{} `type:"structure"`

	// Value is automatically base64 encoded/decoded by the SDK.
	Value []byte `locationName:"value" type:"blob"`
}
@ -17436,6 +17437,8 @@ type ImportKeyPairInput struct {
// The public key. You must base64 encode the public key material before sending
// it to AWS.
//
// PublicKeyMaterial is automatically base64 encoded/decoded by the SDK.
PublicKeyMaterial []byte `locationName:"publicKeyMaterial" type:"blob" required:"true"`
}
@ -21903,6 +21906,8 @@ type S3Storage struct {
// A Base64-encoded Amazon S3 upload policy that gives Amazon EC2 permission
// to upload items into Amazon S3 on your behalf.
//
// UploadPolicy is automatically base64 encoded/decoded by the SDK.
UploadPolicy []byte `locationName:"uploadPolicy" type:"blob"`
// The signature of the Base64 encoded JSON document.

View File

@ -1349,6 +1349,8 @@ type UploadLayerPartInput struct {
_ struct{} `type:"structure"`
// The base64-encoded layer part payload.
//
// LayerPartBlob is automatically base64 encoded/decoded by the SDK.
LayerPartBlob []byte `locationName:"layerPartBlob" type:"blob" required:"true"`
// The integer value of the first byte of the layer part.

View File

@ -841,6 +841,8 @@ type Record struct {
// The data blob, which is base64-encoded when the blob is serialized. The maximum
// size of the data blob, before base64-encoding, is 1,000 KB.
//
// Data is automatically base64 encoded/decoded by the SDK.
Data []byte `type:"blob" required:"true"`
}

View File

@ -2928,6 +2928,12 @@ func (c *IAM) ListPolicyVersionsRequest(input *ListPolicyVersionsInput) (req *re
Name: opListPolicyVersions,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxItems",
TruncationToken: "IsTruncated",
},
}
if input == nil {
@ -2952,6 +2958,14 @@ func (c *IAM) ListPolicyVersions(input *ListPolicyVersionsInput) (*ListPolicyVer
return out, err
}
// ListPolicyVersionsPages iterates over the pages of a ListPolicyVersions
// operation, calling fn with each page of results until fn returns false or
// there are no more pages. Any error encountered while paging is returned.
func (c *IAM) ListPolicyVersionsPages(input *ListPolicyVersionsInput, fn func(p *ListPolicyVersionsOutput, lastPage bool) (shouldContinue bool)) error {
	req, _ := c.ListPolicyVersionsRequest(input)
	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
	// Adapt the caller's typed callback to the untyped EachPage signature.
	walk := func(page interface{}, last bool) bool {
		return fn(page.(*ListPolicyVersionsOutput), last)
	}
	return req.EachPage(walk)
}
const opListRolePolicies = "ListRolePolicies"
// ListRolePoliciesRequest generates a request for the ListRolePolicies operation.
@ -4157,7 +4171,8 @@ func (c *IAM) UpdateUserRequest(input *UpdateUserInput) (req *request.Request, o
// Updates the name and/or the path of the specified user.
//
// You should understand the implications of changing a user's path or name.
// For more information, see Renaming Users and Groups (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_WorkingWithGroupsAndUsers.html)
// For more information, see Renaming an IAM User (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_manage.html#id_users_renaming)
// and Renaming an IAM Group (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_groups_manage_rename.html)
// in the IAM User Guide. To change a user name the requester must have appropriate
// permissions on both the source object and the target object. For example,
// to change Bob to Robert, the entity making the request must have permission
@ -6141,9 +6156,12 @@ type EvaluationResult struct {
MatchedStatements []*Statement `type:"list"`
// A list of context keys that are required by the included input policies but
// that were not provided by one of the input parameters. To discover the context
// keys used by a set of policies, you can call GetContextKeysForCustomPolicy
// or GetContextKeysForPrincipalPolicy.
// that were not provided by one of the input parameters. This list is used
// when the resource in a simulation is "*", either explicitly, or when the
// ResourceArns parameter is blank. If you include a list of resources, then any
// missing context values are instead included under the ResourceSpecificResults
// section. To discover the context keys used by a set of policies, you can
// call GetContextKeysForCustomPolicy or GetContextKeysForPrincipalPolicy.
//
// If the response includes any keys in this list, then the reported results
// might be untrustworthy because the simulation could not completely evaluate
@ -6609,6 +6627,8 @@ type GetCredentialReportOutput struct {
_ struct{} `type:"structure"`
// Contains the credential report. The report is Base64-encoded.
//
// Content is automatically base64 encoded/decoded by the SDK.
Content []byte `type:"blob"`
// The date and time when the credential report was created, in ISO 8601 date-time
@ -9235,6 +9255,11 @@ func (s PolicyDetail) GoString() string {
type PolicyGroup struct {
_ struct{} `type:"structure"`
// The stable and unique string identifying the group. For more information
// about IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
// in the IAM User Guide.
GroupId *string `min:"16" type:"string"`
// The name (friendly name, not ARN) identifying the group.
GroupName *string `min:"1" type:"string"`
}
@ -9260,6 +9285,11 @@ func (s PolicyGroup) GoString() string {
type PolicyRole struct {
_ struct{} `type:"structure"`
// The stable and unique string identifying the role. For more information about
// IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
// in the IAM User Guide.
RoleId *string `min:"16" type:"string"`
// The name (friendly name, not ARN) identifying the role.
RoleName *string `min:"1" type:"string"`
}
@ -9285,6 +9315,11 @@ func (s PolicyRole) GoString() string {
type PolicyUser struct {
_ struct{} `type:"structure"`
// The stable and unique string identifying the user. For more information about
// IDs, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
// in the IAM User Guide.
UserId *string `min:"16" type:"string"`
// The name (friendly name, not ARN) identifying the user.
UserName *string `min:"1" type:"string"`
}
@ -9611,8 +9646,12 @@ type ResourceSpecificResult struct {
MatchedStatements []*Statement `type:"list"`
// A list of context keys that are required by the included input policies but
// that were not provided by one of the input parameters. To discover the context
// keys used by a set of policies, you can call GetContextKeysForCustomPolicy
// that were not provided by one of the input parameters. This list is used
// when a list of ARNs is included in the ResourceArns parameter instead of
// "*". If you do not specify individual resources, by setting ResourceArns
// to "*" or by not including the ResourceArns parameter, then any missing context
// values are instead included under the EvaluationResults section. To discover
// the context keys used by a set of policies, you can call GetContextKeysForCustomPolicy
// or GetContextKeysForPrincipalPolicy.
MissingContextValues []*string `type:"list"`
}
@ -11068,6 +11107,8 @@ type VirtualMFADevice struct {
// The Base32 seed defined as specified in RFC3548 (http://www.ietf.org/rfc/rfc3548.txt).
// The Base32StringSeed is Base64-encoded.
//
// Base32StringSeed is automatically base64 encoded/decoded by the SDK.
Base32StringSeed []byte `type:"blob"`
// The date and time on which the virtual MFA device was enabled.
@ -11077,6 +11118,8 @@ type VirtualMFADevice struct {
// where $virtualMFADeviceName is one of the create call arguments, AccountName
// is the user name if set (otherwise, the account ID otherwise), and Base32String
// is the seed in Base32 format. The Base32String value is Base64-encoded.
//
// QRCodePNG is automatically base64 encoded/decoded by the SDK.
QRCodePNG []byte `type:"blob"`
// The serial number associated with VirtualMFADevice.

View File

@ -1333,6 +1333,8 @@ type PutRecordInput struct {
// is serialized. When the data blob (the payload before base64-encoding) is
// added to the partition key size, the total size must not exceed the maximum
// record size (1 MB).
//
// Data is automatically base64 encoded/decoded by the SDK.
Data []byte `type:"blob" required:"true"`
// The hash value used to explicitly determine the shard the data record is
@ -1448,6 +1450,8 @@ type PutRecordsRequestEntry struct {
// is serialized. When the data blob (the payload before base64-encoding) is
// added to the partition key size, the total size must not exceed the maximum
// record size (1 MB).
//
// Data is automatically base64 encoded/decoded by the SDK.
Data []byte `type:"blob" required:"true"`
// The hash value used to determine explicitly the shard that the data record
@ -1523,6 +1527,8 @@ type Record struct {
// the blob in any way. When the data blob (the payload before base64-encoding)
// is added to the partition key size, the total size must not exceed the maximum
// record size (1 MB).
//
// Data is automatically base64 encoded/decoded by the SDK.
Data []byte `type:"blob" required:"true"`
// Identifies which shard in the stream the data record is assigned to.

View File

@ -1268,6 +1268,8 @@ type DecryptInput struct {
_ struct{} `type:"structure"`
// Ciphertext to be decrypted. The blob includes metadata.
//
// CiphertextBlob is automatically base64 encoded/decoded by the SDK.
CiphertextBlob []byte `min:"1" type:"blob" required:"true"`
// The encryption context. If this was specified in the Encrypt function, it
@ -1301,6 +1303,8 @@ type DecryptOutput struct {
// Decrypted plaintext data. This value may not be returned if the customer
// master key is not available or if you didn't have permission to use it.
//
// Plaintext is automatically base64 encoded/decoded by the SDK.
Plaintext []byte `min:"1" type:"blob"`
}
@ -1551,6 +1555,8 @@ type EncryptInput struct {
KeyId *string `min:"1" type:"string" required:"true"`
// Data to be encrypted.
//
// Plaintext is automatically base64 encoded/decoded by the SDK.
Plaintext []byte `min:"1" type:"blob" required:"true"`
}
@ -1569,6 +1575,8 @@ type EncryptOutput struct {
// The encrypted plaintext. If you are using the CLI, the value is Base64 encoded.
// Otherwise, it is not encoded.
//
// CiphertextBlob is automatically base64 encoded/decoded by the SDK.
CiphertextBlob []byte `min:"1" type:"blob"`
// The ID of the key used during encryption.
@ -1638,6 +1646,8 @@ type GenerateDataKeyOutput struct {
//
// If you are using the CLI, the value is Base64 encoded. Otherwise, it is
// not encoded.
//
// CiphertextBlob is automatically base64 encoded/decoded by the SDK.
CiphertextBlob []byte `min:"1" type:"blob"`
// System generated unique identifier of the key to be used to decrypt the encrypted
@ -1646,6 +1656,8 @@ type GenerateDataKeyOutput struct {
// Plaintext that contains the data key. Use this for encryption and decryption
// and then remove it from memory as soon as possible.
//
// Plaintext is automatically base64 encoded/decoded by the SDK.
Plaintext []byte `min:"1" type:"blob"`
}
@ -1708,6 +1720,8 @@ type GenerateDataKeyWithoutPlaintextOutput struct {
//
// If you are using the CLI, the value is Base64 encoded. Otherwise, it is
// not encoded.
//
// CiphertextBlob is automatically base64 encoded/decoded by the SDK.
CiphertextBlob []byte `min:"1" type:"blob"`
// System generated unique identifier of the key to be used to decrypt the encrypted
@ -1747,6 +1761,8 @@ type GenerateRandomOutput struct {
_ struct{} `type:"structure"`
// Plaintext that contains the unpredictable byte string.
//
// Plaintext is automatically base64 encoded/decoded by the SDK.
Plaintext []byte `min:"1" type:"blob"`
}
@ -2307,6 +2323,8 @@ type ReEncryptInput struct {
_ struct{} `type:"structure"`
// Ciphertext of the data to re-encrypt.
//
// CiphertextBlob is automatically base64 encoded/decoded by the SDK.
CiphertextBlob []byte `min:"1" type:"blob" required:"true"`
// Encryption context to be used when the data is re-encrypted.
@ -2347,6 +2365,8 @@ type ReEncryptOutput struct {
// The re-encrypted data. If you are using the CLI, the value is Base64 encoded.
// Otherwise, it is not encoded.
//
// CiphertextBlob is automatically base64 encoded/decoded by the SDK.
CiphertextBlob []byte `min:"1" type:"blob"`
// Unique identifier of the key used to re-encrypt the data.

View File

@ -1341,6 +1341,8 @@ type FunctionCode struct {
// AWS CLI, the SDKs or CLI will do the encoding for you). For more information
// about creating a .zip file, go to Execution Permissions (http://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html#lambda-intro-execution-role.html)
// in the AWS Lambda Developer Guide.
//
// ZipFile is automatically base64 encoded/decoded by the SDK.
ZipFile []byte `type:"blob"`
}
@ -2126,6 +2128,8 @@ type UpdateFunctionCodeInput struct {
S3ObjectVersion *string `min:"1" type:"string"`
// Base64-encoded .zip file containing your packaged source code.
//
// ZipFile is automatically base64 encoded/decoded by the SDK.
ZipFile []byte `type:"blob"`
}

View File

@ -34,13 +34,18 @@ func (c *Redshift) AuthorizeClusterSecurityGroupIngressRequest(input *AuthorizeC
// Adds an inbound (ingress) rule to an Amazon Redshift security group. Depending
// on whether the application accessing your cluster is running on the Internet
// or an EC2 instance, you can authorize inbound access to either a Classless
// Interdomain Routing (CIDR) IP address range or an EC2 security group. You
// can add as many as 20 ingress rules to an Amazon Redshift security group.
// or an Amazon EC2 instance, you can authorize inbound access to either a Classless
// Interdomain Routing (CIDR)/Internet Protocol (IP) range or to an Amazon EC2
// security group. You can add as many as 20 ingress rules to an Amazon Redshift
// security group.
//
// The EC2 security group must be defined in the AWS region where the cluster
// resides. For an overview of CIDR blocks, see the Wikipedia article on Classless
// Inter-Domain Routing (http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).
// If you authorize access to an Amazon EC2 security group, specify EC2SecurityGroupName
// and EC2SecurityGroupOwnerId. The Amazon EC2 security group and Amazon Redshift
// cluster must be in the same AWS region.
//
// If you authorize access to a CIDR/IP address range, specify CIDRIP. For
// an overview of CIDR blocks, see the Wikipedia article on Classless Inter-Domain
// Routing (http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).
//
// You must also associate the security group with a cluster so that clients
// running on these IP addresses or the EC2 instance are authorized to connect
@ -1669,6 +1674,37 @@ func (c *Redshift) DescribeSnapshotCopyGrants(input *DescribeSnapshotCopyGrantsI
return out, err
}
const opDescribeTableRestoreStatus = "DescribeTableRestoreStatus"
// DescribeTableRestoreStatusRequest generates a request for the
// DescribeTableRestoreStatus operation. It only builds the request.Request;
// call Send on the returned request (or use the DescribeTableRestoreStatus
// wrapper) to execute it. The returned output pointer is the struct that
// will be populated with the response.
func (c *Redshift) DescribeTableRestoreStatusRequest(input *DescribeTableRestoreStatusInput) (req *request.Request, output *DescribeTableRestoreStatusOutput) {
	op := &request.Operation{
		Name:       opDescribeTableRestoreStatus,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	// Callers may pass nil; substitute an empty input struct.
	if input == nil {
		input = &DescribeTableRestoreStatusInput{}
	}

	req = c.newRequest(op, input, output)
	// NOTE: output is allocated after newRequest and bound via req.Data —
	// this ordering is the code-generated SDK pattern; do not "simplify" it.
	output = &DescribeTableRestoreStatusOutput{}
	req.Data = output
	return
}
// DescribeTableRestoreStatus lists the status of one or more table restore
// requests made using the RestoreTableFromClusterSnapshot API action. If no
// TableRestoreRequestId is supplied, the status of every in-progress table
// restore request is returned; otherwise only the status of the identified
// request is returned.
func (c *Redshift) DescribeTableRestoreStatus(input *DescribeTableRestoreStatusInput) (*DescribeTableRestoreStatusOutput, error) {
	r, out := c.DescribeTableRestoreStatusRequest(input)
	return out, r.Send()
}
const opDescribeTags = "DescribeTags"
// DescribeTagsRequest generates a request for the DescribeTags operation.
@ -2121,6 +2157,45 @@ func (c *Redshift) RestoreFromClusterSnapshot(input *RestoreFromClusterSnapshotI
return out, err
}
const opRestoreTableFromClusterSnapshot = "RestoreTableFromClusterSnapshot"
// RestoreTableFromClusterSnapshotRequest generates a request for the
// RestoreTableFromClusterSnapshot operation. It only builds the
// request.Request; call Send on the returned request (or use the
// RestoreTableFromClusterSnapshot wrapper) to execute it. The returned
// output pointer is the struct that will be populated with the response.
func (c *Redshift) RestoreTableFromClusterSnapshotRequest(input *RestoreTableFromClusterSnapshotInput) (req *request.Request, output *RestoreTableFromClusterSnapshotOutput) {
	op := &request.Operation{
		Name:       opRestoreTableFromClusterSnapshot,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	// Callers may pass nil; substitute an empty input struct.
	if input == nil {
		input = &RestoreTableFromClusterSnapshotInput{}
	}

	req = c.newRequest(op, input, output)
	// NOTE: output is allocated after newRequest and bound via req.Data —
	// this ordering is the code-generated SDK pattern; do not "simplify" it.
	output = &RestoreTableFromClusterSnapshotOutput{}
	req.Data = output
	return
}
// RestoreTableFromClusterSnapshot creates a new table from a table in an
// Amazon Redshift cluster snapshot. The new table must be created within the
// Amazon Redshift cluster that the snapshot was taken from.
//
// This action cannot overwrite an existing table: you may not restore a
// table with the same name as a table that already exists in the cluster.
// To replace an original table with its restored copy, first rename or drop
// the original, then pass its original name as the NewTableName parameter in
// the call to RestoreTableFromClusterSnapshot.
func (c *Redshift) RestoreTableFromClusterSnapshot(input *RestoreTableFromClusterSnapshotInput) (*RestoreTableFromClusterSnapshotOutput, error) {
	r, out := c.RestoreTableFromClusterSnapshotRequest(input)
	return out, r.Send()
}
const opRevokeClusterSecurityGroupIngress = "RevokeClusterSecurityGroupIngress"
// RevokeClusterSecurityGroupIngressRequest generates a request for the RevokeClusterSecurityGroupIngress operation.
@ -2230,7 +2305,6 @@ func (s AccountWithRestoreAccess) GoString() string {
return s.String()
}
// ???
type AuthorizeClusterSecurityGroupIngressInput struct {
_ struct{} `type:"structure"`
@ -2385,8 +2459,10 @@ type Cluster struct {
// cross-region snapshot copy.
ClusterSnapshotCopyStatus *ClusterSnapshotCopyStatus `type:"structure"`
// The current state of this cluster. Possible values include available, creating,
// deleting, rebooting, renaming, and resizing.
// The current state of the cluster. Possible values are: available creating
// deleting final-snapshot hardware-failure incompatible-hsm incompatible-network
// incompatible-parameters incompatible-restore modifying rebooting renaming
// resizing rotating-keys storage-full updating-hsm
ClusterStatus *string `type:"string"`
// The name of the subnet group that is associated with the cluster. This parameter
@ -2401,7 +2477,7 @@ type Cluster struct {
// was not specified, a database named "dev" was created by default.
DBName *string `type:"string"`
// Describes the status of the elastic IP (EIP) address.
// The status of the elastic IP (EIP) address.
ElasticIpStatus *ElasticIpStatus `type:"structure"`
// If true, data in the cluster is encrypted at rest.
@ -2521,9 +2597,6 @@ func (s ClusterParameterGroup) GoString() string {
return s.String()
}
// Contains the output from the ModifyClusterParameterGroup and ResetClusterParameterGroup
// actions and indicate the parameter group involved and the status of the operation
// on the parameter group.
type ClusterParameterGroupNameMessage struct {
_ struct{} `type:"structure"`
@ -2585,14 +2658,14 @@ type ClusterParameterStatus struct {
// with the database, waiting for a cluster reboot, or encountered an error
// when being applied.
//
// The following are possible statuses and descriptions. in-sync: The parameter
// value is in sync with the database. pending-reboot: The parameter value
// will be applied after the cluster reboots. applying: The parameter value
// is being applied to the database. invalid-parameter: Cannot apply the parameter
// value because it has an invalid value or syntax. apply-deferred: The parameter
// The following are possible statuses and descriptions. in-sync: The parameter
// value is in sync with the database. pending-reboot: The parameter value will
// be applied after the cluster reboots. applying: The parameter value is being
// applied to the database. invalid-parameter: Cannot apply the parameter value
// because it has an invalid value or syntax. apply-deferred: The parameter
// contains static property changes. The changes are deferred until the cluster
// reboots. apply-error: Cannot connect to the cluster. The parameter change
// will be applied after the cluster reboots. unknown-error: Cannot apply the
// reboots. apply-error: Cannot connect to the cluster. The parameter change
// will be applied after the cluster reboots. unknown-error: Cannot apply the
// parameter change right now. The change will be applied after the cluster
// reboots.
ParameterApplyStatus *string `type:"string"`
@ -2643,7 +2716,7 @@ func (s ClusterSecurityGroup) GoString() string {
return s.String()
}
// Describes a security group.
// Describes a cluster security group.
type ClusterSecurityGroupMembership struct {
_ struct{} `type:"structure"`
@ -2810,6 +2883,9 @@ func (s CopyClusterSnapshotOutput) GoString() string {
type CreateClusterInput struct {
_ struct{} `type:"structure"`
// Reserved.
AdditionalInfo *string `type:"string"`
// If true, major version upgrades can be applied during the maintenance window
// to the Amazon Redshift engine that is running on the cluster.
//
@ -2878,8 +2954,8 @@ type CreateClusterInput struct {
// outside virtual private cloud (VPC).
ClusterSubnetGroupName *string `type:"string"`
// The type of the cluster. When cluster type is specified as single-node,
// the NumberOfNodes parameter is not required. multi-node, the NumberOfNodes
// The type of the cluster. When cluster type is specified as single-node,
// the NumberOfNodes parameter is not required. multi-node, the NumberOfNodes
// parameter is required.
//
// Valid Values: multi-node | single-node
@ -3109,7 +3185,6 @@ func (s CreateClusterParameterGroupOutput) GoString() string {
return s.String()
}
// ???
type CreateClusterSecurityGroupInput struct {
_ struct{} `type:"structure"`
@ -3326,6 +3401,7 @@ func (s CreateEventSubscriptionInput) GoString() string {
type CreateEventSubscriptionOutput struct {
_ struct{} `type:"structure"`
// Describes event subscriptions.
EventSubscription *EventSubscription `type:"structure"`
}
@ -4069,7 +4145,6 @@ func (s DescribeClusterParametersOutput) GoString() string {
return s.String()
}
// ???
type DescribeClusterSecurityGroupsInput struct {
_ struct{} `type:"structure"`
@ -4128,7 +4203,6 @@ func (s DescribeClusterSecurityGroupsInput) GoString() string {
return s.String()
}
// Contains the output from the DescribeClusterSecurityGroups action.
type DescribeClusterSecurityGroupsOutput struct {
_ struct{} `type:"structure"`
@ -4542,7 +4616,7 @@ type DescribeEventCategoriesInput struct {
// The source type, such as cluster or parameter group, to which the described
// event categories apply.
//
// Valid values: cluster, snapshot, parameter group, and security group.
// Valid values: cluster, cluster-snapshot, cluster-parameter-group, and cluster-security-group.
SourceType *string `type:"string"`
}
@ -4711,7 +4785,6 @@ func (s DescribeEventsInput) GoString() string {
return s.String()
}
// Contains the output from the DescribeEvents action.
type DescribeEventsOutput struct {
_ struct{} `type:"structure"`
@ -4871,7 +4944,7 @@ func (s DescribeHsmConfigurationsInput) GoString() string {
type DescribeHsmConfigurationsOutput struct {
_ struct{} `type:"structure"`
// A list of Amazon Redshift HSM configurations.
// A list of HsmConfiguration objects.
HsmConfigurations []*HsmConfiguration `locationNameList:"HsmConfiguration" type:"list"`
// A value that indicates the starting point for the next set of response records
@ -4895,7 +4968,7 @@ func (s DescribeHsmConfigurationsOutput) GoString() string {
type DescribeLoggingStatusInput struct {
_ struct{} `type:"structure"`
// The identifier of the cluster to get the logging status from.
// The identifier of the cluster from which to get the logging status.
//
// Example: examplecluster
ClusterIdentifier *string `type:"string" required:"true"`
@ -4967,7 +5040,7 @@ type DescribeOrderableClusterOptionsOutput struct {
Marker *string `type:"string"`
// An OrderableClusterOption structure containing information about orderable
// options for the Cluster.
// options for the cluster.
OrderableClusterOptions []*OrderableClusterOption `locationNameList:"OrderableClusterOption" type:"list"`
}
@ -5016,7 +5089,6 @@ func (s DescribeReservedNodeOfferingsInput) GoString() string {
return s.String()
}
// Contains the output from the DescribeReservedNodeOfferings action.
type DescribeReservedNodeOfferingsOutput struct {
_ struct{} `type:"structure"`
@ -5027,7 +5099,7 @@ type DescribeReservedNodeOfferingsOutput struct {
// records have been retrieved for the request.
Marker *string `type:"string"`
// A list of reserved node offerings.
// A list of ReservedNodeOffering objects.
ReservedNodeOfferings []*ReservedNodeOffering `locationNameList:"ReservedNodeOffering" type:"list"`
}
@ -5075,7 +5147,6 @@ func (s DescribeReservedNodesInput) GoString() string {
return s.String()
}
// Contains the output from the DescribeReservedNodes action.
type DescribeReservedNodesOutput struct {
_ struct{} `type:"structure"`
@ -5086,7 +5157,7 @@ type DescribeReservedNodesOutput struct {
// records have been retrieved for the request.
Marker *string `type:"string"`
// The list of reserved nodes.
// The list of ReservedNode objects.
ReservedNodes []*ReservedNode `locationNameList:"ReservedNode" type:"list"`
}
@ -5250,7 +5321,6 @@ func (s DescribeSnapshotCopyGrantsInput) GoString() string {
return s.String()
}
// The result of the snapshot copy grant.
type DescribeSnapshotCopyGrantsOutput struct {
_ struct{} `type:"structure"`
@ -5265,7 +5335,7 @@ type DescribeSnapshotCopyGrantsOutput struct {
// or the Marker parameter, but not both.
Marker *string `type:"string"`
// The list of snapshot copy grants.
// The list of SnapshotCopyGrant objects.
SnapshotCopyGrants []*SnapshotCopyGrant `locationNameList:"SnapshotCopyGrant" type:"list"`
}
@ -5279,7 +5349,59 @@ func (s DescribeSnapshotCopyGrantsOutput) GoString() string {
return s.String()
}
// Contains the output from the DescribeTags action.
// DescribeTableRestoreStatusInput is the input for the
// DescribeTableRestoreStatus API action.
type DescribeTableRestoreStatusInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Redshift cluster that the table is being restored to.
	ClusterIdentifier *string `type:"string"`

	// An optional pagination token provided by a previous DescribeTableRestoreStatus
	// request. If this parameter is specified, the response includes only records
	// beyond the marker, up to the value specified by the MaxRecords parameter.
	Marker *string `type:"string"`

	// The maximum number of records to include in the response. If more records
	// exist than the specified MaxRecords value, a pagination token called a marker
	// is included in the response so that the remaining results can be retrieved.
	MaxRecords *int64 `type:"integer"`

	// The identifier of the table restore request to return status for. If you
	// don't specify a TableRestoreRequestId value, then DescribeTableRestoreStatus
	// returns the status of all in-progress table restore requests.
	TableRestoreRequestId *string `type:"string"`
}
// String returns the string representation of the input, rendered by
// awsutil.Prettify.
func (in DescribeTableRestoreStatusInput) String() string {
	return awsutil.Prettify(in)
}
// GoString returns the string representation; for this type it is the same
// as String.
func (in DescribeTableRestoreStatusInput) GoString() string {
	return in.String()
}
// DescribeTableRestoreStatusOutput is the output of the
// DescribeTableRestoreStatus API action.
type DescribeTableRestoreStatusOutput struct {
	_ struct{} `type:"structure"`

	// A pagination token that can be used in a subsequent DescribeTableRestoreStatus
	// request.
	Marker *string `type:"string"`

	// A list of status details for one or more table restore requests.
	TableRestoreStatusDetails []*TableRestoreStatus `locationNameList:"TableRestoreStatus" type:"list"`
}
// String returns the string representation of the output, rendered by
// awsutil.Prettify.
func (out DescribeTableRestoreStatusOutput) String() string {
	return awsutil.Prettify(out)
}
// GoString returns the string representation; for this type it is the same
// as String.
func (out DescribeTableRestoreStatusOutput) GoString() string {
	return out.String()
}
type DescribeTagsInput struct {
_ struct{} `type:"structure"`
@ -5337,7 +5459,6 @@ func (s DescribeTagsInput) GoString() string {
return s.String()
}
// Contains the output from the DescribeTags action.
type DescribeTagsOutput struct {
_ struct{} `type:"structure"`
@ -5454,7 +5575,7 @@ type ElasticIpStatus struct {
// The elastic IP (EIP) address for the cluster.
ElasticIp *string `type:"string"`
// Describes the status of the elastic IP (EIP) address.
// The status of the elastic IP (EIP) address.
Status *string `type:"string"`
}
@ -5621,14 +5742,15 @@ func (s Event) GoString() string {
return s.String()
}
// Describes event categories.
type EventCategoriesMap struct {
_ struct{} `type:"structure"`
// The events in the event category.
Events []*EventInfoMap `locationNameList:"EventInfoMap" type:"list"`
// The Amazon Redshift source type, such as cluster or cluster-snapshot, that
// the returned categories belong to.
// The source type, such as cluster or cluster-snapshot, that the returned categories
// belong to.
SourceType *string `type:"string"`
}
@ -5642,6 +5764,7 @@ func (s EventCategoriesMap) GoString() string {
return s.String()
}
// Describes event information.
type EventInfoMap struct {
_ struct{} `type:"structure"`
@ -5670,6 +5793,7 @@ func (s EventInfoMap) GoString() string {
return s.String()
}
// Describes event subscriptions.
type EventSubscription struct {
_ struct{} `type:"structure"`
@ -5795,6 +5919,7 @@ func (s HsmConfiguration) GoString() string {
return s.String()
}
// Describes the status of changes to HSM settings.
type HsmStatus struct {
_ struct{} `type:"structure"`
@ -5860,7 +5985,7 @@ type LoggingStatus struct {
// The last time when logs failed to be delivered.
LastFailureTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
// The last time when logs were delivered.
// The last time that logs were delivered.
LastSuccessfulDeliveryTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
// true if logging is on, false if logging is off.
@ -5952,6 +6077,14 @@ type ModifyClusterInput struct {
// Example: 1.0
ClusterVersion *string `type:"string"`
// The Elastic IP (EIP) address for the cluster.
//
// Constraints: The cluster must be provisioned in EC2-VPC and publicly-accessible
// through an Internet gateway. For more information about provisioning clusters
// in EC2-VPC, go to Supported Platforms to Launch Your Cluster (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#cluster-platforms)
// in the Amazon Redshift Cluster Management Guide.
ElasticIp *string `type:"string"`
// Specifies the name of the HSM client certificate the Amazon Redshift cluster
// uses to retrieve the data encryption keys stored in an HSM.
HsmClientCertificateIdentifier *string `type:"string"`
@ -6034,6 +6167,10 @@ type ModifyClusterInput struct {
// Constraints: Must be at least 30 minutes.
PreferredMaintenanceWindow *string `type:"string"`
// If true, the cluster can be accessed from a public network. Only clusters
// in VPCs can be set to be publicly available.
PubliclyAccessible *bool `type:"boolean"`
// A list of virtual private cloud (VPC) security groups to be associated with
// the cluster.
VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"`
@ -6195,6 +6332,7 @@ func (s ModifyEventSubscriptionInput) GoString() string {
// ModifyEventSubscriptionOutput is the response to a ModifyEventSubscription
// request.
type ModifyEventSubscriptionOutput struct {
	_ struct{} `type:"structure"`

	// The modified event subscription, as returned by the service.
	EventSubscription *EventSubscription `type:"structure"`
}
@ -6291,7 +6429,12 @@ type Parameter struct {
// The valid range of values for the parameter.
AllowedValues *string `type:"string"`
// Specifies how to apply the parameter. Supported value: static.
// Specifies how to apply the WLM configuration parameter. Some properties can
// be applied dynamically, while other properties require that any associated
// clusters be rebooted for the configuration changes to be applied. For more
// information about parameters and parameter groups, go to Amazon Redshift
// Parameter Groups (http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
// in the Amazon Redshift Cluster Management Guide.
ApplyType *string `type:"string" enum:"ParameterApplyType"`
// The data type of the parameter.
@ -6352,6 +6495,10 @@ type PendingModifiedValues struct {
// The pending or in-progress change of the number of nodes in the cluster.
NumberOfNodes *int64 `type:"integer"`
// The pending or in-progress change of the ability to connect to the cluster
// from the public network.
PubliclyAccessible *bool `type:"boolean"`
}
// String returns the string representation
@ -6367,7 +6514,7 @@ func (s PendingModifiedValues) GoString() string {
type PurchaseReservedNodeOfferingInput struct {
_ struct{} `type:"structure"`
// The number of reserved nodes you want to purchase.
// The number of reserved nodes that you want to purchase.
//
// Default: 1
NodeCount *int64 `type:"integer"`
@ -6597,6 +6744,9 @@ func (s ResetClusterParameterGroupInput) GoString() string {
type RestoreFromClusterSnapshotInput struct {
_ struct{} `type:"structure"`
// Reserved.
AdditionalInfo *string `type:"string"`
// If true, major version upgrades can be applied during the maintenance window
// to the Amazon Redshift engine that is running on the cluster.
//
@ -6798,7 +6948,63 @@ func (s RestoreStatus) GoString() string {
return s.String()
}
// RestoreTableFromClusterSnapshotInput is the input for the
// RestoreTableFromClusterSnapshot operation, which restores a single table
// from a cluster snapshot into an existing cluster.
type RestoreTableFromClusterSnapshotInput struct {
	_ struct{} `type:"structure"`

	// The identifier of the Amazon Redshift cluster to restore the table to.
	ClusterIdentifier *string `type:"string" required:"true"`

	// The name of the table to create as a result of the current request.
	NewTableName *string `type:"string" required:"true"`

	// The identifier of the snapshot to restore the table from. This snapshot must
	// have been created from the Amazon Redshift cluster specified by the ClusterIdentifier
	// parameter.
	SnapshotIdentifier *string `type:"string" required:"true"`

	// The name of the source database that contains the table to restore from.
	SourceDatabaseName *string `type:"string" required:"true"`

	// The name of the source schema that contains the table to restore from.
	SourceSchemaName *string `type:"string"`

	// The name of the source table to restore from.
	SourceTableName *string `type:"string" required:"true"`

	// The name of the database to restore the table to.
	TargetDatabaseName *string `type:"string"`

	// The name of the schema to restore the table to.
	TargetSchemaName *string `type:"string"`
}
// String returns the string representation of the struct, rendered by
// awsutil.Prettify.
func (s RestoreTableFromClusterSnapshotInput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation; it delegates to String.
func (s RestoreTableFromClusterSnapshotInput) GoString() string {
	return s.String()
}
// RestoreTableFromClusterSnapshotOutput is the response to a
// RestoreTableFromClusterSnapshot request.
type RestoreTableFromClusterSnapshotOutput struct {
	_ struct{} `type:"structure"`

	// Describes the status of a RestoreTableFromClusterSnapshot operation.
	TableRestoreStatus *TableRestoreStatus `type:"structure"`
}
// String returns the string representation of the struct, rendered by
// awsutil.Prettify.
func (s RestoreTableFromClusterSnapshotOutput) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation; it delegates to String.
func (s RestoreTableFromClusterSnapshotOutput) GoString() string {
	return s.String()
}
type RevokeClusterSecurityGroupIngressInput struct {
_ struct{} `type:"structure"`
@ -7019,9 +7225,9 @@ type Snapshot struct {
SourceRegion *string `type:"string"`
// The snapshot status. The value of the status depends on the API operation
// used. CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
// DescribeClusterSnapshots returns status as "creating", "available", "final
// snapshot", or "failed". DeleteClusterSnapshot returns status as "deleted".
Status *string `type:"string"`
// The list of tags for the cluster snapshot.
@ -7101,6 +7307,68 @@ func (s Subnet) GoString() string {
return s.String()
}
// TableRestoreStatus describes the status of a RestoreTableFromClusterSnapshot
// operation.
type TableRestoreStatus struct {
	_ struct{} `type:"structure"`

	// The identifier of the Amazon Redshift cluster that the table is being restored
	// to.
	ClusterIdentifier *string `type:"string"`

	// A description of the status of the table restore request. Status values include
	// SUCCEEDED, FAILED, CANCELLED, PENDING, IN_PROGRESS.
	Message *string `type:"string"`

	// The name of the table to create as a result of the table restore request.
	NewTableName *string `type:"string"`

	// The amount of data restored to the new table so far, in megabytes (MB).
	ProgressInMegaBytes *int64 `type:"long"`

	// The time that the table restore request was made, in Universal Coordinated
	// Time (UTC).
	RequestTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`

	// The identifier of the snapshot that the table is being restored from.
	SnapshotIdentifier *string `type:"string"`

	// The name of the source database that contains the table being restored.
	SourceDatabaseName *string `type:"string"`

	// The name of the source schema that contains the table being restored.
	SourceSchemaName *string `type:"string"`

	// The name of the source table being restored.
	SourceTableName *string `type:"string"`

	// A value that describes the current state of the table restore request.
	//
	// Valid Values: SUCCEEDED, FAILED, CANCELLED, PENDING, IN_PROGRESS
	//
	// NOTE(review): the generated TableRestoreStatusType enum uses the single-L
	// spelling "CANCELED" — confirm which form the service actually returns.
	Status *string `type:"string" enum:"TableRestoreStatusType"`

	// The unique identifier for the table restore request.
	TableRestoreRequestId *string `type:"string"`

	// The name of the database to restore the table to.
	TargetDatabaseName *string `type:"string"`

	// The name of the schema to restore the table to.
	TargetSchemaName *string `type:"string"`

	// The total amount of data to restore to the new table, in megabytes (MB).
	TotalDataInMegaBytes *int64 `type:"long"`
}
// String returns the string representation of the struct, rendered by
// awsutil.Prettify.
func (s TableRestoreStatus) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation; it delegates to String.
func (s TableRestoreStatus) GoString() string {
	return s.String()
}
// A tag consisting of a name/value pair for a resource.
type Tag struct {
_ struct{} `type:"structure"`
@ -7157,8 +7425,10 @@ func (s TaggedResource) GoString() string {
// VpcSecurityGroupMembership describes the membership of a cluster in a VPC
// security group, including its current status.
type VpcSecurityGroupMembership struct {
	_ struct{} `type:"structure"`

	// The status of the VPC security group.
	Status *string `type:"string"`

	// The identifier of the VPC security group.
	VpcSecurityGroupId *string `type:"string"`
}
@ -7189,3 +7459,16 @@ const (
// @enum SourceType
SourceTypeClusterSnapshot = "cluster-snapshot"
)
// Values that the TableRestoreStatus.Status field may take
// (TableRestoreStatusType enum).
const (
	// @enum TableRestoreStatusType
	TableRestoreStatusTypePending = "PENDING"
	// @enum TableRestoreStatusType
	TableRestoreStatusTypeInProgress = "IN_PROGRESS"
	// @enum TableRestoreStatusType
	TableRestoreStatusTypeSucceeded = "SUCCEEDED"
	// @enum TableRestoreStatusType
	TableRestoreStatusTypeFailed = "FAILED"
	// @enum TableRestoreStatusType
	// NOTE(review): the API model uses the single-L spelling "CANCELED" here,
	// while field documentation elsewhere says "CANCELLED"; the value is kept
	// exactly as generated since it is compared against service responses.
	TableRestoreStatusTypeCanceled = "CANCELED"
)

View File

@ -5,12 +5,21 @@ import (
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/restxml"
)
// init wires Route 53-specific customizations into the generated client:
// every request gets a URL-sanitizing build handler, and the
// ChangeResourceRecordSets operation swaps the generic REST-XML error
// unmarshaler for a custom one that understands the non-standard
// InvalidChangeBatch error payload.
func init() {
	initClient = func(c *client.Client) {
		// sanitizeURL presumably rewrites "/%2F...%2F" escapes in the request
		// path (see reSanitizeURL below) — confirm against its definition.
		c.Handlers.Build.PushBack(sanitizeURL)
	}
	initRequest = func(r *request.Request) {
		switch r.Operation.Name {
		case opChangeResourceRecordSets:
			// Remove the default handler first so both are never invoked for
			// the same response.
			r.Handlers.UnmarshalError.Remove(restxml.UnmarshalErrorHandler)
			r.Handlers.UnmarshalError.PushBack(unmarshalChangeResourceRecordSetsError)
		}
	}
}
var reSanitizeURL = regexp.MustCompile(`\/%2F\w+%2F`)

View File

@ -0,0 +1,77 @@
package route53
import (
"bytes"
"encoding/xml"
"io/ioutil"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/restxml"
)
// baseXMLErrorResponse captures only the root element name of an XML error
// payload, so the response shape can be identified before full decoding.
type baseXMLErrorResponse struct {
	XMLName xml.Name
}
// standardXMLErrorResponse models the conventional <ErrorResponse> payload
// with a single Code/Message pair and a request ID.
type standardXMLErrorResponse struct {
	XMLName   xml.Name `xml:"ErrorResponse"`
	Code      string   `xml:"Error>Code"`
	Message   string   `xml:"Error>Message"`
	RequestID string   `xml:"RequestId"`
}
// invalidChangeBatchXMLErrorResponse models Route 53's non-standard
// <InvalidChangeBatch> payload, which carries a list of messages rather than
// a single Code/Message pair.
type invalidChangeBatchXMLErrorResponse struct {
	XMLName  xml.Name `xml:"InvalidChangeBatch"`
	Messages []string `xml:"Messages>Message"`
}
// unmarshalChangeResourceRecordSetsError inspects the XML error payload of a
// failed ChangeResourceRecordSets call and routes it to the right decoder:
// InvalidChangeBatch errors use a non-standard XML shape, while every other
// error falls through to the generic REST-XML error handler.
func unmarshalChangeResourceRecordSetsError(r *request.Request) {
	defer r.HTTPResponse.Body.Close()

	body, readErr := ioutil.ReadAll(r.HTTPResponse.Body)
	if readErr != nil {
		r.Error = awserr.New("SerializationError", "failed to read Route53 XML error response", readErr)
		return
	}

	// Decode just the root element name to identify the payload shape.
	var probe baseXMLErrorResponse
	if decodeErr := xml.Unmarshal(body, &probe); decodeErr != nil {
		r.Error = awserr.New("SerializationError", "failed to decode Route53 XML error response", decodeErr)
		return
	}

	if probe.XMLName.Local == "InvalidChangeBatch" {
		unmarshalInvalidChangeBatchError(r, body)
		return
	}

	// Restore the (already consumed) body so the generic REST-XML error
	// handler can read it again.
	r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(body))
	restxml.UnmarshalError(r)
}
// unmarshalInvalidChangeBatchError decodes Route 53's non-standard
// InvalidChangeBatch XML error payload, which reports one message per failed
// change instead of the usual Code/Message pair, and surfaces every message
// as its own error inside a batch error attached to the request.
func unmarshalInvalidChangeBatchError(r *request.Request, requestBody []byte) {
	resp := &invalidChangeBatchXMLErrorResponse{}
	if err := xml.Unmarshal(requestBody, resp); err != nil {
		r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err)
		return
	}

	const errorCode = "InvalidChangeBatch"

	// Pre-size to the number of messages; also avoid naming the local
	// "errors", which shadows the standard-library package name.
	batchErrs := make([]error, 0, len(resp.Messages))
	for _, msg := range resp.Messages {
		batchErrs = append(batchErrs, awserr.New(errorCode, msg, nil))
	}

	// Message fixed from the original misspelling "occured".
	r.Error = awserr.NewRequestFailure(
		awserr.NewBatchError(errorCode, "ChangeBatch errors occurred", batchErrs),
		r.HTTPResponse.StatusCode,
		r.RequestID,
	)
}

View File

@ -297,6 +297,7 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput)
return
}
// Deletes the replication configuration from the bucket.
func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
req, out := c.DeleteBucketReplicationRequest(input)
err := req.Send()
@ -688,6 +689,7 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req
return
}
// Deprecated, see the GetBucketReplicationConfiguration operation.
func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
req, out := c.GetBucketReplicationRequest(input)
err := req.Send()
@ -1670,6 +1672,26 @@ func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput,
return out, err
}
// AbortIncompleteMultipartUpload specifies the number of days since the
// initiation of an incomplete multipart upload that Lifecycle will wait
// before permanently removing all parts of the upload.
type AbortIncompleteMultipartUpload struct {
	_ struct{} `type:"structure"`

	// Indicates the number of days that must pass since initiation for Lifecycle
	// to abort an Incomplete Multipart Upload.
	DaysAfterInitiation *int64 `type:"integer"`
}
// String returns the string representation of the struct, rendered by
// awsutil.Prettify.
func (s AbortIncompleteMultipartUpload) String() string {
	return awsutil.Prettify(s)
}
// GoString returns the string representation; it delegates to String.
func (s AbortIncompleteMultipartUpload) GoString() string {
	return s.String()
}
type AbortMultipartUploadInput struct {
_ struct{} `type:"structure"`
@ -2396,6 +2418,13 @@ func (s CreateMultipartUploadInput) GoString() string {
type CreateMultipartUploadOutput struct {
_ struct{} `type:"structure"`
// Date when multipart upload will become eligible for abort operation by lifecycle.
AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
// Id of the lifecycle rule that makes a multipart upload eligible for abort
// operation.
AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
// Name of the bucket to which the multipart upload was initiated.
Bucket *string `locationName:"Bucket" type:"string"`
@ -3108,7 +3137,7 @@ func (s GetBucketLoggingOutput) GoString() string {
// GetBucketNotificationConfigurationRequest is the input for retrieving a
// bucket's notification configuration.
type GetBucketNotificationConfigurationRequest struct {
	_ struct{} `type:"structure"`

	// Name of the bucket to get the notification configuration for.
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}
@ -3972,6 +4001,12 @@ type LifecycleExpiration struct {
// Indicates the lifetime, in days, of the objects that are subject to the rule.
// The value must be a non-zero positive integer.
Days *int64 `type:"integer"`
// Indicates whether Amazon S3 will remove a delete marker with no noncurrent
// versions. If set to true, the delete marker will be expired; if set to false
// the policy takes no action. This cannot be specified with Days or Date in
// a Lifecycle Expiration Policy.
ExpiredObjectDeleteMarker *bool `type:"boolean"`
}
// String returns the string representation
@ -3987,6 +4022,10 @@ func (s LifecycleExpiration) GoString() string {
type LifecycleRule struct {
_ struct{} `type:"structure"`
// Specifies the days since the initiation of an Incomplete Multipart Upload
// that Lifecycle will wait before permanently removing all parts of the upload.
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
Expiration *LifecycleExpiration `type:"structure"`
// Unique identifier for the rule. The value cannot be longer than 255 characters.
@ -4359,6 +4398,13 @@ func (s ListPartsInput) GoString() string {
type ListPartsOutput struct {
_ struct{} `type:"structure"`
// Date when multipart upload will become eligible for abort operation by lifecycle.
AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
// Id of the lifecycle rule that makes a multipart upload eligible for abort
// operation.
AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
// Name of the bucket to which the multipart upload was initiated.
Bucket *string `type:"string"`
@ -5228,6 +5274,7 @@ type PutObjectInput struct {
// Object data.
Body io.ReadSeeker `type:"blob"`
// Name of the bucket to which the PUT operation was initiated.
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
// Specifies caching behavior along the request/reply chain.
@ -5266,6 +5313,7 @@ type PutObjectInput struct {
// Allows grantee to write the ACL for the applicable object.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
// Object key for which the PUT operation was initiated.
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// A map of metadata to store with the object in S3.
@ -5641,6 +5689,10 @@ func (s RoutingRule) GoString() string {
type Rule struct {
_ struct{} `type:"structure"`
// Specifies the days since the initiation of an Incomplete Multipart Upload
// that Lifecycle will wait before permanently removing all parts of the upload.
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
Expiration *LifecycleExpiration `type:"structure"`
// Unique identifier for the rule. The value cannot be longer than 255 characters.
@ -5947,14 +5999,17 @@ func (s UploadPartCopyOutput) GoString() string {
type UploadPartInput struct {
_ struct{} `type:"structure" payload:"Body"`
// Object data.
Body io.ReadSeeker `type:"blob"`
// Name of the bucket to which the multipart upload was initiated.
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
// Size of the body in bytes. This parameter is useful when the size of the
// body cannot be determined automatically.
ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"`
// Object key for which the multipart upload was initiated.
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// Part number of part being uploaded. This is a positive integer between 1

View File

@ -4,6 +4,7 @@ import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
@ -20,6 +21,7 @@ type xmlErrorResponse struct {
func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
r.Error = awserr.NewRequestFailure(

View File

@ -18,6 +18,12 @@ func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
Argument: "",
Expected: 200,
},
{
State: "success",
Matcher: "status",
Argument: "",
Expected: 301,
},
{
State: "success",
Matcher: "status",

View File

@ -1608,6 +1608,8 @@ type MessageAttributeValue struct {
// Binary type attributes can store any binary data, for example, compressed
// data, encrypted data, or images.
//
// BinaryValue is automatically base64 encoded/decoded by the SDK.
BinaryValue []byte `type:"blob"`
// Amazon SNS supports the following logical data types: String, Number, and

View File

@ -1393,6 +1393,8 @@ type MessageAttributeValue struct {
// Binary type attributes can store any binary data, for example, compressed
// data, encrypted data, or images.
//
// BinaryValue is automatically base64 encoded/decoded by the SDK.
BinaryValue []byte `type:"blob"`
// Amazon SQS supports the following logical data types: String, Number, and