mirror of
https://github.com/grafana/grafana.git
synced 2024-11-22 08:56:43 -06:00
Merge branch 'cloudwatch' of https://github.com/mtanda/grafana into cloudwatch
This commit is contained in:
commit
ffbf70af25
39
Godeps/Godeps.json
generated
39
Godeps/Godeps.json
generated
@ -18,6 +18,41 @@
|
||||
"ImportPath": "github.com/Unknwon/macaron",
|
||||
"Rev": "93de4f3fad97bf246b838f828e2348f46f21f20a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws",
|
||||
"Comment": "v0.7.3",
|
||||
"Rev": "bed164a424e75154a40550c04c313ef51a7bb275"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/internal/endpoints",
|
||||
"Comment": "v0.7.3",
|
||||
"Rev": "bed164a424e75154a40550c04c313ef51a7bb275"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/query",
|
||||
"Comment": "v0.7.3",
|
||||
"Rev": "bed164a424e75154a40550c04c313ef51a7bb275"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/rest",
|
||||
"Comment": "v0.7.3",
|
||||
"Rev": "bed164a424e75154a40550c04c313ef51a7bb275"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil",
|
||||
"Comment": "v0.7.3",
|
||||
"Rev": "bed164a424e75154a40550c04c313ef51a7bb275"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/internal/signer/v4",
|
||||
"Comment": "v0.7.3",
|
||||
"Rev": "bed164a424e75154a40550c04c313ef51a7bb275"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatch",
|
||||
"Comment": "v0.7.3",
|
||||
"Rev": "bed164a424e75154a40550c04c313ef51a7bb275"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/davecgh/go-spew/spew",
|
||||
"Rev": "2df174808ee097f90d259e432cc04442cf60be21"
|
||||
@ -79,6 +114,10 @@
|
||||
"ImportPath": "github.com/streadway/amqp",
|
||||
"Rev": "150b7f24d6ad507e6026c13d85ce1f1391ac7400"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vaughan0/go-ini",
|
||||
"Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context",
|
||||
"Rev": "972f0c5fbe4ae29e666c3f78c3ed42ae7a448b0a"
|
||||
|
105
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go
generated
vendored
Normal file
105
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go
generated
vendored
Normal file
@ -0,0 +1,105 @@
|
||||
// Package awserr represents API error interface accessors for the SDK.
|
||||
package awserr
|
||||
|
||||
// An Error wraps lower level errors with code, message and an original error.
|
||||
// The underlying concrete error type may also satisfy other interfaces which
|
||||
// can be to used to obtain more specific information about the error.
|
||||
//
|
||||
// Calling Error() or String() will always include the full information about
|
||||
// an error based on its underlying type.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// output, err := s3manage.Upload(svc, input, opts)
|
||||
// if err != nil {
|
||||
// if awsErr, ok := err.(awserr.Error); ok {
|
||||
// // Get error details
|
||||
// log.Println("Error:", err.Code(), err.Message())
|
||||
//
|
||||
// // Prints out full error message, including original error if there was one.
|
||||
// log.Println("Error:", err.Error())
|
||||
//
|
||||
// // Get original error
|
||||
// if origErr := err.Err(); origErr != nil {
|
||||
// // operate on original error.
|
||||
// }
|
||||
// } else {
|
||||
// fmt.Println(err.Error())
|
||||
// }
|
||||
// }
|
||||
//
|
||||
type Error interface {
|
||||
// Satisfy the generic error interface.
|
||||
error
|
||||
|
||||
// Returns the short phrase depicting the classification of the error.
|
||||
Code() string
|
||||
|
||||
// Returns the error details message.
|
||||
Message() string
|
||||
|
||||
// Returns the original error if one was set. Nil is returned if not set.
|
||||
OrigErr() error
|
||||
}
|
||||
|
||||
// New returns an Error object described by the code, message, and origErr.
|
||||
//
|
||||
// If origErr satisfies the Error interface it will not be wrapped within a new
|
||||
// Error object and will instead be returned.
|
||||
func New(code, message string, origErr error) Error {
|
||||
if e, ok := origErr.(Error); ok && e != nil {
|
||||
return e
|
||||
}
|
||||
return newBaseError(code, message, origErr)
|
||||
}
|
||||
|
||||
// A RequestFailure is an interface to extract request failure information from
|
||||
// an Error such as the request ID of the failed request returned by a service.
|
||||
// RequestFailures may not always have a requestID value if the request failed
|
||||
// prior to reaching the service such as a connection error.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// output, err := s3manage.Upload(svc, input, opts)
|
||||
// if err != nil {
|
||||
// if reqerr, ok := err.(RequestFailure); ok {
|
||||
// log.Printf("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
|
||||
// } else {
|
||||
// log.Printf("Error:", err.Error()
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Combined with awserr.Error:
|
||||
//
|
||||
// output, err := s3manage.Upload(svc, input, opts)
|
||||
// if err != nil {
|
||||
// if awsErr, ok := err.(awserr.Error); ok {
|
||||
// // Generic AWS Error with Code, Message, and original error (if any)
|
||||
// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
|
||||
//
|
||||
// if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// // A service error occurred
|
||||
// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
|
||||
// }
|
||||
// } else {
|
||||
// fmt.Println(err.Error())
|
||||
// }
|
||||
// }
|
||||
//
|
||||
type RequestFailure interface {
|
||||
Error
|
||||
|
||||
// The status code of the HTTP response.
|
||||
StatusCode() int
|
||||
|
||||
// The request ID returned by the service for a request failure. This will
|
||||
// be empty if no request ID is available such as the request failed due
|
||||
// to a connection error.
|
||||
RequestID() string
|
||||
}
|
||||
|
||||
// NewRequestFailure returns a new request error wrapper for the given Error
|
||||
// provided.
|
||||
func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
|
||||
return newRequestError(err, statusCode, reqID)
|
||||
}
|
135
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go
generated
vendored
Normal file
135
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
|
||||
package awserr
|
||||
|
||||
import "fmt"
|
||||
|
||||
// SprintError returns a string of the formatted error code.
|
||||
//
|
||||
// Both extra and origErr are optional. If they are included their lines
|
||||
// will be added, but if they are not included their lines will be ignored.
|
||||
func SprintError(code, message, extra string, origErr error) string {
|
||||
msg := fmt.Sprintf("%s: %s", code, message)
|
||||
if extra != "" {
|
||||
msg = fmt.Sprintf("%s\n\t%s", msg, extra)
|
||||
}
|
||||
if origErr != nil {
|
||||
msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
// A baseError wraps the code and message which defines an error. It also
|
||||
// can be used to wrap an original error object.
|
||||
//
|
||||
// Should be used as the root for errors satisfying the awserr.Error. Also
|
||||
// for any error which does not fit into a specific error wrapper type.
|
||||
type baseError struct {
|
||||
// Classification of error
|
||||
code string
|
||||
|
||||
// Detailed information about error
|
||||
message string
|
||||
|
||||
// Optional original error this error is based off of. Allows building
|
||||
// chained errors.
|
||||
origErr error
|
||||
}
|
||||
|
||||
// newBaseError returns an error object for the code, message, and err.
|
||||
//
|
||||
// code is a short no whitespace phrase depicting the classification of
|
||||
// the error that is being created.
|
||||
//
|
||||
// message is the free flow string containing detailed information about the error.
|
||||
//
|
||||
// origErr is the error object which will be nested under the new error to be returned.
|
||||
func newBaseError(code, message string, origErr error) *baseError {
|
||||
return &baseError{
|
||||
code: code,
|
||||
message: message,
|
||||
origErr: origErr,
|
||||
}
|
||||
}
|
||||
|
||||
// Error returns the string representation of the error.
|
||||
//
|
||||
// See ErrorWithExtra for formatting.
|
||||
//
|
||||
// Satisfies the error interface.
|
||||
func (b baseError) Error() string {
|
||||
return SprintError(b.code, b.message, "", b.origErr)
|
||||
}
|
||||
|
||||
// String returns the string representation of the error.
|
||||
// Alias for Error to satisfy the stringer interface.
|
||||
func (b baseError) String() string {
|
||||
return b.Error()
|
||||
}
|
||||
|
||||
// Code returns the short phrase depicting the classification of the error.
|
||||
func (b baseError) Code() string {
|
||||
return b.code
|
||||
}
|
||||
|
||||
// Message returns the error details message.
|
||||
func (b baseError) Message() string {
|
||||
return b.message
|
||||
}
|
||||
|
||||
// OrigErr returns the original error if one was set. Nil is returned if no error
|
||||
// was set.
|
||||
func (b baseError) OrigErr() error {
|
||||
return b.origErr
|
||||
}
|
||||
|
||||
// So that the Error interface type can be included as an anonymous field
|
||||
// in the requestError struct and not conflict with the error.Error() method.
|
||||
type awsError Error
|
||||
|
||||
// A requestError wraps a request or service error.
|
||||
//
|
||||
// Composed of baseError for code, message, and original error.
|
||||
type requestError struct {
|
||||
awsError
|
||||
statusCode int
|
||||
requestID string
|
||||
}
|
||||
|
||||
// newRequestError returns a wrapped error with additional information for request
|
||||
// status code, and service requestID.
|
||||
//
|
||||
// Should be used to wrap all request which involve service requests. Even if
|
||||
// the request failed without a service response, but had an HTTP status code
|
||||
// that may be meaningful.
|
||||
//
|
||||
// Also wraps original errors via the baseError.
|
||||
func newRequestError(err Error, statusCode int, requestID string) *requestError {
|
||||
return &requestError{
|
||||
awsError: err,
|
||||
statusCode: statusCode,
|
||||
requestID: requestID,
|
||||
}
|
||||
}
|
||||
|
||||
// Error returns the string representation of the error.
|
||||
// Satisfies the error interface.
|
||||
func (r requestError) Error() string {
|
||||
extra := fmt.Sprintf("status code: %d, request id: [%s]",
|
||||
r.statusCode, r.requestID)
|
||||
return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
|
||||
}
|
||||
|
||||
// String returns the string representation of the error.
|
||||
// Alias for Error to satisfy the stringer interface.
|
||||
func (r requestError) String() string {
|
||||
return r.Error()
|
||||
}
|
||||
|
||||
// StatusCode returns the wrapped status code for the error
|
||||
func (r requestError) StatusCode() int {
|
||||
return r.statusCode
|
||||
}
|
||||
|
||||
// RequestID returns the wrapped requestID
|
||||
func (r requestError) RequestID() string {
|
||||
return r.requestID
|
||||
}
|
103
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
generated
vendored
Normal file
103
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
generated
vendored
Normal file
@ -0,0 +1,103 @@
|
||||
package awsutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// Copy deeply copies a src structure to dst. Useful for copying request and
|
||||
// response structures.
|
||||
//
|
||||
// Can copy between structs of different type, but will only copy fields which
|
||||
// are assignable, and exist in both structs. Fields which are not assignable,
|
||||
// or do not exist in both structs are ignored.
|
||||
func Copy(dst, src interface{}) {
|
||||
dstval := reflect.ValueOf(dst)
|
||||
if !dstval.IsValid() {
|
||||
panic("Copy dst cannot be nil")
|
||||
}
|
||||
|
||||
rcopy(dstval, reflect.ValueOf(src), true)
|
||||
}
|
||||
|
||||
// CopyOf returns a copy of src while also allocating the memory for dst.
|
||||
// src must be a pointer type or this operation will fail.
|
||||
func CopyOf(src interface{}) (dst interface{}) {
|
||||
dsti := reflect.New(reflect.TypeOf(src).Elem())
|
||||
dst = dsti.Interface()
|
||||
rcopy(dsti, reflect.ValueOf(src), true)
|
||||
return
|
||||
}
|
||||
|
||||
// rcopy performs a recursive copy of values from the source to destination.
|
||||
//
|
||||
// root is used to skip certain aspects of the copy which are not valid
|
||||
// for the root node of a object.
|
||||
func rcopy(dst, src reflect.Value, root bool) {
|
||||
if !src.IsValid() {
|
||||
return
|
||||
}
|
||||
|
||||
switch src.Kind() {
|
||||
case reflect.Ptr:
|
||||
if _, ok := src.Interface().(io.Reader); ok {
|
||||
if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
|
||||
dst.Elem().Set(src)
|
||||
} else if dst.CanSet() {
|
||||
dst.Set(src)
|
||||
}
|
||||
} else {
|
||||
e := src.Type().Elem()
|
||||
if dst.CanSet() && !src.IsNil() {
|
||||
dst.Set(reflect.New(e))
|
||||
}
|
||||
if src.Elem().IsValid() {
|
||||
// Keep the current root state since the depth hasn't changed
|
||||
rcopy(dst.Elem(), src.Elem(), root)
|
||||
}
|
||||
}
|
||||
case reflect.Struct:
|
||||
if !root {
|
||||
dst.Set(reflect.New(src.Type()).Elem())
|
||||
}
|
||||
|
||||
t := dst.Type()
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
name := t.Field(i).Name
|
||||
srcval := src.FieldByName(name)
|
||||
if srcval.IsValid() {
|
||||
rcopy(dst.FieldByName(name), srcval, false)
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
if src.IsNil() {
|
||||
break
|
||||
}
|
||||
|
||||
s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
|
||||
dst.Set(s)
|
||||
for i := 0; i < src.Len(); i++ {
|
||||
rcopy(dst.Index(i), src.Index(i), false)
|
||||
}
|
||||
case reflect.Map:
|
||||
if src.IsNil() {
|
||||
break
|
||||
}
|
||||
|
||||
s := reflect.MakeMap(src.Type())
|
||||
dst.Set(s)
|
||||
for _, k := range src.MapKeys() {
|
||||
v := src.MapIndex(k)
|
||||
v2 := reflect.New(v.Type()).Elem()
|
||||
rcopy(v2, v, false)
|
||||
dst.SetMapIndex(k, v2)
|
||||
}
|
||||
default:
|
||||
// Assign the value if possible. If its not assignable, the value would
|
||||
// need to be converted and the impact of that may be unexpected, or is
|
||||
// not compatible with the dst type.
|
||||
if src.Type().AssignableTo(dst.Type()) {
|
||||
dst.Set(src)
|
||||
}
|
||||
}
|
||||
}
|
201
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go
generated
vendored
Normal file
201
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
package awsutil_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func ExampleCopy() {
|
||||
type Foo struct {
|
||||
A int
|
||||
B []*string
|
||||
}
|
||||
|
||||
// Create the initial value
|
||||
str1 := "hello"
|
||||
str2 := "bye bye"
|
||||
f1 := &Foo{A: 1, B: []*string{&str1, &str2}}
|
||||
|
||||
// Do the copy
|
||||
var f2 Foo
|
||||
awsutil.Copy(&f2, f1)
|
||||
|
||||
// Print the result
|
||||
fmt.Println(awsutil.Prettify(f2))
|
||||
|
||||
// Output:
|
||||
// {
|
||||
// A: 1,
|
||||
// B: ["hello","bye bye"]
|
||||
// }
|
||||
}
|
||||
|
||||
func TestCopy(t *testing.T) {
|
||||
type Foo struct {
|
||||
A int
|
||||
B []*string
|
||||
C map[string]*int
|
||||
}
|
||||
|
||||
// Create the initial value
|
||||
str1 := "hello"
|
||||
str2 := "bye bye"
|
||||
int1 := 1
|
||||
int2 := 2
|
||||
f1 := &Foo{
|
||||
A: 1,
|
||||
B: []*string{&str1, &str2},
|
||||
C: map[string]*int{
|
||||
"A": &int1,
|
||||
"B": &int2,
|
||||
},
|
||||
}
|
||||
|
||||
// Do the copy
|
||||
var f2 Foo
|
||||
awsutil.Copy(&f2, f1)
|
||||
|
||||
// Values are equal
|
||||
assert.Equal(t, f2.A, f1.A)
|
||||
assert.Equal(t, f2.B, f1.B)
|
||||
assert.Equal(t, f2.C, f1.C)
|
||||
|
||||
// But pointers are not!
|
||||
str3 := "nothello"
|
||||
int3 := 57
|
||||
f2.A = 100
|
||||
f2.B[0] = &str3
|
||||
f2.C["B"] = &int3
|
||||
assert.NotEqual(t, f2.A, f1.A)
|
||||
assert.NotEqual(t, f2.B, f1.B)
|
||||
assert.NotEqual(t, f2.C, f1.C)
|
||||
}
|
||||
|
||||
func TestCopyIgnoreNilMembers(t *testing.T) {
|
||||
type Foo struct {
|
||||
A *string
|
||||
B []string
|
||||
C map[string]string
|
||||
}
|
||||
|
||||
f := &Foo{}
|
||||
assert.Nil(t, f.A)
|
||||
assert.Nil(t, f.B)
|
||||
assert.Nil(t, f.C)
|
||||
|
||||
var f2 Foo
|
||||
awsutil.Copy(&f2, f)
|
||||
assert.Nil(t, f2.A)
|
||||
assert.Nil(t, f2.B)
|
||||
assert.Nil(t, f2.C)
|
||||
|
||||
fcopy := awsutil.CopyOf(f)
|
||||
f3 := fcopy.(*Foo)
|
||||
assert.Nil(t, f3.A)
|
||||
assert.Nil(t, f3.B)
|
||||
assert.Nil(t, f3.C)
|
||||
}
|
||||
|
||||
func TestCopyPrimitive(t *testing.T) {
|
||||
str := "hello"
|
||||
var s string
|
||||
awsutil.Copy(&s, &str)
|
||||
assert.Equal(t, "hello", s)
|
||||
}
|
||||
|
||||
func TestCopyNil(t *testing.T) {
|
||||
var s string
|
||||
awsutil.Copy(&s, nil)
|
||||
assert.Equal(t, "", s)
|
||||
}
|
||||
|
||||
func TestCopyReader(t *testing.T) {
|
||||
var buf io.Reader = bytes.NewReader([]byte("hello world"))
|
||||
var r io.Reader
|
||||
awsutil.Copy(&r, buf)
|
||||
b, err := ioutil.ReadAll(r)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte("hello world"), b)
|
||||
|
||||
// empty bytes because this is not a deep copy
|
||||
b, err = ioutil.ReadAll(buf)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte(""), b)
|
||||
}
|
||||
|
||||
func TestCopyDifferentStructs(t *testing.T) {
|
||||
type SrcFoo struct {
|
||||
A int
|
||||
B []*string
|
||||
C map[string]*int
|
||||
SrcUnique string
|
||||
SameNameDiffType int
|
||||
}
|
||||
type DstFoo struct {
|
||||
A int
|
||||
B []*string
|
||||
C map[string]*int
|
||||
DstUnique int
|
||||
SameNameDiffType string
|
||||
}
|
||||
|
||||
// Create the initial value
|
||||
str1 := "hello"
|
||||
str2 := "bye bye"
|
||||
int1 := 1
|
||||
int2 := 2
|
||||
f1 := &SrcFoo{
|
||||
A: 1,
|
||||
B: []*string{&str1, &str2},
|
||||
C: map[string]*int{
|
||||
"A": &int1,
|
||||
"B": &int2,
|
||||
},
|
||||
SrcUnique: "unique",
|
||||
SameNameDiffType: 1,
|
||||
}
|
||||
|
||||
// Do the copy
|
||||
var f2 DstFoo
|
||||
awsutil.Copy(&f2, f1)
|
||||
|
||||
// Values are equal
|
||||
assert.Equal(t, f2.A, f1.A)
|
||||
assert.Equal(t, f2.B, f1.B)
|
||||
assert.Equal(t, f2.C, f1.C)
|
||||
assert.Equal(t, "unique", f1.SrcUnique)
|
||||
assert.Equal(t, 1, f1.SameNameDiffType)
|
||||
assert.Equal(t, 0, f2.DstUnique)
|
||||
assert.Equal(t, "", f2.SameNameDiffType)
|
||||
}
|
||||
|
||||
func ExampleCopyOf() {
|
||||
type Foo struct {
|
||||
A int
|
||||
B []*string
|
||||
}
|
||||
|
||||
// Create the initial value
|
||||
str1 := "hello"
|
||||
str2 := "bye bye"
|
||||
f1 := &Foo{A: 1, B: []*string{&str1, &str2}}
|
||||
|
||||
// Do the copy
|
||||
v := awsutil.CopyOf(f1)
|
||||
var f2 *Foo = v.(*Foo)
|
||||
|
||||
// Print the result
|
||||
fmt.Println(awsutil.Prettify(f2))
|
||||
|
||||
// Output:
|
||||
// {
|
||||
// A: 1,
|
||||
// B: ["hello","bye bye"]
|
||||
// }
|
||||
}
|
187
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
generated
vendored
Normal file
187
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
generated
vendored
Normal file
@ -0,0 +1,187 @@
|
||||
package awsutil
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
|
||||
|
||||
// rValuesAtPath returns a slice of values found in value v. The values
|
||||
// in v are explored recursively so all nested values are collected.
|
||||
func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) []reflect.Value {
|
||||
pathparts := strings.Split(path, "||")
|
||||
if len(pathparts) > 1 {
|
||||
for _, pathpart := range pathparts {
|
||||
vals := rValuesAtPath(v, pathpart, create, caseSensitive)
|
||||
if vals != nil && len(vals) > 0 {
|
||||
return vals
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
|
||||
components := strings.Split(path, ".")
|
||||
for len(values) > 0 && len(components) > 0 {
|
||||
var index *int64
|
||||
var indexStar bool
|
||||
c := strings.TrimSpace(components[0])
|
||||
if c == "" { // no actual component, illegal syntax
|
||||
return nil
|
||||
} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
|
||||
// TODO normalize case for user
|
||||
return nil // don't support unexported fields
|
||||
}
|
||||
|
||||
// parse this component
|
||||
if m := indexRe.FindStringSubmatch(c); m != nil {
|
||||
c = m[1]
|
||||
if m[2] == "" {
|
||||
index = nil
|
||||
indexStar = true
|
||||
} else {
|
||||
i, _ := strconv.ParseInt(m[2], 10, 32)
|
||||
index = &i
|
||||
indexStar = false
|
||||
}
|
||||
}
|
||||
|
||||
nextvals := []reflect.Value{}
|
||||
for _, value := range values {
|
||||
// pull component name out of struct member
|
||||
if value.Kind() != reflect.Struct {
|
||||
continue
|
||||
}
|
||||
|
||||
if c == "*" { // pull all members
|
||||
for i := 0; i < value.NumField(); i++ {
|
||||
if f := reflect.Indirect(value.Field(i)); f.IsValid() {
|
||||
nextvals = append(nextvals, f)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
value = value.FieldByNameFunc(func(name string) bool {
|
||||
if c == name {
|
||||
return true
|
||||
} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
if create && value.Kind() == reflect.Ptr && value.IsNil() {
|
||||
value.Set(reflect.New(value.Type().Elem()))
|
||||
value = value.Elem()
|
||||
} else {
|
||||
value = reflect.Indirect(value)
|
||||
}
|
||||
|
||||
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
|
||||
if !create && value.IsNil() {
|
||||
value = reflect.ValueOf(nil)
|
||||
}
|
||||
}
|
||||
|
||||
if value.IsValid() {
|
||||
nextvals = append(nextvals, value)
|
||||
}
|
||||
}
|
||||
values = nextvals
|
||||
|
||||
if indexStar || index != nil {
|
||||
nextvals = []reflect.Value{}
|
||||
for _, value := range values {
|
||||
value := reflect.Indirect(value)
|
||||
if value.Kind() != reflect.Slice {
|
||||
continue
|
||||
}
|
||||
|
||||
if indexStar { // grab all indices
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
idx := reflect.Indirect(value.Index(i))
|
||||
if idx.IsValid() {
|
||||
nextvals = append(nextvals, idx)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// pull out index
|
||||
i := int(*index)
|
||||
if i >= value.Len() { // check out of bounds
|
||||
if create {
|
||||
// TODO resize slice
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
} else if i < 0 { // support negative indexing
|
||||
i = value.Len() + i
|
||||
}
|
||||
value = reflect.Indirect(value.Index(i))
|
||||
|
||||
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
|
||||
if !create && value.IsNil() {
|
||||
value = reflect.ValueOf(nil)
|
||||
}
|
||||
}
|
||||
|
||||
if value.IsValid() {
|
||||
nextvals = append(nextvals, value)
|
||||
}
|
||||
}
|
||||
values = nextvals
|
||||
}
|
||||
|
||||
components = components[1:]
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
// ValuesAtPath returns a list of objects at the lexical path inside of a structure
|
||||
func ValuesAtPath(i interface{}, path string) []interface{} {
|
||||
if rvals := rValuesAtPath(i, path, false, true); rvals != nil {
|
||||
vals := make([]interface{}, len(rvals))
|
||||
for i, rval := range rvals {
|
||||
vals[i] = rval.Interface()
|
||||
}
|
||||
return vals
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValuesAtAnyPath returns a list of objects at the case-insensitive lexical
|
||||
// path inside of a structure
|
||||
func ValuesAtAnyPath(i interface{}, path string) []interface{} {
|
||||
if rvals := rValuesAtPath(i, path, false, false); rvals != nil {
|
||||
vals := make([]interface{}, len(rvals))
|
||||
for i, rval := range rvals {
|
||||
vals[i] = rval.Interface()
|
||||
}
|
||||
return vals
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetValueAtPath sets an object at the lexical path inside of a structure
|
||||
func SetValueAtPath(i interface{}, path string, v interface{}) {
|
||||
if rvals := rValuesAtPath(i, path, true, true); rvals != nil {
|
||||
for _, rval := range rvals {
|
||||
rval.Set(reflect.ValueOf(v))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetValueAtAnyPath sets an object at the case insensitive lexical path inside
|
||||
// of a structure
|
||||
func SetValueAtAnyPath(i interface{}, path string, v interface{}) {
|
||||
if rvals := rValuesAtPath(i, path, true, false); rvals != nil {
|
||||
for _, rval := range rvals {
|
||||
rval.Set(reflect.ValueOf(v))
|
||||
}
|
||||
}
|
||||
}
|
68
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go
generated
vendored
Normal file
68
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
package awsutil_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type Struct struct {
|
||||
A []Struct
|
||||
z []Struct
|
||||
B *Struct
|
||||
D *Struct
|
||||
C string
|
||||
}
|
||||
|
||||
var data = Struct{
|
||||
A: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}},
|
||||
z: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}},
|
||||
B: &Struct{B: &Struct{C: "terminal"}, D: &Struct{C: "terminal2"}},
|
||||
C: "initial",
|
||||
}
|
||||
|
||||
func TestValueAtPathSuccess(t *testing.T) {
|
||||
assert.Equal(t, []interface{}{"initial"}, awsutil.ValuesAtPath(data, "C"))
|
||||
assert.Equal(t, []interface{}{"value1"}, awsutil.ValuesAtPath(data, "A[0].C"))
|
||||
assert.Equal(t, []interface{}{"value2"}, awsutil.ValuesAtPath(data, "A[1].C"))
|
||||
assert.Equal(t, []interface{}{"value3"}, awsutil.ValuesAtPath(data, "A[2].C"))
|
||||
assert.Equal(t, []interface{}{"value3"}, awsutil.ValuesAtAnyPath(data, "a[2].c"))
|
||||
assert.Equal(t, []interface{}{"value3"}, awsutil.ValuesAtPath(data, "A[-1].C"))
|
||||
assert.Equal(t, []interface{}{"value1", "value2", "value3"}, awsutil.ValuesAtPath(data, "A[].C"))
|
||||
assert.Equal(t, []interface{}{"terminal"}, awsutil.ValuesAtPath(data, "B . B . C"))
|
||||
assert.Equal(t, []interface{}{"terminal", "terminal2"}, awsutil.ValuesAtPath(data, "B.*.C"))
|
||||
assert.Equal(t, []interface{}{"initial"}, awsutil.ValuesAtPath(data, "A.D.X || C"))
|
||||
}
|
||||
|
||||
func TestValueAtPathFailure(t *testing.T) {
|
||||
assert.Equal(t, []interface{}(nil), awsutil.ValuesAtPath(data, "C.x"))
|
||||
assert.Equal(t, []interface{}(nil), awsutil.ValuesAtPath(data, ".x"))
|
||||
assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "X.Y.Z"))
|
||||
assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "A[100].C"))
|
||||
assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "A[3].C"))
|
||||
assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "B.B.C.Z"))
|
||||
assert.Equal(t, []interface{}(nil), awsutil.ValuesAtPath(data, "z[-1].C"))
|
||||
assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(nil, "A.B.C"))
|
||||
assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(Struct{}, "A"))
|
||||
}
|
||||
|
||||
func TestSetValueAtPathSuccess(t *testing.T) {
|
||||
var s Struct
|
||||
awsutil.SetValueAtPath(&s, "C", "test1")
|
||||
awsutil.SetValueAtPath(&s, "B.B.C", "test2")
|
||||
awsutil.SetValueAtPath(&s, "B.D.C", "test3")
|
||||
assert.Equal(t, "test1", s.C)
|
||||
assert.Equal(t, "test2", s.B.B.C)
|
||||
assert.Equal(t, "test3", s.B.D.C)
|
||||
|
||||
awsutil.SetValueAtPath(&s, "B.*.C", "test0")
|
||||
assert.Equal(t, "test0", s.B.B.C)
|
||||
assert.Equal(t, "test0", s.B.D.C)
|
||||
|
||||
var s2 Struct
|
||||
awsutil.SetValueAtAnyPath(&s2, "b.b.c", "test0")
|
||||
assert.Equal(t, "test0", s2.B.B.C)
|
||||
awsutil.SetValueAtAnyPath(&s2, "A", []Struct{{}})
|
||||
assert.Equal(t, []Struct{{}}, s2.A)
|
||||
}
|
103
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
generated
vendored
Normal file
103
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
generated
vendored
Normal file
@ -0,0 +1,103 @@
|
||||
package awsutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Prettify returns the string representation of a value.
|
||||
func Prettify(i interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
prettify(reflect.ValueOf(i), 0, &buf)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// prettify will recursively walk value v to build a textual
|
||||
// representation of the value.
|
||||
func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
|
||||
for v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Struct:
|
||||
strtype := v.Type().String()
|
||||
if strtype == "time.Time" {
|
||||
fmt.Fprintf(buf, "%s", v.Interface())
|
||||
break
|
||||
} else if strings.HasPrefix(strtype, "io.") {
|
||||
buf.WriteString("<buffer>")
|
||||
break
|
||||
}
|
||||
|
||||
buf.WriteString("{\n")
|
||||
|
||||
names := []string{}
|
||||
for i := 0; i < v.Type().NumField(); i++ {
|
||||
name := v.Type().Field(i).Name
|
||||
f := v.Field(i)
|
||||
if name[0:1] == strings.ToLower(name[0:1]) {
|
||||
continue // ignore unexported fields
|
||||
}
|
||||
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
|
||||
continue // ignore unset fields
|
||||
}
|
||||
names = append(names, name)
|
||||
}
|
||||
|
||||
for i, n := range names {
|
||||
val := v.FieldByName(n)
|
||||
buf.WriteString(strings.Repeat(" ", indent+2))
|
||||
buf.WriteString(n + ": ")
|
||||
prettify(val, indent+2, buf)
|
||||
|
||||
if i < len(names)-1 {
|
||||
buf.WriteString(",\n")
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
||||
case reflect.Slice:
|
||||
nl, id, id2 := "", "", ""
|
||||
if v.Len() > 3 {
|
||||
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
|
||||
}
|
||||
buf.WriteString("[" + nl)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
buf.WriteString(id2)
|
||||
prettify(v.Index(i), indent+2, buf)
|
||||
|
||||
if i < v.Len()-1 {
|
||||
buf.WriteString("," + nl)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString(nl + id + "]")
|
||||
case reflect.Map:
|
||||
buf.WriteString("{\n")
|
||||
|
||||
for i, k := range v.MapKeys() {
|
||||
buf.WriteString(strings.Repeat(" ", indent+2))
|
||||
buf.WriteString(k.String() + ": ")
|
||||
prettify(v.MapIndex(k), indent+2, buf)
|
||||
|
||||
if i < v.Len()-1 {
|
||||
buf.WriteString(",\n")
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
||||
default:
|
||||
format := "%v"
|
||||
switch v.Interface().(type) {
|
||||
case string:
|
||||
format = "%q"
|
||||
case io.ReadSeeker, io.Reader:
|
||||
format = "buffer(%p)"
|
||||
}
|
||||
fmt.Fprintf(buf, format, v.Interface())
|
||||
}
|
||||
}
|
254
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go
generated
vendored
Normal file
254
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go
generated
vendored
Normal file
@ -0,0 +1,254 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
)
|
||||
|
||||
// DefaultChainCredentials is a Credentials which will find the first available
|
||||
// credentials Value from the list of Providers.
|
||||
//
|
||||
// This should be used in the default case. Once the type of credentials are
|
||||
// known switching to the specific Credentials will be more efficient.
|
||||
var DefaultChainCredentials = credentials.NewChainCredentials(
|
||||
[]credentials.Provider{
|
||||
&credentials.EnvProvider{},
|
||||
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
|
||||
&credentials.EC2RoleProvider{ExpiryWindow: 5 * time.Minute},
|
||||
})
|
||||
|
||||
// The default number of retries for a service. The value of -1 indicates that
|
||||
// the service specific retry default will be used.
|
||||
const DefaultRetries = -1
|
||||
|
||||
// DefaultConfig is the default all service configuration will be based off of.
|
||||
// By default, all clients use this structure for initialization options unless
|
||||
// a custom configuration object is passed in.
|
||||
//
|
||||
// You may modify this global structure to change all default configuration
|
||||
// in the SDK. Note that configuration options are copied by value, so any
|
||||
// modifications must happen before constructing a client.
|
||||
var DefaultConfig = NewConfig().
|
||||
WithCredentials(DefaultChainCredentials).
|
||||
WithRegion(os.Getenv("AWS_REGION")).
|
||||
WithHTTPClient(http.DefaultClient).
|
||||
WithMaxRetries(DefaultRetries).
|
||||
WithLogger(NewDefaultLogger()).
|
||||
WithLogLevel(LogOff)
|
||||
|
||||
// A Config provides service configuration for service clients. By default,
|
||||
// all clients will use the {DefaultConfig} structure.
|
||||
type Config struct {
|
||||
// The credentials object to use when signing requests. Defaults to
|
||||
// {DefaultChainCredentials}.
|
||||
Credentials *credentials.Credentials
|
||||
|
||||
// An optional endpoint URL (hostname only or fully qualified URI)
|
||||
// that overrides the default generated endpoint for a client. Set this
|
||||
// to `""` to use the default generated endpoint.
|
||||
//
|
||||
// @note You must still provide a `Region` value when specifying an
|
||||
// endpoint for a client.
|
||||
Endpoint *string
|
||||
|
||||
// The region to send requests to. This parameter is required and must
|
||||
// be configured globally or on a per-client basis unless otherwise
|
||||
// noted. A full list of regions is found in the "Regions and Endpoints"
|
||||
// document.
|
||||
//
|
||||
// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
|
||||
// AWS Regions and Endpoints
|
||||
Region *string
|
||||
|
||||
// Set this to `true` to disable SSL when sending requests. Defaults
|
||||
// to `false`.
|
||||
DisableSSL *bool
|
||||
|
||||
// The HTTP client to use when sending requests. Defaults to
|
||||
// `http.DefaultClient`.
|
||||
HTTPClient *http.Client
|
||||
|
||||
// An integer value representing the logging level. The default log level
|
||||
// is zero (LogOff), which represents no logging. To enable logging set
|
||||
// to a LogLevel Value.
|
||||
LogLevel *LogLevelType
|
||||
|
||||
// The logger writer interface to write logging messages to. Defaults to
|
||||
// standard out.
|
||||
Logger Logger
|
||||
|
||||
// The maximum number of times that a request will be retried for failures.
|
||||
// Defaults to -1, which defers the max retry setting to the service specific
|
||||
// configuration.
|
||||
MaxRetries *int
|
||||
|
||||
// Disables semantic parameter validation, which validates input for missing
|
||||
// required fields and/or other semantic request input errors.
|
||||
DisableParamValidation *bool
|
||||
|
||||
// Disables the computation of request and response checksums, e.g.,
|
||||
// CRC32 checksums in Amazon DynamoDB.
|
||||
DisableComputeChecksums *bool
|
||||
|
||||
// Set this to `true` to force the request to use path-style addressing,
|
||||
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will
|
||||
// use virtual hosted bucket addressing when possible
|
||||
// (`http://BUCKET.s3.amazonaws.com/KEY`).
|
||||
//
|
||||
// @note This configuration option is specific to the Amazon S3 service.
|
||||
// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
|
||||
// Amazon S3: Virtual Hosting of Buckets
|
||||
S3ForcePathStyle *bool
|
||||
}
|
||||
|
||||
// NewConfig returns a new Config pointer that can be chained with builder methods to
|
||||
// set multiple configuration values inline without using pointers.
|
||||
//
|
||||
// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
|
||||
//
|
||||
func NewConfig() *Config {
|
||||
return &Config{}
|
||||
}
|
||||
|
||||
// WithCredentials sets a config Credentials value returning a Config pointer
|
||||
// for chaining.
|
||||
func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
|
||||
c.Credentials = creds
|
||||
return c
|
||||
}
|
||||
|
||||
// WithEndpoint sets a config Endpoint value returning a Config pointer for
|
||||
// chaining.
|
||||
func (c *Config) WithEndpoint(endpoint string) *Config {
|
||||
c.Endpoint = &endpoint
|
||||
return c
|
||||
}
|
||||
|
||||
// WithRegion sets a config Region value returning a Config pointer for
|
||||
// chaining.
|
||||
func (c *Config) WithRegion(region string) *Config {
|
||||
c.Region = ®ion
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableSSL sets a config DisableSSL value returning a Config pointer
|
||||
// for chaining.
|
||||
func (c *Config) WithDisableSSL(disable bool) *Config {
|
||||
c.DisableSSL = &disable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithHTTPClient sets a config HTTPClient value returning a Config pointer
|
||||
// for chaining.
|
||||
func (c *Config) WithHTTPClient(client *http.Client) *Config {
|
||||
c.HTTPClient = client
|
||||
return c
|
||||
}
|
||||
|
||||
// WithMaxRetries sets a config MaxRetries value returning a Config pointer
|
||||
// for chaining.
|
||||
func (c *Config) WithMaxRetries(max int) *Config {
|
||||
c.MaxRetries = &max
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableParamValidation sets a config DisableParamValidation value
|
||||
// returning a Config pointer for chaining.
|
||||
func (c *Config) WithDisableParamValidation(disable bool) *Config {
|
||||
c.DisableParamValidation = &disable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableComputeChecksums sets a config DisableComputeChecksums value
|
||||
// returning a Config pointer for chaining.
|
||||
func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
|
||||
c.DisableComputeChecksums = &disable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithLogLevel sets a config LogLevel value returning a Config pointer for
|
||||
// chaining.
|
||||
func (c *Config) WithLogLevel(level LogLevelType) *Config {
|
||||
c.LogLevel = &level
|
||||
return c
|
||||
}
|
||||
|
||||
// WithLogger sets a config Logger value returning a Config pointer for
|
||||
// chaining.
|
||||
func (c *Config) WithLogger(logger Logger) *Config {
|
||||
c.Logger = logger
|
||||
return c
|
||||
}
|
||||
|
||||
// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
|
||||
// pointer for chaining.
|
||||
func (c *Config) WithS3ForcePathStyle(force bool) *Config {
|
||||
c.S3ForcePathStyle = &force
|
||||
return c
|
||||
}
|
||||
|
||||
// Merge returns a new Config with the other Config's attribute values merged into
|
||||
// this Config. If the other Config's attribute is nil it will not be merged into
|
||||
// the new Config to be returned.
|
||||
func (c Config) Merge(other *Config) *Config {
|
||||
if other == nil {
|
||||
return &c
|
||||
}
|
||||
|
||||
dst := c
|
||||
|
||||
if other.Credentials != nil {
|
||||
dst.Credentials = other.Credentials
|
||||
}
|
||||
|
||||
if other.Endpoint != nil {
|
||||
dst.Endpoint = other.Endpoint
|
||||
}
|
||||
|
||||
if other.Region != nil {
|
||||
dst.Region = other.Region
|
||||
}
|
||||
|
||||
if other.DisableSSL != nil {
|
||||
dst.DisableSSL = other.DisableSSL
|
||||
}
|
||||
|
||||
if other.HTTPClient != nil {
|
||||
dst.HTTPClient = other.HTTPClient
|
||||
}
|
||||
|
||||
if other.LogLevel != nil {
|
||||
dst.LogLevel = other.LogLevel
|
||||
}
|
||||
|
||||
if other.Logger != nil {
|
||||
dst.Logger = other.Logger
|
||||
}
|
||||
|
||||
if other.MaxRetries != nil {
|
||||
dst.MaxRetries = other.MaxRetries
|
||||
}
|
||||
|
||||
if other.DisableParamValidation != nil {
|
||||
dst.DisableParamValidation = other.DisableParamValidation
|
||||
}
|
||||
|
||||
if other.DisableComputeChecksums != nil {
|
||||
dst.DisableComputeChecksums = other.DisableComputeChecksums
|
||||
}
|
||||
|
||||
if other.S3ForcePathStyle != nil {
|
||||
dst.S3ForcePathStyle = other.S3ForcePathStyle
|
||||
}
|
||||
|
||||
return &dst
|
||||
}
|
||||
|
||||
// Copy will return a shallow copy of the Config object.
|
||||
func (c Config) Copy() *Config {
|
||||
dst := c
|
||||
return &dst
|
||||
}
|
87
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config_test.go
generated
vendored
Normal file
87
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config_test.go
generated
vendored
Normal file
@ -0,0 +1,87 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
)
|
||||
|
||||
var testCredentials = credentials.NewChainCredentials([]credentials.Provider{
|
||||
&credentials.EnvProvider{},
|
||||
&credentials.SharedCredentialsProvider{
|
||||
Filename: "TestFilename",
|
||||
Profile: "TestProfile"},
|
||||
&credentials.EC2RoleProvider{ExpiryWindow: 5 * time.Minute},
|
||||
})
|
||||
|
||||
var copyTestConfig = Config{
|
||||
Credentials: testCredentials,
|
||||
Endpoint: String("CopyTestEndpoint"),
|
||||
Region: String("COPY_TEST_AWS_REGION"),
|
||||
DisableSSL: Bool(true),
|
||||
HTTPClient: http.DefaultClient,
|
||||
LogLevel: LogLevel(LogDebug),
|
||||
Logger: NewDefaultLogger(),
|
||||
MaxRetries: Int(DefaultRetries),
|
||||
DisableParamValidation: Bool(true),
|
||||
DisableComputeChecksums: Bool(true),
|
||||
S3ForcePathStyle: Bool(true),
|
||||
}
|
||||
|
||||
func TestCopy(t *testing.T) {
|
||||
want := copyTestConfig
|
||||
got := copyTestConfig.Copy()
|
||||
if !reflect.DeepEqual(*got, want) {
|
||||
t.Errorf("Copy() = %+v", got)
|
||||
t.Errorf(" want %+v", want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyReturnsNewInstance(t *testing.T) {
|
||||
want := copyTestConfig
|
||||
got := copyTestConfig.Copy()
|
||||
if got == &want {
|
||||
t.Errorf("Copy() = %p; want different instance as source %p", got, &want)
|
||||
}
|
||||
}
|
||||
|
||||
var mergeTestZeroValueConfig = Config{}
|
||||
|
||||
var mergeTestConfig = Config{
|
||||
Credentials: testCredentials,
|
||||
Endpoint: String("MergeTestEndpoint"),
|
||||
Region: String("MERGE_TEST_AWS_REGION"),
|
||||
DisableSSL: Bool(true),
|
||||
HTTPClient: http.DefaultClient,
|
||||
LogLevel: LogLevel(LogDebug),
|
||||
Logger: NewDefaultLogger(),
|
||||
MaxRetries: Int(10),
|
||||
DisableParamValidation: Bool(true),
|
||||
DisableComputeChecksums: Bool(true),
|
||||
S3ForcePathStyle: Bool(true),
|
||||
}
|
||||
|
||||
var mergeTests = []struct {
|
||||
cfg *Config
|
||||
in *Config
|
||||
want *Config
|
||||
}{
|
||||
{&Config{}, nil, &Config{}},
|
||||
{&Config{}, &mergeTestZeroValueConfig, &Config{}},
|
||||
{&Config{}, &mergeTestConfig, &mergeTestConfig},
|
||||
}
|
||||
|
||||
func TestMerge(t *testing.T) {
|
||||
for i, tt := range mergeTests {
|
||||
got := tt.cfg.Merge(tt.in)
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("Config %d %+v", i, tt.cfg)
|
||||
t.Errorf(" Merge(%+v)", tt.in)
|
||||
t.Errorf(" got %+v", got)
|
||||
t.Errorf(" want %+v", tt.want)
|
||||
}
|
||||
}
|
||||
}
|
357
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convutil.go
generated
vendored
Normal file
357
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convutil.go
generated
vendored
Normal file
@ -0,0 +1,357 @@
|
||||
package aws
|
||||
|
||||
import "time"
|
||||
|
||||
// String returns a pointer to of the string value passed in.
|
||||
func String(v string) *string {
|
||||
return &v
|
||||
}
|
||||
|
||||
// StringValue returns the value of the string pointer passed in or
|
||||
// "" if the pointer is nil.
|
||||
func StringValue(v *string) string {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// StringSlice converts a slice of string values into a slice of
|
||||
// string pointers
|
||||
func StringSlice(src []string) []*string {
|
||||
dst := make([]*string, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// StringValueSlice converts a slice of string pointers into a slice of
|
||||
// string values
|
||||
func StringValueSlice(src []*string) []string {
|
||||
dst := make([]string, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// StringMap converts a string map of string values into a string
|
||||
// map of string pointers
|
||||
func StringMap(src map[string]string) map[string]*string {
|
||||
dst := make(map[string]*string)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// StringValueMap converts a string map of string pointers into a string
|
||||
// map of string values
|
||||
func StringValueMap(src map[string]*string) map[string]string {
|
||||
dst := make(map[string]string)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Bool returns a pointer to of the bool value passed in.
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// BoolValue returns the value of the bool pointer passed in or
|
||||
// false if the pointer is nil.
|
||||
func BoolValue(v *bool) bool {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// BoolSlice converts a slice of bool values into a slice of
|
||||
// bool pointers
|
||||
func BoolSlice(src []bool) []*bool {
|
||||
dst := make([]*bool, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// BoolValueSlice converts a slice of bool pointers into a slice of
|
||||
// bool values
|
||||
func BoolValueSlice(src []*bool) []bool {
|
||||
dst := make([]bool, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// BoolMap converts a string map of bool values into a string
|
||||
// map of bool pointers
|
||||
func BoolMap(src map[string]bool) map[string]*bool {
|
||||
dst := make(map[string]*bool)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// BoolValueMap converts a string map of bool pointers into a string
|
||||
// map of bool values
|
||||
func BoolValueMap(src map[string]*bool) map[string]bool {
|
||||
dst := make(map[string]bool)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int returns a pointer to of the int value passed in.
|
||||
func Int(v int) *int {
|
||||
return &v
|
||||
}
|
||||
|
||||
// IntValue returns the value of the int pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func IntValue(v *int) int {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// IntSlice converts a slice of int values into a slice of
|
||||
// int pointers
|
||||
func IntSlice(src []int) []*int {
|
||||
dst := make([]*int, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// IntValueSlice converts a slice of int pointers into a slice of
|
||||
// int values
|
||||
func IntValueSlice(src []*int) []int {
|
||||
dst := make([]int, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// IntMap converts a string map of int values into a string
|
||||
// map of int pointers
|
||||
func IntMap(src map[string]int) map[string]*int {
|
||||
dst := make(map[string]*int)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// IntValueMap converts a string map of int pointers into a string
|
||||
// map of int values
|
||||
func IntValueMap(src map[string]*int) map[string]int {
|
||||
dst := make(map[string]int)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64 returns a pointer to of the int64 value passed in.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int64Value returns the value of the int64 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Int64Value(v *int64) int64 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Int64Slice converts a slice of int64 values into a slice of
|
||||
// int64 pointers
|
||||
func Int64Slice(src []int64) []*int64 {
|
||||
dst := make([]*int64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64ValueSlice converts a slice of int64 pointers into a slice of
|
||||
// int64 values
|
||||
func Int64ValueSlice(src []*int64) []int64 {
|
||||
dst := make([]int64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64Map converts a string map of int64 values into a string
|
||||
// map of int64 pointers
|
||||
func Int64Map(src map[string]int64) map[string]*int64 {
|
||||
dst := make(map[string]*int64)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64ValueMap converts a string map of int64 pointers into a string
|
||||
// map of int64 values
|
||||
func Int64ValueMap(src map[string]*int64) map[string]int64 {
|
||||
dst := make(map[string]int64)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64 returns a pointer to of the float64 value passed in.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float64Value returns the value of the float64 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Float64Value(v *float64) float64 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Float64Slice converts a slice of float64 values into a slice of
|
||||
// float64 pointers
|
||||
func Float64Slice(src []float64) []*float64 {
|
||||
dst := make([]*float64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64ValueSlice converts a slice of float64 pointers into a slice of
|
||||
// float64 values
|
||||
func Float64ValueSlice(src []*float64) []float64 {
|
||||
dst := make([]float64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64Map converts a string map of float64 values into a string
|
||||
// map of float64 pointers
|
||||
func Float64Map(src map[string]float64) map[string]*float64 {
|
||||
dst := make(map[string]*float64)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64ValueMap converts a string map of float64 pointers into a string
|
||||
// map of float64 values
|
||||
func Float64ValueMap(src map[string]*float64) map[string]float64 {
|
||||
dst := make(map[string]float64)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Time returns a pointer to of the time.Time value passed in.
|
||||
func Time(v time.Time) *time.Time {
|
||||
return &v
|
||||
}
|
||||
|
||||
// TimeValue returns the value of the time.Time pointer passed in or
|
||||
// time.Time{} if the pointer is nil.
|
||||
func TimeValue(v *time.Time) time.Time {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// TimeSlice converts a slice of time.Time values into a slice of
|
||||
// time.Time pointers
|
||||
func TimeSlice(src []time.Time) []*time.Time {
|
||||
dst := make([]*time.Time, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// TimeValueSlice converts a slice of time.Time pointers into a slice of
|
||||
// time.Time values
|
||||
func TimeValueSlice(src []*time.Time) []time.Time {
|
||||
dst := make([]time.Time, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// TimeMap converts a string map of time.Time values into a string
|
||||
// map of time.Time pointers
|
||||
func TimeMap(src map[string]time.Time) map[string]*time.Time {
|
||||
dst := make(map[string]*time.Time)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// TimeValueMap converts a string map of time.Time pointers into a string
|
||||
// map of time.Time values
|
||||
func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
|
||||
dst := make(map[string]time.Time)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
438
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convutil_test.go
generated
vendored
Normal file
438
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convutil_test.go
generated
vendored
Normal file
@ -0,0 +1,438 @@
|
||||
package aws_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var testCasesStringSlice = [][]string{
|
||||
{"a", "b", "c", "d", "e"},
|
||||
{"a", "b", "", "", "e"},
|
||||
}
|
||||
|
||||
func TestStringSlice(t *testing.T) {
|
||||
for idx, in := range testCasesStringSlice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.StringSlice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
||||
}
|
||||
|
||||
out2 := aws.StringValueSlice(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesStringValueSlice = [][]*string{
|
||||
{aws.String("a"), aws.String("b"), nil, aws.String("c")},
|
||||
}
|
||||
|
||||
func TestStringValueSlice(t *testing.T) {
|
||||
for idx, in := range testCasesStringValueSlice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.StringValueSlice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
if in[i] == nil {
|
||||
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
|
||||
} else {
|
||||
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
out2 := aws.StringSlice(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out2 {
|
||||
if in[i] == nil {
|
||||
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
|
||||
} else {
|
||||
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesStringMap = []map[string]string{
|
||||
{"a": "1", "b": "2", "c": "3"},
|
||||
}
|
||||
|
||||
func TestStringMap(t *testing.T) {
|
||||
for idx, in := range testCasesStringMap {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.StringMap(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
||||
}
|
||||
|
||||
out2 := aws.StringValueMap(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesBoolSlice = [][]bool{
|
||||
{true, true, false, false},
|
||||
}
|
||||
|
||||
func TestBoolSlice(t *testing.T) {
|
||||
for idx, in := range testCasesBoolSlice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.BoolSlice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
||||
}
|
||||
|
||||
out2 := aws.BoolValueSlice(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesBoolValueSlice = [][]*bool{}
|
||||
|
||||
func TestBoolValueSlice(t *testing.T) {
|
||||
for idx, in := range testCasesBoolValueSlice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.BoolValueSlice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
if in[i] == nil {
|
||||
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
|
||||
} else {
|
||||
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
out2 := aws.BoolSlice(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out2 {
|
||||
if in[i] == nil {
|
||||
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
|
||||
} else {
|
||||
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesBoolMap = []map[string]bool{
|
||||
{"a": true, "b": false, "c": true},
|
||||
}
|
||||
|
||||
func TestBoolMap(t *testing.T) {
|
||||
for idx, in := range testCasesBoolMap {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.BoolMap(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
||||
}
|
||||
|
||||
out2 := aws.BoolValueMap(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesIntSlice = [][]int{
|
||||
{1, 2, 3, 4},
|
||||
}
|
||||
|
||||
func TestIntSlice(t *testing.T) {
|
||||
for idx, in := range testCasesIntSlice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.IntSlice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
||||
}
|
||||
|
||||
out2 := aws.IntValueSlice(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesIntValueSlice = [][]*int{}
|
||||
|
||||
func TestIntValueSlice(t *testing.T) {
|
||||
for idx, in := range testCasesIntValueSlice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.IntValueSlice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
if in[i] == nil {
|
||||
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
|
||||
} else {
|
||||
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
out2 := aws.IntSlice(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out2 {
|
||||
if in[i] == nil {
|
||||
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
|
||||
} else {
|
||||
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesIntMap = []map[string]int{
|
||||
{"a": 3, "b": 2, "c": 1},
|
||||
}
|
||||
|
||||
func TestIntMap(t *testing.T) {
|
||||
for idx, in := range testCasesIntMap {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.IntMap(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
||||
}
|
||||
|
||||
out2 := aws.IntValueMap(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesInt64Slice = [][]int64{
|
||||
{1, 2, 3, 4},
|
||||
}
|
||||
|
||||
func TestInt64Slice(t *testing.T) {
|
||||
for idx, in := range testCasesInt64Slice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.Int64Slice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
||||
}
|
||||
|
||||
out2 := aws.Int64ValueSlice(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesInt64ValueSlice = [][]*int64{}
|
||||
|
||||
func TestInt64ValueSlice(t *testing.T) {
|
||||
for idx, in := range testCasesInt64ValueSlice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.Int64ValueSlice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
if in[i] == nil {
|
||||
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
|
||||
} else {
|
||||
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
out2 := aws.Int64Slice(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out2 {
|
||||
if in[i] == nil {
|
||||
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
|
||||
} else {
|
||||
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesInt64Map = []map[string]int64{
|
||||
{"a": 3, "b": 2, "c": 1},
|
||||
}
|
||||
|
||||
func TestInt64Map(t *testing.T) {
|
||||
for idx, in := range testCasesInt64Map {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.Int64Map(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
||||
}
|
||||
|
||||
out2 := aws.Int64ValueMap(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesFloat64Slice = [][]float64{
|
||||
{1, 2, 3, 4},
|
||||
}
|
||||
|
||||
func TestFloat64Slice(t *testing.T) {
|
||||
for idx, in := range testCasesFloat64Slice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.Float64Slice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
||||
}
|
||||
|
||||
out2 := aws.Float64ValueSlice(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesFloat64ValueSlice = [][]*float64{}
|
||||
|
||||
func TestFloat64ValueSlice(t *testing.T) {
|
||||
for idx, in := range testCasesFloat64ValueSlice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.Float64ValueSlice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
if in[i] == nil {
|
||||
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
|
||||
} else {
|
||||
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
out2 := aws.Float64Slice(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out2 {
|
||||
if in[i] == nil {
|
||||
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
|
||||
} else {
|
||||
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesFloat64Map = []map[string]float64{
|
||||
{"a": 3, "b": 2, "c": 1},
|
||||
}
|
||||
|
||||
func TestFloat64Map(t *testing.T) {
|
||||
for idx, in := range testCasesFloat64Map {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.Float64Map(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
||||
}
|
||||
|
||||
out2 := aws.Float64ValueMap(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesTimeSlice = [][]time.Time{
|
||||
{time.Now(), time.Now().AddDate(100, 0, 0)},
|
||||
}
|
||||
|
||||
func TestTimeSlice(t *testing.T) {
|
||||
for idx, in := range testCasesTimeSlice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.TimeSlice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
||||
}
|
||||
|
||||
out2 := aws.TimeValueSlice(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesTimeValueSlice = [][]*time.Time{}
|
||||
|
||||
func TestTimeValueSlice(t *testing.T) {
|
||||
for idx, in := range testCasesTimeValueSlice {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.TimeValueSlice(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
if in[i] == nil {
|
||||
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
|
||||
} else {
|
||||
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
|
||||
out2 := aws.TimeSlice(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out2 {
|
||||
if in[i] == nil {
|
||||
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
|
||||
} else {
|
||||
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var testCasesTimeMap = []map[string]time.Time{
|
||||
{"a": time.Now().AddDate(-100, 0, 0), "b": time.Now()},
|
||||
}
|
||||
|
||||
func TestTimeMap(t *testing.T) {
|
||||
for idx, in := range testCasesTimeMap {
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
out := aws.TimeMap(in)
|
||||
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
|
||||
for i := range out {
|
||||
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
|
||||
}
|
||||
|
||||
out2 := aws.TimeValueMap(out)
|
||||
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
|
||||
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
|
||||
}
|
||||
}
|
85
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
generated
vendored
Normal file
85
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNoValidProvidersFoundInChain is returned when there are no valid
|
||||
// providers in the ChainProvider.
|
||||
//
|
||||
// @readonly
|
||||
ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", "no valid providers in chain", nil)
|
||||
)
|
||||
|
||||
// A ChainProvider will search for a provider which returns credentials
|
||||
// and cache that provider until Retrieve is called again.
|
||||
//
|
||||
// The ChainProvider provides a way of chaining multiple providers together
|
||||
// which will pick the first available using priority order of the Providers
|
||||
// in the list.
|
||||
//
|
||||
// If none of the Providers retrieve valid credentials Value, ChainProvider's
|
||||
// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
|
||||
//
|
||||
// If a Provider is found which returns valid credentials Value ChainProvider
|
||||
// will cache that Provider for all calls to IsExpired(), until Retrieve is
|
||||
// called again.
|
||||
//
|
||||
// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
|
||||
// In this example EnvProvider will first check if any credentials are available
|
||||
// via the environment variables. If there are none ChainProvider will check
|
||||
// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
|
||||
// does not return any credentials ChainProvider will return the error
|
||||
// ErrNoValidProvidersFoundInChain
|
||||
//
|
||||
// creds := NewChainCredentials(
|
||||
// []Provider{
|
||||
// &EnvProvider{},
|
||||
// &EC2RoleProvider{},
|
||||
// })
|
||||
//
|
||||
// // Usage of ChainCredentials with aws.Config
|
||||
// svc := ec2.New(&aws.Config{Credentials: creds})
|
||||
//
|
||||
type ChainProvider struct {
|
||||
Providers []Provider
|
||||
curr Provider
|
||||
}
|
||||
|
||||
// NewChainCredentials returns a pointer to a new Credentials object
|
||||
// wrapping a chain of providers.
|
||||
func NewChainCredentials(providers []Provider) *Credentials {
|
||||
return NewCredentials(&ChainProvider{
|
||||
Providers: append([]Provider{}, providers...),
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve returns the credentials value, or an error if no provider returned
|
||||
// without error.
|
||||
//
|
||||
// If a provider is found it will be cached and any calls to IsExpired()
|
||||
// will return the expired state of the cached provider.
|
||||
func (c *ChainProvider) Retrieve() (Value, error) {
|
||||
for _, p := range c.Providers {
|
||||
if creds, err := p.Retrieve(); err == nil {
|
||||
c.curr = p
|
||||
return creds, nil
|
||||
}
|
||||
}
|
||||
c.curr = nil
|
||||
|
||||
// TODO better error reporting. maybe report error for each failed retrieve?
|
||||
|
||||
return Value{}, ErrNoValidProvidersFoundInChain
|
||||
}
|
||||
|
||||
// IsExpired will return the expired state of the currently cached provider
|
||||
// if there is one. If there is no current provider, true will be returned.
|
||||
func (c *ChainProvider) IsExpired() bool {
|
||||
if c.curr != nil {
|
||||
return c.curr.IsExpired()
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
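As a hedged sketch of the wiring described in the comment above, a caller could chain the environment and EC2 role providers like this (a standalone snippet, not part of the vendored file):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// The first provider that returns credentials without error wins.
	creds := credentials.NewChainCredentials(
		[]credentials.Provider{
			&credentials.EnvProvider{},
			&credentials.EC2RoleProvider{},
		})

	v, err := creds.Get()
	if err != nil {
		// Neither the environment nor the EC2 metadata service had credentials.
		fmt.Println(err)
		return
	}
	fmt.Println(v.AccessKeyID)
}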
73
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go
generated
vendored
Normal file
73
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go
generated
vendored
Normal file
@ -0,0 +1,73 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestChainProviderGet(t *testing.T) {
|
||||
p := &ChainProvider{
|
||||
Providers: []Provider{
|
||||
&stubProvider{err: awserr.New("FirstError", "first provider error", nil)},
|
||||
&stubProvider{err: awserr.New("SecondError", "second provider error", nil)},
|
||||
&stubProvider{
|
||||
creds: Value{
|
||||
AccessKeyID: "AKID",
|
||||
SecretAccessKey: "SECRET",
|
||||
SessionToken: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
creds, err := p.Retrieve()
|
||||
assert.Nil(t, err, "Expect no error")
|
||||
assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
|
||||
assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
|
||||
assert.Empty(t, creds.SessionToken, "Expect session token to be empty")
|
||||
}
|
||||
|
||||
func TestChainProviderIsExpired(t *testing.T) {
|
||||
stubProvider := &stubProvider{expired: true}
|
||||
p := &ChainProvider{
|
||||
Providers: []Provider{
|
||||
stubProvider,
|
||||
},
|
||||
}
|
||||
|
||||
assert.True(t, p.IsExpired(), "Expect expired to be true before any Retrieve")
|
||||
_, err := p.Retrieve()
|
||||
assert.Nil(t, err, "Expect no error")
|
||||
assert.False(t, p.IsExpired(), "Expect not expired after retrieve")
|
||||
|
||||
stubProvider.expired = true
|
||||
assert.True(t, p.IsExpired(), "Expect return of expired provider")
|
||||
|
||||
_, err = p.Retrieve()
|
||||
assert.False(t, p.IsExpired(), "Expect not expired after retrieve")
|
||||
}
|
||||
|
||||
func TestChainProviderWithNoProvider(t *testing.T) {
|
||||
p := &ChainProvider{
|
||||
Providers: []Provider{},
|
||||
}
|
||||
|
||||
assert.True(t, p.IsExpired(), "Expect expired with no providers")
|
||||
_, err := p.Retrieve()
|
||||
assert.Equal(t, ErrNoValidProvidersFoundInChain, err, "Expect no providers error returned")
|
||||
}
|
||||
|
||||
func TestChainProviderWithNoValidProvider(t *testing.T) {
|
||||
p := &ChainProvider{
|
||||
Providers: []Provider{
|
||||
&stubProvider{err: awserr.New("FirstError", "first provider error", nil)},
|
||||
&stubProvider{err: awserr.New("SecondError", "second provider error", nil)},
|
||||
},
|
||||
}
|
||||
|
||||
assert.True(t, p.IsExpired(), "Expect expired with no providers")
|
||||
_, err := p.Retrieve()
|
||||
assert.Equal(t, ErrNoValidProvidersFoundInChain, err, "Expect no providers error returned")
|
||||
}
|
220
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
generated
vendored
Normal file
220
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
generated
vendored
Normal file
@ -0,0 +1,220 @@
|
||||
// Package credentials provides credential retrieval and management
|
||||
//
|
||||
// The Credentials is the primary method of getting access to and managing
|
||||
// credentials Values. Using dependency injection retrieval of the credential
|
||||
// values is handled by an object which satisfies the Provider interface.
|
||||
//
|
||||
// By default the Credentials.Get() will cache the successful result of a
|
||||
// Provider's Retrieve() until Provider.IsExpired() returns true. At which
|
||||
// point Credentials will call Provider's Retrieve() to get new credential Value.
|
||||
//
|
||||
// The Provider is responsible for determining when credentials Value have expired.
|
||||
// It is also important to note that Credentials will always call Retrieve the
|
||||
// first time Credentials.Get() is called.
|
||||
//
|
||||
// Example of using the environment variable credentials.
|
||||
//
|
||||
// creds := NewEnvCredentials()
|
||||
//
|
||||
// // Retrieve the credentials value
|
||||
// credValue, err := creds.Get()
|
||||
// if err != nil {
|
||||
// // handle error
|
||||
// }
|
||||
//
|
||||
// Example of forcing credentials to expire and be refreshed on the next Get().
|
||||
// This may be helpful to proactively expire credentials and refresh them sooner
|
||||
// than they would naturally expire on their own.
|
||||
//
|
||||
// creds := NewCredentials(&EC2RoleProvider{})
|
||||
// creds.Expire()
|
||||
// credsValue, err := creds.Get()
|
||||
// // New credentials will be retrieved instead of from cache.
|
||||
//
|
||||
//
|
||||
// Custom Provider
|
||||
//
|
||||
// Each Provider built into this package also provides a helper method to generate
|
||||
// a Credentials pointer setup with the provider. To use a custom Provider just
|
||||
// create a type which satisfies the Provider interface and pass it to the
|
||||
// NewCredentials method.
|
||||
//
|
||||
// type MyProvider struct{}
|
||||
// func (m *MyProvider) Retrieve() (Value, error) {...}
|
||||
// func (m *MyProvider) IsExpired() bool {...}
|
||||
//
|
||||
// creds := NewCredentials(&MyProvider{})
|
||||
// credValue, err := creds.Get()
|
||||
//
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AnonymousCredentials is an empty Credential object that can be used as
|
||||
// dummy placeholder credentials for requests that do not need to be signed.
|
||||
//
|
||||
// This Credentials can be used to configure a service to not sign requests
|
||||
// when making service API calls. For example, when accessing public
|
||||
// s3 buckets.
|
||||
//
|
||||
// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
|
||||
// // Access public S3 buckets.
|
||||
//
|
||||
// @readonly
|
||||
var AnonymousCredentials = NewStaticCredentials("", "", "")
|
||||
|
||||
// A Value is the AWS credentials value for individual credential fields.
|
||||
type Value struct {
|
||||
// AWS Access key ID
|
||||
AccessKeyID string
|
||||
|
||||
// AWS Secret Access Key
|
||||
SecretAccessKey string
|
||||
|
||||
// AWS Session Token
|
||||
SessionToken string
|
||||
}
|
||||
|
||||
// A Provider is the interface for any component which will provide credentials
|
||||
// Value. A provider is required to manage its own Expired state, and what it
|
||||
// means to be expired.
|
||||
//
|
||||
// The Provider should not need to implement its own mutexes, because
|
||||
// that will be managed by Credentials.
|
||||
type Provider interface {
|
||||
// Retrieve returns the credentials value if it was successfully retrieved.
|
||||
// An error is returned if the value was not obtainable, or empty.
|
||||
Retrieve() (Value, error)
|
||||
|
||||
// IsExpired returns if the credentials are no longer valid, and need
|
||||
// to be retrieved.
|
||||
IsExpired() bool
|
||||
}
|
||||
|
||||
// An Expiry provides shared expiration logic to be used by credentials
|
||||
// providers to implement expiry functionality.
|
||||
//
|
||||
// The best method to use this struct is as an anonymous field within the
|
||||
// provider's struct.
|
||||
//
|
||||
// Example:
|
||||
// type EC2RoleProvider struct {
|
||||
// Expiry
|
||||
// ...
|
||||
// }
|
||||
type Expiry struct {
|
||||
// The date/time when to expire on
|
||||
expiration time.Time
|
||||
|
||||
// If set will be used by IsExpired to determine the current time.
|
||||
// Defaults to time.Now if CurrentTime is not set. Available for testing
|
||||
// to be able to mock out the current time.
|
||||
CurrentTime func() time.Time
|
||||
}
|
||||
|
||||
// SetExpiration sets the expiration IsExpired will check when called.
|
||||
//
|
||||
// If window is greater than 0 the expiration time will be reduced by the
|
||||
// window value.
|
||||
//
|
||||
// Using a window is helpful to trigger credentials to expire sooner than
|
||||
// the expiration time given to ensure no requests are made with expired
|
||||
// tokens.
|
||||
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
|
||||
e.expiration = expiration
|
||||
if window > 0 {
|
||||
e.expiration = e.expiration.Add(-window)
|
||||
}
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials are expired.
|
||||
func (e *Expiry) IsExpired() bool {
|
||||
if e.CurrentTime == nil {
|
||||
e.CurrentTime = time.Now
|
||||
}
|
||||
return e.expiration.Before(e.CurrentTime())
|
||||
}
|
||||
|
||||
// A Credentials provides synchronous safe retrieval of AWS credentials Value.
|
||||
// Credentials will cache the credentials value until they expire. Once the value
|
||||
// expires the next Get will attempt to retrieve valid credentials.
|
||||
//
|
||||
// Credentials is safe to use across multiple goroutines and will manage the
|
||||
// synchronous state so the Providers do not need to implement their own
|
||||
// synchronization.
|
||||
//
|
||||
// The first Credentials.Get() will always call Provider.Retrieve() to get the
|
||||
// first instance of the credentials Value. All calls to Get() after that
|
||||
// will return the cached credentials Value until IsExpired() returns true.
|
||||
type Credentials struct {
|
||||
creds Value
|
||||
forceRefresh bool
|
||||
m sync.Mutex
|
||||
|
||||
provider Provider
|
||||
}
|
||||
|
||||
// NewCredentials returns a pointer to a new Credentials with the provider set.
|
||||
func NewCredentials(provider Provider) *Credentials {
|
||||
return &Credentials{
|
||||
provider: provider,
|
||||
forceRefresh: true,
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns the credentials value, or error if the credentials Value failed
|
||||
// to be retrieved.
|
||||
//
|
||||
// Will return the cached credentials Value if it has not expired. If the
|
||||
// credentials Value has expired the Provider's Retrieve() will be called
|
||||
// to refresh the credentials.
|
||||
//
|
||||
// If Credentials.Expire() was called the credentials Value will be force
|
||||
// expired, and the next call to Get() will cause them to be refreshed.
|
||||
func (c *Credentials) Get() (Value, error) {
|
||||
c.m.Lock()
|
||||
defer c.m.Unlock()
|
||||
|
||||
if c.isExpired() {
|
||||
creds, err := c.provider.Retrieve()
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
c.creds = creds
|
||||
c.forceRefresh = false
|
||||
}
|
||||
|
||||
return c.creds, nil
|
||||
}
|
||||
|
||||
// Expire expires the credentials and forces them to be retrieved on the
|
||||
// next call to Get().
|
||||
//
|
||||
// This will override the Provider's expired state, and force Credentials
|
||||
// to call the Provider's Retrieve().
|
||||
func (c *Credentials) Expire() {
|
||||
c.m.Lock()
|
||||
defer c.m.Unlock()
|
||||
|
||||
c.forceRefresh = true
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials are no longer valid, and need
|
||||
// to be retrieved.
|
||||
//
|
||||
// If the Credentials were forced to be expired with Expire() this will
|
||||
// reflect that override.
|
||||
func (c *Credentials) IsExpired() bool {
|
||||
c.m.Lock()
|
||||
defer c.m.Unlock()
|
||||
|
||||
return c.isExpired()
|
||||
}
|
||||
|
||||
// isExpired helper method wrapping the definition of expired credentials.
|
||||
func (c *Credentials) isExpired() bool {
|
||||
return c.forceRefresh || c.provider.IsExpired()
|
||||
}
|
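The package comment above outlines a custom Provider; a minimal, self-contained version of that outline (the fixedProvider type is purely illustrative) might look like:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// fixedProvider satisfies the Provider interface with hard-coded values.
type fixedProvider struct{ retrieved bool }

func (p *fixedProvider) Retrieve() (credentials.Value, error) {
	p.retrieved = true
	return credentials.Value{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}, nil
}

func (p *fixedProvider) IsExpired() bool { return !p.retrieved }

func main() {
	creds := credentials.NewCredentials(&fixedProvider{})
	v, err := creds.Get() // the first Get always calls Retrieve
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AccessKeyID) // "AKID"
}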
62
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go
generated
vendored
Normal file
62
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type stubProvider struct {
|
||||
creds Value
|
||||
expired bool
|
||||
err error
|
||||
}
|
||||
|
||||
func (s *stubProvider) Retrieve() (Value, error) {
|
||||
s.expired = false
|
||||
return s.creds, s.err
|
||||
}
|
||||
func (s *stubProvider) IsExpired() bool {
|
||||
return s.expired
|
||||
}
|
||||
|
||||
func TestCredentialsGet(t *testing.T) {
|
||||
c := NewCredentials(&stubProvider{
|
||||
creds: Value{
|
||||
AccessKeyID: "AKID",
|
||||
SecretAccessKey: "SECRET",
|
||||
SessionToken: "",
|
||||
},
|
||||
expired: true,
|
||||
})
|
||||
|
||||
creds, err := c.Get()
|
||||
assert.Nil(t, err, "Expected no error")
|
||||
assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
|
||||
assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
|
||||
assert.Empty(t, creds.SessionToken, "Expect session token to be empty")
|
||||
}
|
||||
|
||||
func TestCredentialsGetWithError(t *testing.T) {
|
||||
c := NewCredentials(&stubProvider{err: awserr.New("provider error", "", nil), expired: true})
|
||||
|
||||
_, err := c.Get()
|
||||
assert.Equal(t, "provider error", err.(awserr.Error).Code(), "Expected provider error")
|
||||
}
|
||||
|
||||
func TestCredentialsExpire(t *testing.T) {
|
||||
stub := &stubProvider{}
|
||||
c := NewCredentials(stub)
|
||||
|
||||
stub.expired = false
|
||||
assert.True(t, c.IsExpired(), "Expected to start out expired")
|
||||
c.Expire()
|
||||
assert.True(t, c.IsExpired(), "Expected to be expired")
|
||||
|
||||
c.forceRefresh = false
|
||||
assert.False(t, c.IsExpired(), "Expected not to be expired")
|
||||
|
||||
stub.expired = true
|
||||
assert.True(t, c.IsExpired(), "Expected to be expired")
|
||||
}
|
162
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider.go
generated
vendored
Normal file
162
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider.go
generated
vendored
Normal file
@ -0,0 +1,162 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
const metadataCredentialsEndpoint = "http://169.254.169.254/latest/meta-data/iam/security-credentials/"
|
||||
|
||||
// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if
|
||||
// those credentials are expired.
|
||||
//
|
||||
// Example how to configure the EC2RoleProvider with custom http Client, Endpoint
|
||||
// or ExpiryWindow
|
||||
//
|
||||
// p := &credentials.EC2RoleProvider{
|
||||
// // Pass in a custom timeout to be used when requesting
|
||||
// // IAM EC2 Role credentials.
|
||||
// Client: &http.Client{
|
||||
// Timeout: 10 * time.Second,
|
||||
// },
|
||||
// // Use the default EC2 Role metadata endpoint; alternate endpoints can be
|
||||
// // specified by setting Endpoint to something else.
|
||||
// Endpoint: "",
|
||||
// // Do not use early expiry of credentials. If a non zero value is
|
||||
// // specified the credentials will be expired early
|
||||
// ExpiryWindow: 0,
|
||||
// }
|
||||
type EC2RoleProvider struct {
|
||||
Expiry
|
||||
|
||||
// Endpoint must be a fully qualified URL
|
||||
Endpoint string
|
||||
|
||||
// HTTP client to use when connecting to EC2 service
|
||||
Client *http.Client
|
||||
|
||||
// ExpiryWindow will allow the credentials to trigger refreshing prior to
|
||||
// the credentials actually expiring. This is beneficial so race conditions
|
||||
// with expiring credentials do not cause requests to fail unexpectedly
|
||||
// due to ExpiredTokenException exceptions.
|
||||
//
|
||||
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
|
||||
// 10 seconds before the credentials are actually expired.
|
||||
//
|
||||
// If ExpiryWindow is 0 or less it will be ignored.
|
||||
ExpiryWindow time.Duration
|
||||
}
|
||||
|
||||
// NewEC2RoleCredentials returns a pointer to a new Credentials object
|
||||
// wrapping the EC2RoleProvider.
|
||||
//
|
||||
// Takes a custom http.Client which can be configured for custom handling of
|
||||
// things such as timeout.
|
||||
//
|
||||
// Endpoint is the URL that the EC2RoleProvider will connect to when retrieving
|
||||
// role and credentials.
|
||||
//
|
||||
// Window is the expiry window that will be subtracted from the expiry returned
|
||||
// by the role credential request. This is done so that the credentials will
|
||||
// expire sooner than their actual lifespan.
|
||||
func NewEC2RoleCredentials(client *http.Client, endpoint string, window time.Duration) *Credentials {
|
||||
return NewCredentials(&EC2RoleProvider{
|
||||
Endpoint: endpoint,
|
||||
Client: client,
|
||||
ExpiryWindow: window,
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the EC2 service.
|
||||
// An error will be returned if the request fails, or it is unable to extract
|
||||
// the desired credentials.
|
||||
func (m *EC2RoleProvider) Retrieve() (Value, error) {
|
||||
if m.Client == nil {
|
||||
m.Client = http.DefaultClient
|
||||
}
|
||||
if m.Endpoint == "" {
|
||||
m.Endpoint = metadataCredentialsEndpoint
|
||||
}
|
||||
|
||||
credsList, err := requestCredList(m.Client, m.Endpoint)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
|
||||
if len(credsList) == 0 {
|
||||
return Value{}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
|
||||
}
|
||||
credsName := credsList[0]
|
||||
|
||||
roleCreds, err := requestCred(m.Client, m.Endpoint, credsName)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
|
||||
m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
|
||||
|
||||
return Value{
|
||||
AccessKeyID: roleCreds.AccessKeyID,
|
||||
SecretAccessKey: roleCreds.SecretAccessKey,
|
||||
SessionToken: roleCreds.Token,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// An ec2RoleCredRespBody provides the shape for deserializing credential
|
||||
// request responses.
|
||||
type ec2RoleCredRespBody struct {
|
||||
Expiration time.Time
|
||||
AccessKeyID string
|
||||
SecretAccessKey string
|
||||
Token string
|
||||
}
|
||||
|
||||
// requestCredList requests a list of credentials from the EC2 service.
|
||||
// If there are no credentials, or there is an error making or receiving the request, an error is returned.
|
||||
func requestCredList(client *http.Client, endpoint string) ([]string, error) {
|
||||
resp, err := client.Get(endpoint)
|
||||
if err != nil {
|
||||
return nil, awserr.New("ListEC2Role", "failed to list EC2 Roles", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
credsList := []string{}
|
||||
s := bufio.NewScanner(resp.Body)
|
||||
for s.Scan() {
|
||||
credsList = append(credsList, s.Text())
|
||||
}
|
||||
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, awserr.New("ReadEC2Role", "failed to read list of EC2 Roles", err)
|
||||
}
|
||||
|
||||
return credsList, nil
|
||||
}
|
||||
|
||||
// requestCred requests the credentials for a specific credentials name from the EC2 service.
|
||||
//
|
||||
// If the credentials cannot be found, or there is an error reading the response
|
||||
// and error will be returned.
|
||||
func requestCred(client *http.Client, endpoint, credsName string) (*ec2RoleCredRespBody, error) {
|
||||
resp, err := client.Get(endpoint + credsName)
|
||||
if err != nil {
|
||||
return nil, awserr.New("GetEC2RoleCredentials",
|
||||
fmt.Sprintf("failed to get %s EC2 Role credentials", credsName),
|
||||
err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
respCreds := &ec2RoleCredRespBody{}
|
||||
if err := json.NewDecoder(resp.Body).Decode(respCreds); err != nil {
|
||||
return nil, awserr.New("DecodeEC2RoleCredentials",
|
||||
fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName),
|
||||
err)
|
||||
}
|
||||
|
||||
return respCreds, nil
|
||||
}
|
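Following the doc comment above, a caller might build the provider through the NewEC2RoleCredentials helper; the timeout and expiry window values here are illustrative only:

package main

import (
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	creds := credentials.NewEC2RoleCredentials(
		&http.Client{Timeout: 10 * time.Second}, // custom request timeout
		"",            // empty Endpoint falls back to the metadata endpoint
		5*time.Minute, // treat credentials as expired 5 minutes early
	)
	_ = creds // hand creds to a service via aws.Config{Credentials: creds}
}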
108
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider_test.go
generated
vendored
Normal file
108
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider_test.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func initTestServer(expireOn string) *httptest.Server {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.RequestURI == "/" {
|
||||
fmt.Fprintln(w, "/creds")
|
||||
} else {
|
||||
fmt.Fprintf(w, `{
|
||||
"AccessKeyId" : "accessKey",
|
||||
"SecretAccessKey" : "secret",
|
||||
"Token" : "token",
|
||||
"Expiration" : "%s"
|
||||
}`, expireOn)
|
||||
}
|
||||
}))
|
||||
|
||||
return server
|
||||
}
|
||||
|
||||
func TestEC2RoleProvider(t *testing.T) {
|
||||
server := initTestServer("2014-12-16T01:51:37Z")
|
||||
defer server.Close()
|
||||
|
||||
p := &EC2RoleProvider{Client: http.DefaultClient, Endpoint: server.URL}
|
||||
|
||||
creds, err := p.Retrieve()
|
||||
assert.Nil(t, err, "Expect no error")
|
||||
|
||||
assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
|
||||
assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
|
||||
assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
|
||||
}
|
||||
|
||||
func TestEC2RoleProviderIsExpired(t *testing.T) {
|
||||
server := initTestServer("2014-12-16T01:51:37Z")
|
||||
defer server.Close()
|
||||
|
||||
p := &EC2RoleProvider{Client: http.DefaultClient, Endpoint: server.URL}
|
||||
p.CurrentTime = func() time.Time {
|
||||
return time.Date(2014, 12, 15, 21, 26, 0, 0, time.UTC)
|
||||
}
|
||||
|
||||
assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.")
|
||||
|
||||
_, err := p.Retrieve()
|
||||
assert.Nil(t, err, "Expect no error")
|
||||
|
||||
assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.")
|
||||
|
||||
p.CurrentTime = func() time.Time {
|
||||
return time.Date(3014, 12, 15, 21, 26, 0, 0, time.UTC)
|
||||
}
|
||||
|
||||
assert.True(t, p.IsExpired(), "Expect creds to be expired.")
|
||||
}
|
||||
|
||||
func TestEC2RoleProviderExpiryWindowIsExpired(t *testing.T) {
|
||||
server := initTestServer("2014-12-16T01:51:37Z")
|
||||
defer server.Close()
|
||||
|
||||
p := &EC2RoleProvider{Client: http.DefaultClient, Endpoint: server.URL, ExpiryWindow: time.Hour * 1}
|
||||
p.CurrentTime = func() time.Time {
|
||||
return time.Date(2014, 12, 15, 0, 51, 37, 0, time.UTC)
|
||||
}
|
||||
|
||||
assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.")
|
||||
|
||||
_, err := p.Retrieve()
|
||||
assert.Nil(t, err, "Expect no error")
|
||||
|
||||
assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.")
|
||||
|
||||
p.CurrentTime = func() time.Time {
|
||||
return time.Date(2014, 12, 16, 0, 55, 37, 0, time.UTC)
|
||||
}
|
||||
|
||||
assert.True(t, p.IsExpired(), "Expect creds to be expired.")
|
||||
}
|
||||
|
||||
func BenchmarkEC2RoleProvider(b *testing.B) {
|
||||
server := initTestServer("2014-12-16T01:51:37Z")
|
||||
defer server.Close()
|
||||
|
||||
p := &EC2RoleProvider{Client: http.DefaultClient, Endpoint: server.URL}
|
||||
_, err := p.Retrieve()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
_, err := p.Retrieve()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
73
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
generated
vendored
Normal file
73
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
generated
vendored
Normal file
@ -0,0 +1,73 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
|
||||
// found in the process's environment.
|
||||
//
|
||||
// @readonly
|
||||
ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
|
||||
|
||||
// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
|
||||
// can't be found in the process's environment.
|
||||
//
|
||||
// @readonly
|
||||
ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
|
||||
)
|
||||
|
||||
// An EnvProvider retrieves credentials from the environment variables of the
|
||||
// running process. Environment credentials never expire.
|
||||
//
|
||||
// Environment variables used:
|
||||
//
|
||||
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
|
||||
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
|
||||
type EnvProvider struct {
|
||||
retrieved bool
|
||||
}
|
||||
|
||||
// NewEnvCredentials returns a pointer to a new Credentials object
|
||||
// wrapping the environment variable provider.
|
||||
func NewEnvCredentials() *Credentials {
|
||||
return NewCredentials(&EnvProvider{})
|
||||
}
|
||||
|
||||
// Retrieve retrieves the keys from the environment.
|
||||
func (e *EnvProvider) Retrieve() (Value, error) {
|
||||
e.retrieved = false
|
||||
|
||||
id := os.Getenv("AWS_ACCESS_KEY_ID")
|
||||
if id == "" {
|
||||
id = os.Getenv("AWS_ACCESS_KEY")
|
||||
}
|
||||
|
||||
secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
|
||||
if secret == "" {
|
||||
secret = os.Getenv("AWS_SECRET_KEY")
|
||||
}
|
||||
|
||||
if id == "" {
|
||||
return Value{}, ErrAccessKeyIDNotFound
|
||||
}
|
||||
|
||||
if secret == "" {
|
||||
return Value{}, ErrSecretAccessKeyNotFound
|
||||
}
|
||||
|
||||
e.retrieved = true
|
||||
return Value{
|
||||
AccessKeyID: id,
|
||||
SecretAccessKey: secret,
|
||||
SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials have been retrieved.
|
||||
func (e *EnvProvider) IsExpired() bool {
|
||||
return !e.retrieved
|
||||
}
|
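A short sketch of the environment-variable provider above; the key values are placeholders set only for the example:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	os.Setenv("AWS_ACCESS_KEY_ID", "AKID")       // placeholder
	os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET") // placeholder

	creds := credentials.NewEnvCredentials()
	v, err := creds.Get()
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AccessKeyID) // "AKID"
}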
70
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go
generated
vendored
Normal file
70
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEnvProviderRetrieve(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("AWS_ACCESS_KEY_ID", "access")
|
||||
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
|
||||
os.Setenv("AWS_SESSION_TOKEN", "token")
|
||||
|
||||
e := EnvProvider{}
|
||||
creds, err := e.Retrieve()
|
||||
assert.Nil(t, err, "Expect no error")
|
||||
|
||||
assert.Equal(t, "access", creds.AccessKeyID, "Expect access key ID to match")
|
||||
assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
|
||||
assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
|
||||
}
|
||||
|
||||
func TestEnvProviderIsExpired(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("AWS_ACCESS_KEY_ID", "access")
|
||||
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
|
||||
os.Setenv("AWS_SESSION_TOKEN", "token")
|
||||
|
||||
e := EnvProvider{}
|
||||
|
||||
assert.True(t, e.IsExpired(), "Expect creds to be expired before retrieve.")
|
||||
|
||||
_, err := e.Retrieve()
|
||||
assert.Nil(t, err, "Expect no error")
|
||||
|
||||
assert.False(t, e.IsExpired(), "Expect creds to not be expired after retrieve.")
|
||||
}
|
||||
|
||||
func TestEnvProviderNoAccessKeyID(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
|
||||
|
||||
e := EnvProvider{}
|
||||
creds, err := e.Retrieve()
|
||||
assert.Equal(t, ErrAccessKeyIDNotFound, err, "ErrAccessKeyIDNotFound expected, but was %#v error: %#v", creds, err)
|
||||
}
|
||||
|
||||
func TestEnvProviderNoSecretAccessKey(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("AWS_ACCESS_KEY_ID", "access")
|
||||
|
||||
e := EnvProvider{}
|
||||
creds, err := e.Retrieve()
|
||||
assert.Equal(t, ErrSecretAccessKeyNotFound, err, "ErrSecretAccessKeyNotFound expected, but was %#v error: %#v", creds, err)
|
||||
}
|
||||
|
||||
func TestEnvProviderAlternateNames(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("AWS_ACCESS_KEY", "access")
|
||||
os.Setenv("AWS_SECRET_KEY", "secret")
|
||||
|
||||
e := EnvProvider{}
|
||||
creds, err := e.Retrieve()
|
||||
assert.Nil(t, err, "Expect no error")
|
||||
|
||||
assert.Equal(t, "access", creds.AccessKeyID, "Expected access key ID")
|
||||
assert.Equal(t, "secret", creds.SecretAccessKey, "Expected secret access key")
|
||||
assert.Empty(t, creds.SessionToken, "Expected no token")
|
||||
}
|
8
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini
generated
vendored
Normal file
8
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
[default]
|
||||
aws_access_key_id = accessKey
|
||||
aws_secret_access_key = secret
|
||||
aws_session_token = token
|
||||
|
||||
[no_token]
|
||||
aws_access_key_id = accessKey
|
||||
aws_secret_access_key = secret
|
135
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
generated
vendored
Normal file
135
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/vaughan0/go-ini"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
|
||||
//
|
||||
// @readonly
|
||||
ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
|
||||
)
|
||||
|
||||
// A SharedCredentialsProvider retrieves credentials from the current user's home
|
||||
// directory, and keeps track if those credentials are expired.
|
||||
//
|
||||
// Profile ini file example: $HOME/.aws/credentials
|
||||
type SharedCredentialsProvider struct {
|
||||
// Path to the shared credentials file. If empty will default to current user's
|
||||
// home directory.
|
||||
Filename string
|
||||
|
||||
// AWS Profile to extract credentials from the shared credentials file. If empty
|
||||
// will default to environment variable "AWS_PROFILE" or "default" if
|
||||
// environment variable is also not set.
|
||||
Profile string
|
||||
|
||||
// retrieved states if the credentials have been successfully retrieved.
|
||||
retrieved bool
|
||||
}
|
||||
|
||||
// NewSharedCredentials returns a pointer to a new Credentials object
|
||||
// wrapping the Profile file provider.
|
||||
func NewSharedCredentials(filename, profile string) *Credentials {
|
||||
return NewCredentials(&SharedCredentialsProvider{
|
||||
Filename: filename,
|
||||
Profile: profile,
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve reads and extracts the shared credentials from the current
|
||||
// user's home directory.
|
||||
func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
|
||||
p.retrieved = false
|
||||
|
||||
filename, err := p.filename()
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
|
||||
creds, err := loadProfile(filename, p.profile())
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
|
||||
p.retrieved = true
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
// IsExpired returns if the shared credentials have expired.
|
||||
func (p *SharedCredentialsProvider) IsExpired() bool {
|
||||
return !p.retrieved
|
||||
}
|
||||
|
||||
// loadProfile loads from the file pointed to by the shared credentials filename for the given profile.
|
||||
// The credentials retrieved from the profile will be returned or error. Error will be
|
||||
// returned if it fails to read from the file, or the data is invalid.
|
||||
func loadProfile(filename, profile string) (Value, error) {
|
||||
config, err := ini.LoadFile(filename)
|
||||
if err != nil {
|
||||
return Value{}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
|
||||
}
|
||||
iniProfile := config.Section(profile)
|
||||
|
||||
id, ok := iniProfile["aws_access_key_id"]
|
||||
if !ok {
|
||||
return Value{}, awserr.New("SharedCredsAccessKey",
|
||||
fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
|
||||
nil)
|
||||
}
|
||||
|
||||
secret, ok := iniProfile["aws_secret_access_key"]
|
||||
if !ok {
|
||||
return Value{}, awserr.New("SharedCredsSecret",
|
||||
fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
|
||||
nil)
|
||||
}
|
||||
|
||||
token := iniProfile["aws_session_token"]
|
||||
|
||||
return Value{
|
||||
AccessKeyID: id,
|
||||
SecretAccessKey: secret,
|
||||
SessionToken: token,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// filename returns the filename to use to read AWS shared credentials.
|
||||
//
|
||||
// Will return an error if the user's home directory path cannot be found.
|
||||
func (p *SharedCredentialsProvider) filename() (string, error) {
|
||||
if p.Filename == "" {
|
||||
homeDir := os.Getenv("HOME") // *nix
|
||||
if homeDir == "" { // Windows
|
||||
homeDir = os.Getenv("USERPROFILE")
|
||||
}
|
||||
if homeDir == "" {
|
||||
return "", ErrSharedCredentialsHomeNotFound
|
||||
}
|
||||
|
||||
p.Filename = filepath.Join(homeDir, ".aws", "credentials")
|
||||
}
|
||||
|
||||
return p.Filename, nil
|
||||
}
|
||||
|
||||
// profile returns the AWS shared credentials profile. If empty will read
|
||||
// environment variable "AWS_PROFILE". If that is not set profile will
|
||||
// return "default".
|
||||
func (p *SharedCredentialsProvider) profile() string {
|
||||
if p.Profile == "" {
|
||||
p.Profile = os.Getenv("AWS_PROFILE")
|
||||
}
|
||||
if p.Profile == "" {
|
||||
p.Profile = "default"
|
||||
}
|
||||
|
||||
return p.Profile
|
||||
}
|
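A hedged usage sketch of the shared credentials provider above; empty arguments fall back to $HOME/.aws/credentials and the AWS_PROFILE (or "default") profile, as documented:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	creds := credentials.NewSharedCredentials("", "")
	v, err := creds.Get()
	if err != nil {
		fmt.Println("no shared credentials:", err)
		return
	}
	fmt.Println(v.AccessKeyID)
}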
77
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go
generated
vendored
Normal file
77
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSharedCredentialsProvider(t *testing.T) {
|
||||
os.Clearenv()
|
||||
|
||||
p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
|
||||
creds, err := p.Retrieve()
|
||||
assert.Nil(t, err, "Expect no error")
|
||||
|
||||
assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
|
||||
assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
|
||||
assert.Equal(t, "token", creds.SessionToken, "Expect session token to match")
|
||||
}
|
||||
|
||||
func TestSharedCredentialsProviderIsExpired(t *testing.T) {
|
||||
os.Clearenv()
|
||||
|
||||
p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
|
||||
|
||||
assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve")
|
||||
|
||||
_, err := p.Retrieve()
|
||||
assert.Nil(t, err, "Expect no error")
|
||||
|
||||
assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve")
|
||||
}
|
||||
|
||||
func TestSharedCredentialsProviderWithAWS_PROFILE(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("AWS_PROFILE", "no_token")
|
||||
|
||||
p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
|
||||
creds, err := p.Retrieve()
|
||||
assert.Nil(t, err, "Expect no error")
|
||||
|
||||
assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
|
||||
assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
|
||||
assert.Empty(t, creds.SessionToken, "Expect no token")
|
||||
}
|
||||
|
||||
func TestSharedCredentialsProviderWithoutTokenFromProfile(t *testing.T) {
|
||||
os.Clearenv()
|
||||
|
||||
p := SharedCredentialsProvider{Filename: "example.ini", Profile: "no_token"}
|
||||
creds, err := p.Retrieve()
|
||||
assert.Nil(t, err, "Expect no error")
|
||||
|
||||
assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match")
|
||||
assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match")
|
||||
assert.Empty(t, creds.SessionToken, "Expect no token")
|
||||
}
|
||||
|
||||
func BenchmarkSharedCredentialsProvider(b *testing.B) {
|
||||
os.Clearenv()
|
||||
|
||||
p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
|
||||
_, err := p.Retrieve()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
_, err := p.Retrieve()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
44
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
generated
vendored
Normal file
44
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
|
||||
//
|
||||
// @readonly
|
||||
ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
|
||||
)
|
||||
|
||||
// A StaticProvider is a set of credentials which are set programmatically,
|
||||
// and will never expire.
|
||||
type StaticProvider struct {
|
||||
Value
|
||||
}
|
||||
|
||||
// NewStaticCredentials returns a pointer to a new Credentials object
|
||||
// wrapping a static credentials value provider.
|
||||
func NewStaticCredentials(id, secret, token string) *Credentials {
|
||||
return NewCredentials(&StaticProvider{Value: Value{
|
||||
AccessKeyID: id,
|
||||
SecretAccessKey: secret,
|
||||
SessionToken: token,
|
||||
}})
|
||||
}
|
||||
|
||||
// Retrieve returns the credentials or error if the credentials are invalid.
|
||||
func (s *StaticProvider) Retrieve() (Value, error) {
|
||||
if s.AccessKeyID == "" || s.SecretAccessKey == "" {
|
||||
return Value{}, ErrStaticCredentialsEmpty
|
||||
}
|
||||
|
||||
return s.Value, nil
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials are expired.
|
||||
//
|
||||
// For StaticProvider, the credentials never expire.
|
||||
func (s *StaticProvider) IsExpired() bool {
|
||||
return false
|
||||
}
|
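A small sketch of the static provider above; the key values are placeholders, and the credentials never expire once retrieved:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
	v, err := creds.Get()
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AccessKeyID, creds.IsExpired()) // AKID false
}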
34
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go
generated
vendored
Normal file
34
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestStaticProviderGet(t *testing.T) {
|
||||
s := StaticProvider{
|
||||
Value: Value{
|
||||
AccessKeyID: "AKID",
|
||||
SecretAccessKey: "SECRET",
|
||||
SessionToken: "",
|
||||
},
|
||||
}
|
||||
|
||||
creds, err := s.Retrieve()
|
||||
assert.Nil(t, err, "Expect no error")
|
||||
assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match")
|
||||
assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match")
|
||||
assert.Empty(t, creds.SessionToken, "Expect no session token")
|
||||
}
|
||||
|
||||
func TestStaticProviderIsExpired(t *testing.T) {
|
||||
s := StaticProvider{
|
||||
Value: Value{
|
||||
AccessKeyID: "AKID",
|
||||
SecretAccessKey: "SECRET",
|
||||
SessionToken: "",
|
||||
},
|
||||
}
|
||||
|
||||
assert.False(t, s.IsExpired(), "Expect static credentials to never expire")
|
||||
}
|
120
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
generated
vendored
Normal file
120
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
generated
vendored
Normal file
@ -0,0 +1,120 @@
|
||||
// Package stscreds contains credential Providers to retrieve STS AWS credentials.
|
||||
//
|
||||
// STS provides multiple ways to retrieve credentials which can be used when making
|
||||
// future AWS service API operation calls.
|
||||
package stscreds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AssumeRoler represents the minimal subset of the STS client API used by this provider.
|
||||
type AssumeRoler interface {
|
||||
AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
|
||||
}
|
||||
|
||||
// AssumeRoleProvider retrieves temporary credentials from the STS service, and
|
||||
// keeps track of their expiration time. This provider must be used explicitly,
|
||||
// as it is not included in the credentials chain.
|
||||
//
|
||||
// Example how to configure a service to use this provider:
|
||||
//
|
||||
// config := &aws.Config{
|
||||
// Credentials: stscreds.NewCredentials(nil, "arn-of-the-role-to-assume", 10*time.Second),
|
||||
// }
|
||||
// // Use config for creating your AWS service.
|
||||
//
|
||||
// Example how to obtain customised credentials:
|
||||
//
|
||||
// provider := &stscreds.AssumeRoleProvider{
|
||||
// // Extend the duration to 1 hour.
|
||||
// Duration: time.Hour,
|
||||
// // Custom role name.
|
||||
// RoleSessionName: "custom-session-name",
|
||||
// }
|
||||
// creds := credentials.NewCredentials(provider)
|
||||
//
|
||||
type AssumeRoleProvider struct {
|
||||
credentials.Expiry
|
||||
|
||||
// Custom STS client. If not set the default STS client will be used.
|
||||
Client AssumeRoler
|
||||
|
||||
// Role to be assumed.
|
||||
RoleARN string
|
||||
|
||||
// Session name, if you wish to reuse the credentials elsewhere.
|
||||
RoleSessionName string
|
||||
|
||||
// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
|
||||
Duration time.Duration
|
||||
|
||||
// ExpiryWindow will allow the credentials to trigger refreshing prior to
|
||||
// the credentials actually expiring. This is beneficial so race conditions
|
||||
// with expiring credentials do not cause request to fail unexpectedly
|
||||
// due to ExpiredTokenException exceptions.
|
||||
//
|
||||
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
|
||||
// 10 seconds before the credentials are actually expired.
|
||||
//
|
||||
// If ExpiryWindow is 0 or less it will be ignored.
|
||||
ExpiryWindow time.Duration
|
||||
}
|
||||
|
||||
// NewCredentials returns a pointer to a new Credentials object wrapping the
|
||||
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
|
||||
// role will be named after a nanosecond timestamp of this operation.
|
||||
//
|
||||
// The client and roleARN parameters are used for building the "AssumeRole" call.
|
||||
// Pass nil as client to use the default STS client.
|
||||
//
|
||||
// Window is the expiry window that will be subtracted from the expiry returned
|
||||
// by the role credential request. This is done so that the credentials will
|
||||
// expire sooner than their actual lifespan.
|
||||
func NewCredentials(client AssumeRoler, roleARN string, window time.Duration) *credentials.Credentials {
|
||||
return credentials.NewCredentials(&AssumeRoleProvider{
|
||||
Client: client,
|
||||
RoleARN: roleARN,
|
||||
ExpiryWindow: window,
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve generates a new set of temporary credentials using STS.
|
||||
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
|
||||
|
||||
// Apply defaults where parameters are not set.
|
||||
if p.Client == nil {
|
||||
p.Client = sts.New(nil)
|
||||
}
|
||||
if p.RoleSessionName == "" {
|
||||
// Try to work out a role name that will hopefully end up unique.
|
||||
p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
|
||||
}
|
||||
if p.Duration == 0 {
|
||||
// Expire as often as AWS permits.
|
||||
p.Duration = 15 * time.Minute
|
||||
}
|
||||
|
||||
roleOutput, err := p.Client.AssumeRole(&sts.AssumeRoleInput{
|
||||
DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
|
||||
RoleARN: aws.String(p.RoleARN),
|
||||
RoleSessionName: aws.String(p.RoleSessionName),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return credentials.Value{}, err
|
||||
}
|
||||
|
||||
// We will proactively generate new credentials before they expire.
|
||||
p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
|
||||
|
||||
return credentials.Value{
|
||||
AccessKeyID: *roleOutput.Credentials.AccessKeyID,
|
||||
SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
|
||||
SessionToken: *roleOutput.Credentials.SessionToken,
|
||||
}, nil
|
||||
}
|
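Since this branch wires up CloudWatch, here is a hedged sketch of how the provider above could feed a CloudWatch client. The role ARN is a placeholder, and cloudwatch.New taking an *aws.Config is assumed by analogy with the other vendored service constructors (dynamodb.New, s3.New) used in this commit:

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/service/cloudwatch"
)

func main() {
	// Credentials that assume the given (hypothetical) role and refresh
	// 10 seconds before the STS-issued credentials expire.
	creds := stscreds.NewCredentials(nil, "arn:aws:iam::123456789012:role/example", 10*time.Second)

	// Hand the credentials to the service client via its config.
	cfg := &aws.Config{Credentials: creds, Region: aws.String("us-east-1")}
	svc := cloudwatch.New(cfg) // assumed constructor signature
	_ = svc
}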
58
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go
generated
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
package stscreds
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type stubSTS struct {
|
||||
}
|
||||
|
||||
func (s *stubSTS) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
|
||||
expiry := time.Now().Add(60 * time.Minute)
|
||||
return &sts.AssumeRoleOutput{
|
||||
Credentials: &sts.Credentials{
|
||||
// Just reflect the role arn to the provider.
|
||||
AccessKeyID: input.RoleARN,
|
||||
SecretAccessKey: aws.String("assumedSecretAccessKey"),
|
||||
SessionToken: aws.String("assumedSessionToken"),
|
||||
Expiration: &expiry,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func TestAssumeRoleProvider(t *testing.T) {
|
||||
stub := &stubSTS{}
|
||||
p := &AssumeRoleProvider{
|
||||
Client: stub,
|
||||
RoleARN: "roleARN",
|
||||
}
|
||||
|
||||
creds, err := p.Retrieve()
|
||||
assert.Nil(t, err, "Expect no error")
|
||||
|
||||
assert.Equal(t, "roleARN", creds.AccessKeyID, "Expect access key ID to be reflected role ARN")
|
||||
assert.Equal(t, "assumedSecretAccessKey", creds.SecretAccessKey, "Expect secret access key to match")
|
||||
assert.Equal(t, "assumedSessionToken", creds.SessionToken, "Expect session token to match")
|
||||
}
|
||||
|
||||
func BenchmarkAssumeRoleProvider(b *testing.B) {
|
||||
stub := &stubSTS{}
|
||||
p := &AssumeRoleProvider{
|
||||
Client: stub,
|
||||
RoleARN: "roleARN",
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
_, err := p.Retrieve()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
157
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions.go
generated
vendored
Normal file
@ -0,0 +1,157 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
var sleepDelay = func(delay time.Duration) {
|
||||
time.Sleep(delay)
|
||||
}
|
||||
|
||||
// Interface for matching types which also have a Len method.
|
||||
type lener interface {
|
||||
Len() int
|
||||
}
|
||||
|
||||
// BuildContentLength builds the content length of a request based on the body,
|
||||
// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
|
||||
// to determine request body length and no "Content-Length" was specified it will panic.
|
||||
func BuildContentLength(r *Request) {
|
||||
if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
|
||||
length, _ := strconv.ParseInt(slength, 10, 64)
|
||||
r.HTTPRequest.ContentLength = length
|
||||
return
|
||||
}
|
||||
|
||||
var length int64
|
||||
switch body := r.Body.(type) {
|
||||
case nil:
|
||||
length = 0
|
||||
case lener:
|
||||
length = int64(body.Len())
|
||||
case io.Seeker:
|
||||
r.bodyStart, _ = body.Seek(0, 1)
|
||||
end, _ := body.Seek(0, 2)
|
||||
body.Seek(r.bodyStart, 0) // make sure to seek back to original location
|
||||
length = end - r.bodyStart
|
||||
default:
|
||||
panic("Cannot get length of body, must provide `ContentLength`")
|
||||
}
|
||||
|
||||
r.HTTPRequest.ContentLength = length
|
||||
r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
|
||||
}
|
||||
|
||||
// UserAgentHandler is a request handler for injecting User agent into requests.
|
||||
func UserAgentHandler(r *Request) {
|
||||
r.HTTPRequest.Header.Set("User-Agent", SDKName+"/"+SDKVersion)
|
||||
}
|
||||
|
||||
var reStatusCode = regexp.MustCompile(`^(\d+)`)
|
||||
|
||||
// SendHandler is a request handler to send service request using HTTP client.
|
||||
func SendHandler(r *Request) {
|
||||
var err error
|
||||
r.HTTPResponse, err = r.Service.Config.HTTPClient.Do(r.HTTPRequest)
|
||||
if err != nil {
|
||||
// Capture the case where url.Error is returned for error processing
|
||||
// response. e.g. 301 without location header comes back as string
|
||||
// error and r.HTTPResponse is nil. Other url redirect errors will
|
||||
// come back in a similar manner.
|
||||
if e, ok := err.(*url.Error); ok {
|
||||
if s := reStatusCode.FindStringSubmatch(e.Error()); s != nil {
|
||||
code, _ := strconv.ParseInt(s[1], 10, 64)
|
||||
r.HTTPResponse = &http.Response{
|
||||
StatusCode: int(code),
|
||||
Status: http.StatusText(int(code)),
|
||||
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
if r.HTTPRequest == nil {
|
||||
// Add a dummy request response object to ensure the HTTPResponse
|
||||
// value is consistent.
|
||||
r.HTTPResponse = &http.Response{
|
||||
StatusCode: int(0),
|
||||
Status: http.StatusText(int(0)),
|
||||
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
|
||||
}
|
||||
}
|
||||
// Catch all other request errors.
|
||||
r.Error = awserr.New("RequestError", "send request failed", err)
|
||||
r.Retryable = Bool(true) // network errors are retryable
|
||||
}
|
||||
}
|
||||
|
||||
// ValidateResponseHandler is a request handler to validate service response.
|
||||
func ValidateResponseHandler(r *Request) {
|
||||
if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
|
||||
// this may be replaced by an UnmarshalError handler
|
||||
r.Error = awserr.New("UnknownError", "unknown error", nil)
|
||||
}
|
||||
}
|
||||
|
||||
// AfterRetryHandler performs final checks to determine if the request should
|
||||
// be retried and how long to delay.
|
||||
func AfterRetryHandler(r *Request) {
|
||||
// If one of the other handlers already set the retry state
|
||||
// we don't want to override it based on the service's state
|
||||
if r.Retryable == nil {
|
||||
r.Retryable = Bool(r.Service.ShouldRetry(r))
|
||||
}
|
||||
|
||||
if r.WillRetry() {
|
||||
r.RetryDelay = r.Service.RetryRules(r)
|
||||
sleepDelay(r.RetryDelay)
|
||||
|
||||
// when the expired token exception occurs the credentials
|
||||
// need to be expired locally so that the next request to
|
||||
// get credentials will trigger a credentials refresh.
|
||||
if r.Error != nil {
|
||||
if err, ok := r.Error.(awserr.Error); ok {
|
||||
if isCodeExpiredCreds(err.Code()) {
|
||||
r.Config.Credentials.Expire()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
r.RetryCount++
|
||||
r.Error = nil
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrMissingRegion is an error that is returned if region configuration is
|
||||
// not found.
|
||||
//
|
||||
// @readonly
|
||||
ErrMissingRegion error = awserr.New("MissingRegion", "could not find region configuration", nil)
|
||||
|
||||
// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
|
||||
// resolved for a service.
|
||||
//
|
||||
// @readonly
|
||||
ErrMissingEndpoint error = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
|
||||
)
|
||||
|
||||
// ValidateEndpointHandler is a request handler to validate a request had the
|
||||
// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
|
||||
// region is not valid.
|
||||
func ValidateEndpointHandler(r *Request) {
|
||||
if r.Service.SigningRegion == "" && StringValue(r.Service.Config.Region) == "" {
|
||||
r.Error = ErrMissingRegion
|
||||
} else if r.Service.Endpoint == "" {
|
||||
r.Error = ErrMissingEndpoint
|
||||
}
|
||||
}
|
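BuildContentLength above measures a seekable body by remembering the current offset, seeking to the end, and seeking back. A self-contained sketch of that measuring technique, independent of the SDK's Request type:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// seekerLen returns the number of bytes remaining in body, restoring the
// original read position afterwards, mirroring the io.Seeker branch of
// BuildContentLength.
func seekerLen(body io.ReadSeeker) (int64, error) {
	start, err := body.Seek(0, 1) // current offset
	if err != nil {
		return 0, err
	}
	end, err := body.Seek(0, 2) // offset of the end
	if err != nil {
		return 0, err
	}
	// Seek back so the body can still be read from where it started.
	if _, err := body.Seek(start, 0); err != nil {
		return 0, err
	}
	return end - start, nil
}

func main() {
	n, _ := seekerLen(bytes.NewReader([]byte("hello world")))
	fmt.Println(n) // 11
}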
81
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions_test.go
generated
vendored
Normal file
@ -0,0 +1,81 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestValidateEndpointHandler(t *testing.T) {
|
||||
os.Clearenv()
|
||||
svc := NewService(NewConfig().WithRegion("us-west-2"))
|
||||
svc.Handlers.Clear()
|
||||
svc.Handlers.Validate.PushBack(ValidateEndpointHandler)
|
||||
|
||||
req := NewRequest(svc, &Operation{Name: "Operation"}, nil, nil)
|
||||
err := req.Build()
|
||||
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestValidateEndpointHandlerErrorRegion(t *testing.T) {
|
||||
os.Clearenv()
|
||||
svc := NewService(nil)
|
||||
svc.Handlers.Clear()
|
||||
svc.Handlers.Validate.PushBack(ValidateEndpointHandler)
|
||||
|
||||
req := NewRequest(svc, &Operation{Name: "Operation"}, nil, nil)
|
||||
err := req.Build()
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrMissingRegion, err)
|
||||
}
|
||||
|
||||
type mockCredsProvider struct {
|
||||
expired bool
|
||||
retrieveCalled bool
|
||||
}
|
||||
|
||||
func (m *mockCredsProvider) Retrieve() (credentials.Value, error) {
|
||||
m.retrieveCalled = true
|
||||
return credentials.Value{}, nil
|
||||
}
|
||||
|
||||
func (m *mockCredsProvider) IsExpired() bool {
|
||||
return m.expired
|
||||
}
|
||||
|
||||
func TestAfterRetryRefreshCreds(t *testing.T) {
|
||||
os.Clearenv()
|
||||
credProvider := &mockCredsProvider{}
|
||||
svc := NewService(&Config{Credentials: credentials.NewCredentials(credProvider), MaxRetries: Int(1)})
|
||||
|
||||
svc.Handlers.Clear()
|
||||
svc.Handlers.ValidateResponse.PushBack(func(r *Request) {
|
||||
r.Error = awserr.New("UnknownError", "", nil)
|
||||
r.HTTPResponse = &http.Response{StatusCode: 400}
|
||||
})
|
||||
svc.Handlers.UnmarshalError.PushBack(func(r *Request) {
|
||||
r.Error = awserr.New("ExpiredTokenException", "", nil)
|
||||
})
|
||||
svc.Handlers.AfterRetry.PushBack(func(r *Request) {
|
||||
AfterRetryHandler(r)
|
||||
})
|
||||
|
||||
assert.True(t, svc.Config.Credentials.IsExpired(), "Expect to start out expired")
|
||||
assert.False(t, credProvider.retrieveCalled)
|
||||
|
||||
req := NewRequest(svc, &Operation{Name: "Operation"}, nil, nil)
|
||||
req.Send()
|
||||
|
||||
assert.True(t, svc.Config.Credentials.IsExpired())
|
||||
assert.False(t, credProvider.retrieveCalled)
|
||||
|
||||
_, err := svc.Config.Credentials.Get()
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, credProvider.retrieveCalled)
|
||||
}
|
85
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
package aws
|
||||
|
||||
// A Handlers provides a collection of request handlers for various
|
||||
// stages of handling requests.
|
||||
type Handlers struct {
|
||||
Validate HandlerList
|
||||
Build HandlerList
|
||||
Sign HandlerList
|
||||
Send HandlerList
|
||||
ValidateResponse HandlerList
|
||||
Unmarshal HandlerList
|
||||
UnmarshalMeta HandlerList
|
||||
UnmarshalError HandlerList
|
||||
Retry HandlerList
|
||||
AfterRetry HandlerList
|
||||
}
|
||||
|
||||
// copy returns a copy of this handler's lists.
|
||||
func (h *Handlers) copy() Handlers {
|
||||
return Handlers{
|
||||
Validate: h.Validate.copy(),
|
||||
Build: h.Build.copy(),
|
||||
Sign: h.Sign.copy(),
|
||||
Send: h.Send.copy(),
|
||||
ValidateResponse: h.ValidateResponse.copy(),
|
||||
Unmarshal: h.Unmarshal.copy(),
|
||||
UnmarshalError: h.UnmarshalError.copy(),
|
||||
UnmarshalMeta: h.UnmarshalMeta.copy(),
|
||||
Retry: h.Retry.copy(),
|
||||
AfterRetry: h.AfterRetry.copy(),
|
||||
}
|
||||
}
|
||||
|
||||
// Clear removes callback functions for all handlers
|
||||
func (h *Handlers) Clear() {
|
||||
h.Validate.Clear()
|
||||
h.Build.Clear()
|
||||
h.Send.Clear()
|
||||
h.Sign.Clear()
|
||||
h.Unmarshal.Clear()
|
||||
h.UnmarshalMeta.Clear()
|
||||
h.UnmarshalError.Clear()
|
||||
h.ValidateResponse.Clear()
|
||||
h.Retry.Clear()
|
||||
h.AfterRetry.Clear()
|
||||
}
|
||||
|
||||
// A HandlerList manages zero or more handlers in a list.
|
||||
type HandlerList struct {
|
||||
list []func(*Request)
|
||||
}
|
||||
|
||||
// copy creates a copy of the handler list.
|
||||
func (l *HandlerList) copy() HandlerList {
|
||||
var n HandlerList
|
||||
n.list = append([]func(*Request){}, l.list...)
|
||||
return n
|
||||
}
|
||||
|
||||
// Clear clears the handler list.
|
||||
func (l *HandlerList) Clear() {
|
||||
l.list = []func(*Request){}
|
||||
}
|
||||
|
||||
// Len returns the number of handlers in the list.
|
||||
func (l *HandlerList) Len() int {
|
||||
return len(l.list)
|
||||
}
|
||||
|
||||
// PushBack pushes handlers f to the back of the handler list.
|
||||
func (l *HandlerList) PushBack(f ...func(*Request)) {
|
||||
l.list = append(l.list, f...)
|
||||
}
|
||||
|
||||
// PushFront pushes handlers f to the front of the handler list.
|
||||
func (l *HandlerList) PushFront(f ...func(*Request)) {
|
||||
l.list = append(f, l.list...)
|
||||
}
|
||||
|
||||
// Run executes all handlers in the list with a given request object.
|
||||
func (l *HandlerList) Run(r *Request) {
|
||||
for _, f := range l.list {
|
||||
f(r)
|
||||
}
|
||||
}
|
31
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers_test.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestHandlerList(t *testing.T) {
|
||||
s := ""
|
||||
r := &Request{}
|
||||
l := HandlerList{}
|
||||
l.PushBack(func(r *Request) {
|
||||
s += "a"
|
||||
r.Data = s
|
||||
})
|
||||
l.Run(r)
|
||||
assert.Equal(t, "a", s)
|
||||
assert.Equal(t, "a", r.Data)
|
||||
}
|
||||
|
||||
func TestMultipleHandlers(t *testing.T) {
|
||||
r := &Request{}
|
||||
l := HandlerList{}
|
||||
l.PushBack(func(r *Request) { r.Data = nil })
|
||||
l.PushFront(func(r *Request) { r.Data = Bool(true) })
|
||||
l.Run(r)
|
||||
if r.Data != nil {
|
||||
t.Error("Expected handler to execute")
|
||||
}
|
||||
}
|
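The handler lists above are the extension point that the rest of this package, and the tests in this commit, rely on. A hedged sketch of registering a custom Build handler on a service; the header name is made up for illustration:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	svc := aws.NewService(aws.NewConfig().WithRegion("us-west-2"))

	// Runs after the default Build handlers for every request created from svc.
	svc.Handlers.Build.PushBack(func(r *aws.Request) {
		r.HTTPRequest.Header.Set("X-Example-Trace", "demo") // hypothetical header
	})

	req := aws.NewRequest(svc, &aws.Operation{Name: "Operation"}, nil, nil)
	if err := req.Build(); err != nil {
		fmt.Println("build error:", err)
		return
	}
	fmt.Println(req.HTTPRequest.Header.Get("X-Example-Trace"))
}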
89
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// A LogLevelType defines the level logging should be performed at. Used to instruct
|
||||
// the SDK which statements should be logged.
|
||||
type LogLevelType uint
|
||||
|
||||
// LogLevel returns a pointer to a LogLevel. Should be used to work around
|
||||
// not being able to take the address of a non-composite literal.
|
||||
func LogLevel(l LogLevelType) *LogLevelType {
|
||||
return &l
|
||||
}
|
||||
|
||||
// Value returns the LogLevel value or the default value LogOff if the LogLevel
|
||||
// is nil. Safe to use on nil value LogLevelTypes.
|
||||
func (l *LogLevelType) Value() LogLevelType {
|
||||
if l != nil {
|
||||
return *l
|
||||
}
|
||||
return LogOff
|
||||
}
|
||||
|
||||
// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
|
||||
// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
|
||||
// LogLevel is nil, it will default to a LogOff comparison.
|
||||
func (l *LogLevelType) Matches(v LogLevelType) bool {
|
||||
c := l.Value()
|
||||
return c&v == v
|
||||
}
|
||||
|
||||
// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
|
||||
// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, it will default
|
||||
// to LogOff comparison.
|
||||
func (l *LogLevelType) AtLeast(v LogLevelType) bool {
|
||||
c := l.Value()
|
||||
return c >= v
|
||||
}
|
||||
|
||||
const (
|
||||
// LogOff states that no logging should be performed by the SDK. This is the
|
||||
// default state of the SDK, and should be used to disable all logging.
|
||||
LogOff LogLevelType = iota * 0x1000
|
||||
|
||||
// LogDebug states that debug output should be logged by the SDK. This should
|
||||
// be used to inspect requests made and responses received.
|
||||
LogDebug
|
||||
)
|
||||
|
||||
// Debug Logging Sub Levels
|
||||
const (
|
||||
// LogDebugWithSigning states that the SDK should log request signing and
|
||||
// presigning events. This should be used to log the signing details of
|
||||
// requests for debugging. Will also enable LogDebug.
|
||||
LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
|
||||
|
||||
// LogDebugWithHTTPBody states the SDK should log HTTP request and response
|
||||
// HTTP bodies in addition to the headers and path. This should be used to
|
||||
// see the body content of requests and responses made while using the SDK.
|
||||
// Will also enable LogDebug.
|
||||
LogDebugWithHTTPBody
|
||||
)
|
||||
|
||||
// A Logger is a minimalistic interface for the SDK to log messages to. Should
|
||||
// be used to provide custom logging writers for the SDK to use.
|
||||
type Logger interface {
|
||||
Log(...interface{})
|
||||
}
|
||||
|
||||
// NewDefaultLogger returns a Logger which will write log messages to stdout, and
|
||||
// use the same formatting runes as the stdlib log.Logger.
|
||||
func NewDefaultLogger() Logger {
|
||||
return &defaultLogger{
|
||||
logger: log.New(os.Stdout, "", log.LstdFlags),
|
||||
}
|
||||
}
|
||||
|
||||
// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
|
||||
type defaultLogger struct {
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
// Log logs the parameters to the stdlib logger. See log.Println.
|
||||
func (l defaultLogger) Log(args ...interface{}) {
|
||||
l.logger.Println(args...)
|
||||
}
|
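The log levels above are bit flags, so a single *LogLevelType can carry both the base LogDebug bit and one or more sub levels. A short sketch of how Value, Matches and AtLeast combine, using only symbols from this file:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Debug logging including HTTP bodies (LogDebugWithHTTPBody embeds LogDebug).
	level := aws.LogLevel(aws.LogDebugWithHTTPBody)

	fmt.Println(level.AtLeast(aws.LogDebug))             // true: debug is enabled
	fmt.Println(level.Matches(aws.LogDebugWithHTTPBody)) // true: body sub level is set
	fmt.Println(level.Matches(aws.LogDebugWithSigning))  // false: signing sub level is not

	// A nil *LogLevelType is safe to use and behaves like LogOff.
	var off *aws.LogLevelType
	fmt.Println(off.Value() == aws.LogOff) // true
}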
89
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
// ValidateParameters is a request handler to validate the input parameters.
|
||||
// Validating parameters only has meaning if done prior to the request being sent.
|
||||
func ValidateParameters(r *Request) {
|
||||
if r.ParamsFilled() {
|
||||
v := validator{errors: []string{}}
|
||||
v.validateAny(reflect.ValueOf(r.Params), "")
|
||||
|
||||
if count := len(v.errors); count > 0 {
|
||||
format := "%d validation errors:\n- %s"
|
||||
msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- "))
|
||||
r.Error = awserr.New("InvalidParameter", msg, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A validator validates values, collecting the validation errors that occur.
|
||||
type validator struct {
|
||||
errors []string
|
||||
}
|
||||
|
||||
// validateAny will validate any struct, slice or map type. All validations
|
||||
// are also performed recursively for nested types.
|
||||
func (v *validator) validateAny(value reflect.Value, path string) {
|
||||
value = reflect.Indirect(value)
|
||||
if !value.IsValid() {
|
||||
return
|
||||
}
|
||||
|
||||
switch value.Kind() {
|
||||
case reflect.Struct:
|
||||
v.validateStruct(value, path)
|
||||
case reflect.Slice:
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i))
|
||||
}
|
||||
case reflect.Map:
|
||||
for _, n := range value.MapKeys() {
|
||||
v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// validateStruct will validate the struct value's fields. If the structure has
|
||||
// nested types those types will be validated also.
|
||||
func (v *validator) validateStruct(value reflect.Value, path string) {
|
||||
prefix := "."
|
||||
if path == "" {
|
||||
prefix = ""
|
||||
}
|
||||
|
||||
for i := 0; i < value.Type().NumField(); i++ {
|
||||
f := value.Type().Field(i)
|
||||
if strings.ToLower(f.Name[0:1]) == f.Name[0:1] {
|
||||
continue
|
||||
}
|
||||
fvalue := value.FieldByName(f.Name)
|
||||
|
||||
notset := false
|
||||
if f.Tag.Get("required") != "" {
|
||||
switch fvalue.Kind() {
|
||||
case reflect.Ptr, reflect.Slice, reflect.Map:
|
||||
if fvalue.IsNil() {
|
||||
notset = true
|
||||
}
|
||||
default:
|
||||
if !fvalue.IsValid() {
|
||||
notset = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if notset {
|
||||
msg := "missing required parameter: " + path + prefix + f.Name
|
||||
v.errors = append(v.errors, msg)
|
||||
} else {
|
||||
v.validateAny(fvalue, path+prefix+f.Name)
|
||||
}
|
||||
}
|
||||
}
|
84
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator_test.go
generated
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
package aws_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var service = func() *aws.Service {
|
||||
s := &aws.Service{
|
||||
Config: &aws.Config{},
|
||||
ServiceName: "mock-service",
|
||||
APIVersion: "2015-01-01",
|
||||
}
|
||||
return s
|
||||
}()
|
||||
|
||||
type StructShape struct {
|
||||
RequiredList []*ConditionalStructShape `required:"true"`
|
||||
RequiredMap map[string]*ConditionalStructShape `required:"true"`
|
||||
RequiredBool *bool `required:"true"`
|
||||
OptionalStruct *ConditionalStructShape
|
||||
|
||||
hiddenParameter *string
|
||||
|
||||
metadataStructureShape
|
||||
}
|
||||
|
||||
type metadataStructureShape struct {
|
||||
SDKShapeTraits bool
|
||||
}
|
||||
|
||||
type ConditionalStructShape struct {
|
||||
Name *string `required:"true"`
|
||||
SDKShapeTraits bool
|
||||
}
|
||||
|
||||
func TestNoErrors(t *testing.T) {
|
||||
input := &StructShape{
|
||||
RequiredList: []*ConditionalStructShape{},
|
||||
RequiredMap: map[string]*ConditionalStructShape{
|
||||
"key1": {Name: aws.String("Name")},
|
||||
"key2": {Name: aws.String("Name")},
|
||||
},
|
||||
RequiredBool: aws.Bool(true),
|
||||
OptionalStruct: &ConditionalStructShape{Name: aws.String("Name")},
|
||||
}
|
||||
|
||||
req := aws.NewRequest(service, &aws.Operation{}, input, nil)
|
||||
aws.ValidateParameters(req)
|
||||
assert.NoError(t, req.Error)
|
||||
}
|
||||
|
||||
func TestMissingRequiredParameters(t *testing.T) {
|
||||
input := &StructShape{}
|
||||
req := aws.NewRequest(service, &aws.Operation{}, input, nil)
|
||||
aws.ValidateParameters(req)
|
||||
|
||||
assert.Error(t, req.Error)
|
||||
assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code())
|
||||
assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList\n- missing required parameter: RequiredMap\n- missing required parameter: RequiredBool", req.Error.(awserr.Error).Message())
|
||||
}
|
||||
|
||||
func TestNestedMissingRequiredParameters(t *testing.T) {
|
||||
input := &StructShape{
|
||||
RequiredList: []*ConditionalStructShape{{}},
|
||||
RequiredMap: map[string]*ConditionalStructShape{
|
||||
"key1": {Name: aws.String("Name")},
|
||||
"key2": {},
|
||||
},
|
||||
RequiredBool: aws.Bool(true),
|
||||
OptionalStruct: &ConditionalStructShape{},
|
||||
}
|
||||
|
||||
req := aws.NewRequest(service, &aws.Operation{}, input, nil)
|
||||
aws.ValidateParameters(req)
|
||||
|
||||
assert.Error(t, req.Error)
|
||||
assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code())
|
||||
assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList[0].Name\n- missing required parameter: RequiredMap[\"key2\"].Name\n- missing required parameter: OptionalStruct.Name", req.Error.(awserr.Error).Message())
|
||||
|
||||
}
|
312
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request.go
generated
vendored
Normal file
@ -0,0 +1,312 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
)
|
||||
|
||||
// A Request is the service request to be made.
|
||||
type Request struct {
|
||||
*Service
|
||||
Handlers Handlers
|
||||
Time time.Time
|
||||
ExpireTime time.Duration
|
||||
Operation *Operation
|
||||
HTTPRequest *http.Request
|
||||
HTTPResponse *http.Response
|
||||
Body io.ReadSeeker
|
||||
bodyStart int64 // offset from beginning of Body that the request body starts
|
||||
Params interface{}
|
||||
Error error
|
||||
Data interface{}
|
||||
RequestID string
|
||||
RetryCount uint
|
||||
Retryable *bool
|
||||
RetryDelay time.Duration
|
||||
|
||||
built bool
|
||||
}
|
||||
|
||||
// An Operation is the service API operation to be made.
|
||||
type Operation struct {
|
||||
Name string
|
||||
HTTPMethod string
|
||||
HTTPPath string
|
||||
*Paginator
|
||||
}
|
||||
|
||||
// Paginator keeps track of pagination configuration for an API operation.
|
||||
type Paginator struct {
|
||||
InputTokens []string
|
||||
OutputTokens []string
|
||||
LimitToken string
|
||||
TruncationToken string
|
||||
}
|
||||
|
||||
// NewRequest returns a new Request pointer for the service API
|
||||
// operation and parameters.
|
||||
//
|
||||
// Params is any value of input parameters to be used as the request payload.
|
||||
// Data is a pointer value to an object which the request's response
|
||||
// payload will be deserialized to.
|
||||
func NewRequest(service *Service, operation *Operation, params interface{}, data interface{}) *Request {
|
||||
method := operation.HTTPMethod
|
||||
if method == "" {
|
||||
method = "POST"
|
||||
}
|
||||
p := operation.HTTPPath
|
||||
if p == "" {
|
||||
p = "/"
|
||||
}
|
||||
|
||||
httpReq, _ := http.NewRequest(method, "", nil)
|
||||
httpReq.URL, _ = url.Parse(service.Endpoint + p)
|
||||
|
||||
r := &Request{
|
||||
Service: service,
|
||||
Handlers: service.Handlers.copy(),
|
||||
Time: time.Now(),
|
||||
ExpireTime: 0,
|
||||
Operation: operation,
|
||||
HTTPRequest: httpReq,
|
||||
Body: nil,
|
||||
Params: params,
|
||||
Error: nil,
|
||||
Data: data,
|
||||
}
|
||||
r.SetBufferBody([]byte{})
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// WillRetry returns whether the request can be retried.
|
||||
func (r *Request) WillRetry() bool {
|
||||
return r.Error != nil && BoolValue(r.Retryable) && r.RetryCount < r.Service.MaxRetries()
|
||||
}
|
||||
|
||||
// ParamsFilled returns whether the request's parameters have been populated
|
||||
// and the parameters are valid. False is returned if no parameters are
|
||||
// provided or invalid.
|
||||
func (r *Request) ParamsFilled() bool {
|
||||
return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
|
||||
}
|
||||
|
||||
// DataFilled returns true if the request's data for response deserialization
|
||||
// target has been set and is valid. False is returned if data is not
|
||||
// set, or is invalid.
|
||||
func (r *Request) DataFilled() bool {
|
||||
return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
|
||||
}
|
||||
|
||||
// SetBufferBody will set the request's body bytes that will be sent to
|
||||
// the service API.
|
||||
func (r *Request) SetBufferBody(buf []byte) {
|
||||
r.SetReaderBody(bytes.NewReader(buf))
|
||||
}
|
||||
|
||||
// SetStringBody sets the body of the request to be backed by a string.
|
||||
func (r *Request) SetStringBody(s string) {
|
||||
r.SetReaderBody(strings.NewReader(s))
|
||||
}
|
||||
|
||||
// SetReaderBody will set the request's body reader.
|
||||
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
|
||||
r.HTTPRequest.Body = ioutil.NopCloser(reader)
|
||||
r.Body = reader
|
||||
}
|
||||
|
||||
// Presign returns the request's signed URL. Error will be returned
|
||||
// if the signing fails.
|
||||
func (r *Request) Presign(expireTime time.Duration) (string, error) {
|
||||
r.ExpireTime = expireTime
|
||||
r.Sign()
|
||||
if r.Error != nil {
|
||||
return "", r.Error
|
||||
}
|
||||
return r.HTTPRequest.URL.String(), nil
|
||||
}
|
||||
|
||||
// Build will build the request's object so it can be signed and sent
|
||||
// to the service. Build will also validate all the request's parameters.
|
||||
// Any additional build Handlers set on this request will be run
|
||||
// in the order they were set.
|
||||
//
|
||||
// The request will only be built once. Multiple calls to build will have
|
||||
// no effect.
|
||||
//
|
||||
// If any Validate or Build errors occur the build will stop and the error
|
||||
// which occurred will be returned.
|
||||
func (r *Request) Build() error {
|
||||
if !r.built {
|
||||
r.Error = nil
|
||||
r.Handlers.Validate.Run(r)
|
||||
if r.Error != nil {
|
||||
return r.Error
|
||||
}
|
||||
r.Handlers.Build.Run(r)
|
||||
r.built = true
|
||||
}
|
||||
|
||||
return r.Error
|
||||
}
|
||||
|
||||
// Sign will sign the request, returning an error if errors are encountered.
|
||||
//
|
||||
// Sign will build the request prior to signing. All Sign Handlers will
|
||||
// be executed in the order they were set.
|
||||
func (r *Request) Sign() error {
|
||||
r.Build()
|
||||
if r.Error != nil {
|
||||
return r.Error
|
||||
}
|
||||
|
||||
r.Handlers.Sign.Run(r)
|
||||
return r.Error
|
||||
}
|
||||
|
||||
// Send will send the request returning error if errors are encountered.
|
||||
//
|
||||
// Send will sign the request prior to sending. All Send Handlers will
|
||||
// be executed in the order they were set.
|
||||
func (r *Request) Send() error {
|
||||
for {
|
||||
r.Sign()
|
||||
if r.Error != nil {
|
||||
return r.Error
|
||||
}
|
||||
|
||||
if BoolValue(r.Retryable) {
|
||||
// Re-seek the body back to the original point for a retry so that
|
||||
// send will send the body's contents again in the upcoming request.
|
||||
r.Body.Seek(r.bodyStart, 0)
|
||||
}
|
||||
r.Retryable = nil
|
||||
|
||||
r.Handlers.Send.Run(r)
|
||||
if r.Error != nil {
|
||||
r.Handlers.Retry.Run(r)
|
||||
r.Handlers.AfterRetry.Run(r)
|
||||
if r.Error != nil {
|
||||
return r.Error
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
r.Handlers.UnmarshalMeta.Run(r)
|
||||
r.Handlers.ValidateResponse.Run(r)
|
||||
if r.Error != nil {
|
||||
r.Handlers.UnmarshalError.Run(r)
|
||||
r.Handlers.Retry.Run(r)
|
||||
r.Handlers.AfterRetry.Run(r)
|
||||
if r.Error != nil {
|
||||
return r.Error
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
r.Handlers.Unmarshal.Run(r)
|
||||
if r.Error != nil {
|
||||
r.Handlers.Retry.Run(r)
|
||||
r.Handlers.AfterRetry.Run(r)
|
||||
if r.Error != nil {
|
||||
return r.Error
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// HasNextPage returns true if this request has more pages of data available.
|
||||
func (r *Request) HasNextPage() bool {
|
||||
return r.nextPageTokens() != nil
|
||||
}
|
||||
|
||||
// nextPageTokens returns the tokens to use when asking for the next page of
|
||||
// data.
|
||||
func (r *Request) nextPageTokens() []interface{} {
|
||||
if r.Operation.Paginator == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if r.Operation.TruncationToken != "" {
|
||||
tr := awsutil.ValuesAtAnyPath(r.Data, r.Operation.TruncationToken)
|
||||
if tr == nil || len(tr) == 0 {
|
||||
return nil
|
||||
}
|
||||
switch v := tr[0].(type) {
|
||||
case bool:
|
||||
if v == false {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
found := false
|
||||
tokens := make([]interface{}, len(r.Operation.OutputTokens))
|
||||
|
||||
for i, outtok := range r.Operation.OutputTokens {
|
||||
v := awsutil.ValuesAtAnyPath(r.Data, outtok)
|
||||
if v != nil && len(v) > 0 {
|
||||
found = true
|
||||
tokens[i] = v[0]
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
return tokens
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NextPage returns a new Request that can be executed to return the next
|
||||
// page of result data. Call .Send() on this request to execute it.
|
||||
func (r *Request) NextPage() *Request {
|
||||
tokens := r.nextPageTokens()
|
||||
if tokens == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
|
||||
nr := NewRequest(r.Service, r.Operation, awsutil.CopyOf(r.Params), data)
|
||||
for i, intok := range nr.Operation.InputTokens {
|
||||
awsutil.SetValueAtAnyPath(nr.Params, intok, tokens[i])
|
||||
}
|
||||
return nr
|
||||
}
|
||||
|
||||
// EachPage iterates over each page of a paginated request object. The fn
|
||||
// parameter should be a function with the following sample signature:
|
||||
//
|
||||
// func(page *T, lastPage bool) bool {
|
||||
// return true // return false to stop iterating
|
||||
// }
|
||||
//
|
||||
// Where "T" is the structure type matching the output structure of the given
|
||||
// operation. For example, a request object generated by
|
||||
// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
|
||||
// as the structure "T". The lastPage value represents whether the page is
|
||||
// the last page of data or not. The return value of this function should
|
||||
// return true to keep iterating or false to stop.
|
||||
func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
|
||||
for page := r; page != nil; page = page.NextPage() {
|
||||
page.Send()
|
||||
shouldContinue := fn(page.Data, !page.HasNextPage())
|
||||
if page.Error != nil || !shouldContinue {
|
||||
return page.Error
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
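Presign is the one part of the lifecycle above that the tests in this commit do not exercise. A hedged sketch of presigning a request; s3.ListObjectsRequest is assumed from the generated API (its Pages wrapper is used in the pagination tests below), and the bucket name is a placeholder:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	client := s3.New(nil)

	// Build the request without sending it.
	req, _ := client.ListObjectsRequest(&s3.ListObjectsInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})

	// Presign runs Build plus the Sign handlers and returns the signed URL.
	url, err := req.Presign(15 * time.Minute)
	if err != nil {
		fmt.Println("presign failed:", err)
		return
	}
	fmt.Println(url)
}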
305
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_pagination_test.go
generated
vendored
Normal file
@ -0,0 +1,305 @@
|
||||
package aws_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/internal/test/unit"
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var _ = unit.Imported
|
||||
|
||||
// Use DynamoDB methods for simplicity
|
||||
func TestPagination(t *testing.T) {
|
||||
db := dynamodb.New(nil)
|
||||
tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false
|
||||
|
||||
reqNum := 0
|
||||
resps := []*dynamodb.ListTablesOutput{
|
||||
{TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")},
|
||||
{TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")},
|
||||
{TableNames: []*string{aws.String("Table5")}},
|
||||
}
|
||||
|
||||
db.Handlers.Send.Clear() // mock sending
|
||||
db.Handlers.Unmarshal.Clear()
|
||||
db.Handlers.UnmarshalMeta.Clear()
|
||||
db.Handlers.ValidateResponse.Clear()
|
||||
db.Handlers.Build.PushBack(func(r *aws.Request) {
|
||||
in := r.Params.(*dynamodb.ListTablesInput)
|
||||
if in == nil {
|
||||
tokens = append(tokens, "")
|
||||
} else if in.ExclusiveStartTableName != nil {
|
||||
tokens = append(tokens, *in.ExclusiveStartTableName)
|
||||
}
|
||||
})
|
||||
db.Handlers.Unmarshal.PushBack(func(r *aws.Request) {
|
||||
r.Data = resps[reqNum]
|
||||
reqNum++
|
||||
})
|
||||
|
||||
params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
|
||||
err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool {
|
||||
numPages++
|
||||
for _, t := range p.TableNames {
|
||||
pages = append(pages, *t)
|
||||
}
|
||||
if last {
|
||||
if gotToEnd {
|
||||
assert.Fail(t, "last=true happened twice")
|
||||
}
|
||||
gotToEnd = true
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
assert.Equal(t, []string{"Table2", "Table4"}, tokens)
|
||||
assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages)
|
||||
assert.Equal(t, 3, numPages)
|
||||
assert.True(t, gotToEnd)
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, params.ExclusiveStartTableName)
|
||||
}
|
||||
|
||||
// Use DynamoDB methods for simplicity
|
||||
func TestPaginationEachPage(t *testing.T) {
|
||||
db := dynamodb.New(nil)
|
||||
tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false
|
||||
|
||||
reqNum := 0
|
||||
resps := []*dynamodb.ListTablesOutput{
|
||||
{TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")},
|
||||
{TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")},
|
||||
{TableNames: []*string{aws.String("Table5")}},
|
||||
}
|
||||
|
||||
db.Handlers.Send.Clear() // mock sending
|
||||
db.Handlers.Unmarshal.Clear()
|
||||
db.Handlers.UnmarshalMeta.Clear()
|
||||
db.Handlers.ValidateResponse.Clear()
|
||||
db.Handlers.Build.PushBack(func(r *aws.Request) {
|
||||
in := r.Params.(*dynamodb.ListTablesInput)
|
||||
if in == nil {
|
||||
tokens = append(tokens, "")
|
||||
} else if in.ExclusiveStartTableName != nil {
|
||||
tokens = append(tokens, *in.ExclusiveStartTableName)
|
||||
}
|
||||
})
|
||||
db.Handlers.Unmarshal.PushBack(func(r *aws.Request) {
|
||||
r.Data = resps[reqNum]
|
||||
reqNum++
|
||||
})
|
||||
|
||||
params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
|
||||
req, _ := db.ListTablesRequest(params)
|
||||
err := req.EachPage(func(p interface{}, last bool) bool {
|
||||
numPages++
|
||||
for _, t := range p.(*dynamodb.ListTablesOutput).TableNames {
|
||||
pages = append(pages, *t)
|
||||
}
|
||||
if last {
|
||||
if gotToEnd {
|
||||
assert.Fail(t, "last=true happened twice")
|
||||
}
|
||||
gotToEnd = true
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
assert.Equal(t, []string{"Table2", "Table4"}, tokens)
|
||||
assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages)
|
||||
assert.Equal(t, 3, numPages)
|
||||
assert.True(t, gotToEnd)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// Use DynamoDB methods for simplicity
|
||||
func TestPaginationEarlyExit(t *testing.T) {
|
||||
db := dynamodb.New(nil)
|
||||
numPages, gotToEnd := 0, false
|
||||
|
||||
reqNum := 0
|
||||
resps := []*dynamodb.ListTablesOutput{
|
||||
{TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")},
|
||||
{TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")},
|
||||
{TableNames: []*string{aws.String("Table5")}},
|
||||
}
|
||||
|
||||
db.Handlers.Send.Clear() // mock sending
|
||||
db.Handlers.Unmarshal.Clear()
|
||||
db.Handlers.UnmarshalMeta.Clear()
|
||||
db.Handlers.ValidateResponse.Clear()
|
||||
db.Handlers.Unmarshal.PushBack(func(r *aws.Request) {
|
||||
r.Data = resps[reqNum]
|
||||
reqNum++
|
||||
})
|
||||
|
||||
params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
|
||||
err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool {
|
||||
numPages++
|
||||
if numPages == 2 {
|
||||
return false
|
||||
}
|
||||
if last {
|
||||
if gotToEnd {
|
||||
assert.Fail(t, "last=true happened twice")
|
||||
}
|
||||
gotToEnd = true
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
assert.Equal(t, 2, numPages)
|
||||
assert.False(t, gotToEnd)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestSkipPagination(t *testing.T) {
|
||||
client := s3.New(nil)
|
||||
client.Handlers.Send.Clear() // mock sending
|
||||
client.Handlers.Unmarshal.Clear()
|
||||
client.Handlers.UnmarshalMeta.Clear()
|
||||
client.Handlers.ValidateResponse.Clear()
|
||||
client.Handlers.Unmarshal.PushBack(func(r *aws.Request) {
|
||||
r.Data = &s3.HeadBucketOutput{}
|
||||
})
|
||||
|
||||
req, _ := client.HeadBucketRequest(&s3.HeadBucketInput{Bucket: aws.String("bucket")})
|
||||
|
||||
numPages, gotToEnd := 0, false
|
||||
req.EachPage(func(p interface{}, last bool) bool {
|
||||
numPages++
|
||||
if last {
|
||||
gotToEnd = true
|
||||
}
|
||||
return true
|
||||
})
|
||||
assert.Equal(t, 1, numPages)
|
||||
assert.True(t, gotToEnd)
|
||||
}
|
||||
|
||||
// Use S3 for simplicity
|
||||
func TestPaginationTruncation(t *testing.T) {
|
||||
count := 0
|
||||
client := s3.New(nil)
|
||||
|
||||
reqNum := &count
|
||||
resps := []*s3.ListObjectsOutput{
|
||||
{IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key1")}}},
|
||||
{IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key2")}}},
|
||||
{IsTruncated: aws.Bool(false), Contents: []*s3.Object{{Key: aws.String("Key3")}}},
|
||||
{IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key4")}}},
|
||||
}
|
||||
|
||||
client.Handlers.Send.Clear() // mock sending
|
||||
client.Handlers.Unmarshal.Clear()
|
||||
client.Handlers.UnmarshalMeta.Clear()
|
||||
client.Handlers.ValidateResponse.Clear()
|
||||
client.Handlers.Unmarshal.PushBack(func(r *aws.Request) {
|
||||
r.Data = resps[*reqNum]
|
||||
*reqNum++
|
||||
})
|
||||
|
||||
params := &s3.ListObjectsInput{Bucket: aws.String("bucket")}
|
||||
|
||||
results := []string{}
|
||||
err := client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool {
|
||||
results = append(results, *p.Contents[0].Key)
|
||||
return true
|
||||
})
|
||||
|
||||
assert.Equal(t, []string{"Key1", "Key2", "Key3"}, results)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// Try again without truncation token at all
|
||||
count = 0
|
||||
resps[1].IsTruncated = nil
|
||||
resps[2].IsTruncated = aws.Bool(true)
|
||||
results = []string{}
|
||||
err = client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool {
|
||||
results = append(results, *p.Contents[0].Key)
|
||||
return true
|
||||
})
|
||||
|
||||
assert.Equal(t, []string{"Key1", "Key2"}, results)
|
||||
assert.Nil(t, err)
|
||||
|
||||
}
|
||||
|
||||
// Benchmarks
|
||||
var benchResps = []*dynamodb.ListTablesOutput{
|
||||
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
|
||||
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
|
||||
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
|
||||
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
|
||||
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
|
||||
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
|
||||
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
|
||||
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
|
||||
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
|
||||
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
|
||||
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
|
||||
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
|
||||
{TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")},
|
||||
{TableNames: []*string{aws.String("TABLE")}},
|
||||
}
|
||||
|
||||
var benchDb = func() *dynamodb.DynamoDB {
|
||||
db := dynamodb.New(nil)
|
||||
db.Handlers.Send.Clear() // mock sending
|
||||
db.Handlers.Unmarshal.Clear()
|
||||
db.Handlers.UnmarshalMeta.Clear()
|
||||
db.Handlers.ValidateResponse.Clear()
|
||||
return db
|
||||
}
|
||||
|
||||
func BenchmarkCodegenIterator(b *testing.B) {
|
||||
reqNum := 0
|
||||
db := benchDb()
|
||||
db.Handlers.Unmarshal.PushBack(func(r *aws.Request) {
|
||||
r.Data = benchResps[reqNum]
|
||||
reqNum++
|
||||
})
|
||||
|
||||
input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
|
||||
iter := func(fn func(*dynamodb.ListTablesOutput, bool) bool) error {
|
||||
page, _ := db.ListTablesRequest(input)
|
||||
for ; page != nil; page = page.NextPage() {
|
||||
page.Send()
|
||||
out := page.Data.(*dynamodb.ListTablesOutput)
|
||||
if result := fn(out, !page.HasNextPage()); page.Error != nil || !result {
|
||||
return page.Error
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
reqNum = 0
|
||||
iter(func(p *dynamodb.ListTablesOutput, last bool) bool {
|
||||
return true
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEachPageIterator(b *testing.B) {
|
||||
reqNum := 0
|
||||
db := benchDb()
|
||||
db.Handlers.Unmarshal.PushBack(func(r *aws.Request) {
|
||||
r.Data = benchResps[reqNum]
|
||||
reqNum++
|
||||
})
|
||||
|
||||
input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)}
|
||||
for i := 0; i < b.N; i++ {
|
||||
reqNum = 0
|
||||
req, _ := db.ListTablesRequest(input)
|
||||
req.EachPage(func(p interface{}, last bool) bool {
|
||||
return true
|
||||
})
|
||||
}
|
||||
}
|
225
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_test.go
generated
vendored
Normal file
@ -0,0 +1,225 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type testData struct {
|
||||
Data string
|
||||
}
|
||||
|
||||
func body(str string) io.ReadCloser {
|
||||
return ioutil.NopCloser(bytes.NewReader([]byte(str)))
|
||||
}
|
||||
|
||||
func unmarshal(req *Request) {
|
||||
defer req.HTTPResponse.Body.Close()
|
||||
if req.Data != nil {
|
||||
json.NewDecoder(req.HTTPResponse.Body).Decode(req.Data)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func unmarshalError(req *Request) {
|
||||
bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body)
|
||||
if err != nil {
|
||||
req.Error = awserr.New("UnmarshaleError", req.HTTPResponse.Status, err)
|
||||
return
|
||||
}
|
||||
if len(bodyBytes) == 0 {
|
||||
req.Error = awserr.NewRequestFailure(
|
||||
awserr.New("UnmarshaleError", req.HTTPResponse.Status, fmt.Errorf("empty body")),
|
||||
req.HTTPResponse.StatusCode,
|
||||
"",
|
||||
)
|
||||
return
|
||||
}
|
||||
var jsonErr jsonErrorResponse
|
||||
if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil {
|
||||
req.Error = awserr.New("UnmarshaleError", "JSON unmarshal", err)
|
||||
return
|
||||
}
|
||||
req.Error = awserr.NewRequestFailure(
|
||||
awserr.New(jsonErr.Code, jsonErr.Message, nil),
|
||||
req.HTTPResponse.StatusCode,
|
||||
"",
|
||||
)
|
||||
}
|
||||
|
||||
type jsonErrorResponse struct {
|
||||
Code string `json:"__type"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// test that retries occur for 5xx status codes
|
||||
func TestRequestRecoverRetry5xx(t *testing.T) {
|
||||
reqNum := 0
|
||||
reqs := []http.Response{
|
||||
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
|
||||
{StatusCode: 501, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
|
||||
{StatusCode: 200, Body: body(`{"data":"valid"}`)},
|
||||
}
|
||||
|
||||
s := NewService(NewConfig().WithMaxRetries(10))
|
||||
s.Handlers.Validate.Clear()
|
||||
s.Handlers.Unmarshal.PushBack(unmarshal)
|
||||
s.Handlers.UnmarshalError.PushBack(unmarshalError)
|
||||
s.Handlers.Send.Clear() // mock sending
|
||||
s.Handlers.Send.PushBack(func(r *Request) {
|
||||
r.HTTPResponse = &reqs[reqNum]
|
||||
reqNum++
|
||||
})
|
||||
out := &testData{}
|
||||
r := NewRequest(s, &Operation{Name: "Operation"}, nil, out)
|
||||
err := r.Send()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 2, int(r.RetryCount))
|
||||
assert.Equal(t, "valid", out.Data)
|
||||
}
|
||||
|
||||
// test that retries occur for 4xx status codes with a response type that can be retried - see `shouldRetry`
|
||||
func TestRequestRecoverRetry4xxRetryable(t *testing.T) {
|
||||
reqNum := 0
|
||||
reqs := []http.Response{
|
||||
{StatusCode: 400, Body: body(`{"__type":"Throttling","message":"Rate exceeded."}`)},
|
||||
{StatusCode: 429, Body: body(`{"__type":"ProvisionedThroughputExceededException","message":"Rate exceeded."}`)},
|
||||
{StatusCode: 200, Body: body(`{"data":"valid"}`)},
|
||||
}
|
||||
|
||||
s := NewService(NewConfig().WithMaxRetries(10))
|
||||
s.Handlers.Validate.Clear()
|
||||
s.Handlers.Unmarshal.PushBack(unmarshal)
|
||||
s.Handlers.UnmarshalError.PushBack(unmarshalError)
|
||||
s.Handlers.Send.Clear() // mock sending
|
||||
s.Handlers.Send.PushBack(func(r *Request) {
|
||||
r.HTTPResponse = &reqs[reqNum]
|
||||
reqNum++
|
||||
})
|
||||
out := &testData{}
|
||||
r := NewRequest(s, &Operation{Name: "Operation"}, nil, out)
|
||||
err := r.Send()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 2, int(r.RetryCount))
|
||||
assert.Equal(t, "valid", out.Data)
|
||||
}
|
||||
|
||||
// test that retries don't occur for 4xx status codes with a response type that can't be retried
|
||||
func TestRequest4xxUnretryable(t *testing.T) {
|
||||
s := NewService(NewConfig().WithMaxRetries(10))
|
||||
s.Handlers.Validate.Clear()
|
||||
s.Handlers.Unmarshal.PushBack(unmarshal)
|
||||
s.Handlers.UnmarshalError.PushBack(unmarshalError)
|
||||
s.Handlers.Send.Clear() // mock sending
|
||||
s.Handlers.Send.PushBack(func(r *Request) {
|
||||
r.HTTPResponse = &http.Response{StatusCode: 401, Body: body(`{"__type":"SignatureDoesNotMatch","message":"Signature does not match."}`)}
|
||||
})
|
||||
out := &testData{}
|
||||
r := NewRequest(s, &Operation{Name: "Operation"}, nil, out)
|
||||
err := r.Send()
|
||||
assert.NotNil(t, err)
|
||||
if e, ok := err.(awserr.RequestFailure); ok {
|
||||
assert.Equal(t, 401, e.StatusCode())
|
||||
} else {
|
||||
assert.Fail(t, "Expected error to be a service failure")
|
||||
}
|
||||
assert.Equal(t, "SignatureDoesNotMatch", err.(awserr.Error).Code())
|
||||
assert.Equal(t, "Signature does not match.", err.(awserr.Error).Message())
|
||||
assert.Equal(t, 0, int(r.RetryCount))
|
||||
}
|
||||
|
||||
func TestRequestExhaustRetries(t *testing.T) {
|
||||
delays := []time.Duration{}
|
||||
sleepDelay = func(delay time.Duration) {
|
||||
delays = append(delays, delay)
|
||||
}
|
||||
|
||||
reqNum := 0
|
||||
reqs := []http.Response{
|
||||
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
|
||||
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
|
||||
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
|
||||
{StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)},
|
||||
}
|
||||
|
||||
s := NewService(NewConfig().WithMaxRetries(DefaultRetries))
|
||||
s.Handlers.Validate.Clear()
|
||||
s.Handlers.Unmarshal.PushBack(unmarshal)
|
||||
s.Handlers.UnmarshalError.PushBack(unmarshalError)
|
||||
s.Handlers.Send.Clear() // mock sending
|
||||
s.Handlers.Send.PushBack(func(r *Request) {
|
||||
r.HTTPResponse = &reqs[reqNum]
|
||||
reqNum++
|
||||
})
|
||||
r := NewRequest(s, &Operation{Name: "Operation"}, nil, nil)
|
||||
err := r.Send()
|
||||
assert.NotNil(t, err)
|
||||
if e, ok := err.(awserr.RequestFailure); ok {
|
||||
assert.Equal(t, 500, e.StatusCode())
|
||||
} else {
|
||||
assert.Fail(t, "Expected error to be a service failure")
|
||||
}
|
||||
assert.Equal(t, "UnknownError", err.(awserr.Error).Code())
|
||||
assert.Equal(t, "An error occurred.", err.(awserr.Error).Message())
|
||||
assert.Equal(t, 3, int(r.RetryCount))
|
||||
|
||||
expectDelays := []struct{ min, max time.Duration }{{30, 59}, {60, 118}, {120, 236}}
|
||||
for i, v := range delays {
|
||||
min := expectDelays[i].min * time.Millisecond
|
||||
max := expectDelays[i].max * time.Millisecond
|
||||
assert.True(t, min <= v && v <= max,
|
||||
"Expect delay to be within range, i:%d, v:%s, min:%s, max:%s", i, v, min, max)
|
||||
}
|
||||
}
|
||||
|
||||
// test that the request is retried after the credentials are expired.
|
||||
func TestRequestRecoverExpiredCreds(t *testing.T) {
|
||||
reqNum := 0
|
||||
reqs := []http.Response{
|
||||
{StatusCode: 400, Body: body(`{"__type":"ExpiredTokenException","message":"expired token"}`)},
|
||||
{StatusCode: 200, Body: body(`{"data":"valid"}`)},
|
||||
}
|
||||
|
||||
s := NewService(&Config{MaxRetries: Int(10), Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "")})
|
||||
s.Handlers.Validate.Clear()
|
||||
s.Handlers.Unmarshal.PushBack(unmarshal)
|
||||
s.Handlers.UnmarshalError.PushBack(unmarshalError)
|
||||
|
||||
credExpiredBeforeRetry := false
|
||||
credExpiredAfterRetry := false
|
||||
|
||||
s.Handlers.AfterRetry.PushBack(func(r *Request) {
|
||||
credExpiredAfterRetry = r.Config.Credentials.IsExpired()
|
||||
})
|
||||
|
||||
s.Handlers.Sign.Clear()
|
||||
s.Handlers.Sign.PushBack(func(r *Request) {
|
||||
r.Config.Credentials.Get()
|
||||
})
|
||||
s.Handlers.Send.Clear() // mock sending
|
||||
s.Handlers.Send.PushBack(func(r *Request) {
|
||||
r.HTTPResponse = &reqs[reqNum]
|
||||
reqNum++
|
||||
})
|
||||
out := &testData{}
|
||||
r := NewRequest(s, &Operation{Name: "Operation"}, nil, out)
|
||||
err := r.Send()
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.False(t, credExpiredBeforeRetry, "Expect valid creds before retry check")
|
||||
assert.True(t, credExpiredAfterRetry, "Expect expired creds after retry check")
|
||||
assert.False(t, s.Config.Credentials.IsExpired(), "Expect valid creds after cred expired recovery")
|
||||
|
||||
assert.Equal(t, 1, int(r.RetryCount))
|
||||
assert.Equal(t, "valid", out.Data)
|
||||
}
|
194  Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service.go  (generated, vendored, new file)
@@ -0,0 +1,194 @@
package aws

import (
	"fmt"
	"math"
	"math/rand"
	"net/http"
	"net/http/httputil"
	"regexp"
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/internal/endpoints"
)

// A Service implements the base service request and response handling
// used by all services.
type Service struct {
	Config            *Config
	Handlers          Handlers
	ServiceName       string
	APIVersion        string
	Endpoint          string
	SigningName       string
	SigningRegion     string
	JSONVersion       string
	TargetPrefix      string
	RetryRules        func(*Request) time.Duration
	ShouldRetry       func(*Request) bool
	DefaultMaxRetries uint
}

var schemeRE = regexp.MustCompile("^([^:]+)://")

// NewService will return a pointer to a new Service object initialized.
func NewService(config *Config) *Service {
	svc := &Service{Config: config}
	svc.Initialize()
	return svc
}

// Initialize initializes the service.
func (s *Service) Initialize() {
	if s.Config == nil {
		s.Config = &Config{}
	}
	if s.Config.HTTPClient == nil {
		s.Config.HTTPClient = http.DefaultClient
	}

	if s.RetryRules == nil {
		s.RetryRules = retryRules
	}

	if s.ShouldRetry == nil {
		s.ShouldRetry = shouldRetry
	}

	s.DefaultMaxRetries = 3
	s.Handlers.Validate.PushBack(ValidateEndpointHandler)
	s.Handlers.Build.PushBack(UserAgentHandler)
	s.Handlers.Sign.PushBack(BuildContentLength)
	s.Handlers.Send.PushBack(SendHandler)
	s.Handlers.AfterRetry.PushBack(AfterRetryHandler)
	s.Handlers.ValidateResponse.PushBack(ValidateResponseHandler)
	s.AddDebugHandlers()
	s.buildEndpoint()

	if !BoolValue(s.Config.DisableParamValidation) {
		s.Handlers.Validate.PushBack(ValidateParameters)
	}
}

// buildEndpoint builds the endpoint values the service will use to make requests with.
func (s *Service) buildEndpoint() {
	if StringValue(s.Config.Endpoint) != "" {
		s.Endpoint = *s.Config.Endpoint
	} else {
		s.Endpoint, s.SigningRegion =
			endpoints.EndpointForRegion(s.ServiceName, StringValue(s.Config.Region))
	}

	if s.Endpoint != "" && !schemeRE.MatchString(s.Endpoint) {
		scheme := "https"
		if BoolValue(s.Config.DisableSSL) {
			scheme = "http"
		}
		s.Endpoint = scheme + "://" + s.Endpoint
	}
}

// AddDebugHandlers injects debug logging handlers into the service to log request
// debug information.
func (s *Service) AddDebugHandlers() {
	if !s.Config.LogLevel.AtLeast(LogDebug) {
		return
	}

	s.Handlers.Send.PushFront(logRequest)
	s.Handlers.Send.PushBack(logResponse)
}

const logReqMsg = `DEBUG: Request %s/%s Details:
---[ REQUEST POST-SIGN ]-----------------------------
%s
-----------------------------------------------------`

func logRequest(r *Request) {
	logBody := r.Config.LogLevel.Matches(LogDebugWithHTTPBody)
	dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)

	r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ServiceName, r.Operation.Name, string(dumpedBody)))
}

const logRespMsg = `DEBUG: Response %s/%s Details:
---[ RESPONSE ]--------------------------------------
%s
-----------------------------------------------------`

func logResponse(r *Request) {
	var msg = "no response data"
	if r.HTTPResponse != nil {
		logBody := r.Config.LogLevel.Matches(LogDebugWithHTTPBody)
		dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)
		msg = string(dumpedBody)
	} else if r.Error != nil {
		msg = r.Error.Error()
	}
	r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ServiceName, r.Operation.Name, msg))
}

// MaxRetries returns the maximum number of retries the service will use
// for an individual API request.
func (s *Service) MaxRetries() uint {
	if IntValue(s.Config.MaxRetries) < 0 {
		return s.DefaultMaxRetries
	}
	return uint(IntValue(s.Config.MaxRetries))
}

var seededRand = rand.New(rand.NewSource(time.Now().UnixNano()))

// retryRules returns the delay duration before retrying this request again
func retryRules(r *Request) time.Duration {
	delay := int(math.Pow(2, float64(r.RetryCount))) * (seededRand.Intn(30) + 30)
	return time.Duration(delay) * time.Millisecond
}

// retryableCodes is a collection of service response codes which are retry-able
// without any further action.
var retryableCodes = map[string]struct{}{
	"RequestError":                           {},
	"ProvisionedThroughputExceededException": {},
	"Throttling":                             {},
	"ThrottlingException":                    {},
	"RequestLimitExceeded":                   {},
	"RequestThrottled":                       {},
}

// credsExpiredCodes is a collection of error codes which signify the credentials
// need to be refreshed. Expired tokens require refreshing of credentials, and
// resigning before the request can be retried.
var credsExpiredCodes = map[string]struct{}{
	"ExpiredToken":          {},
	"ExpiredTokenException": {},
	"RequestExpired":        {}, // EC2 Only
}

func isCodeRetryable(code string) bool {
	if _, ok := retryableCodes[code]; ok {
		return true
	}

	return isCodeExpiredCreds(code)
}

func isCodeExpiredCreds(code string) bool {
	_, ok := credsExpiredCodes[code]
	return ok
}

// shouldRetry returns if the request should be retried.
func shouldRetry(r *Request) bool {
	if r.HTTPResponse.StatusCode >= 500 {
		return true
	}
	if r.Error != nil {
		if err, ok := r.Error.(awserr.Error); ok {
			return isCodeRetryable(err.Code())
		}
	}
	return false
}
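The backoff used by retryRules above is exponential with per-retry jitter: 2^RetryCount multiplied by a random base of 30–59 ms. A minimal standalone sketch (not part of the vendored SDK) that prints the window each retry delay can fall into, which matches the ranges asserted by TestRequestExhaustRetries earlier in this diff:

package main

import (
	"fmt"
	"math"
	"time"
)

// delayWindow mirrors the formula in retryRules with the jitter bounds
// written out: seededRand.Intn(30) yields 0..29, so the base is 30..59 ms.
func delayWindow(retryCount int) (min, max time.Duration) {
	base := int(math.Pow(2, float64(retryCount)))
	min = time.Duration(base*30) * time.Millisecond
	max = time.Duration(base*59) * time.Millisecond
	return min, max
}

func main() {
	for retry := 0; retry < 3; retry++ {
		min, max := delayWindow(retry)
		fmt.Printf("retry %d: %v - %v\n", retry, min, max)
		// retry 0: 30ms - 59ms, retry 1: 60ms - 118ms, retry 2: 120ms - 236ms
	}
}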
55  Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go  (generated, vendored, new file)
@@ -0,0 +1,55 @@
package aws

import (
	"io"
)

// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser
func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
	return ReaderSeekerCloser{r}
}

// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
// io.Closer interfaces to the underlying object if they are available.
type ReaderSeekerCloser struct {
	r io.Reader
}

// Read reads from the reader up to size of p. The number of bytes read, and
// error if it occurred will be returned.
//
// If the reader is not an io.Reader zero bytes read, and nil error will be returned.
//
// Performs the same functionality as io.Reader Read
func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
	switch t := r.r.(type) {
	case io.Reader:
		return t.Read(p)
	}
	return 0, nil
}

// Seek sets the offset for the next Read to offset, interpreted according to
// whence: 0 means relative to the origin of the file, 1 means relative to the
// current offset, and 2 means relative to the end. Seek returns the new offset
// and an error, if any.
//
// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
	switch t := r.r.(type) {
	case io.Seeker:
		return t.Seek(offset, whence)
	}
	return int64(0), nil
}

// Close closes the ReaderSeekerCloser.
//
// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
func (r ReaderSeekerCloser) Close() error {
	switch t := r.r.(type) {
	case io.Closer:
		return t.Close()
	}
	return nil
}
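A short usage sketch for ReaderSeekerCloser (illustrative only, assuming the vendored import path rewrites to github.com/aws/aws-sdk-go/aws): wrapping a strings.Reader delegates Read and Seek, while Close becomes a harmless no-op because strings.Reader is not an io.Closer.

package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// strings.Reader implements io.Reader and io.Seeker but not io.Closer.
	body := aws.ReadSeekCloser(strings.NewReader("hello"))

	buf := make([]byte, 5)
	n, _ := body.Read(buf)
	fmt.Println(n, string(buf[:n])) // 5 hello

	offset, _ := body.Seek(0, 0) // rewind to the start
	fmt.Println(offset)          // 0

	fmt.Println(body.Close()) // <nil>, Close is a no-op here
}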
8  Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go  (generated, vendored, new file)
@@ -0,0 +1,8 @@
// Package aws provides core functionality for making requests to AWS services.
package aws

// SDKName is the name of this AWS SDK
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "0.7.3"
31  Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go  (generated, vendored, new file)
@@ -0,0 +1,31 @@
// Package endpoints validates regional endpoints for services.
package endpoints

//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
//go:generate gofmt -s -w endpoints_map.go

import "strings"

// EndpointForRegion returns an endpoint and its signing region for a service and region.
// If the service and region pair are not found, endpoint and signingRegion will be empty.
func EndpointForRegion(svcName, region string) (endpoint, signingRegion string) {
	derivedKeys := []string{
		region + "/" + svcName,
		region + "/*",
		"*/" + svcName,
		"*/*",
	}

	for _, key := range derivedKeys {
		if val, ok := endpointsMap.Endpoints[key]; ok {
			ep := val.Endpoint
			ep = strings.Replace(ep, "{region}", region, -1)
			ep = strings.Replace(ep, "{service}", svcName, -1)

			endpoint = ep
			signingRegion = val.SigningRegion
			return
		}
	}
	return
}
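An in-package sketch of the lookup order above: the derived keys are tried most-specific first, so an exact region/service entry wins over the "*/*" fallback, and the fallback substitutes the {service} and {region} placeholders. The service and region names below are only illustrative.

package endpoints

import "fmt"

// exampleEndpointForRegion is an illustrative sketch, not part of the vendored code.
func exampleEndpointForRegion() {
	// "us-east-1/sdb" is an exact match in endpointsMap, so it is used
	// before the "*/*" fallback.
	ep, sr := EndpointForRegion("sdb", "us-east-1")
	fmt.Println(ep, sr) // sdb.amazonaws.com us-east-1

	// "dynamodb" has no specific entry, so "*/*" is used and the
	// {service}/{region} placeholders are substituted.
	ep, _ = EndpointForRegion("dynamodb", "eu-west-1")
	fmt.Println(ep) // dynamodb.eu-west-1.amazonaws.com
}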
77  Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json  (generated, vendored, new file)
@@ -0,0 +1,77 @@
|
||||
{
|
||||
"version": 2,
|
||||
"endpoints": {
|
||||
"*/*": {
|
||||
"endpoint": "{service}.{region}.amazonaws.com"
|
||||
},
|
||||
"cn-north-1/*": {
|
||||
"endpoint": "{service}.{region}.amazonaws.com.cn",
|
||||
"signatureVersion": "v4"
|
||||
},
|
||||
"us-gov-west-1/iam": {
|
||||
"endpoint": "iam.us-gov.amazonaws.com"
|
||||
},
|
||||
"us-gov-west-1/sts": {
|
||||
"endpoint": "sts.us-gov-west-1.amazonaws.com"
|
||||
},
|
||||
"us-gov-west-1/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"*/cloudfront": {
|
||||
"endpoint": "cloudfront.amazonaws.com",
|
||||
"signingRegion": "us-east-1"
|
||||
},
|
||||
"*/cloudsearchdomain": {
|
||||
"endpoint": "",
|
||||
"signingRegion": "us-east-1"
|
||||
},
|
||||
"*/iam": {
|
||||
"endpoint": "iam.amazonaws.com",
|
||||
"signingRegion": "us-east-1"
|
||||
},
|
||||
"*/importexport": {
|
||||
"endpoint": "importexport.amazonaws.com",
|
||||
"signingRegion": "us-east-1"
|
||||
},
|
||||
"*/route53": {
|
||||
"endpoint": "route53.amazonaws.com",
|
||||
"signingRegion": "us-east-1"
|
||||
},
|
||||
"*/sts": {
|
||||
"endpoint": "sts.amazonaws.com",
|
||||
"signingRegion": "us-east-1"
|
||||
},
|
||||
"us-east-1/sdb": {
|
||||
"endpoint": "sdb.amazonaws.com",
|
||||
"signingRegion": "us-east-1"
|
||||
},
|
||||
"us-east-1/s3": {
|
||||
"endpoint": "s3.amazonaws.com"
|
||||
},
|
||||
"us-west-1/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"us-west-2/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"eu-west-1/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"ap-southeast-1/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"ap-southeast-2/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"ap-northeast-1/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"sa-east-1/s3": {
|
||||
"endpoint": "s3-{region}.amazonaws.com"
|
||||
},
|
||||
"eu-central-1/s3": {
|
||||
"endpoint": "{service}.{region}.amazonaws.com",
|
||||
"signatureVersion": "v4"
|
||||
}
|
||||
}
|
||||
}
|
89  Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go  (generated, vendored, new file)
@@ -0,0 +1,89 @@
|
||||
package endpoints
|
||||
|
||||
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
|
||||
|
||||
type endpointStruct struct {
|
||||
Version int
|
||||
Endpoints map[string]endpointEntry
|
||||
}
|
||||
|
||||
type endpointEntry struct {
|
||||
Endpoint string
|
||||
SigningRegion string
|
||||
}
|
||||
|
||||
var endpointsMap = endpointStruct{
|
||||
Version: 2,
|
||||
Endpoints: map[string]endpointEntry{
|
||||
"*/*": {
|
||||
Endpoint: "{service}.{region}.amazonaws.com",
|
||||
},
|
||||
"*/cloudfront": {
|
||||
Endpoint: "cloudfront.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"*/cloudsearchdomain": {
|
||||
Endpoint: "",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"*/iam": {
|
||||
Endpoint: "iam.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"*/importexport": {
|
||||
Endpoint: "importexport.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"*/route53": {
|
||||
Endpoint: "route53.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"*/sts": {
|
||||
Endpoint: "sts.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"ap-northeast-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"ap-southeast-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"ap-southeast-2/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"cn-north-1/*": {
|
||||
Endpoint: "{service}.{region}.amazonaws.com.cn",
|
||||
},
|
||||
"eu-central-1/s3": {
|
||||
Endpoint: "{service}.{region}.amazonaws.com",
|
||||
},
|
||||
"eu-west-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"sa-east-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"us-east-1/s3": {
|
||||
Endpoint: "s3.amazonaws.com",
|
||||
},
|
||||
"us-east-1/sdb": {
|
||||
Endpoint: "sdb.amazonaws.com",
|
||||
SigningRegion: "us-east-1",
|
||||
},
|
||||
"us-gov-west-1/iam": {
|
||||
Endpoint: "iam.us-gov.amazonaws.com",
|
||||
},
|
||||
"us-gov-west-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"us-gov-west-1/sts": {
|
||||
Endpoint: "sts.us-gov-west-1.amazonaws.com",
|
||||
},
|
||||
"us-west-1/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
"us-west-2/s3": {
|
||||
Endpoint: "s3-{region}.amazonaws.com",
|
||||
},
|
||||
},
|
||||
}
|
28  Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_test.go  (generated, vendored, new file)
@@ -0,0 +1,28 @@
package endpoints

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestGlobalEndpoints(t *testing.T) {
	region := "mock-region-1"
	svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts"}

	for _, name := range svcs {
		ep, sr := EndpointForRegion(name, region)
		assert.Equal(t, name+".amazonaws.com", ep)
		assert.Equal(t, "us-east-1", sr)
	}
}

func TestServicesInCN(t *testing.T) {
	region := "cn-north-1"
	svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "s3"}

	for _, name := range svcs {
		ep, _ := EndpointForRegion(name, region)
		assert.Equal(t, name+"."+region+".amazonaws.com.cn", ep)
	}
}
33  Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build.go  (generated, vendored, new file)
@@ -0,0 +1,33 @@
// Package query provides serialisation of AWS query requests, and responses.
package query

//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/query.json build_test.go

import (
	"net/url"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/internal/protocol/query/queryutil"
)

// Build builds a request for an AWS Query service.
func Build(r *aws.Request) {
	body := url.Values{
		"Action":  {r.Operation.Name},
		"Version": {r.Service.APIVersion},
	}
	if err := queryutil.Parse(body, r.Params, false); err != nil {
		r.Error = awserr.New("SerializationError", "failed encoding Query request", err)
		return
	}

	if r.ExpireTime == 0 {
		r.HTTPRequest.Method = "POST"
		r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
		r.SetBufferBody([]byte(body.Encode()))
	} else { // This is a pre-signed request
		r.HTTPRequest.Method = "GET"
		r.HTTPRequest.URL.RawQuery = body.Encode()
	}
}
1482  Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build_test.go  (generated, vendored, new file)
(File diff suppressed because it is too large.)
223  Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go  (generated, vendored, new file)
@@ -0,0 +1,223 @@
|
||||
package queryutil
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Parse parses an object i and fills a url.Values object. The isEC2 flag
|
||||
// indicates if this is the EC2 Query sub-protocol.
|
||||
func Parse(body url.Values, i interface{}, isEC2 bool) error {
|
||||
q := queryParser{isEC2: isEC2}
|
||||
return q.parseValue(body, reflect.ValueOf(i), "", "")
|
||||
}
|
||||
|
||||
func elemOf(value reflect.Value) reflect.Value {
|
||||
for value.Kind() == reflect.Ptr {
|
||||
value = value.Elem()
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
type queryParser struct {
|
||||
isEC2 bool
|
||||
}
|
||||
|
||||
func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
|
||||
value = elemOf(value)
|
||||
|
||||
// no need to handle zero values
|
||||
if !value.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
t := tag.Get("type")
|
||||
if t == "" {
|
||||
switch value.Kind() {
|
||||
case reflect.Struct:
|
||||
t = "structure"
|
||||
case reflect.Slice:
|
||||
t = "list"
|
||||
case reflect.Map:
|
||||
t = "map"
|
||||
}
|
||||
}
|
||||
|
||||
switch t {
|
||||
case "structure":
|
||||
return q.parseStruct(v, value, prefix)
|
||||
case "list":
|
||||
return q.parseList(v, value, prefix, tag)
|
||||
case "map":
|
||||
return q.parseMap(v, value, prefix, tag)
|
||||
default:
|
||||
return q.parseScalar(v, value, prefix, tag)
|
||||
}
|
||||
}
|
||||
|
||||
func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
|
||||
if !value.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
t := value.Type()
|
||||
for i := 0; i < value.NumField(); i++ {
|
||||
if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c {
|
||||
continue // ignore unexported fields
|
||||
}
|
||||
|
||||
value := elemOf(value.Field(i))
|
||||
field := t.Field(i)
|
||||
var name string
|
||||
|
||||
if q.isEC2 {
|
||||
name = field.Tag.Get("queryName")
|
||||
}
|
||||
if name == "" {
|
||||
if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
|
||||
name = field.Tag.Get("locationNameList")
|
||||
} else if locName := field.Tag.Get("locationName"); locName != "" {
|
||||
name = locName
|
||||
}
|
||||
if name != "" && q.isEC2 {
|
||||
name = strings.ToUpper(name[0:1]) + name[1:]
|
||||
}
|
||||
}
|
||||
if name == "" {
|
||||
name = field.Name
|
||||
}
|
||||
|
||||
if prefix != "" {
|
||||
name = prefix + "." + name
|
||||
}
|
||||
|
||||
if err := q.parseValue(v, value, name, field.Tag); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
|
||||
// If it's empty, generate an empty value
|
||||
if !value.IsNil() && value.Len() == 0 {
|
||||
v.Set(prefix, "")
|
||||
return nil
|
||||
}
|
||||
|
||||
// check for unflattened list member
|
||||
if !q.isEC2 && tag.Get("flattened") == "" {
|
||||
prefix += ".member"
|
||||
}
|
||||
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
slicePrefix := prefix
|
||||
if slicePrefix == "" {
|
||||
slicePrefix = strconv.Itoa(i + 1)
|
||||
} else {
|
||||
slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
|
||||
}
|
||||
if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
|
||||
// If it's empty, generate an empty value
|
||||
if !value.IsNil() && value.Len() == 0 {
|
||||
v.Set(prefix, "")
|
||||
return nil
|
||||
}
|
||||
|
||||
// check for unflattened list member
|
||||
if !q.isEC2 && tag.Get("flattened") == "" {
|
||||
prefix += ".entry"
|
||||
}
|
||||
|
||||
// sort keys for improved serialization consistency.
|
||||
// this is not strictly necessary for protocol support.
|
||||
mapKeyValues := value.MapKeys()
|
||||
mapKeys := map[string]reflect.Value{}
|
||||
mapKeyNames := make([]string, len(mapKeyValues))
|
||||
for i, mapKey := range mapKeyValues {
|
||||
name := mapKey.String()
|
||||
mapKeys[name] = mapKey
|
||||
mapKeyNames[i] = name
|
||||
}
|
||||
sort.Strings(mapKeyNames)
|
||||
|
||||
for i, mapKeyName := range mapKeyNames {
|
||||
mapKey := mapKeys[mapKeyName]
|
||||
mapValue := value.MapIndex(mapKey)
|
||||
|
||||
kname := tag.Get("locationNameKey")
|
||||
if kname == "" {
|
||||
kname = "key"
|
||||
}
|
||||
vname := tag.Get("locationNameValue")
|
||||
if vname == "" {
|
||||
vname = "value"
|
||||
}
|
||||
|
||||
// serialize key
|
||||
var keyName string
|
||||
if prefix == "" {
|
||||
keyName = strconv.Itoa(i+1) + "." + kname
|
||||
} else {
|
||||
keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
|
||||
}
|
||||
|
||||
if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// serialize value
|
||||
var valueName string
|
||||
if prefix == "" {
|
||||
valueName = strconv.Itoa(i+1) + "." + vname
|
||||
} else {
|
||||
valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
|
||||
}
|
||||
|
||||
if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
|
||||
switch value := r.Interface().(type) {
|
||||
case string:
|
||||
v.Set(name, value)
|
||||
case []byte:
|
||||
if !r.IsNil() {
|
||||
v.Set(name, base64.StdEncoding.EncodeToString(value))
|
||||
}
|
||||
case bool:
|
||||
v.Set(name, strconv.FormatBool(value))
|
||||
case int64:
|
||||
v.Set(name, strconv.FormatInt(value, 10))
|
||||
case int:
|
||||
v.Set(name, strconv.Itoa(value))
|
||||
case float64:
|
||||
v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
|
||||
case float32:
|
||||
v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
|
||||
case time.Time:
|
||||
const ISO8601UTC = "2006-01-02T15:04:05Z"
|
||||
v.Set(name, value.UTC().Format(ISO8601UTC))
|
||||
default:
|
||||
return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
|
||||
}
|
||||
return nil
|
||||
}
|
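An in-package sketch of how Parse flattens a struct into AWS Query parameters; the input shape below is hypothetical, not a real API type. Scalars become name=value pairs, and non-flattened lists (outside the EC2 sub-protocol) gain the ".member.N" suffix handled by parseList above.

package queryutil

import (
	"fmt"
	"net/url"
)

// listMetricsInput is a made-up shape used only to illustrate the tags
// the parser looks at ("type", and optionally "locationName"/"flattened").
type listMetricsInput struct {
	Namespace  *string   `type:"string"`
	Dimensions []*string `type:"list"`
}

func exampleParse() {
	ns := "AWS/EC2"
	d1, d2 := "InstanceId", "AutoScalingGroupName"
	in := &listMetricsInput{Namespace: &ns, Dimensions: []*string{&d1, &d2}}

	body := url.Values{"Action": {"ListMetrics"}, "Version": {"2010-08-01"}}
	if err := Parse(body, in, false); err != nil {
		panic(err)
	}

	// Produces, among the Action/Version pairs:
	//   Namespace=AWS/EC2
	//   Dimensions.member.1=InstanceId
	//   Dimensions.member.2=AutoScalingGroupName
	fmt.Println(body.Encode())
}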
29  Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal.go  (generated, vendored, new file)
@@ -0,0 +1,29 @@
package query

//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/query.json unmarshal_test.go

import (
	"encoding/xml"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
)

// Unmarshal unmarshals a response for an AWS Query service.
func Unmarshal(r *aws.Request) {
	defer r.HTTPResponse.Body.Close()
	if r.DataFilled() {
		decoder := xml.NewDecoder(r.HTTPResponse.Body)
		err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
		if err != nil {
			r.Error = awserr.New("SerializationError", "failed decoding Query response", err)
			return
		}
	}
}

// UnmarshalMeta unmarshals header response values for an AWS Query service.
func UnmarshalMeta(r *aws.Request) {
	// TODO implement unmarshaling of request IDs
}
33  Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_error.go  (generated, vendored, new file)
@@ -0,0 +1,33 @@
package query

import (
	"encoding/xml"
	"io"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
)

type xmlErrorResponse struct {
	XMLName   xml.Name `xml:"ErrorResponse"`
	Code      string   `xml:"Error>Code"`
	Message   string   `xml:"Error>Message"`
	RequestID string   `xml:"RequestId"`
}

// UnmarshalError unmarshals an error response for an AWS Query service.
func UnmarshalError(r *aws.Request) {
	defer r.HTTPResponse.Body.Close()

	resp := &xmlErrorResponse{}
	err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
	if err != nil && err != io.EOF {
		r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err)
	} else {
		r.Error = awserr.NewRequestFailure(
			awserr.New(resp.Code, resp.Message, nil),
			r.HTTPResponse.StatusCode,
			resp.RequestID,
		)
	}
}
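An in-package sketch showing UnmarshalError turning a canned ErrorResponse body into an awserr.RequestFailure; the XML and request values are made up, but the resulting code and message are the ones asserted by the SignatureDoesNotMatch test at the top of this section.

package query

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/aws/aws-sdk-go/aws"
)

// exampleUnmarshalError is illustrative only, not part of the vendored code.
func exampleUnmarshalError() {
	xmlBody := `<ErrorResponse><Error>` +
		`<Code>SignatureDoesNotMatch</Code>` +
		`<Message>Signature does not match.</Message>` +
		`</Error><RequestId>abc-123</RequestId></ErrorResponse>`

	r := &aws.Request{
		HTTPResponse: &http.Response{
			StatusCode: 403,
			Body:       ioutil.NopCloser(bytes.NewReader([]byte(xmlBody))),
		},
	}

	UnmarshalError(r)
	// r.Error is now an awserr.RequestFailure carrying the code, message,
	// HTTP status code, and request id from the XML body.
	fmt.Println(r.Error)
}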
1418  Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_test.go  (generated, vendored, new file)
(File diff suppressed because it is too large.)
217  Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go  (generated, vendored, new file)
@@ -0,0 +1,217 @@
|
||||
// Package rest provides RESTful serialisation of AWS requests and responses.
|
||||
package rest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"path"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
// RFC822 returns an RFC822 formatted timestamp for AWS protocols
|
||||
const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
|
||||
|
||||
// Whether the byte value can be sent without escaping in AWS URLs
|
||||
var noEscape [256]bool
|
||||
|
||||
func init() {
|
||||
for i := 0; i < len(noEscape); i++ {
|
||||
// AWS expects every character except these to be escaped
|
||||
noEscape[i] = (i >= 'A' && i <= 'Z') ||
|
||||
(i >= 'a' && i <= 'z') ||
|
||||
(i >= '0' && i <= '9') ||
|
||||
i == '-' ||
|
||||
i == '.' ||
|
||||
i == '_' ||
|
||||
i == '~'
|
||||
}
|
||||
}
|
||||
|
||||
// Build builds the REST component of a service request.
|
||||
func Build(r *aws.Request) {
|
||||
if r.ParamsFilled() {
|
||||
v := reflect.ValueOf(r.Params).Elem()
|
||||
buildLocationElements(r, v)
|
||||
buildBody(r, v)
|
||||
}
|
||||
}
|
||||
|
||||
func buildLocationElements(r *aws.Request, v reflect.Value) {
|
||||
query := r.HTTPRequest.URL.Query()
|
||||
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
m := v.Field(i)
|
||||
if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
|
||||
continue
|
||||
}
|
||||
|
||||
if m.IsValid() {
|
||||
field := v.Type().Field(i)
|
||||
name := field.Tag.Get("locationName")
|
||||
if name == "" {
|
||||
name = field.Name
|
||||
}
|
||||
if m.Kind() == reflect.Ptr {
|
||||
m = m.Elem()
|
||||
}
|
||||
if !m.IsValid() {
|
||||
continue
|
||||
}
|
||||
|
||||
switch field.Tag.Get("location") {
|
||||
case "headers": // header maps
|
||||
buildHeaderMap(r, m, field.Tag.Get("locationName"))
|
||||
case "header":
|
||||
buildHeader(r, m, name)
|
||||
case "uri":
|
||||
buildURI(r, m, name)
|
||||
case "querystring":
|
||||
buildQueryString(r, m, name, query)
|
||||
}
|
||||
}
|
||||
if r.Error != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
r.HTTPRequest.URL.RawQuery = query.Encode()
|
||||
updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path)
|
||||
}
|
||||
|
||||
func buildBody(r *aws.Request, v reflect.Value) {
|
||||
if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
|
||||
if payloadName := field.Tag.Get("payload"); payloadName != "" {
|
||||
pfield, _ := v.Type().FieldByName(payloadName)
|
||||
if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
|
||||
payload := reflect.Indirect(v.FieldByName(payloadName))
|
||||
if payload.IsValid() && payload.Interface() != nil {
|
||||
switch reader := payload.Interface().(type) {
|
||||
case io.ReadSeeker:
|
||||
r.SetReaderBody(reader)
|
||||
case []byte:
|
||||
r.SetBufferBody(reader)
|
||||
case string:
|
||||
r.SetStringBody(reader)
|
||||
default:
|
||||
r.Error = awserr.New("SerializationError",
|
||||
"failed to encode REST request",
|
||||
fmt.Errorf("unknown payload type %s", payload.Type()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func buildHeader(r *aws.Request, v reflect.Value, name string) {
|
||||
str, err := convertType(v)
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
|
||||
} else if str != nil {
|
||||
r.HTTPRequest.Header.Add(name, *str)
|
||||
}
|
||||
}
|
||||
|
||||
func buildHeaderMap(r *aws.Request, v reflect.Value, prefix string) {
|
||||
for _, key := range v.MapKeys() {
|
||||
str, err := convertType(v.MapIndex(key))
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
|
||||
} else if str != nil {
|
||||
r.HTTPRequest.Header.Add(prefix+key.String(), *str)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func buildURI(r *aws.Request, v reflect.Value, name string) {
|
||||
value, err := convertType(v)
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
|
||||
} else if value != nil {
|
||||
uri := r.HTTPRequest.URL.Path
|
||||
uri = strings.Replace(uri, "{"+name+"}", EscapePath(*value, true), -1)
|
||||
uri = strings.Replace(uri, "{"+name+"+}", EscapePath(*value, false), -1)
|
||||
r.HTTPRequest.URL.Path = uri
|
||||
}
|
||||
}
|
||||
|
||||
func buildQueryString(r *aws.Request, v reflect.Value, name string, query url.Values) {
|
||||
str, err := convertType(v)
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
|
||||
} else if str != nil {
|
||||
query.Set(name, *str)
|
||||
}
|
||||
}
|
||||
|
||||
func updatePath(url *url.URL, urlPath string) {
|
||||
scheme, query := url.Scheme, url.RawQuery
|
||||
|
||||
hasSlash := strings.HasSuffix(urlPath, "/")
|
||||
|
||||
// clean up path
|
||||
urlPath = path.Clean(urlPath)
|
||||
if hasSlash && !strings.HasSuffix(urlPath, "/") {
|
||||
urlPath += "/"
|
||||
}
|
||||
|
||||
// get formatted URL minus scheme so we can build this into Opaque
|
||||
url.Scheme, url.Path, url.RawQuery = "", "", ""
|
||||
s := url.String()
|
||||
url.Scheme = scheme
|
||||
url.RawQuery = query
|
||||
|
||||
// build opaque URI
|
||||
url.Opaque = s + urlPath
|
||||
}
|
||||
|
||||
// EscapePath escapes part of a URL path in Amazon style
|
||||
func EscapePath(path string, encodeSep bool) string {
|
||||
var buf bytes.Buffer
|
||||
for i := 0; i < len(path); i++ {
|
||||
c := path[i]
|
||||
if noEscape[c] || (c == '/' && !encodeSep) {
|
||||
buf.WriteByte(c)
|
||||
} else {
|
||||
buf.WriteByte('%')
|
||||
buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16)))
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func convertType(v reflect.Value) (*string, error) {
|
||||
v = reflect.Indirect(v)
|
||||
if !v.IsValid() {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var str string
|
||||
switch value := v.Interface().(type) {
|
||||
case string:
|
||||
str = value
|
||||
case []byte:
|
||||
str = base64.StdEncoding.EncodeToString(value)
|
||||
case bool:
|
||||
str = strconv.FormatBool(value)
|
||||
case int64:
|
||||
str = strconv.FormatInt(value, 10)
|
||||
case float64:
|
||||
str = strconv.FormatFloat(value, 'f', -1, 64)
|
||||
case time.Time:
|
||||
str = value.UTC().Format(RFC822)
|
||||
default:
|
||||
err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
|
||||
return nil, err
|
||||
}
|
||||
return &str, nil
|
||||
}
|
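A small in-package sketch of EscapePath above: encodeSep controls whether the "/" separator itself is percent-encoded, which is why buildURI calls it with true for plain "{name}" URI members and false for greedy "{name+}" members. The path value here is made up.

package rest

import "fmt"

// exampleEscapePath is illustrative only, not part of the vendored code.
func exampleEscapePath() {
	// Separator encoded: suitable for a single path segment.
	fmt.Println(EscapePath("photos/2015 summer.png", true))
	// photos%2F2015%20summer.png

	// Separator preserved: suitable for greedy, multi-segment members.
	fmt.Println(EscapePath("photos/2015 summer.png", false))
	// photos/2015%20summer.png
}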
45  Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go  (generated, vendored, new file)
@@ -0,0 +1,45 @@
package rest

import "reflect"

// PayloadMember returns the payload field member of i if there is one, or nil.
func PayloadMember(i interface{}) interface{} {
	if i == nil {
		return nil
	}

	v := reflect.ValueOf(i).Elem()
	if !v.IsValid() {
		return nil
	}
	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
		if payloadName := field.Tag.Get("payload"); payloadName != "" {
			field, _ := v.Type().FieldByName(payloadName)
			if field.Tag.Get("type") != "structure" {
				return nil
			}

			payload := v.FieldByName(payloadName)
			if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
				return payload.Interface()
			}
		}
	}
	return nil
}

// PayloadType returns the type of a payload field member of i if there is one, or "".
func PayloadType(i interface{}) string {
	v := reflect.Indirect(reflect.ValueOf(i))
	if !v.IsValid() {
		return ""
	}
	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
		if payloadName := field.Tag.Get("payload"); payloadName != "" {
			if member, ok := v.Type().FieldByName(payloadName); ok {
				return member.Tag.Get("type")
			}
		}
	}
	return ""
}
174  Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go  (generated, vendored, new file)
@@ -0,0 +1,174 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
// Unmarshal unmarshals the REST component of a response in a REST service.
|
||||
func Unmarshal(r *aws.Request) {
|
||||
if r.DataFilled() {
|
||||
v := reflect.Indirect(reflect.ValueOf(r.Data))
|
||||
unmarshalBody(r, v)
|
||||
unmarshalLocationElements(r, v)
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalBody(r *aws.Request, v reflect.Value) {
|
||||
if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
|
||||
if payloadName := field.Tag.Get("payload"); payloadName != "" {
|
||||
pfield, _ := v.Type().FieldByName(payloadName)
|
||||
if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
|
||||
payload := v.FieldByName(payloadName)
|
||||
if payload.IsValid() {
|
||||
switch payload.Interface().(type) {
|
||||
case []byte:
|
||||
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
|
||||
} else {
|
||||
payload.Set(reflect.ValueOf(b))
|
||||
}
|
||||
case *string:
|
||||
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
|
||||
} else {
|
||||
str := string(b)
|
||||
payload.Set(reflect.ValueOf(&str))
|
||||
}
|
||||
default:
|
||||
switch payload.Type().String() {
|
||||
case "io.ReadSeeker":
|
||||
payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body)))
|
||||
case "aws.ReadSeekCloser", "io.ReadCloser":
|
||||
payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
|
||||
default:
|
||||
r.Error = awserr.New("SerializationError",
|
||||
"failed to decode REST response",
|
||||
fmt.Errorf("unknown payload type %s", payload.Type()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalLocationElements(r *aws.Request, v reflect.Value) {
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
m, field := v.Field(i), v.Type().Field(i)
|
||||
if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
|
||||
continue
|
||||
}
|
||||
|
||||
if m.IsValid() {
|
||||
name := field.Tag.Get("locationName")
|
||||
if name == "" {
|
||||
name = field.Name
|
||||
}
|
||||
|
||||
switch field.Tag.Get("location") {
|
||||
case "statusCode":
|
||||
unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
|
||||
case "header":
|
||||
err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name))
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
|
||||
break
|
||||
}
|
||||
case "headers":
|
||||
prefix := field.Tag.Get("locationName")
|
||||
err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if r.Error != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalStatusCode(v reflect.Value, statusCode int) {
|
||||
if !v.IsValid() {
|
||||
return
|
||||
}
|
||||
|
||||
switch v.Interface().(type) {
|
||||
case *int64:
|
||||
s := int64(statusCode)
|
||||
v.Set(reflect.ValueOf(&s))
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
|
||||
switch r.Interface().(type) {
|
||||
case map[string]*string: // we only support string map value types
|
||||
out := map[string]*string{}
|
||||
for k, v := range headers {
|
||||
k = http.CanonicalHeaderKey(k)
|
||||
if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
|
||||
out[k[len(prefix):]] = &v[0]
|
||||
}
|
||||
}
|
||||
r.Set(reflect.ValueOf(out))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmarshalHeader(v reflect.Value, header string) error {
|
||||
if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch v.Interface().(type) {
|
||||
case *string:
|
||||
v.Set(reflect.ValueOf(&header))
|
||||
case []byte:
|
||||
b, err := base64.StdEncoding.DecodeString(header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Set(reflect.ValueOf(&b))
|
||||
case *bool:
|
||||
b, err := strconv.ParseBool(header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Set(reflect.ValueOf(&b))
|
||||
case *int64:
|
||||
i, err := strconv.ParseInt(header, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Set(reflect.ValueOf(&i))
|
||||
case *float64:
|
||||
f, err := strconv.ParseFloat(header, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Set(reflect.ValueOf(&f))
|
||||
case *time.Time:
|
||||
t, err := time.Parse(RFC822, header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Set(reflect.ValueOf(&t))
|
||||
default:
|
||||
err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
287  Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/build.go  (generated, vendored, new file)
@@ -0,0 +1,287 @@
|
||||
// Package xmlutil provides XML serialisation of AWS requests and responses.
|
||||
package xmlutil
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BuildXML will serialize params into an xml.Encoder.
|
||||
// Error will be returned if the serialization of any of the params or nested values fails.
|
||||
func BuildXML(params interface{}, e *xml.Encoder) error {
|
||||
b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
|
||||
root := NewXMLElement(xml.Name{})
|
||||
if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, c := range root.Children {
|
||||
for _, v := range c {
|
||||
return StructToXML(e, v, false)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns the reflection element of a value, if it is a pointer.
|
||||
func elemOf(value reflect.Value) reflect.Value {
|
||||
for value.Kind() == reflect.Ptr {
|
||||
value = value.Elem()
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
// A xmlBuilder serializes values from Go code to XML
|
||||
type xmlBuilder struct {
|
||||
encoder *xml.Encoder
|
||||
namespaces map[string]string
|
||||
}
|
||||
|
||||
// buildValue generic XMLNode builder for any type. Will build value for their specific type
|
||||
// struct, list, map, scalar.
|
||||
//
|
||||
// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If
|
||||
// type is not provided reflect will be used to determine the value's type.
|
||||
func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
|
||||
value = elemOf(value)
|
||||
if !value.IsValid() { // no need to handle zero values
|
||||
return nil
|
||||
} else if tag.Get("location") != "" { // don't handle non-body location values
|
||||
return nil
|
||||
}
|
||||
|
||||
t := tag.Get("type")
|
||||
if t == "" {
|
||||
switch value.Kind() {
|
||||
case reflect.Struct:
|
||||
t = "structure"
|
||||
case reflect.Slice:
|
||||
t = "list"
|
||||
case reflect.Map:
|
||||
t = "map"
|
||||
}
|
||||
}
|
||||
|
||||
switch t {
|
||||
case "structure":
|
||||
if field, ok := value.Type().FieldByName("SDKShapeTraits"); ok {
|
||||
tag = tag + reflect.StructTag(" ") + field.Tag
|
||||
}
|
||||
return b.buildStruct(value, current, tag)
|
||||
case "list":
|
||||
return b.buildList(value, current, tag)
|
||||
case "map":
|
||||
return b.buildMap(value, current, tag)
|
||||
default:
|
||||
return b.buildScalar(value, current, tag)
|
||||
}
|
||||
}
|
||||
|
||||
// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested
|
||||
// types are converted to XMLNodes also.
|
||||
func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
|
||||
if !value.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
fieldAdded := false
|
||||
|
||||
// unwrap payloads
|
||||
if payload := tag.Get("payload"); payload != "" {
|
||||
field, _ := value.Type().FieldByName(payload)
|
||||
tag = field.Tag
|
||||
value = elemOf(value.FieldByName(payload))
|
||||
|
||||
if !value.IsValid() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
|
||||
|
||||
// there is an xmlNamespace associated with this struct
|
||||
if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
|
||||
ns := xml.Attr{
|
||||
Name: xml.Name{Local: "xmlns"},
|
||||
Value: uri,
|
||||
}
|
||||
if prefix != "" {
|
||||
b.namespaces[prefix] = uri // register the namespace
|
||||
ns.Name.Local = "xmlns:" + prefix
|
||||
}
|
||||
|
||||
child.Attr = append(child.Attr, ns)
|
||||
}
|
||||
|
||||
t := value.Type()
|
||||
for i := 0; i < value.NumField(); i++ {
|
||||
if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c {
|
||||
continue // ignore unexported fields
|
||||
}
|
||||
|
||||
member := elemOf(value.Field(i))
|
||||
field := t.Field(i)
|
||||
mTag := field.Tag
|
||||
|
||||
if mTag.Get("location") != "" { // skip non-body members
|
||||
continue
|
||||
}
|
||||
|
||||
memberName := mTag.Get("locationName")
|
||||
if memberName == "" {
|
||||
memberName = field.Name
|
||||
mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
|
||||
}
|
||||
if err := b.buildValue(member, child, mTag); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fieldAdded = true
|
||||
}
|
||||
|
||||
if fieldAdded { // only append this child if we have one ore more valid members
|
||||
current.AddChild(child)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildList adds the value's list items to the current XMLNode as children nodes. All
|
||||
// nested values in the list are converted to XMLNodes also.
|
||||
func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
|
||||
if value.IsNil() { // don't build omitted lists
|
||||
return nil
|
||||
}
|
||||
|
||||
// check for unflattened list member
|
||||
flattened := tag.Get("flattened") != ""
|
||||
|
||||
xname := xml.Name{Local: tag.Get("locationName")}
|
||||
if flattened {
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
child := NewXMLElement(xname)
|
||||
current.AddChild(child)
|
||||
if err := b.buildValue(value.Index(i), child, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
list := NewXMLElement(xname)
|
||||
current.AddChild(list)
|
||||
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
iname := tag.Get("locationNameList")
|
||||
if iname == "" {
|
||||
iname = "member"
|
||||
}
|
||||
|
||||
child := NewXMLElement(xml.Name{Local: iname})
|
||||
list.AddChild(child)
|
||||
if err := b.buildValue(value.Index(i), child, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
|
||||
// nested values in the map are converted to XMLNodes also.
|
||||
//
|
||||
// Error will be returned if it is unable to build the map's values into XMLNodes
|
||||
func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
|
||||
if value.IsNil() { // don't build omitted maps
|
||||
return nil
|
||||
}
|
||||
|
||||
maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
|
||||
current.AddChild(maproot)
|
||||
current = maproot
|
||||
|
||||
kname, vname := "key", "value"
|
||||
if n := tag.Get("locationNameKey"); n != "" {
|
||||
kname = n
|
||||
}
|
||||
if n := tag.Get("locationNameValue"); n != "" {
|
||||
vname = n
|
||||
}
|
||||
|
||||
// sorting is not required for compliance, but it makes testing easier
|
||||
keys := make([]string, value.Len())
|
||||
for i, k := range value.MapKeys() {
|
||||
keys[i] = k.String()
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, k := range keys {
|
||||
v := value.MapIndex(reflect.ValueOf(k))
|
||||
|
||||
mapcur := current
|
||||
if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
|
||||
child := NewXMLElement(xml.Name{Local: "entry"})
|
||||
mapcur.AddChild(child)
|
||||
mapcur = child
|
||||
}
|
||||
|
||||
kchild := NewXMLElement(xml.Name{Local: kname})
|
||||
kchild.Text = k
|
||||
vchild := NewXMLElement(xml.Name{Local: vname})
|
||||
mapcur.AddChild(kchild)
|
||||
mapcur.AddChild(vchild)
|
||||
|
||||
if err := b.buildValue(v, vchild, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildScalar will convert the value into a string and append it as a attribute or child
|
||||
// of the current XMLNode.
|
||||
//
|
||||
// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value.
|
||||
//
|
||||
// Error will be returned if the value type is unsupported.
|
||||
func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
|
||||
var str string
|
||||
switch converted := value.Interface().(type) {
|
||||
case string:
|
||||
str = converted
|
||||
case []byte:
|
||||
if !value.IsNil() {
|
||||
str = base64.StdEncoding.EncodeToString(converted)
|
||||
}
|
||||
case bool:
|
||||
str = strconv.FormatBool(converted)
|
||||
case int64:
|
||||
str = strconv.FormatInt(converted, 10)
|
||||
case int:
|
||||
str = strconv.Itoa(converted)
|
||||
case float64:
|
||||
str = strconv.FormatFloat(converted, 'f', -1, 64)
|
||||
case float32:
|
||||
str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
|
||||
case time.Time:
|
||||
const ISO8601UTC = "2006-01-02T15:04:05Z"
|
||||
str = converted.UTC().Format(ISO8601UTC)
|
||||
default:
|
||||
return fmt.Errorf("unsupported value for param %s: %v (%s)",
|
||||
tag.Get("locationName"), value.Interface(), value.Type().Name())
|
||||
}
|
||||
|
||||
xname := xml.Name{Local: tag.Get("locationName")}
|
||||
if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
|
||||
attr := xml.Attr{Name: xname, Value: str}
|
||||
current.Attr = append(current.Attr, attr)
|
||||
} else { // regular text node
|
||||
current.AddChild(&XMLNode{Name: xname, Text: str})
|
||||
}
|
||||
return nil
|
||||
}
|
260  Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go  (generated, vendored, new file)
@@ -0,0 +1,260 @@
|
||||
package xmlutil
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// UnmarshalXML deserializes an xml.Decoder into the container v. V
|
||||
// needs to match the shape of the XML expected to be decoded.
|
||||
// If the shape doesn't match unmarshaling will fail.
|
||||
func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
|
||||
n, _ := XMLToStruct(d, nil)
|
||||
if n.Children != nil {
|
||||
for _, root := range n.Children {
|
||||
for _, c := range root {
|
||||
if wrappedChild, ok := c.Children[wrapper]; ok {
|
||||
c = wrappedChild[0] // pull out wrapped element
|
||||
}
|
||||
|
||||
err := parse(reflect.ValueOf(v), c, "")
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect
|
||||
// will be used to determine the type from r.
|
||||
func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
|
||||
rtype := r.Type()
|
||||
if rtype.Kind() == reflect.Ptr {
|
||||
rtype = rtype.Elem() // check kind of actual element type
|
||||
}
|
||||
|
||||
t := tag.Get("type")
|
||||
if t == "" {
|
||||
switch rtype.Kind() {
|
||||
case reflect.Struct:
|
||||
t = "structure"
|
||||
case reflect.Slice:
|
||||
t = "list"
|
||||
case reflect.Map:
|
||||
t = "map"
|
||||
}
|
||||
}
|
||||
|
||||
switch t {
|
||||
case "structure":
|
||||
if field, ok := rtype.FieldByName("SDKShapeTraits"); ok {
|
||||
tag = field.Tag
|
||||
}
|
||||
return parseStruct(r, node, tag)
|
||||
case "list":
|
||||
return parseList(r, node, tag)
|
||||
case "map":
|
||||
return parseMap(r, node, tag)
|
||||
default:
|
||||
return parseScalar(r, node, tag)
|
||||
}
|
||||
}
|
||||
|
||||
// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
|
||||
// types in the structure will also be deserialized.
|
||||
func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
|
||||
t := r.Type()
|
||||
if r.Kind() == reflect.Ptr {
|
||||
if r.IsNil() { // create the structure if it's nil
|
||||
s := reflect.New(r.Type().Elem())
|
||||
r.Set(s)
|
||||
r = s
|
||||
}
|
||||
|
||||
r = r.Elem()
|
||||
t = t.Elem()
|
||||
}
|
||||
|
||||
// unwrap any payloads
|
||||
if payload := tag.Get("payload"); payload != "" {
|
||||
field, _ := t.FieldByName(payload)
|
||||
return parseStruct(r.FieldByName(payload), node, field.Tag)
|
||||
}
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
if c := field.Name[0:1]; strings.ToLower(c) == c {
|
||||
continue // ignore unexported fields
|
||||
}
|
||||
|
||||
// figure out what this field is called
|
||||
name := field.Name
|
||||
if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
|
||||
name = field.Tag.Get("locationNameList")
|
||||
} else if locName := field.Tag.Get("locationName"); locName != "" {
|
||||
name = locName
|
||||
}
|
||||
|
||||
// try to find the field by name in elements
|
||||
elems := node.Children[name]
|
||||
|
||||
if elems == nil { // try to find the field in attributes
|
||||
for _, a := range node.Attr {
|
||||
if name == a.Name.Local {
|
||||
// turn this into a text node for de-serializing
|
||||
elems = []*XMLNode{{Text: a.Value}}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
member := r.FieldByName(field.Name)
|
||||
for _, elem := range elems {
|
||||
err := parse(member, elem, field.Tag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseList deserializes a list of values from an XML node. Each list entry
|
||||
// will also be deserialized.
|
||||
func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
|
||||
t := r.Type()
|
||||
|
||||
if tag.Get("flattened") == "" { // look at all item entries
|
||||
mname := "member"
|
||||
if name := tag.Get("locationNameList"); name != "" {
|
||||
mname = name
|
||||
}
|
||||
|
||||
if Children, ok := node.Children[mname]; ok {
|
||||
if r.IsNil() {
|
||||
r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
|
||||
}
|
||||
|
||||
for i, c := range Children {
|
||||
err := parse(r.Index(i), c, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
} else { // flattened list means this is a single element
|
||||
if r.IsNil() {
|
||||
r.Set(reflect.MakeSlice(t, 0, 0))
|
||||
}
|
||||
|
||||
childR := reflect.Zero(t.Elem())
|
||||
r.Set(reflect.Append(r, childR))
|
||||
err := parse(r.Index(r.Len()-1), node, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
|
||||
// will also be deserialized as map entries.
|
||||
func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
|
||||
if r.IsNil() {
|
||||
r.Set(reflect.MakeMap(r.Type()))
|
||||
}
|
||||
|
||||
if tag.Get("flattened") == "" { // look at all child entries
|
||||
for _, entry := range node.Children["entry"] {
|
||||
parseMapEntry(r, entry, tag)
|
||||
}
|
||||
} else { // this element is itself an entry
|
||||
parseMapEntry(r, node, tag)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseMapEntry deserializes a map entry from an XML node.
|
||||
func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
|
||||
kname, vname := "key", "value"
|
||||
if n := tag.Get("locationNameKey"); n != "" {
|
||||
kname = n
|
||||
}
|
||||
if n := tag.Get("locationNameValue"); n != "" {
|
||||
vname = n
|
||||
}
|
||||
|
||||
keys, ok := node.Children[kname]
|
||||
values := node.Children[vname]
|
||||
if ok {
|
||||
for i, key := range keys {
|
||||
keyR := reflect.ValueOf(key.Text)
|
||||
value := values[i]
|
||||
valueR := reflect.New(r.Type().Elem()).Elem()
|
||||
|
||||
parse(valueR, value, "")
|
||||
r.SetMapIndex(keyR, valueR)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseScalar deserializes an XMLNode value into a concrete type based on the
|
||||
// interface type of r.
|
||||
//
|
||||
// Error is returned if the deserialization fails due to invalid type conversion,
|
||||
// or unsupported interface type.
|
||||
func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
|
||||
switch r.Interface().(type) {
|
||||
case *string:
|
||||
r.Set(reflect.ValueOf(&node.Text))
|
||||
return nil
|
||||
case []byte:
|
||||
b, err := base64.StdEncoding.DecodeString(node.Text)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Set(reflect.ValueOf(b))
|
||||
case *bool:
|
||||
v, err := strconv.ParseBool(node.Text)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Set(reflect.ValueOf(&v))
|
||||
case *int64:
|
||||
v, err := strconv.ParseInt(node.Text, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Set(reflect.ValueOf(&v))
|
||||
case *float64:
|
||||
v, err := strconv.ParseFloat(node.Text, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Set(reflect.ValueOf(&v))
|
||||
case *time.Time:
|
||||
const ISO8601UTC = "2006-01-02T15:04:05Z"
|
||||
t, err := time.Parse(ISO8601UTC, node.Text)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.Set(reflect.ValueOf(&t))
|
||||
default:
|
||||
return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
|
||||
}
|
||||
return nil
|
||||
}
|
105
Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go
generated
vendored
Normal file
@ -0,0 +1,105 @@
|
||||
package xmlutil
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// An XMLNode contains the values to be encoded or decoded.
|
||||
type XMLNode struct {
|
||||
Name xml.Name `json:",omitempty"`
|
||||
Children map[string][]*XMLNode `json:",omitempty"`
|
||||
Text string `json:",omitempty"`
|
||||
Attr []xml.Attr `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
|
||||
func NewXMLElement(name xml.Name) *XMLNode {
|
||||
return &XMLNode{
|
||||
Name: name,
|
||||
Children: map[string][]*XMLNode{},
|
||||
Attr: []xml.Attr{},
|
||||
}
|
||||
}
|
||||
|
||||
// AddChild adds child to the XMLNode.
|
||||
func (n *XMLNode) AddChild(child *XMLNode) {
|
||||
if _, ok := n.Children[child.Name.Local]; !ok {
|
||||
n.Children[child.Name.Local] = []*XMLNode{}
|
||||
}
|
||||
n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
|
||||
}
|
||||
|
||||
// XMLToStruct converts an xml.Decoder stream to an XMLNode with nested values.
|
||||
func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
|
||||
out := &XMLNode{}
|
||||
for {
|
||||
tok, err := d.Token()
|
||||
if tok == nil || err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
||||
switch typed := tok.(type) {
|
||||
case xml.CharData:
|
||||
out.Text = string(typed.Copy())
|
||||
case xml.StartElement:
|
||||
el := typed.Copy()
|
||||
out.Attr = el.Attr
|
||||
if out.Children == nil {
|
||||
out.Children = map[string][]*XMLNode{}
|
||||
}
|
||||
|
||||
name := typed.Name.Local
|
||||
slice := out.Children[name]
|
||||
if slice == nil {
|
||||
slice = []*XMLNode{}
|
||||
}
|
||||
node, e := XMLToStruct(d, &el)
|
||||
if e != nil {
|
||||
return out, e
|
||||
}
|
||||
node.Name = typed.Name
|
||||
slice = append(slice, node)
|
||||
out.Children[name] = slice
|
||||
case xml.EndElement:
|
||||
if s != nil && s.Name.Local == typed.Name.Local { // matching end token
|
||||
return out, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// StructToXML writes an XMLNode to an xml.Encoder as tokens.
|
||||
func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
|
||||
e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
|
||||
|
||||
if node.Text != "" {
|
||||
e.EncodeToken(xml.CharData([]byte(node.Text)))
|
||||
} else if sorted {
|
||||
sortedNames := []string{}
|
||||
for k := range node.Children {
|
||||
sortedNames = append(sortedNames, k)
|
||||
}
|
||||
sort.Strings(sortedNames)
|
||||
|
||||
for _, k := range sortedNames {
|
||||
for _, v := range node.Children[k] {
|
||||
StructToXML(e, v, sorted)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, c := range node.Children {
|
||||
for _, v := range c {
|
||||
StructToXML(e, v, sorted)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
e.EncodeToken(xml.EndElement{Name: node.Name})
|
||||
return e.Flush()
|
||||
}
|
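The two functions above form the generic XML tree layer: XMLToStruct consumes an xml.Decoder token stream into an XMLNode tree, and StructToXML writes such a tree back out as tokens. A minimal round-trip sketch follows; it is not part of the vendored SDK, and because the package sits under internal/ it would in practice only compile from inside the SDK's own source tree.

```go
package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"strings"

	// Note: an internal/ path; importable only from within the aws-sdk-go tree.
	"github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
)

func main() {
	in := `<Person><Name>Jane</Name><Age>30</Age></Person>`

	// Decode the raw XML into the generic XMLNode tree.
	root, err := xmlutil.XMLToStruct(xml.NewDecoder(strings.NewReader(in)), nil)
	if err != nil {
		panic(err)
	}

	// The returned root wraps the document; <Person> is its single child.
	person := root.Children["Person"][0]

	// Re-encode the node, sorting child names for deterministic output.
	var out bytes.Buffer
	if err := xmlutil.StructToXML(xml.NewEncoder(&out), person, true); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // <Person><Age>30</Age><Name>Jane</Name></Person>
}
```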
43
Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/functional_test.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
package v4_test
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/internal/test/unit"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var _ = unit.Imported
|
||||
|
||||
func TestPresignHandler(t *testing.T) {
|
||||
svc := s3.New(nil)
|
||||
req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
|
||||
Bucket: aws.String("bucket"),
|
||||
Key: aws.String("key"),
|
||||
ContentDisposition: aws.String("a+b c$d"),
|
||||
ACL: aws.String("public-read"),
|
||||
})
|
||||
req.Time = time.Unix(0, 0)
|
||||
urlstr, err := req.Presign(5 * time.Minute)
|
||||
|
||||
assert.NoError(t, err)
|
||||
|
||||
expectedDate := "19700101T000000Z"
|
||||
expectedHeaders := "host;x-amz-acl"
|
||||
expectedSig := "7edcb4e3a1bf12f4989018d75acbe3a7f03df24bd6f3112602d59fc551f0e4e2"
|
||||
expectedCred := "AKID/19700101/mock-region/s3/aws4_request"
|
||||
|
||||
u, _ := url.Parse(urlstr)
|
||||
urlQ := u.Query()
|
||||
assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature"))
|
||||
assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential"))
|
||||
assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders"))
|
||||
assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date"))
|
||||
assert.Equal(t, "300", urlQ.Get("X-Amz-Expires"))
|
||||
|
||||
assert.NotContains(t, urlstr, "+") // + encoded as %20
|
||||
}
|
364
Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go
generated
vendored
Normal file
@ -0,0 +1,364 @@
|
||||
// Package v4 implements request signing using AWS Signature Version 4.
|
||||
package v4
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/internal/protocol/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
authHeaderPrefix = "AWS4-HMAC-SHA256"
|
||||
timeFormat = "20060102T150405Z"
|
||||
shortTimeFormat = "20060102"
|
||||
)
|
||||
|
||||
var ignoredHeaders = map[string]bool{
|
||||
"Authorization": true,
|
||||
"Content-Type": true,
|
||||
"Content-Length": true,
|
||||
"User-Agent": true,
|
||||
}
|
||||
|
||||
type signer struct {
|
||||
Request *http.Request
|
||||
Time time.Time
|
||||
ExpireTime time.Duration
|
||||
ServiceName string
|
||||
Region string
|
||||
CredValues credentials.Value
|
||||
Credentials *credentials.Credentials
|
||||
Query url.Values
|
||||
Body io.ReadSeeker
|
||||
Debug aws.LogLevelType
|
||||
Logger aws.Logger
|
||||
|
||||
isPresign bool
|
||||
formattedTime string
|
||||
formattedShortTime string
|
||||
|
||||
signedHeaders string
|
||||
canonicalHeaders string
|
||||
canonicalString string
|
||||
credentialString string
|
||||
stringToSign string
|
||||
signature string
|
||||
authorization string
|
||||
}
|
||||
|
||||
// Sign requests with signature version 4.
|
||||
//
|
||||
// Will sign the requests with the service config's Credentials object.
|
||||
// Signing is skipped if the credentials object is the credentials.AnonymousCredentials
|
||||
// object.
|
||||
func Sign(req *aws.Request) {
|
||||
// Skip signing the request if the
// AnonymousCredentials object is used.
|
||||
if req.Service.Config.Credentials == credentials.AnonymousCredentials {
|
||||
return
|
||||
}
|
||||
|
||||
region := req.Service.SigningRegion
|
||||
if region == "" {
|
||||
region = aws.StringValue(req.Service.Config.Region)
|
||||
}
|
||||
|
||||
name := req.Service.SigningName
|
||||
if name == "" {
|
||||
name = req.Service.ServiceName
|
||||
}
|
||||
|
||||
s := signer{
|
||||
Request: req.HTTPRequest,
|
||||
Time: req.Time,
|
||||
ExpireTime: req.ExpireTime,
|
||||
Query: req.HTTPRequest.URL.Query(),
|
||||
Body: req.Body,
|
||||
ServiceName: name,
|
||||
Region: region,
|
||||
Credentials: req.Service.Config.Credentials,
|
||||
Debug: req.Service.Config.LogLevel.Value(),
|
||||
Logger: req.Service.Config.Logger,
|
||||
}
|
||||
|
||||
req.Error = s.sign()
|
||||
}
|
||||
|
||||
func (v4 *signer) sign() error {
|
||||
if v4.ExpireTime != 0 {
|
||||
v4.isPresign = true
|
||||
}
|
||||
|
||||
if v4.isRequestSigned() {
|
||||
if !v4.Credentials.IsExpired() {
|
||||
// If the request is already signed, and the credentials have not
|
||||
// expired yet, skip re-signing the request.
|
||||
return nil
|
||||
}
|
||||
|
||||
// The credentials have expired for this request. The current signing
|
||||
// is invalid, and the request needs to be re-signed because it would otherwise fail.
|
||||
if v4.isPresign {
|
||||
v4.removePresign()
|
||||
// Update the request's query string to ensure the values stay in
|
||||
// sync in the case retrieving the new credentials fails.
|
||||
v4.Request.URL.RawQuery = v4.Query.Encode()
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
v4.CredValues, err = v4.Credentials.Get()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if v4.isPresign {
|
||||
v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
|
||||
if v4.CredValues.SessionToken != "" {
|
||||
v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
|
||||
} else {
|
||||
v4.Query.Del("X-Amz-Security-Token")
|
||||
}
|
||||
} else if v4.CredValues.SessionToken != "" {
|
||||
v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
|
||||
}
|
||||
|
||||
v4.build()
|
||||
|
||||
if v4.Debug.Matches(aws.LogDebugWithSigning) {
|
||||
v4.logSigningInfo()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const logSignInfoMsg = `DEBUG: Request Signature:
|
||||
---[ CANONICAL STRING ]-----------------------------
|
||||
%s
|
||||
---[ STRING TO SIGN ]--------------------------------
|
||||
%s%s
|
||||
-----------------------------------------------------`
|
||||
const logSignedURLMsg = `
|
||||
---[ SIGNED URL ]------------------------------------
|
||||
%s`
|
||||
|
||||
func (v4 *signer) logSigningInfo() {
|
||||
signedURLMsg := ""
|
||||
if v4.isPresign {
|
||||
signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String())
|
||||
}
|
||||
msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg)
|
||||
v4.Logger.Log(msg)
|
||||
}
|
||||
|
||||
func (v4 *signer) build() {
|
||||
v4.buildTime() // no depends
|
||||
v4.buildCredentialString() // no depends
|
||||
if v4.isPresign {
|
||||
v4.buildQuery() // no depends
|
||||
}
|
||||
v4.buildCanonicalHeaders() // depends on cred string
|
||||
v4.buildCanonicalString() // depends on canon headers / signed headers
|
||||
v4.buildStringToSign() // depends on canon string
|
||||
v4.buildSignature() // depends on string to sign
|
||||
|
||||
if v4.isPresign {
|
||||
v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature
|
||||
} else {
|
||||
parts := []string{
|
||||
authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString,
|
||||
"SignedHeaders=" + v4.signedHeaders,
|
||||
"Signature=" + v4.signature,
|
||||
}
|
||||
v4.Request.Header.Set("Authorization", strings.Join(parts, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
func (v4 *signer) buildTime() {
|
||||
v4.formattedTime = v4.Time.UTC().Format(timeFormat)
|
||||
v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat)
|
||||
|
||||
if v4.isPresign {
|
||||
duration := int64(v4.ExpireTime / time.Second)
|
||||
v4.Query.Set("X-Amz-Date", v4.formattedTime)
|
||||
v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
|
||||
} else {
|
||||
v4.Request.Header.Set("X-Amz-Date", v4.formattedTime)
|
||||
}
|
||||
}
|
||||
|
||||
func (v4 *signer) buildCredentialString() {
|
||||
v4.credentialString = strings.Join([]string{
|
||||
v4.formattedShortTime,
|
||||
v4.Region,
|
||||
v4.ServiceName,
|
||||
"aws4_request",
|
||||
}, "/")
|
||||
|
||||
if v4.isPresign {
|
||||
v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString)
|
||||
}
|
||||
}
|
||||
|
||||
func (v4 *signer) buildQuery() {
|
||||
for k, h := range v4.Request.Header {
|
||||
if strings.HasPrefix(http.CanonicalHeaderKey(k), "X-Amz-") {
|
||||
continue // never hoist x-amz-* headers, they must be signed
|
||||
}
|
||||
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
|
||||
continue // never hoist ignored headers
|
||||
}
|
||||
|
||||
v4.Request.Header.Del(k)
|
||||
v4.Query.Del(k)
|
||||
for _, v := range h {
|
||||
v4.Query.Add(k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (v4 *signer) buildCanonicalHeaders() {
|
||||
var headers []string
|
||||
headers = append(headers, "host")
|
||||
for k := range v4.Request.Header {
|
||||
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
|
||||
continue // ignored header
|
||||
}
|
||||
headers = append(headers, strings.ToLower(k))
|
||||
}
|
||||
sort.Strings(headers)
|
||||
|
||||
v4.signedHeaders = strings.Join(headers, ";")
|
||||
|
||||
if v4.isPresign {
|
||||
v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders)
|
||||
}
|
||||
|
||||
headerValues := make([]string, len(headers))
|
||||
for i, k := range headers {
|
||||
if k == "host" {
|
||||
headerValues[i] = "host:" + v4.Request.URL.Host
|
||||
} else {
|
||||
headerValues[i] = k + ":" +
|
||||
strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",")
|
||||
}
|
||||
}
|
||||
|
||||
v4.canonicalHeaders = strings.Join(headerValues, "\n")
|
||||
}
|
||||
|
||||
func (v4 *signer) buildCanonicalString() {
|
||||
v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1)
|
||||
uri := v4.Request.URL.Opaque
|
||||
if uri != "" {
|
||||
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
|
||||
} else {
|
||||
uri = v4.Request.URL.Path
|
||||
}
|
||||
if uri == "" {
|
||||
uri = "/"
|
||||
}
|
||||
|
||||
if v4.ServiceName != "s3" {
|
||||
uri = rest.EscapePath(uri, false)
|
||||
}
|
||||
|
||||
v4.canonicalString = strings.Join([]string{
|
||||
v4.Request.Method,
|
||||
uri,
|
||||
v4.Request.URL.RawQuery,
|
||||
v4.canonicalHeaders + "\n",
|
||||
v4.signedHeaders,
|
||||
v4.bodyDigest(),
|
||||
}, "\n")
|
||||
}
|
||||
|
||||
func (v4 *signer) buildStringToSign() {
|
||||
v4.stringToSign = strings.Join([]string{
|
||||
authHeaderPrefix,
|
||||
v4.formattedTime,
|
||||
v4.credentialString,
|
||||
hex.EncodeToString(makeSha256([]byte(v4.canonicalString))),
|
||||
}, "\n")
|
||||
}
|
||||
|
||||
func (v4 *signer) buildSignature() {
|
||||
secret := v4.CredValues.SecretAccessKey
|
||||
date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime))
|
||||
region := makeHmac(date, []byte(v4.Region))
|
||||
service := makeHmac(region, []byte(v4.ServiceName))
|
||||
credentials := makeHmac(service, []byte("aws4_request"))
|
||||
signature := makeHmac(credentials, []byte(v4.stringToSign))
|
||||
v4.signature = hex.EncodeToString(signature)
|
||||
}
|
||||
|
||||
func (v4 *signer) bodyDigest() string {
|
||||
hash := v4.Request.Header.Get("X-Amz-Content-Sha256")
|
||||
if hash == "" {
|
||||
if v4.isPresign && v4.ServiceName == "s3" {
|
||||
hash = "UNSIGNED-PAYLOAD"
|
||||
} else if v4.Body == nil {
|
||||
hash = hex.EncodeToString(makeSha256([]byte{}))
|
||||
} else {
|
||||
hash = hex.EncodeToString(makeSha256Reader(v4.Body))
|
||||
}
|
||||
v4.Request.Header.Add("X-Amz-Content-Sha256", hash)
|
||||
}
|
||||
return hash
|
||||
}
|
||||
|
||||
// isRequestSigned returns true if the request is currently signed or presigned.
|
||||
func (v4 *signer) isRequestSigned() bool {
|
||||
if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" {
|
||||
return true
|
||||
}
|
||||
if v4.Request.Header.Get("Authorization") != "" {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// removePresign removes the presigned query parameters from the request.
|
||||
func (v4 *signer) removePresign() {
|
||||
v4.Query.Del("X-Amz-Algorithm")
|
||||
v4.Query.Del("X-Amz-Signature")
|
||||
v4.Query.Del("X-Amz-Security-Token")
|
||||
v4.Query.Del("X-Amz-Date")
|
||||
v4.Query.Del("X-Amz-Expires")
|
||||
v4.Query.Del("X-Amz-Credential")
|
||||
v4.Query.Del("X-Amz-SignedHeaders")
|
||||
}
|
||||
|
||||
func makeHmac(key []byte, data []byte) []byte {
|
||||
hash := hmac.New(sha256.New, key)
|
||||
hash.Write(data)
|
||||
return hash.Sum(nil)
|
||||
}
|
||||
|
||||
func makeSha256(data []byte) []byte {
|
||||
hash := sha256.New()
|
||||
hash.Write(data)
|
||||
return hash.Sum(nil)
|
||||
}
|
||||
|
||||
func makeSha256Reader(reader io.ReadSeeker) []byte {
|
||||
hash := sha256.New()
|
||||
start, _ := reader.Seek(0, 1)
|
||||
defer reader.Seek(start, 0)
|
||||
|
||||
io.Copy(hash, reader)
|
||||
return hash.Sum(nil)
|
||||
}
|
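buildSignature above derives the signing key by chaining HMAC-SHA256 over the short date, the region, the service name and the literal "aws4_request", then signs the string to sign with the derived key. A standalone sketch of that derivation, with hypothetical placeholder inputs in place of real credentials and a real canonical request:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hmacSHA256 mirrors makeHmac above.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// Placeholder inputs; in the signer these come from the credentials,
	// the request time, the service config and buildStringToSign.
	secret := "SECRET"
	shortTime := "19700101"
	region := "us-east-1"
	service := "dynamodb"
	stringToSign := "example-string-to-sign"

	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(shortTime))
	kRegion := hmacSHA256(kDate, []byte(region))
	kService := hmacSHA256(kRegion, []byte(service))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))

	signature := hex.EncodeToString(hmacSHA256(kSigning, []byte(stringToSign)))
	fmt.Println(signature)
}
```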
245
Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4_test.go
generated
vendored
Normal file
@ -0,0 +1,245 @@
|
||||
package v4
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func buildSigner(serviceName string, region string, signTime time.Time, expireTime time.Duration, body string) signer {
|
||||
endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
|
||||
reader := strings.NewReader(body)
|
||||
req, _ := http.NewRequest("POST", endpoint, reader)
|
||||
req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()"
|
||||
req.Header.Add("X-Amz-Target", "prefix.Operation")
|
||||
req.Header.Add("Content-Type", "application/x-amz-json-1.0")
|
||||
req.Header.Add("Content-Length", string(len(body)))
|
||||
req.Header.Add("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)")
|
||||
|
||||
return signer{
|
||||
Request: req,
|
||||
Time: signTime,
|
||||
ExpireTime: expireTime,
|
||||
Query: req.URL.Query(),
|
||||
Body: reader,
|
||||
ServiceName: serviceName,
|
||||
Region: region,
|
||||
Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
|
||||
}
|
||||
}
|
||||
|
||||
func removeWS(text string) string {
|
||||
text = strings.Replace(text, " ", "", -1)
|
||||
text = strings.Replace(text, "\n", "", -1)
|
||||
text = strings.Replace(text, "\t", "", -1)
|
||||
return text
|
||||
}
|
||||
|
||||
func assertEqual(t *testing.T, expected, given string) {
|
||||
if removeWS(expected) != removeWS(given) {
|
||||
t.Errorf("\nExpected: %s\nGiven: %s", expected, given)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPresignRequest(t *testing.T) {
|
||||
signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 300*time.Second, "{}")
|
||||
signer.sign()
|
||||
|
||||
expectedDate := "19700101T000000Z"
|
||||
expectedHeaders := "host;x-amz-meta-other-header;x-amz-target"
|
||||
expectedSig := "5eeedebf6f995145ce56daa02902d10485246d3defb34f97b973c1f40ab82d36"
|
||||
expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request"
|
||||
|
||||
q := signer.Request.URL.Query()
|
||||
assert.Equal(t, expectedSig, q.Get("X-Amz-Signature"))
|
||||
assert.Equal(t, expectedCred, q.Get("X-Amz-Credential"))
|
||||
assert.Equal(t, expectedHeaders, q.Get("X-Amz-SignedHeaders"))
|
||||
assert.Equal(t, expectedDate, q.Get("X-Amz-Date"))
|
||||
}
|
||||
|
||||
func TestSignRequest(t *testing.T) {
|
||||
signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 0, "{}")
|
||||
signer.sign()
|
||||
|
||||
expectedDate := "19700101T000000Z"
|
||||
expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=host;x-amz-date;x-amz-meta-other-header;x-amz-security-token;x-amz-target, Signature=69ada33fec48180dab153576e4dd80c4e04124f80dda3eccfed8a67c2b91ed5e"
|
||||
|
||||
q := signer.Request.Header
|
||||
assert.Equal(t, expectedSig, q.Get("Authorization"))
|
||||
assert.Equal(t, expectedDate, q.Get("X-Amz-Date"))
|
||||
}
|
||||
|
||||
func TestSignEmptyBody(t *testing.T) {
|
||||
signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "")
|
||||
signer.Body = nil
|
||||
signer.sign()
|
||||
hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
|
||||
assert.Equal(t, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hash)
|
||||
}
|
||||
|
||||
func TestSignBody(t *testing.T) {
|
||||
signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello")
|
||||
signer.sign()
|
||||
hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
|
||||
assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash)
|
||||
}
|
||||
|
||||
func TestSignSeekedBody(t *testing.T) {
|
||||
signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, " hello")
|
||||
signer.Body.Read(make([]byte, 3)) // consume first 3 bytes so body is now "hello"
|
||||
signer.sign()
|
||||
hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
|
||||
assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash)
|
||||
|
||||
start, _ := signer.Body.Seek(0, 1)
|
||||
assert.Equal(t, int64(3), start)
|
||||
}
|
||||
|
||||
func TestPresignEmptyBodyS3(t *testing.T) {
|
||||
signer := buildSigner("s3", "us-east-1", time.Now(), 5*time.Minute, "hello")
|
||||
signer.sign()
|
||||
hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
|
||||
assert.Equal(t, "UNSIGNED-PAYLOAD", hash)
|
||||
}
|
||||
|
||||
func TestSignPrecomputedBodyChecksum(t *testing.T) {
|
||||
signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello")
|
||||
signer.Request.Header.Set("X-Amz-Content-Sha256", "PRECOMPUTED")
|
||||
signer.sign()
|
||||
hash := signer.Request.Header.Get("X-Amz-Content-Sha256")
|
||||
assert.Equal(t, "PRECOMPUTED", hash)
|
||||
}
|
||||
|
||||
func TestAnonymousCredentials(t *testing.T) {
|
||||
r := aws.NewRequest(
|
||||
aws.NewService(&aws.Config{Credentials: credentials.AnonymousCredentials}),
|
||||
&aws.Operation{
|
||||
Name: "BatchGetItem",
|
||||
HTTPMethod: "POST",
|
||||
HTTPPath: "/",
|
||||
},
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
Sign(r)
|
||||
|
||||
urlQ := r.HTTPRequest.URL.Query()
|
||||
assert.Empty(t, urlQ.Get("X-Amz-Signature"))
|
||||
assert.Empty(t, urlQ.Get("X-Amz-Credential"))
|
||||
assert.Empty(t, urlQ.Get("X-Amz-SignedHeaders"))
|
||||
assert.Empty(t, urlQ.Get("X-Amz-Date"))
|
||||
|
||||
hQ := r.HTTPRequest.Header
|
||||
assert.Empty(t, hQ.Get("Authorization"))
|
||||
assert.Empty(t, hQ.Get("X-Amz-Date"))
|
||||
}
|
||||
|
||||
func TestIgnoreResignRequestWithValidCreds(t *testing.T) {
|
||||
r := aws.NewRequest(
|
||||
aws.NewService(&aws.Config{
|
||||
Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
|
||||
Region: aws.String("us-west-2"),
|
||||
}),
|
||||
&aws.Operation{
|
||||
Name: "BatchGetItem",
|
||||
HTTPMethod: "POST",
|
||||
HTTPPath: "/",
|
||||
},
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
Sign(r)
|
||||
sig := r.HTTPRequest.Header.Get("Authorization")
|
||||
|
||||
Sign(r)
|
||||
assert.Equal(t, sig, r.HTTPRequest.Header.Get("Authorization"))
|
||||
}
|
||||
|
||||
func TestIgnorePreResignRequestWithValidCreds(t *testing.T) {
|
||||
r := aws.NewRequest(
|
||||
aws.NewService(&aws.Config{
|
||||
Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"),
|
||||
Region: aws.String("us-west-2"),
|
||||
}),
|
||||
&aws.Operation{
|
||||
Name: "BatchGetItem",
|
||||
HTTPMethod: "POST",
|
||||
HTTPPath: "/",
|
||||
},
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
r.ExpireTime = time.Minute * 10
|
||||
|
||||
Sign(r)
|
||||
sig := r.HTTPRequest.Header.Get("X-Amz-Signature")
|
||||
|
||||
Sign(r)
|
||||
assert.Equal(t, sig, r.HTTPRequest.Header.Get("X-Amz-Signature"))
|
||||
}
|
||||
|
||||
func TestResignRequestExpiredCreds(t *testing.T) {
|
||||
creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION")
|
||||
r := aws.NewRequest(
|
||||
aws.NewService(&aws.Config{Credentials: creds}),
|
||||
&aws.Operation{
|
||||
Name: "BatchGetItem",
|
||||
HTTPMethod: "POST",
|
||||
HTTPPath: "/",
|
||||
},
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
Sign(r)
|
||||
querySig := r.HTTPRequest.Header.Get("Authorization")
|
||||
|
||||
creds.Expire()
|
||||
|
||||
Sign(r)
|
||||
assert.NotEqual(t, querySig, r.HTTPRequest.Header.Get("Authorization"))
|
||||
}
|
||||
|
||||
func TestPreResignRequestExpiredCreds(t *testing.T) {
|
||||
provider := &credentials.StaticProvider{credentials.Value{"AKID", "SECRET", "SESSION"}}
|
||||
creds := credentials.NewCredentials(provider)
|
||||
r := aws.NewRequest(
|
||||
aws.NewService(&aws.Config{Credentials: creds}),
|
||||
&aws.Operation{
|
||||
Name: "BatchGetItem",
|
||||
HTTPMethod: "POST",
|
||||
HTTPPath: "/",
|
||||
},
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
r.ExpireTime = time.Minute * 10
|
||||
|
||||
Sign(r)
|
||||
querySig := r.HTTPRequest.URL.Query().Get("X-Amz-Signature")
|
||||
|
||||
creds.Expire()
|
||||
r.Time = time.Now().Add(time.Hour * 48)
|
||||
|
||||
Sign(r)
|
||||
assert.NotEqual(t, querySig, r.HTTPRequest.URL.Query().Get("X-Amz-Signature"))
|
||||
}
|
||||
|
||||
func BenchmarkPresignRequest(b *testing.B) {
|
||||
signer := buildSigner("dynamodb", "us-east-1", time.Now(), 300*time.Second, "{}")
|
||||
for i := 0; i < b.N; i++ {
|
||||
signer.sign()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSignRequest(b *testing.B) {
|
||||
signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "{}")
|
||||
for i := 0; i < b.N; i++ {
|
||||
signer.sign()
|
||||
}
|
||||
}
|
1478
Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/api.go
generated
vendored
Normal file
File diff suppressed because it is too large
62
Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
|
||||
|
||||
// Package cloudwatchiface provides an interface for the Amazon CloudWatch service.
|
||||
package cloudwatchiface
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/cloudwatch"
|
||||
)
|
||||
|
||||
// CloudWatchAPI is the interface type for cloudwatch.CloudWatch.
|
||||
type CloudWatchAPI interface {
|
||||
DeleteAlarmsRequest(*cloudwatch.DeleteAlarmsInput) (*aws.Request, *cloudwatch.DeleteAlarmsOutput)
|
||||
|
||||
DeleteAlarms(*cloudwatch.DeleteAlarmsInput) (*cloudwatch.DeleteAlarmsOutput, error)
|
||||
|
||||
DescribeAlarmHistoryRequest(*cloudwatch.DescribeAlarmHistoryInput) (*aws.Request, *cloudwatch.DescribeAlarmHistoryOutput)
|
||||
|
||||
DescribeAlarmHistory(*cloudwatch.DescribeAlarmHistoryInput) (*cloudwatch.DescribeAlarmHistoryOutput, error)
|
||||
|
||||
DescribeAlarmHistoryPages(*cloudwatch.DescribeAlarmHistoryInput, func(*cloudwatch.DescribeAlarmHistoryOutput, bool) bool) error
|
||||
|
||||
DescribeAlarmsRequest(*cloudwatch.DescribeAlarmsInput) (*aws.Request, *cloudwatch.DescribeAlarmsOutput)
|
||||
|
||||
DescribeAlarms(*cloudwatch.DescribeAlarmsInput) (*cloudwatch.DescribeAlarmsOutput, error)
|
||||
|
||||
DescribeAlarmsPages(*cloudwatch.DescribeAlarmsInput, func(*cloudwatch.DescribeAlarmsOutput, bool) bool) error
|
||||
|
||||
DescribeAlarmsForMetricRequest(*cloudwatch.DescribeAlarmsForMetricInput) (*aws.Request, *cloudwatch.DescribeAlarmsForMetricOutput)
|
||||
|
||||
DescribeAlarmsForMetric(*cloudwatch.DescribeAlarmsForMetricInput) (*cloudwatch.DescribeAlarmsForMetricOutput, error)
|
||||
|
||||
DisableAlarmActionsRequest(*cloudwatch.DisableAlarmActionsInput) (*aws.Request, *cloudwatch.DisableAlarmActionsOutput)
|
||||
|
||||
DisableAlarmActions(*cloudwatch.DisableAlarmActionsInput) (*cloudwatch.DisableAlarmActionsOutput, error)
|
||||
|
||||
EnableAlarmActionsRequest(*cloudwatch.EnableAlarmActionsInput) (*aws.Request, *cloudwatch.EnableAlarmActionsOutput)
|
||||
|
||||
EnableAlarmActions(*cloudwatch.EnableAlarmActionsInput) (*cloudwatch.EnableAlarmActionsOutput, error)
|
||||
|
||||
GetMetricStatisticsRequest(*cloudwatch.GetMetricStatisticsInput) (*aws.Request, *cloudwatch.GetMetricStatisticsOutput)
|
||||
|
||||
GetMetricStatistics(*cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error)
|
||||
|
||||
ListMetricsRequest(*cloudwatch.ListMetricsInput) (*aws.Request, *cloudwatch.ListMetricsOutput)
|
||||
|
||||
ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error)
|
||||
|
||||
ListMetricsPages(*cloudwatch.ListMetricsInput, func(*cloudwatch.ListMetricsOutput, bool) bool) error
|
||||
|
||||
PutMetricAlarmRequest(*cloudwatch.PutMetricAlarmInput) (*aws.Request, *cloudwatch.PutMetricAlarmOutput)
|
||||
|
||||
PutMetricAlarm(*cloudwatch.PutMetricAlarmInput) (*cloudwatch.PutMetricAlarmOutput, error)
|
||||
|
||||
PutMetricDataRequest(*cloudwatch.PutMetricDataInput) (*aws.Request, *cloudwatch.PutMetricDataOutput)
|
||||
|
||||
PutMetricData(*cloudwatch.PutMetricDataInput) (*cloudwatch.PutMetricDataOutput, error)
|
||||
|
||||
SetAlarmStateRequest(*cloudwatch.SetAlarmStateInput) (*aws.Request, *cloudwatch.SetAlarmStateOutput)
|
||||
|
||||
SetAlarmState(*cloudwatch.SetAlarmStateInput) (*cloudwatch.SetAlarmStateOutput, error)
|
||||
}
|
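The interface exists so application code can depend on CloudWatchAPI rather than the concrete client, which makes it easy to substitute a stub in tests. A hypothetical sketch of that pattern (the metricLister and stubCloudWatch names are invented; the output field names follow the generated API above):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/cloudwatch"
	"github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface"
)

// metricLister is hypothetical application code that depends only on the interface.
type metricLister struct {
	cw cloudwatchiface.CloudWatchAPI
}

func (m *metricLister) count(namespace string) (int, error) {
	out, err := m.cw.ListMetrics(&cloudwatch.ListMetricsInput{Namespace: &namespace})
	if err != nil {
		return 0, err
	}
	return len(out.Metrics), nil
}

// stubCloudWatch embeds the interface so a test only implements what it needs;
// calling any other method on the zero value would panic.
type stubCloudWatch struct {
	cloudwatchiface.CloudWatchAPI
}

func (stubCloudWatch) ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) {
	return &cloudwatch.ListMetricsOutput{Metrics: []*cloudwatch.Metric{{}, {}}}, nil
}

func main() {
	l := &metricLister{cw: stubCloudWatch{}}
	n, _ := l.count("AWS/EC2")
	fmt.Println(n) // 2
}
```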
15
Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface/interface_test.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
|
||||
|
||||
package cloudwatchiface_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/cloudwatch"
|
||||
"github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestInterface(t *testing.T) {
|
||||
assert.Implements(t, (*cloudwatchiface.CloudWatchAPI)(nil), cloudwatch.New(nil))
|
||||
}
|
426
Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/examples_test.go
generated
vendored
Normal file
@ -0,0 +1,426 @@
|
||||
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
|
||||
|
||||
package cloudwatch_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
"github.com/aws/aws-sdk-go/service/cloudwatch"
|
||||
)
|
||||
|
||||
var _ time.Duration
|
||||
var _ bytes.Buffer
|
||||
|
||||
func ExampleCloudWatch_DeleteAlarms() {
|
||||
svc := cloudwatch.New(nil)
|
||||
|
||||
params := &cloudwatch.DeleteAlarmsInput{
|
||||
AlarmNames: []*string{ // Required
|
||||
aws.String("AlarmName"), // Required
|
||||
// More values...
|
||||
},
|
||||
}
|
||||
resp, err := svc.DeleteAlarms(params)
|
||||
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
// Generic AWS error with Code, Message, and original error (if any)
|
||||
fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
|
||||
if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// A service error occurred
|
||||
fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
|
||||
}
|
||||
} else {
|
||||
// This case should never be hit, the SDK should always return an
|
||||
// error which satisfies the awserr.Error interface.
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Pretty-print the response data.
|
||||
fmt.Println(awsutil.Prettify(resp))
|
||||
}
|
||||
|
||||
func ExampleCloudWatch_DescribeAlarmHistory() {
|
||||
svc := cloudwatch.New(nil)
|
||||
|
||||
params := &cloudwatch.DescribeAlarmHistoryInput{
|
||||
AlarmName: aws.String("AlarmName"),
|
||||
EndDate: aws.Time(time.Now()),
|
||||
HistoryItemType: aws.String("HistoryItemType"),
|
||||
MaxRecords: aws.Int64(1),
|
||||
NextToken: aws.String("NextToken"),
|
||||
StartDate: aws.Time(time.Now()),
|
||||
}
|
||||
resp, err := svc.DescribeAlarmHistory(params)
|
||||
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
// Generic AWS error with Code, Message, and original error (if any)
|
||||
fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
|
||||
if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// A service error occurred
|
||||
fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
|
||||
}
|
||||
} else {
|
||||
// This case should never be hit, the SDK should always return an
|
||||
// error which satisfies the awserr.Error interface.
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Pretty-print the response data.
|
||||
fmt.Println(awsutil.Prettify(resp))
|
||||
}
|
||||
|
||||
func ExampleCloudWatch_DescribeAlarms() {
|
||||
svc := cloudwatch.New(nil)
|
||||
|
||||
params := &cloudwatch.DescribeAlarmsInput{
|
||||
ActionPrefix: aws.String("ActionPrefix"),
|
||||
AlarmNamePrefix: aws.String("AlarmNamePrefix"),
|
||||
AlarmNames: []*string{
|
||||
aws.String("AlarmName"), // Required
|
||||
// More values...
|
||||
},
|
||||
MaxRecords: aws.Int64(1),
|
||||
NextToken: aws.String("NextToken"),
|
||||
StateValue: aws.String("StateValue"),
|
||||
}
|
||||
resp, err := svc.DescribeAlarms(params)
|
||||
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
// Generic AWS error with Code, Message, and original error (if any)
|
||||
fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
|
||||
if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// A service error occurred
|
||||
fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
|
||||
}
|
||||
} else {
|
||||
// This case should never be hit, the SDK should always return an
|
||||
// error which satisfies the awserr.Error interface.
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Pretty-print the response data.
|
||||
fmt.Println(awsutil.Prettify(resp))
|
||||
}
|
||||
|
||||
func ExampleCloudWatch_DescribeAlarmsForMetric() {
|
||||
svc := cloudwatch.New(nil)
|
||||
|
||||
params := &cloudwatch.DescribeAlarmsForMetricInput{
|
||||
MetricName: aws.String("MetricName"), // Required
|
||||
Namespace: aws.String("Namespace"), // Required
|
||||
Dimensions: []*cloudwatch.Dimension{
|
||||
{ // Required
|
||||
Name: aws.String("DimensionName"), // Required
|
||||
Value: aws.String("DimensionValue"), // Required
|
||||
},
|
||||
// More values...
|
||||
},
|
||||
Period: aws.Int64(1),
|
||||
Statistic: aws.String("Statistic"),
|
||||
Unit: aws.String("StandardUnit"),
|
||||
}
|
||||
resp, err := svc.DescribeAlarmsForMetric(params)
|
||||
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
// Generic AWS error with Code, Message, and original error (if any)
|
||||
fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
|
||||
if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// A service error occurred
|
||||
fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
|
||||
}
|
||||
} else {
|
||||
// This case should never be hit, the SDK should always return an
|
||||
// error which satisfies the awserr.Error interface.
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Pretty-print the response data.
|
||||
fmt.Println(awsutil.Prettify(resp))
|
||||
}
|
||||
|
||||
func ExampleCloudWatch_DisableAlarmActions() {
|
||||
svc := cloudwatch.New(nil)
|
||||
|
||||
params := &cloudwatch.DisableAlarmActionsInput{
|
||||
AlarmNames: []*string{ // Required
|
||||
aws.String("AlarmName"), // Required
|
||||
// More values...
|
||||
},
|
||||
}
|
||||
resp, err := svc.DisableAlarmActions(params)
|
||||
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
// Generic AWS error with Code, Message, and original error (if any)
|
||||
fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
|
||||
if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// A service error occurred
|
||||
fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
|
||||
}
|
||||
} else {
|
||||
// This case should never be hit, the SDK should always return an
|
||||
// error which satisfies the awserr.Error interface.
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Pretty-print the response data.
|
||||
fmt.Println(awsutil.Prettify(resp))
|
||||
}
|
||||
|
||||
func ExampleCloudWatch_EnableAlarmActions() {
|
||||
svc := cloudwatch.New(nil)
|
||||
|
||||
params := &cloudwatch.EnableAlarmActionsInput{
|
||||
AlarmNames: []*string{ // Required
|
||||
aws.String("AlarmName"), // Required
|
||||
// More values...
|
||||
},
|
||||
}
|
||||
resp, err := svc.EnableAlarmActions(params)
|
||||
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
// Generic AWS error with Code, Message, and original error (if any)
|
||||
fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
|
||||
if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// A service error occurred
|
||||
fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
|
||||
}
|
||||
} else {
|
||||
// This case should never be hit, the SDK should always return an
|
||||
// error which satisfies the awserr.Error interface.
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Pretty-print the response data.
|
||||
fmt.Println(awsutil.Prettify(resp))
|
||||
}
|
||||
|
||||
func ExampleCloudWatch_GetMetricStatistics() {
|
||||
svc := cloudwatch.New(nil)
|
||||
|
||||
params := &cloudwatch.GetMetricStatisticsInput{
|
||||
EndTime: aws.Time(time.Now()), // Required
|
||||
MetricName: aws.String("MetricName"), // Required
|
||||
Namespace: aws.String("Namespace"), // Required
|
||||
Period: aws.Int64(1), // Required
|
||||
StartTime: aws.Time(time.Now()), // Required
|
||||
Statistics: []*string{ // Required
|
||||
aws.String("Statistic"), // Required
|
||||
// More values...
|
||||
},
|
||||
Dimensions: []*cloudwatch.Dimension{
|
||||
{ // Required
|
||||
Name: aws.String("DimensionName"), // Required
|
||||
Value: aws.String("DimensionValue"), // Required
|
||||
},
|
||||
// More values...
|
||||
},
|
||||
Unit: aws.String("StandardUnit"),
|
||||
}
|
||||
resp, err := svc.GetMetricStatistics(params)
|
||||
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
// Generic AWS error with Code, Message, and original error (if any)
|
||||
fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
|
||||
if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// A service error occurred
|
||||
fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
|
||||
}
|
||||
} else {
|
||||
// This case should never be hit, the SDK should always return an
|
||||
// error which satisfies the awserr.Error interface.
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Pretty-print the response data.
|
||||
fmt.Println(awsutil.Prettify(resp))
|
||||
}
|
||||
|
||||
func ExampleCloudWatch_ListMetrics() {
|
||||
svc := cloudwatch.New(nil)
|
||||
|
||||
params := &cloudwatch.ListMetricsInput{
|
||||
Dimensions: []*cloudwatch.DimensionFilter{
|
||||
{ // Required
|
||||
Name: aws.String("DimensionName"), // Required
|
||||
Value: aws.String("DimensionValue"),
|
||||
},
|
||||
// More values...
|
||||
},
|
||||
MetricName: aws.String("MetricName"),
|
||||
Namespace: aws.String("Namespace"),
|
||||
NextToken: aws.String("NextToken"),
|
||||
}
|
||||
resp, err := svc.ListMetrics(params)
|
||||
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
// Generic AWS error with Code, Message, and original error (if any)
|
||||
fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
|
||||
if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// A service error occurred
|
||||
fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
|
||||
}
|
||||
} else {
|
||||
// This case should never be hit, the SDK should always return an
|
||||
// error which satisfies the awserr.Error interface.
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Pretty-print the response data.
|
||||
fmt.Println(awsutil.Prettify(resp))
|
||||
}
|
||||
|
||||
func ExampleCloudWatch_PutMetricAlarm() {
|
||||
svc := cloudwatch.New(nil)
|
||||
|
||||
params := &cloudwatch.PutMetricAlarmInput{
|
||||
AlarmName: aws.String("AlarmName"), // Required
|
||||
ComparisonOperator: aws.String("ComparisonOperator"), // Required
|
||||
EvaluationPeriods: aws.Int64(1), // Required
|
||||
MetricName: aws.String("MetricName"), // Required
|
||||
Namespace: aws.String("Namespace"), // Required
|
||||
Period: aws.Int64(1), // Required
|
||||
Statistic: aws.String("Statistic"), // Required
|
||||
Threshold: aws.Float64(1.0), // Required
|
||||
ActionsEnabled: aws.Bool(true),
|
||||
AlarmActions: []*string{
|
||||
aws.String("ResourceName"), // Required
|
||||
// More values...
|
||||
},
|
||||
AlarmDescription: aws.String("AlarmDescription"),
|
||||
Dimensions: []*cloudwatch.Dimension{
|
||||
{ // Required
|
||||
Name: aws.String("DimensionName"), // Required
|
||||
Value: aws.String("DimensionValue"), // Required
|
||||
},
|
||||
// More values...
|
||||
},
|
||||
InsufficientDataActions: []*string{
|
||||
aws.String("ResourceName"), // Required
|
||||
// More values...
|
||||
},
|
||||
OKActions: []*string{
|
||||
aws.String("ResourceName"), // Required
|
||||
// More values...
|
||||
},
|
||||
Unit: aws.String("StandardUnit"),
|
||||
}
|
||||
resp, err := svc.PutMetricAlarm(params)
|
||||
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
// Generic AWS error with Code, Message, and original error (if any)
|
||||
fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
|
||||
if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// A service error occurred
|
||||
fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
|
||||
}
|
||||
} else {
|
||||
// This case should never be hit, the SDK should always return an
|
||||
// error which satisfies the awserr.Error interface.
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Pretty-print the response data.
|
||||
fmt.Println(awsutil.Prettify(resp))
|
||||
}
|
||||
|
||||
func ExampleCloudWatch_PutMetricData() {
|
||||
svc := cloudwatch.New(nil)
|
||||
|
||||
params := &cloudwatch.PutMetricDataInput{
|
||||
MetricData: []*cloudwatch.MetricDatum{ // Required
|
||||
{ // Required
|
||||
MetricName: aws.String("MetricName"), // Required
|
||||
Dimensions: []*cloudwatch.Dimension{
|
||||
{ // Required
|
||||
Name: aws.String("DimensionName"), // Required
|
||||
Value: aws.String("DimensionValue"), // Required
|
||||
},
|
||||
// More values...
|
||||
},
|
||||
StatisticValues: &cloudwatch.StatisticSet{
|
||||
Maximum: aws.Float64(1.0), // Required
|
||||
Minimum: aws.Float64(1.0), // Required
|
||||
SampleCount: aws.Float64(1.0), // Required
|
||||
Sum: aws.Float64(1.0), // Required
|
||||
},
|
||||
Timestamp: aws.Time(time.Now()),
|
||||
Unit: aws.String("StandardUnit"),
|
||||
Value: aws.Float64(1.0),
|
||||
},
|
||||
// More values...
|
||||
},
|
||||
Namespace: aws.String("Namespace"), // Required
|
||||
}
|
||||
resp, err := svc.PutMetricData(params)
|
||||
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
// Generic AWS error with Code, Message, and original error (if any)
|
||||
fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
|
||||
if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// A service error occurred
|
||||
fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
|
||||
}
|
||||
} else {
|
||||
// This case should never be hit, the SDK should always return an
|
||||
// error which satisfies the awserr.Error interface.
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Pretty-print the response data.
|
||||
fmt.Println(awsutil.Prettify(resp))
|
||||
}
|
||||
|
||||
func ExampleCloudWatch_SetAlarmState() {
|
||||
svc := cloudwatch.New(nil)
|
||||
|
||||
params := &cloudwatch.SetAlarmStateInput{
|
||||
AlarmName: aws.String("AlarmName"), // Required
|
||||
StateReason: aws.String("StateReason"), // Required
|
||||
StateValue: aws.String("StateValue"), // Required
|
||||
StateReasonData: aws.String("StateReasonData"),
|
||||
}
|
||||
resp, err := svc.SetAlarmState(params)
|
||||
|
||||
if err != nil {
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
// Generic AWS error with Code, Message, and original error (if any)
|
||||
fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
|
||||
if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// A service error occurred
|
||||
fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
|
||||
}
|
||||
} else {
|
||||
// This case should never be hit, the SDK should always return an
|
||||
// error which satisfies the awserr.Error interface.
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Pretty-print the response data.
|
||||
fmt.Println(awsutil.Prettify(resp))
|
||||
}
|
96
Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudwatch/service.go
generated
vendored
Normal file
@ -0,0 +1,96 @@
|
||||
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
|
||||
|
||||
package cloudwatch
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/internal/protocol/query"
|
||||
"github.com/aws/aws-sdk-go/internal/signer/v4"
|
||||
)
|
||||
|
||||
// This is the Amazon CloudWatch API Reference. This guide provides detailed
|
||||
// information about Amazon CloudWatch actions, data types, parameters, and
|
||||
// errors. For detailed information about Amazon CloudWatch features and their
|
||||
// associated API calls, go to the Amazon CloudWatch Developer Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide).
|
||||
//
|
||||
// Amazon CloudWatch is a web service that enables you to publish, monitor,
|
||||
// and manage various metrics, as well as configure alarm actions based on data
|
||||
// from metrics. For more information about this product go to http://aws.amazon.com/cloudwatch
|
||||
// (http://aws.amazon.com/cloudwatch).
|
||||
//
|
||||
// For information about the namespace, metric names, and dimensions that
|
||||
// other Amazon Web Services products use to send metrics to CloudWatch, go
|
||||
// to Amazon CloudWatch Metrics, Namespaces, and Dimensions Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html)
|
||||
// in the Amazon CloudWatch Developer Guide.
|
||||
//
|
||||
// Use the following links to get started using the Amazon CloudWatch API Reference:
|
||||
//
|
||||
// Actions (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_Operations.html):
|
||||
// An alphabetical list of all Amazon CloudWatch actions. Data Types (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_Types.html):
|
||||
// An alphabetical list of all Amazon CloudWatch data types. Common Parameters
|
||||
// (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CommonParameters.html):
|
||||
// Parameters that all Query actions can use. Common Errors (http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CommonErrors.html):
|
||||
// Client and server errors that all actions can return. Regions and Endpoints
|
||||
// (http://docs.aws.amazon.com/general/latest/gr/index.html?rande.html): Itemized
|
||||
// regions and endpoints for all AWS products. WSDL Location (http://monitoring.amazonaws.com/doc/2010-08-01/CloudWatch.wsdl):
|
||||
// http://monitoring.amazonaws.com/doc/2010-08-01/CloudWatch.wsdl In addition
|
||||
// to using the Amazon CloudWatch API, you can also use the following SDKs and
|
||||
// third-party libraries to access Amazon CloudWatch programmatically.
|
||||
//
|
||||
// AWS SDK for Java Documentation (http://aws.amazon.com/documentation/sdkforjava/)
|
||||
// AWS SDK for .NET Documentation (http://aws.amazon.com/documentation/sdkfornet/)
|
||||
// AWS SDK for PHP Documentation (http://aws.amazon.com/documentation/sdkforphp/)
|
||||
// AWS SDK for Ruby Documentation (http://aws.amazon.com/documentation/sdkforruby/)
|
||||
// Developers in the AWS developer community also provide their own libraries,
|
||||
// which you can find at the following AWS developer centers:
|
||||
//
|
||||
// AWS Java Developer Center (http://aws.amazon.com/java/) AWS PHP Developer
|
||||
// Center (http://aws.amazon.com/php/) AWS Python Developer Center (http://aws.amazon.com/python/)
|
||||
// AWS Ruby Developer Center (http://aws.amazon.com/ruby/) AWS Windows and .NET
|
||||
// Developer Center (http://aws.amazon.com/net/)
|
||||
type CloudWatch struct {
|
||||
*aws.Service
|
||||
}
|
||||
|
||||
// Used for custom service initialization logic
|
||||
var initService func(*aws.Service)
|
||||
|
||||
// Used for custom request initialization logic
|
||||
var initRequest func(*aws.Request)
|
||||
|
||||
// New returns a new CloudWatch client.
|
||||
func New(config *aws.Config) *CloudWatch {
|
||||
service := &aws.Service{
|
||||
Config: aws.DefaultConfig.Merge(config),
|
||||
ServiceName: "monitoring",
|
||||
APIVersion: "2010-08-01",
|
||||
}
|
||||
service.Initialize()
|
||||
|
||||
// Handlers
|
||||
service.Handlers.Sign.PushBack(v4.Sign)
|
||||
service.Handlers.Build.PushBack(query.Build)
|
||||
service.Handlers.Unmarshal.PushBack(query.Unmarshal)
|
||||
service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
|
||||
service.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
|
||||
|
||||
// Run custom service initialization if present
|
||||
if initService != nil {
|
||||
initService(service)
|
||||
}
|
||||
|
||||
return &CloudWatch{service}
|
||||
}
|
||||
|
||||
// newRequest creates a new request for a CloudWatch operation and runs any
|
||||
// custom request initialization.
|
||||
func (c *CloudWatch) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
|
||||
req := aws.NewRequest(c.Service, op, params, data)
|
||||
|
||||
// Run custom request initialization if present
|
||||
if initRequest != nil {
|
||||
initRequest(req)
|
||||
}
|
||||
|
||||
return req
|
||||
}
|
14
Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
Copyright (c) 2013 Vaughan Newton
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
|
||||
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
|
||||
persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
|
||||
Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
|
||||
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
70
Godeps/_workspace/src/github.com/vaughan0/go-ini/README.md
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
go-ini
|
||||
======
|
||||
|
||||
INI parsing library for Go (golang).
|
||||
|
||||
View the API documentation [here](http://godoc.org/github.com/vaughan0/go-ini).
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
Parse an INI file:
|
||||
|
||||
```go
|
||||
import "github.com/vaughan0/go-ini"
|
||||
|
||||
file, err := ini.LoadFile("myfile.ini")
|
||||
```
|
||||
|
||||
Get data from the parsed file:
|
||||
|
||||
```go
|
||||
name, ok := file.Get("person", "name")
|
||||
if !ok {
|
||||
panic("'name' variable missing from 'person' section")
|
||||
}
|
||||
```
|
||||
|
||||
Iterate through values in a section:
|
||||
|
||||
```go
|
||||
for key, value := range file["mysection"] {
|
||||
fmt.Printf("%s => %s\n", key, value)
|
||||
}
|
||||
```
|
||||
|
||||
Iterate through sections in a file:
|
||||
|
||||
```go
|
||||
for name, section := range file {
|
||||
fmt.Printf("Section name: %s\n", name)
|
||||
}
|
||||
```
|
||||
|
||||
File Format
|
||||
-----------
|
||||
|
||||
INI files are parsed by go-ini line-by-line. Each line may be one of the following:
|
||||
|
||||
* A section definition: [section-name]
|
||||
* A property: key = value
|
||||
* A comment: #blahblah _or_ ;blahblah
|
||||
* Blank. The line will be ignored.
|
||||
|
||||
Properties defined before any section headers are placed in the default section, which has
|
||||
the empty string as its key.
|
||||
|
||||
Example:
|
||||
|
||||
```ini
|
||||
# I am a comment
|
||||
; So am I!
|
||||
|
||||
[apples]
|
||||
colour = red or green
|
||||
shape = applish
|
||||
|
||||
[oranges]
|
||||
shape = square
|
||||
colour = blue
|
||||
```
|
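A minimal sketch tying the pieces above together, parsing the example from an in-memory string with Load instead of LoadFile (nothing beyond the functions shown in this library is assumed):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/vaughan0/go-ini"
)

func main() {
	data := "# I am a comment\n[apples]\ncolour = red or green\nshape = applish\n"

	file, err := ini.Load(strings.NewReader(data))
	if err != nil {
		panic(err)
	}

	colour, ok := file.Get("apples", "colour")
	fmt.Println(colour, ok) // "red or green true"
}
```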
123
Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
// Package ini provides functions for parsing INI configuration files.
|
||||
package ini
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
sectionRegex = regexp.MustCompile(`^\[(.*)\]$`)
|
||||
assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
|
||||
)
|
||||
|
||||
// ErrSyntax is returned when there is a syntax error in an INI file.
|
||||
type ErrSyntax struct {
|
||||
Line int
|
||||
Source string // The contents of the erroneous line, without leading or trailing whitespace
|
||||
}
|
||||
|
||||
func (e ErrSyntax) Error() string {
|
||||
return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source)
|
||||
}
|
||||
|
||||
// A File represents a parsed INI file.
|
||||
type File map[string]Section
|
||||
|
||||
// A Section represents a single section of an INI file.
|
||||
type Section map[string]string
|
||||
|
||||
// Returns a named Section. A Section will be created if one does not already exist for the given name.
|
||||
func (f File) Section(name string) Section {
|
||||
section := f[name]
|
||||
if section == nil {
|
||||
section = make(Section)
|
||||
f[name] = section
|
||||
}
|
||||
return section
|
||||
}
|
||||
|
||||
// Looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup.
|
||||
func (f File) Get(section, key string) (value string, ok bool) {
|
||||
if s := f[section]; s != nil {
|
||||
value, ok = s[key]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Loads INI data from a reader and stores the data in the File.
|
||||
func (f File) Load(in io.Reader) (err error) {
|
||||
bufin, ok := in.(*bufio.Reader)
|
||||
if !ok {
|
||||
bufin = bufio.NewReader(in)
|
||||
}
|
||||
return parseFile(bufin, f)
|
||||
}
|
||||
|
||||
// Loads INI data from a named file and stores the data in the File.
|
||||
func (f File) LoadFile(file string) (err error) {
|
||||
in, err := os.Open(file)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer in.Close()
|
||||
return f.Load(in)
|
||||
}
|
||||
|
||||
func parseFile(in *bufio.Reader, file File) (err error) {
|
||||
section := ""
|
||||
lineNum := 0
|
||||
for done := false; !done; {
|
||||
var line string
|
||||
if line, err = in.ReadString('\n'); err != nil {
|
||||
if err == io.EOF {
|
||||
done = true
|
||||
} else {
|
||||
return
|
||||
}
|
||||
}
|
||||
lineNum++
|
||||
line = strings.TrimSpace(line)
|
||||
if len(line) == 0 {
|
||||
// Skip blank lines
|
||||
continue
|
||||
}
|
||||
if line[0] == ';' || line[0] == '#' {
|
||||
// Skip comments
|
||||
continue
|
||||
}
|
||||
|
||||
if groups := assignRegex.FindStringSubmatch(line); groups != nil {
|
||||
key, val := groups[1], groups[2]
|
||||
key, val = strings.TrimSpace(key), strings.TrimSpace(val)
|
||||
file.Section(section)[key] = val
|
||||
} else if groups := sectionRegex.FindStringSubmatch(line); groups != nil {
|
||||
name := strings.TrimSpace(groups[1])
|
||||
section = name
|
||||
// Create the section if it does not exist
|
||||
file.Section(section)
|
||||
} else {
|
||||
return ErrSyntax{lineNum, line}
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Loads and returns a File from a reader.
|
||||
func Load(in io.Reader) (File, error) {
|
||||
file := make(File)
|
||||
err := file.Load(in)
|
||||
return file, err
|
||||
}
|
||||
|
||||
// Loads and returns an INI File from a file on disk.
|
||||
func LoadFile(filename string) (File, error) {
|
||||
file := make(File)
|
||||
err := file.LoadFile(filename)
|
||||
return file, err
|
||||
}
|
43
Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
package ini
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"syscall"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLoadFile(t *testing.T) {
|
||||
originalOpenFiles := numFilesOpen(t)
|
||||
|
||||
file, err := LoadFile("test.ini")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if originalOpenFiles != numFilesOpen(t) {
|
||||
t.Error("test.ini not closed")
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(file, File{"default": {"stuff": "things"}}) {
|
||||
t.Error("file not read correctly")
|
||||
}
|
||||
}
|
||||
|
||||
func numFilesOpen(t *testing.T) (num uint64) {
|
||||
var rlimit syscall.Rlimit
|
||||
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
maxFds := int(rlimit.Cur)
|
||||
|
||||
var stat syscall.Stat_t
|
||||
for i := 0; i < maxFds; i++ {
|
||||
if syscall.Fstat(i, &stat) == nil {
|
||||
num++
|
||||
} else {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
89
Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
|
||||
package ini
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLoad(t *testing.T) {
|
||||
src := `
|
||||
# Comments are ignored
|
||||
|
||||
herp = derp
|
||||
|
||||
[foo]
|
||||
hello=world
|
||||
whitespace should = not matter
|
||||
; sneaky semicolon-style comment
|
||||
multiple = equals = signs
|
||||
|
||||
[bar]
|
||||
this = that`
|
||||
|
||||
file, err := Load(strings.NewReader(src))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
check := func(section, key, expect string) {
|
||||
if value, _ := file.Get(section, key); value != expect {
|
||||
t.Errorf("Get(%q, %q): expected %q, got %q", section, key, expect, value)
|
||||
}
|
||||
}
|
||||
|
||||
check("", "herp", "derp")
|
||||
check("foo", "hello", "world")
|
||||
check("foo", "whitespace should", "not matter")
|
||||
check("foo", "multiple", "equals = signs")
|
||||
check("bar", "this", "that")
|
||||
}
|
||||
|
||||
func TestSyntaxError(t *testing.T) {
|
||||
src := `
|
||||
# Line 2
|
||||
[foo]
|
||||
bar = baz
|
||||
# Here's an error on line 6:
|
||||
wut?
|
||||
herp = derp`
|
||||
_, err := Load(strings.NewReader(src))
|
||||
t.Logf("%T: %v", err, err)
|
||||
if err == nil {
|
||||
t.Fatal("expected an error, got nil")
|
||||
}
|
||||
syntaxErr, ok := err.(ErrSyntax)
|
||||
if !ok {
|
||||
t.Fatal("expected an error of type ErrSyntax")
|
||||
}
|
||||
if syntaxErr.Line != 6 {
|
||||
t.Fatal("incorrect line number")
|
||||
}
|
||||
if syntaxErr.Source != "wut?" {
|
||||
t.Fatal("incorrect source")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefinedSectionBehaviour(t *testing.T) {
|
||||
check := func(src string, expect File) {
|
||||
file, err := Load(strings.NewReader(src))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(file, expect) {
|
||||
t.Errorf("expected %v, got %v", expect, file)
|
||||
}
|
||||
}
|
||||
// No sections for an empty file
|
||||
check("", File{})
|
||||
// Default section only if there are actually values for it
|
||||
check("foo=bar", File{"": {"foo": "bar"}})
|
||||
// User-defined sections should always be present, even if empty
|
||||
check("[a]\n[b]\nfoo=bar", File{
|
||||
"a": {},
|
||||
"b": {"foo": "bar"},
|
||||
})
|
||||
check("foo=bar\n[a]\nthis=that", File{
|
||||
"": {"foo": "bar"},
|
||||
"a": {"this": "that"},
|
||||
})
|
||||
}
|
2
Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini
generated
vendored
Normal file
@ -0,0 +1,2 @@
[default]
stuff = things
@ -21,6 +21,7 @@
|
||||
"angular-native-dragdrop": "~1.1.1",
|
||||
"angular-bindonce": "~0.3.3",
|
||||
"requirejs": "~2.1.18",
|
||||
"requirejs-text": "~2.0.14"
|
||||
"requirejs-text": "~2.0.14",
|
||||
"aws-sdk": "~2.1.42"
|
||||
}
|
||||
}
|
||||
|
@ -72,8 +72,12 @@ func ProxyDataSourceRequest(c *middleware.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
proxyPath := c.Params("*")
|
||||
proxy := NewReverseProxy(&query.Result, proxyPath)
|
||||
proxy.Transport = dataProxyTransport
|
||||
proxy.ServeHTTP(c.RW(), c.Req.Request)
|
||||
if query.Result.Type == m.DS_CLOUDWATCH {
|
||||
ProxyCloudWatchDataSourceRequest(c)
|
||||
} else {
|
||||
proxyPath := c.Params("*")
|
||||
proxy := NewReverseProxy(&query.Result, proxyPath)
|
||||
proxy.Transport = dataProxyTransport
|
||||
proxy.ServeHTTP(c.RW(), c.Req.Request)
|
||||
}
|
||||
}
|
||||
|
107
pkg/api/dataproxy_cloudwatch.go
Normal file
@ -0,0 +1,107 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/cloudwatch"
|
||||
"github.com/grafana/grafana/pkg/middleware"
|
||||
)
|
||||
|
||||
func ProxyCloudWatchDataSourceRequest(c *middleware.Context) {
|
||||
body, _ := ioutil.ReadAll(c.Req.Request.Body)
|
||||
|
||||
reqInfo := &struct {
|
||||
Region string `json:"region"`
|
||||
Service string `json:"service"`
|
||||
Action string `json:"action"`
|
||||
}{}
|
||||
json.Unmarshal([]byte(body), reqInfo)
|
||||
|
||||
svc := cloudwatch.New(&aws.Config{Region: aws.String(reqInfo.Region)})
|
||||
|
||||
switch reqInfo.Action {
|
||||
case "GetMetricStatistics":
|
||||
reqParam := &struct {
|
||||
Parameters struct {
|
||||
Namespace string `json:"Namespace"`
|
||||
MetricName string `json:"MetricName"`
|
||||
Dimensions []map[string]string `json:"Dimensions"`
|
||||
Statistics []string `json:"Statistics"`
|
||||
StartTime int64 `json:"StartTime"`
|
||||
EndTime int64 `json:"EndTime"`
|
||||
Period int64 `json:"Period"`
|
||||
} `json:"parameters"`
|
||||
}{}
|
||||
json.Unmarshal([]byte(body), reqParam)
|
||||
|
||||
statistics := make([]*string, 0)
|
||||
for k := range reqParam.Parameters.Statistics {
|
||||
statistics = append(statistics, &reqParam.Parameters.Statistics[k])
|
||||
}
|
||||
dimensions := make([]*cloudwatch.Dimension, 0)
|
||||
for _, d := range reqParam.Parameters.Dimensions {
|
||||
dimensions = append(dimensions, &cloudwatch.Dimension{
|
||||
Name: aws.String(d["Name"]),
|
||||
Value: aws.String(d["Value"]),
|
||||
})
|
||||
}
|
||||
|
||||
params := &cloudwatch.GetMetricStatisticsInput{
|
||||
Namespace: aws.String(reqParam.Parameters.Namespace),
|
||||
MetricName: aws.String(reqParam.Parameters.MetricName),
|
||||
Dimensions: dimensions,
|
||||
Statistics: statistics,
|
||||
StartTime: aws.Time(time.Unix(reqParam.Parameters.StartTime, 0)),
|
||||
EndTime: aws.Time(time.Unix(reqParam.Parameters.EndTime, 0)),
|
||||
Period: aws.Int64(reqParam.Parameters.Period),
|
||||
}
|
||||
|
||||
resp, err := svc.GetMetricStatistics(params)
|
||||
if err != nil {
|
||||
c.JsonApiErr(500, "Unable to call AWS API", err)
|
||||
return
|
||||
}
|
||||
|
||||
respJson, _ := json.Marshal(resp)
|
||||
fmt.Fprint(c.RW(), string(respJson))
|
||||
case "ListMetrics":
|
||||
reqParam := &struct {
|
||||
Parameters struct {
|
||||
Namespace string `json:"Namespace"`
|
||||
MetricName string `json:"MetricName"`
|
||||
Dimensions []map[string]string `json:"Dimensions"`
|
||||
} `json:"parameters"`
|
||||
}{}
|
||||
json.Unmarshal([]byte(body), reqParam)
|
||||
|
||||
dimensions := make([]*cloudwatch.DimensionFilter, 0)
|
||||
for _, d := range reqParam.Parameters.Dimensions {
|
||||
dimensions = append(dimensions, &cloudwatch.DimensionFilter{
|
||||
Name: aws.String(d["Name"]),
|
||||
Value: aws.String(d["Value"]),
|
||||
})
|
||||
}
|
||||
|
||||
params := &cloudwatch.ListMetricsInput{
|
||||
Namespace: aws.String(reqParam.Parameters.Namespace),
|
||||
MetricName: aws.String(reqParam.Parameters.MetricName),
|
||||
Dimensions: dimensions,
|
||||
}
|
||||
|
||||
resp, err := svc.ListMetrics(params)
|
||||
if err != nil {
|
||||
c.JsonApiErr(500, "Unable to call AWS API", err)
|
||||
return
|
||||
}
|
||||
|
||||
respJson, _ := json.Marshal(resp)
|
||||
fmt.Fprint(c.RW(), string(respJson))
|
||||
default:
|
||||
c.JsonApiErr(500, "Unexpected CloudWatch action", errors.New(reqInfo.Action))
|
||||
}
|
||||
}
|
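For orientation, the sketch below builds the kind of JSON body this handler decodes. The field names mirror the anonymous structs above; the concrete region, metric, and time range are illustrative assumptions rather than anything taken from this patch.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// Top-level fields match reqInfo (region/service/action); "parameters"
	// matches the GetMetricStatistics struct decoded by the handler above.
	body := map[string]interface{}{
		"region":  "us-east-1",
		"service": "CloudWatch",
		"action":  "GetMetricStatistics",
		"parameters": map[string]interface{}{
			"Namespace":  "AWS/EC2",
			"MetricName": "CPUUtilization",
			"Dimensions": []map[string]string{{"Name": "InstanceId", "Value": "i-12345678"}},
			"Statistics": []string{"Average"},
			"StartTime":  time.Now().Add(-time.Hour).Unix(),
			"EndTime":    time.Now().Unix(),
			"Period":     300,
		},
	}

	buf, err := json.MarshalIndent(body, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf))
}
```

The frontend's proxy mode (see `generateRequestProxy` in datasource.js further down) posts exactly this shape of payload to the datasource proxy.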
@ -11,6 +11,7 @@ const (
|
||||
DS_INFLUXDB_08 = "influxdb_08"
|
||||
DS_ES = "elasticsearch"
|
||||
DS_OPENTSDB = "opentsdb"
|
||||
DS_CLOUDWATCH = "cloudwatch"
|
||||
DS_ACCESS_DIRECT = "direct"
|
||||
DS_ACCESS_PROXY = "proxy"
|
||||
)
|
||||
|
@ -45,6 +45,7 @@ require.config({
|
||||
modernizr: '../vendor/modernizr-2.6.1',
|
||||
|
||||
'bootstrap-tagsinput': '../vendor/tagsinput/bootstrap-tagsinput',
|
||||
'aws-sdk': '../vendor/aws-sdk/dist/aws-sdk.min',
|
||||
},
|
||||
shim: {
|
||||
|
||||
|
@ -13,6 +13,7 @@ function (angular, kbn) {
|
||||
link: function(scope, elem, attrs) {
|
||||
var _t = '<i class="grafana-tip fa fa-'+(attrs.icon||'question-circle')+'" bs-tooltip="\''+
|
||||
kbn.addslashes(elem.text())+'\'"></i>';
|
||||
_t = _t.replace(/{/g, '\\{').replace(/}/g, '\\}');
|
||||
elem.replaceWith($compile(angular.element(_t))(scope));
|
||||
}
|
||||
};
|
||||
|
560
public/app/plugins/datasource/cloudwatch/datasource.js
Normal file
@ -0,0 +1,560 @@
|
||||
/* global AWS */
|
||||
define([
|
||||
'angular',
|
||||
'lodash',
|
||||
'kbn',
|
||||
'moment',
|
||||
'./queryCtrl',
|
||||
'aws-sdk',
|
||||
],
|
||||
function (angular, _, kbn) {
|
||||
'use strict';
|
||||
|
||||
var module = angular.module('grafana.services');
|
||||
|
||||
module.factory('CloudWatchDatasource', function($q, $http, templateSrv) {
|
||||
|
||||
function CloudWatchDatasource(datasource) {
|
||||
this.type = 'cloudwatch';
|
||||
this.name = datasource.name;
|
||||
this.supportMetrics = true;
|
||||
this.proxyMode = (datasource.jsonData.access === 'proxy');
|
||||
this.proxyUrl = datasource.url;
|
||||
|
||||
this.defaultRegion = datasource.jsonData.defaultRegion;
|
||||
this.credentials = {
|
||||
accessKeyId: datasource.jsonData.accessKeyId,
|
||||
secretAccessKey: datasource.jsonData.secretAccessKey
|
||||
};
|
||||
|
||||
/* jshint -W101 */
|
||||
this.supportedRegion = [
|
||||
'us-east-1', 'us-west-2', 'us-west-1', 'eu-west-1', 'eu-central-1', 'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1', 'sa-east-1'
|
||||
];
|
||||
|
||||
this.supportedMetrics = {
|
||||
'AWS/AutoScaling': [
|
||||
'GroupMinSize', 'GroupMaxSize', 'GroupDesiredCapacity', 'GroupInServiceInstances', 'GroupPendingInstances', 'GroupStandbyInstances', 'GroupTerminatingInstances', 'GroupTotalInstances'
|
||||
],
|
||||
'AWS/Billing': [
|
||||
'EstimatedCharges'
|
||||
],
|
||||
'AWS/CloudFront': [
|
||||
'Requests', 'BytesDownloaded', 'BytesUploaded', 'TotalErrorRate', '4xxErrorRate', '5xxErrorRate'
|
||||
],
|
||||
'AWS/CloudSearch': [
|
||||
'SuccessfulRequests', 'SearchableDocuments', 'IndexUtilization', 'Partitions'
|
||||
],
|
||||
'AWS/DynamoDB': [
|
||||
'ConditionalCheckFailedRequests', 'ConsumedReadCapacityUnits', 'ConsumedWriteCapacityUnits', 'OnlineIndexConsumedWriteCapacity', 'OnlineIndexPercentageProgress', 'OnlineIndexThrottleEvents', 'ProvisionedReadCapacityUnits', 'ProvisionedWriteCapacityUnits', 'ReadThrottleEvents', 'ReturnedItemCount', 'SuccessfulRequestLatency', 'SystemErrors', 'ThrottledRequests', 'UserErrors', 'WriteThrottleEvents'
|
||||
],
|
||||
'AWS/ElastiCache': [
|
||||
'CPUUtilization', 'SwapUsage', 'FreeableMemory', 'NetworkBytesIn', 'NetworkBytesOut',
|
||||
'BytesUsedForCacheItems', 'BytesReadIntoMemcached', 'BytesWrittenOutFromMemcached', 'CasBadval', 'CasHits', 'CasMisses', 'CmdFlush', 'CmdGet', 'CmdSet', 'CurrConnections', 'CurrItems', 'DecrHits', 'DecrMisses', 'DeleteHits', 'DeleteMisses', 'Evictions', 'GetHits', 'GetMisses', 'IncrHits', 'IncrMisses', 'Reclaimed',
|
||||
'CurrConnections', 'Evictions', 'Reclaimed', 'NewConnections', 'BytesUsedForCache', 'CacheHits', 'CacheMisses', 'ReplicationLag', 'GetTypeCmds', 'SetTypeCmds', 'KeyBasedCmds', 'StringBasedCmds', 'HashBasedCmds', 'ListBasedCmds', 'SetBasedCmds', 'SortedSetBasedCmds', 'CurrItems'
|
||||
],
|
||||
'AWS/EBS': [
|
||||
'VolumeReadBytes', 'VolumeWriteBytes', 'VolumeReadOps', 'VolumeWriteOps', 'VolumeTotalReadTime', 'VolumeTotalWriteTime', 'VolumeIdleTime', 'VolumeQueueLength', 'VolumeThroughputPercentage', 'VolumeConsumedReadWriteOps'
|
||||
],
|
||||
'AWS/EC2': [
|
||||
'CPUCreditUsage', 'CPUCreditBalance', 'CPUUtilization', 'DiskReadOps', 'DiskWriteOps', 'DiskReadBytes', 'DiskWriteBytes', 'NetworkIn', 'NetworkOut', 'StatusCheckFailed', 'StatusCheckFailed_Instance', 'StatusCheckFailed_System'
|
||||
],
|
||||
'AWS/ELB': [
|
||||
'HealthyHostCount', 'UnHealthyHostCount', 'RequestCount', 'Latency', 'HTTPCode_ELB_4XX', 'HTTPCode_ELB_5XX', 'HTTPCode_Backend_2XX', 'HTTPCode_Backend_3XX', 'HTTPCode_Backend_4XX', 'HTTPCode_Backend_5XX', 'BackendConnectionErrors', 'SurgeQueueLength', 'SpilloverCount'
|
||||
],
|
||||
'AWS/ElasticMapReduce': [
|
||||
'CoreNodesPending', 'CoreNodesRunning', 'HBaseBackupFailed', 'HBaseMostRecentBackupDuration', 'HBaseTimeSinceLastSuccessfulBackup', 'HDFSBytesRead', 'HDFSBytesWritten', 'HDFSUtilization', 'IsIdle', 'JobsFailed', 'JobsRunning', 'LiveDataNodes', 'LiveTaskTrackers', 'MapSlotsOpen', 'MissingBlocks', 'ReduceSlotsOpen', 'RemainingMapTasks', 'RemainingMapTasksPerSlot', 'RemainingReduceTasks', 'RunningMapTasks', 'RunningReduceTasks', 'S3BytesRead', 'S3BytesWritten', 'TaskNodesPending', 'TaskNodesRunning', 'TotalLoad'
|
||||
],
|
||||
'AWS/Kinesis': [
|
||||
'PutRecord.Bytes', 'PutRecord.Latency', 'PutRecord.Success', 'PutRecords.Bytes', 'PutRecords.Latency', 'PutRecords.Records', 'PutRecords.Success', 'IncomingBytes', 'IncomingRecords', 'GetRecords.Bytes', 'GetRecords.IteratorAgeMilliseconds', 'GetRecords.Latency', 'GetRecords.Success'
|
||||
],
|
||||
'AWS/ML': [
|
||||
'PredictCount', 'PredictFailureCount'
|
||||
],
|
||||
'AWS/OpsWorks': [
|
||||
'cpu_idle', 'cpu_nice', 'cpu_system', 'cpu_user', 'cpu_waitio', 'load_1', 'load_5', 'load_15', 'memory_buffers', 'memory_cached', 'memory_free', 'memory_swap', 'memory_total', 'memory_used', 'procs'
|
||||
],
|
||||
'AWS/Redshift': [
|
||||
'CPUUtilization', 'DatabaseConnections', 'HealthStatus', 'MaintenanceMode', 'NetworkReceiveThroughput', 'NetworkTransmitThroughput', 'PercentageDiskSpaceUsed', 'ReadIOPS', 'ReadLatency', 'ReadThroughput', 'WriteIOPS', 'WriteLatency', 'WriteThroughput'
|
||||
],
|
||||
'AWS/RDS': [
|
||||
'BinLogDiskUsage', 'CPUUtilization', 'DatabaseConnections', 'DiskQueueDepth', 'FreeableMemory', 'FreeStorageSpace', 'ReplicaLag', 'SwapUsage', 'ReadIOPS', 'WriteIOPS', 'ReadLatency', 'WriteLatency', 'ReadThroughput', 'WriteThroughput', 'NetworkReceiveThroughput', 'NetworkTransmitThroughput'
|
||||
],
|
||||
'AWS/Route53': [
|
||||
'HealthCheckStatus', 'HealthCheckPercentageHealthy'
|
||||
],
|
||||
'AWS/SNS': [
|
||||
'NumberOfMessagesPublished', 'PublishSize', 'NumberOfNotificationsDelivered', 'NumberOfNotificationsFailed'
|
||||
],
|
||||
'AWS/SQS': [
|
||||
'NumberOfMessagesSent', 'SentMessageSize', 'NumberOfMessagesReceived', 'NumberOfEmptyReceives', 'NumberOfMessagesDeleted', 'ApproximateNumberOfMessagesDelayed', 'ApproximateNumberOfMessagesVisible', 'ApproximateNumberOfMessagesNotVisible'
|
||||
],
|
||||
'AWS/S3': [
|
||||
'BucketSizeBytes', 'NumberOfObjects'
|
||||
],
|
||||
'AWS/SWF': [
|
||||
'DecisionTaskScheduleToStartTime', 'DecisionTaskStartToCloseTime', 'DecisionTasksCompleted', 'StartedDecisionTasksTimedOutOnClose', 'WorkflowStartToCloseTime', 'WorkflowsCanceled', 'WorkflowsCompleted', 'WorkflowsContinuedAsNew', 'WorkflowsFailed', 'WorkflowsTerminated', 'WorkflowsTimedOut'
|
||||
],
|
||||
'AWS/StorageGateway': [
|
||||
'CacheHitPercent', 'CachePercentUsed', 'CachePercentDirty', 'CloudBytesDownloaded', 'CloudDownloadLatency', 'CloudBytesUploaded', 'UploadBufferFree', 'UploadBufferPercentUsed', 'UploadBufferUsed', 'QueuedWrites', 'ReadBytes', 'ReadTime', 'TotalCacheSize', 'WriteBytes', 'WriteTime', 'WorkingStorageFree', 'WorkingStoragePercentUsed', 'WorkingStorageUsed', 'CacheHitPercent', 'CachePercentUsed', 'CachePercentDirty', 'ReadBytes', 'ReadTime', 'WriteBytes', 'WriteTime', 'QueuedWrites'
|
||||
],
|
||||
'AWS/WorkSpaces': [
|
||||
'Available', 'Unhealthy', 'ConnectionAttempt', 'ConnectionSuccess', 'ConnectionFailure', 'SessionLaunchTime', 'InSessionLatency', 'SessionDisconnect'
|
||||
],
|
||||
};
|
||||
|
||||
this.supportedDimensions = {
|
||||
'AWS/AutoScaling': [
|
||||
'AutoScalingGroupName'
|
||||
],
|
||||
'AWS/Billing': [
|
||||
'ServiceName', 'LinkedAccount', 'Currency'
|
||||
],
|
||||
'AWS/CloudFront': [
|
||||
'DistributionId', 'Region'
|
||||
],
|
||||
'AWS/CloudSearch': [
|
||||
|
||||
],
|
||||
'AWS/DynamoDB': [
|
||||
'TableName', 'GlobalSecondaryIndexName', 'Operation'
|
||||
],
|
||||
'AWS/ElastiCache': [
|
||||
'CacheClusterId', 'CacheNodeId'
|
||||
],
|
||||
'AWS/EBS': [
|
||||
'VolumeId'
|
||||
],
|
||||
'AWS/EC2': [
|
||||
'AutoScalingGroupName', 'ImageId', 'InstanceId', 'InstanceType'
|
||||
],
|
||||
'AWS/ELB': [
|
||||
'LoadBalancerName', 'AvailabilityZone'
|
||||
],
|
||||
'AWS/ElasticMapReduce': [
|
||||
'ClusterId', 'JobId'
|
||||
],
|
||||
'AWS/Kinesis': [
|
||||
'StreamName'
|
||||
],
|
||||
'AWS/ML': [
|
||||
'MLModelId', 'RequestMode'
|
||||
],
|
||||
'AWS/OpsWorks': [
|
||||
'StackId', 'LayerId', 'InstanceId'
|
||||
],
|
||||
'AWS/Redshift': [
|
||||
'NodeID', 'ClusterIdentifier'
|
||||
],
|
||||
'AWS/RDS': [
|
||||
'DBInstanceIdentifier', 'DatabaseClass', 'EngineName'
|
||||
],
|
||||
'AWS/Route53': [
|
||||
'HealthCheckId'
|
||||
],
|
||||
'AWS/SNS': [
|
||||
'Application', 'Platform', 'TopicName'
|
||||
],
|
||||
'AWS/SQS': [
|
||||
'QueueName'
|
||||
],
|
||||
'AWS/S3': [
|
||||
'BucketName', 'StorageType'
|
||||
],
|
||||
'AWS/SWF': [
|
||||
'Domain', 'ActivityTypeName', 'ActivityTypeVersion'
|
||||
],
|
||||
'AWS/StorageGateway': [
|
||||
'GatewayId', 'GatewayName', 'VolumeId'
|
||||
],
|
||||
'AWS/WorkSpaces': [
|
||||
'DirectoryId', 'WorkspaceId'
|
||||
],
|
||||
};
|
||||
/* jshint +W101 */
|
||||
|
||||
/* load custom metrics definitions */
|
||||
var self = this;
|
||||
$q.all(
|
||||
_.chain(datasource.jsonData.customMetricsAttributes)
|
||||
.reject(function(u) {
|
||||
return _.isEmpty(u);
|
||||
})
|
||||
.map(function(u) {
|
||||
return $http({ method: 'GET', url: u });
|
||||
})
|
||||
)
|
||||
.then(function(allResponse) {
|
||||
_.chain(allResponse)
|
||||
.map(function(d) {
|
||||
return d.data.Metrics;
|
||||
})
|
||||
.flatten()
|
||||
.reject(function(metric) {
|
||||
return metric.Namespace.indexOf('AWS/') === 0;
|
||||
})
|
||||
.map(function(metric) {
|
||||
metric.Dimensions = _.chain(metric.Dimensions)
|
||||
.map(function(d) {
|
||||
return d.Name;
|
||||
})
|
||||
.value().sort();
|
||||
return metric;
|
||||
})
|
||||
.uniq(function(metric) {
|
||||
return metric.Namespace + metric.MetricName + metric.Dimensions.join('');
|
||||
})
|
||||
.each(function(metric) {
|
||||
if (!_.has(self.supportedMetrics, metric.Namespace)) {
|
||||
self.supportedMetrics[metric.Namespace] = [];
|
||||
}
|
||||
self.supportedMetrics[metric.Namespace].push(metric.MetricName);
|
||||
|
||||
if (!_.has(self.supportedDimensions, metric.Namespace)) {
|
||||
self.supportedDimensions[metric.Namespace] = [];
|
||||
}
|
||||
|
||||
self.supportedDimensions[metric.Namespace] = _.union(self.supportedDimensions[metric.Namespace], metric.Dimensions);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Called once per panel (graph)
|
||||
CloudWatchDatasource.prototype.query = function(options) {
|
||||
var start = convertToCloudWatchTime(options.range.from);
|
||||
var end = convertToCloudWatchTime(options.range.to);
|
||||
|
||||
var queries = [];
|
||||
_.each(options.targets, _.bind(function(target) {
|
||||
if (!target.namespace || !target.metricName || _.isEmpty(target.statistics)) {
|
||||
return;
|
||||
}
|
||||
|
||||
var query = {};
|
||||
query.region = templateSrv.replace(target.region, options.scopedVars);
|
||||
query.namespace = templateSrv.replace(target.namespace, options.scopedVars);
|
||||
query.metricName = templateSrv.replace(target.metricName, options.scopedVars);
|
||||
query.dimensions = convertDimensionFormat(target.dimensions);
|
||||
query.statistics = getActivatedStatistics(target.statistics);
|
||||
query.period = parseInt(target.period, 10);
|
||||
|
||||
var range = end - start;
|
||||
// CloudWatch limits a GetMetricStatistics response to 1440 datapoints
|
||||
if (range / query.period >= 1440) {
|
||||
query.period = Math.floor(range / 1440 / 60) * 60;
|
||||
}
|
||||
|
||||
queries.push(query);
|
||||
}, this));
|
||||
|
||||
// No valid targets, return the empty result to save a round trip.
|
||||
if (_.isEmpty(queries)) {
|
||||
var d = $q.defer();
|
||||
d.resolve({ data: [] });
|
||||
return d.promise;
|
||||
}
|
||||
|
||||
var allQueryPromise = _.map(queries, _.bind(function(query) {
|
||||
return this.performTimeSeriesQuery(query, start, end);
|
||||
}, this));
|
||||
|
||||
return $q.all(allQueryPromise)
|
||||
.then(function(allResponse) {
|
||||
var result = [];
|
||||
|
||||
_.each(allResponse, function(response, index) {
|
||||
var metrics = transformMetricData(response, options.targets[index]);
|
||||
_.each(metrics, function(m) {
|
||||
result.push(m);
|
||||
});
|
||||
});
|
||||
|
||||
return { data: result };
|
||||
});
|
||||
};
|
||||
|
||||
CloudWatchDatasource.prototype.performTimeSeriesQuery = function(query, start, end) {
|
||||
var cloudwatch = this.getCloudWatchClient(query.region);
|
||||
|
||||
var params = {
|
||||
Namespace: query.namespace,
|
||||
MetricName: query.metricName,
|
||||
Dimensions: query.dimensions,
|
||||
Statistics: query.statistics,
|
||||
StartTime: start,
|
||||
EndTime: end,
|
||||
Period: query.period
|
||||
};
|
||||
|
||||
var d = $q.defer();
|
||||
cloudwatch.getMetricStatistics(params, function(err, data) {
|
||||
if (err) {
|
||||
return d.reject(err);
|
||||
}
|
||||
return d.resolve(data);
|
||||
});
|
||||
|
||||
return d.promise;
|
||||
};
|
||||
|
||||
CloudWatchDatasource.prototype.performSuggestRegion = function() {
|
||||
return this.supportedRegion;
|
||||
};
|
||||
|
||||
CloudWatchDatasource.prototype.performSuggestNamespace = function() {
|
||||
return _.keys(this.supportedMetrics);
|
||||
};
|
||||
|
||||
CloudWatchDatasource.prototype.performSuggestMetrics = function(namespace) {
|
||||
namespace = templateSrv.replace(namespace);
|
||||
return this.supportedMetrics[namespace] || [];
|
||||
};
|
||||
|
||||
CloudWatchDatasource.prototype.performSuggestDimensionKeys = function(namespace) {
|
||||
namespace = templateSrv.replace(namespace);
|
||||
return this.supportedDimensions[namespace] || [];
|
||||
};
|
||||
|
||||
CloudWatchDatasource.prototype.performSuggestDimensionValues = function(region, namespace, metricName, dimensions) {
|
||||
region = templateSrv.replace(region);
|
||||
namespace = templateSrv.replace(namespace);
|
||||
metricName = templateSrv.replace(metricName);
|
||||
|
||||
var cloudwatch = this.getCloudWatchClient(region);
|
||||
|
||||
var params = {
|
||||
Namespace: namespace,
|
||||
MetricName: metricName
|
||||
};
|
||||
if (!_.isEmpty(dimensions)) {
|
||||
params.Dimensions = convertDimensionFormat(dimensions);
|
||||
}
|
||||
|
||||
var d = $q.defer();
|
||||
|
||||
cloudwatch.listMetrics(params, function(err, data) {
|
||||
if (err) {
|
||||
return d.reject(err);
|
||||
}
|
||||
|
||||
var suggestData = _.chain(data.Metrics)
|
||||
.map(function(metric) {
|
||||
return metric.Dimensions;
|
||||
})
|
||||
.reject(function(metric) {
|
||||
return _.isEmpty(metric);
|
||||
})
|
||||
.value();
|
||||
|
||||
return d.resolve(suggestData);
|
||||
});
|
||||
|
||||
return d.promise;
|
||||
};
|
||||
|
||||
CloudWatchDatasource.prototype.getTemplateVariableNames = function() {
|
||||
var variables = [];
|
||||
templateSrv.fillVariableValuesForUrl(variables);
|
||||
|
||||
return _.map(_.keys(variables), function(k) {
|
||||
return k.replace(/var-/, '$');
|
||||
});
|
||||
};
|
||||
|
||||
CloudWatchDatasource.prototype.metricFindQuery = function(query) {
|
||||
var region;
|
||||
var namespace;
|
||||
var metricName;
|
||||
|
||||
var transformSuggestData = function(suggestData) {
|
||||
return _.map(suggestData, function(v) {
|
||||
return { text: v };
|
||||
});
|
||||
};
|
||||
|
||||
var d = $q.defer();
|
||||
|
||||
var regionQuery = query.match(/^region\(\)/);
|
||||
if (regionQuery) {
|
||||
d.resolve(transformSuggestData(this.performSuggestRegion()));
|
||||
return d.promise;
|
||||
}
|
||||
|
||||
var namespaceQuery = query.match(/^namespace\(\)/);
|
||||
if (namespaceQuery) {
|
||||
d.resolve(transformSuggestData(this.performSuggestNamespace()));
|
||||
return d.promise;
|
||||
}
|
||||
|
||||
var metricNameQuery = query.match(/^metrics\(([^\)]+?)\)/);
|
||||
if (metricNameQuery) {
|
||||
namespace = templateSrv.replace(metricNameQuery[1]);
|
||||
d.resolve(transformSuggestData(this.performSuggestMetrics(namespace)));
|
||||
return d.promise;
|
||||
}
|
||||
|
||||
var dimensionKeysQuery = query.match(/^dimension_keys\(([^\)]+?)\)/);
|
||||
if (dimensionKeysQuery) {
|
||||
namespace = templateSrv.replace(dimensionKeysQuery[1]);
|
||||
d.resolve(transformSuggestData(this.performSuggestDimensionKeys(namespace)));
|
||||
return d.promise;
|
||||
}
|
||||
|
||||
var dimensionValuesQuery = query.match(/^dimension_values\(([^,]+?),\s?([^,]+?),\s?([^,]+?)(,\s?([^)]*))?\)/);
|
||||
if (dimensionValuesQuery) {
|
||||
region = templateSrv.replace(dimensionValuesQuery[1]);
|
||||
namespace = templateSrv.replace(dimensionValuesQuery[2]);
|
||||
metricName = templateSrv.replace(dimensionValuesQuery[3]);
|
||||
var dimensionPart = templateSrv.replace(dimensionValuesQuery[5]);
|
||||
|
||||
var dimensions = {};
|
||||
if (!_.isEmpty(dimensionPart)) {
|
||||
_.each(dimensionPart.split(','), function(v) {
|
||||
var t = v.split('=');
|
||||
if (t.length !== 2) {
|
||||
throw new Error('Invalid query format');
|
||||
}
|
||||
dimensions[t[0]] = t[1];
|
||||
});
|
||||
}
|
||||
|
||||
return this.performSuggestDimensionValues(region, namespace, metricName, dimensions)
|
||||
.then(function(suggestData) {
|
||||
return _.map(suggestData, function(dimensions) {
|
||||
var result = _.chain(dimensions)
|
||||
.sortBy(function(dimension) {
|
||||
return dimension.Name;
|
||||
})
|
||||
.map(function(dimension) {
|
||||
return dimension.Name + '=' + dimension.Value;
|
||||
})
|
||||
.value().join(',');
|
||||
|
||||
return { text: result };
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
return $q.when([]);
|
||||
};
|
||||
|
||||
CloudWatchDatasource.prototype.testDatasource = function() {
|
||||
/* use billing metrics for test */
|
||||
var region = 'us-east-1';
|
||||
var namespace = 'AWS/Billing';
|
||||
var metricName = 'EstimatedCharges';
|
||||
var dimensions = {};
|
||||
|
||||
return this.performSuggestDimensionValues(region, namespace, metricName, dimensions).then(function () {
|
||||
return { status: 'success', message: 'Data source is working', title: 'Success' };
|
||||
});
|
||||
};
|
||||
|
||||
CloudWatchDatasource.prototype.getCloudWatchClient = function(region) {
|
||||
if (!this.proxyMode) {
|
||||
return new AWS.CloudWatch({
|
||||
region: region,
|
||||
accessKeyId: this.credentials.accessKeyId,
|
||||
secretAccessKey: this.credentials.secretAccessKey
|
||||
});
|
||||
} else {
|
||||
var self = this;
|
||||
var generateRequestProxy = function(service, action) {
|
||||
return function(params, callback) {
|
||||
var data = {
|
||||
region: region,
|
||||
service: service,
|
||||
action: action,
|
||||
parameters: params
|
||||
};
|
||||
|
||||
var options = {
|
||||
method: 'POST',
|
||||
url: self.proxyUrl,
|
||||
data: data
|
||||
};
|
||||
|
||||
$http(options).then(function(response) {
|
||||
callback(null, response.data);
|
||||
}, function(err) {
|
||||
callback(err, []);
|
||||
});
|
||||
};
|
||||
};
|
||||
|
||||
return {
|
||||
getMetricStatistics: generateRequestProxy('CloudWatch', 'GetMetricStatistics'),
|
||||
listMetrics: generateRequestProxy('CloudWatch', 'ListMetrics')
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
CloudWatchDatasource.prototype.getDefaultRegion = function() {
|
||||
return this.defaultRegion;
|
||||
};
|
||||
|
||||
function transformMetricData(md, options) {
|
||||
var result = [];
|
||||
|
||||
var dimensionPart = templateSrv.replace(JSON.stringify(options.dimensions));
|
||||
_.each(getActivatedStatistics(options.statistics), function(s) {
|
||||
var originalSettings = _.templateSettings;
|
||||
_.templateSettings = {
|
||||
interpolate: /\{\{(.+?)\}\}/g
|
||||
};
|
||||
var template = _.template(options.legendFormat);
|
||||
|
||||
var metricLabel;
|
||||
if (_.isEmpty(options.legendFormat)) {
|
||||
metricLabel = md.Label + '_' + s + dimensionPart;
|
||||
} else {
|
||||
var d = convertDimensionFormat(options.dimensions);
|
||||
metricLabel = template({
|
||||
Region: templateSrv.replace(options.region),
|
||||
Namespace: templateSrv.replace(options.namespace),
|
||||
MetricName: templateSrv.replace(options.metricName),
|
||||
Dimensions: d,
|
||||
Statistics: s
|
||||
});
|
||||
}
|
||||
|
||||
_.templateSettings = originalSettings;
|
||||
|
||||
var dps = _.map(md.Datapoints, function(value) {
|
||||
return [value[s], new Date(value.Timestamp).getTime()];
|
||||
});
|
||||
dps = _.sortBy(dps, function(dp) { return dp[1]; });
|
||||
|
||||
result.push({ target: metricLabel, datapoints: dps });
|
||||
});
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
function getActivatedStatistics(statistics) {
|
||||
var activatedStatistics = [];
|
||||
_.each(statistics, function(v, k) {
|
||||
if (v) {
|
||||
activatedStatistics.push(k);
|
||||
}
|
||||
});
|
||||
return activatedStatistics;
|
||||
}
|
||||
|
||||
function convertToCloudWatchTime(date) {
|
||||
return Math.round(kbn.parseDate(date).getTime() / 1000);
|
||||
}
|
||||
|
||||
function convertDimensionFormat(dimensions) {
|
||||
return _.map(_.keys(dimensions), function(key) {
|
||||
return {
|
||||
Name: templateSrv.replace(key),
|
||||
Value: templateSrv.replace(dimensions[key])
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
return CloudWatchDatasource;
|
||||
});
|
||||
|
||||
});
|
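Pulling the `metricFindQuery` regular expressions above together, the template-variable queries this datasource accepts take five forms; the concrete names below are samples only:

```
region()
namespace()
metrics(AWS/EC2)
dimension_keys(AWS/EC2)
dimension_values(us-east-1,AWS/EC2,CPUUtilization,InstanceId=i-12345678)
```

The trailing `key=value` list on `dimension_values()` is optional, matching the optional fourth capture group in the regex.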
@ -0,0 +1,50 @@
|
||||
<h5>CloudWatch details</h5>
|
||||
|
||||
<div class="tight-form">
|
||||
<ul class="tight-form-list">
|
||||
<li class="tight-form-item" style="width: 80px">
|
||||
Default Region
|
||||
</li>
|
||||
<li>
|
||||
<input type="text" class="tight-form-input input-large" ng-model='current.jsonData.defaultRegion' placeholder="" required></input>
|
||||
</li>
|
||||
</ul>
|
||||
<ul class="tight-form-list">
|
||||
<li class="tight-form-item">
|
||||
Access <tip>Direct = URL is used directly from the browser, Proxy = the Grafana backend will proxy the request</tip>
|
||||
</li>
|
||||
<li>
|
||||
<select class="input-medium tight-form-input" ng-model="current.jsonData.access" ng-options="f for f in ['direct', 'proxy']" ng-init="current.jsonData.access = current.jsonData.access || 'direct'"></select>
|
||||
</li>
|
||||
</ul>
|
||||
<div class="clearfix"></div>
|
||||
</div>
|
||||
<div class="tight-form" ng-show="current.jsonData.access === 'direct'">
|
||||
<ul class="tight-form-list">
|
||||
<li class="tight-form-item" style="width: 80px">
|
||||
Access Key Id
|
||||
</li>
|
||||
<li>
|
||||
<input type="text" class="tight-form-input input-large" ng-model='current.jsonData.accessKeyId' placeholder="" ng-required="current.jsonData.access === 'direct'"></input>
|
||||
</li>
|
||||
<li class="tight-form-item">
|
||||
Secret Access Key
|
||||
|
||||
</li>
|
||||
<li>
|
||||
<input type="password" class="tight-form-input input-large" ng-model='current.jsonData.secretAccessKey' placeholder="" ng-required="current.jsonData.access === 'direct'"></input>
|
||||
</li>
|
||||
</ul>
|
||||
<div class="clearfix"></div>
|
||||
</div>
|
||||
<div class="tight-form last">
|
||||
<ul class="tight-form-list">
|
||||
<li class="tight-form-item" style="width: 80px">
|
||||
Custom Metrics Attributes
|
||||
</li>
|
||||
<li>
|
||||
<input type="text" class="tight-form-input input-xlarge" ng-model='current.jsonData.customMetricsAttributes[0]' ng-init="current.jsonData.customMetricsAttributes = current.jsonData.customMetricsAttributes || []" placeholder="JSON url" bs-tooltip="'Set JSON url of the result, \'aws cloudwatch list-metrics --output json\''"></input>
|
||||
</li>
|
||||
</ul>
|
||||
<div class="clearfix"></div>
|
||||
</div>
|
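The fields in this config partial are persisted under the datasource's `jsonData`; below is a hedged sketch of the resulting settings for proxy access (all values illustrative; `accessKeyId`/`secretAccessKey` are only required when access is `direct`):

```
{
  "jsonData": {
    "defaultRegion": "us-east-1",
    "access": "proxy",
    "customMetricsAttributes": ["https://example.com/list-metrics.json"]
  }
}
```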
@ -0,0 +1,241 @@
|
||||
<div class="editor-row" style="margin-top: 10px;">
|
||||
|
||||
<div ng-repeat="target in panel.targets"
|
||||
style="margin-bottom: 10px;"
|
||||
ng-class="{'tight-form-disabled': target.hide}"
|
||||
ng-controller="CloudWatchQueryCtrl"
|
||||
ng-init="init()">
|
||||
|
||||
<div class="tight-form">
|
||||
<ul class="tight-form-list pull-right">
|
||||
<li class="tight-form-item">
|
||||
<div class="dropdown">
|
||||
<a class="pointer dropdown-toggle"
|
||||
data-toggle="dropdown"
|
||||
tabindex="1">
|
||||
<i class="fa fa-bars"></i>
|
||||
</a>
|
||||
<ul class="dropdown-menu pull-right" role="menu">
|
||||
<li role="menuitem">
|
||||
<a tabindex="1"
|
||||
ng-click="duplicate()">
|
||||
Duplicate
|
||||
</a>
|
||||
</li>
|
||||
<li role="menuitem">
|
||||
<a tabindex="1"
|
||||
ng-click="moveMetricQuery($index, $index-1)">
|
||||
Move up
|
||||
</a>
|
||||
</li>
|
||||
<li role="menuitem">
|
||||
<a tabindex="1"
|
||||
ng-click="moveMetricQuery($index, $index+1)">
|
||||
Move down
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</li>
|
||||
<li class="tight-form-item last">
|
||||
<a class="pointer" tabindex="1" ng-click="removeDataQuery(target)">
|
||||
<i class="fa fa-remove"></i>
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<ul class="tight-form-list">
|
||||
<li>
|
||||
<a class="tight-form-item"
|
||||
ng-click="target.hide = !target.hide; get_data();"
|
||||
role="menuitem">
|
||||
<i class="fa fa-eye"></i>
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<ul class="tight-form-list" role="menu">
|
||||
<li class="tight-form-item" style="width: 100px">
|
||||
Namespace
|
||||
</li>
|
||||
<li>
|
||||
<input type="text"
|
||||
class="input-medium tight-form-input"
|
||||
ng-model="target.namespace"
|
||||
spellcheck='false'
|
||||
bs-typeahead="suggestNamespace"
|
||||
placeholder="namespace"
|
||||
data-min-length=0 data-items=100
|
||||
ng-model-onblur
|
||||
ng-change="refreshMetricData()"
|
||||
>
|
||||
</li>
|
||||
<li class="tight-form-item">
|
||||
Metric
|
||||
</li>
|
||||
<li>
|
||||
<input type="text"
|
||||
class="input-medium tight-form-input"
|
||||
ng-model="target.metricName"
|
||||
spellcheck='false'
|
||||
bs-typeahead="suggestMetrics"
|
||||
placeholder="metric name"
|
||||
data-min-length=0 data-items=100
|
||||
ng-model-onblur
|
||||
ng-change="refreshMetricData()"
|
||||
>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<div class="clearfix"></div>
|
||||
</div>
|
||||
|
||||
<div class="tight-form">
|
||||
<ul class="tight-form-list" role="menu">
|
||||
<li class="tight-form-item">
|
||||
<i class="fa fa-eye invisible"></i>
|
||||
</li>
|
||||
|
||||
<li class="tight-form-item" style="width: 100px">
|
||||
Dimensions
|
||||
</li>
|
||||
<li ng-repeat="(key, value) in target.escapedDimensions track by $index" class="tight-form-item">
|
||||
{{key}} = {{value}}
|
||||
<a ng-click="removeDimension(key)">
|
||||
<i class="fa fa-remove"></i>
|
||||
</a>
|
||||
</li>
|
||||
|
||||
<li class="tight-form-item" ng-hide="addDimensionMode">
|
||||
<a ng-click="addDimension()">
|
||||
<i class="fa fa-plus"></i>
|
||||
</a>
|
||||
</li>
|
||||
|
||||
<li ng-show="addDimensionMode">
|
||||
<input type="text"
|
||||
class="input-small tight-form-input"
|
||||
spellcheck='false'
|
||||
bs-typeahead="suggestDimensionKeys"
|
||||
data-min-length=0 data-items=100
|
||||
ng-model="target.currentDimensionKey"
|
||||
placeholder="key">
|
||||
<input type="text"
|
||||
class="input-small tight-form-input"
|
||||
spellcheck='false'
|
||||
bs-typeahead="suggestDimensionValues"
|
||||
data-min-length=0 data-items=100
|
||||
ng-model="target.currentDimensionValue"
|
||||
placeholder="value">
|
||||
<a ng-click="addDimension()">
|
||||
add dimension
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<div class="clearfix"></div>
|
||||
</div>
|
||||
|
||||
<div class="tight-form">
|
||||
<ul class="tight-form-list" role="menu">
|
||||
<li class="tight-form-item">
|
||||
<i class="fa fa-eye invisible"></i>
|
||||
</li>
|
||||
|
||||
<li class="tight-form-item" style="width: 100px">
|
||||
Statistics
|
||||
</li>
|
||||
<li class="tight-form-item">
|
||||
<editor-checkbox text="Min" model="target.statistics.Minimum" change="statisticsOptionChanged()"></editor-checkbox>
|
||||
</li>
|
||||
<li class="tight-form-item">
|
||||
<editor-checkbox text="Max" model="target.statistics.Maximum" change="statisticsOptionChanged()"></editor-checkbox>
|
||||
</li>
|
||||
<li class="tight-form-item">
|
||||
<editor-checkbox text="Avg" model="target.statistics.Average" change="statisticsOptionChanged()"></editor-checkbox>
|
||||
</li>
|
||||
<li class="tight-form-item">
|
||||
<editor-checkbox text="Sum" model="target.statistics.Sum" change="statisticsOptionChanged()"></editor-checkbox>
|
||||
</li>
|
||||
<li class="tight-form-item last">
|
||||
<editor-checkbox text="SampleCount" model="target.statistics.SampleCount" change="statisticsOptionChanged()"></editor-checkbox>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<div class="clearfix"></div>
|
||||
</div>
|
||||
|
||||
<div class="tight-form">
|
||||
<ul class="tight-form-list" role="menu">
|
||||
<li class="tight-form-item">
|
||||
<i class="fa fa-eye invisible"></i>
|
||||
</li>
|
||||
|
||||
<li class="tight-form-item" style="width: 100px">
|
||||
Period
|
||||
</li>
|
||||
<li>
|
||||
<input type="text"
|
||||
class="input-mini tight-form-input"
|
||||
ng-model="target.period"
|
||||
data-placement="right"
|
||||
spellcheck='false'
|
||||
placeholder="period"
|
||||
data-min-length=0 data-items=100
|
||||
ng-model-onblur
|
||||
ng-change="refreshMetricData()"
|
||||
/>
|
||||
<a bs-tooltip="target.errors.period"
|
||||
style="color: rgb(229, 189, 28)"
|
||||
ng-show="target.errors.period">
|
||||
<i class="fa fa-warning"></i>
|
||||
</a>
|
||||
</li>
|
||||
<li class="tight-form-item">
|
||||
Region
|
||||
</li>
|
||||
<li>
|
||||
<input type="text"
|
||||
class="input-medium tight-form-input"
|
||||
ng-model="target.region"
|
||||
spellcheck='false'
|
||||
bs-typeahead="suggestRegion"
|
||||
placeholder="region"
|
||||
data-min-length=0 data-items=100
|
||||
ng-model-onblur
|
||||
ng-change="refreshMetricData()"
|
||||
>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<div class="clearfix"></div>
|
||||
</div>
|
||||
|
||||
<div class="tight-form">
|
||||
<ul class="tight-form-list" role="menu">
|
||||
<li class="tight-form-item">
|
||||
<i class="fa fa-eye invisible"></i>
|
||||
</li>
|
||||
|
||||
<li class="tight-form-item">
|
||||
Legend Format
|
||||
</li>
|
||||
<li>
|
||||
<input type="text"
|
||||
class="input-xxlarge tight-form-input"
|
||||
ng-model="target.legendFormat"
|
||||
spellcheck='false'
|
||||
placeholder="legend format"
|
||||
data-min-length=0 data-items=100
|
||||
ng-model-onblur
|
||||
ng-change="refreshMetricData()"
|
||||
>
|
||||
<tip>Syntax: {{Region}} {{Namespace}} {{MetricName}} {{Statistics}} {{Dimensions[N].Name}} {{Dimensions[N].Value}}</tip>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<div class="clearfix"></div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</div>
|
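As a concrete illustration of the Legend Format tip above (a sample string, not part of this patch), a format such as the following renders one label per selected statistic, using the values substituted by `transformMetricData` in datasource.js; the `Dimensions[0]` indexing assumes at least one dimension is configured:

```
{{Namespace}} {{MetricName}} {{Statistics}} {{Dimensions[0].Name}}={{Dimensions[0].Value}}
```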
16
public/app/plugins/datasource/cloudwatch/plugin.json
Normal file
@ -0,0 +1,16 @@
{
  "pluginType": "datasource",
  "name": "CloudWatch",

  "type": "cloudwatch",
  "serviceName": "CloudWatchDatasource",

  "module": "plugins/datasource/cloudwatch/datasource",

  "partials": {
    "config": "app/plugins/datasource/cloudwatch/partials/config.html",
    "query": "app/plugins/datasource/cloudwatch/partials/query.editor.html"
  },

  "metrics": true
}
142
public/app/plugins/datasource/cloudwatch/queryCtrl.js
Normal file
@ -0,0 +1,142 @@
|
||||
define([
|
||||
'angular',
|
||||
'lodash',
|
||||
],
|
||||
function (angular, _) {
|
||||
'use strict';
|
||||
|
||||
var module = angular.module('grafana.controllers');
|
||||
|
||||
module.controller('CloudWatchQueryCtrl', function($scope, templateSrv) {
|
||||
|
||||
$scope.init = function() {
|
||||
$scope.target.namespace = $scope.target.namespace || '';
|
||||
$scope.target.metricName = $scope.target.metricName || '';
|
||||
$scope.target.dimensions = $scope.target.dimensions || {};
|
||||
$scope.target.escapedDimensions = this.escapeDimensions($scope.target.dimensions);
|
||||
$scope.target.statistics = $scope.target.statistics || {};
|
||||
$scope.target.period = $scope.target.period || 60;
|
||||
$scope.target.region = $scope.target.region || $scope.datasource.getDefaultRegion();
|
||||
|
||||
$scope.target.errors = validateTarget();
|
||||
};
|
||||
|
||||
$scope.refreshMetricData = function() {
|
||||
$scope.target.errors = validateTarget($scope.target);
|
||||
|
||||
// this does not work so well
|
||||
if (!_.isEqual($scope.oldTarget, $scope.target) && _.isEmpty($scope.target.errors)) {
|
||||
$scope.oldTarget = angular.copy($scope.target);
|
||||
$scope.get_data();
|
||||
}
|
||||
};
|
||||
|
||||
$scope.moveMetricQuery = function(fromIndex, toIndex) {
|
||||
_.move($scope.panel.targets, fromIndex, toIndex);
|
||||
};
|
||||
|
||||
$scope.duplicate = function() {
|
||||
var clone = angular.copy($scope.target);
|
||||
$scope.panel.targets.push(clone);
|
||||
};
|
||||
|
||||
$scope.suggestRegion = function(query, callback) { // jshint unused:false
|
||||
return _.union($scope.datasource.performSuggestRegion(), $scope.datasource.getTemplateVariableNames());
|
||||
};
|
||||
|
||||
$scope.suggestNamespace = function(query, callback) { // jshint unused:false
|
||||
return _.union($scope.datasource.performSuggestNamespace(), $scope.datasource.getTemplateVariableNames());
|
||||
};
|
||||
|
||||
$scope.suggestMetrics = function(query, callback) { // jshint unused:false
|
||||
return _.union($scope.datasource.performSuggestMetrics($scope.target.namespace), $scope.datasource.getTemplateVariableNames());
|
||||
};
|
||||
|
||||
$scope.suggestDimensionKeys = function(query, callback) { // jshint unused:false
|
||||
return _.union($scope.datasource.performSuggestDimensionKeys($scope.target.namespace), $scope.datasource.getTemplateVariableNames());
|
||||
};
|
||||
|
||||
$scope.suggestDimensionValues = function(query, callback) {
|
||||
if (!$scope.target.namespace || !$scope.target.metricName) {
|
||||
return callback([]);
|
||||
}
|
||||
|
||||
$scope.datasource.performSuggestDimensionValues(
|
||||
$scope.target.region,
|
||||
$scope.target.namespace,
|
||||
$scope.target.metricName,
|
||||
$scope.target.dimensions
|
||||
)
|
||||
.then(function(result) {
|
||||
var suggestData = _.chain(result)
|
||||
.flatten(true)
|
||||
.filter(function(dimension) {
|
||||
return dimension.Name === templateSrv.replace($scope.target.currentDimensionKey);
|
||||
})
|
||||
.pluck('Value')
|
||||
.uniq()
|
||||
.value();
|
||||
|
||||
suggestData = _.union(suggestData, $scope.datasource.getTemplateVariableNames());
|
||||
callback(suggestData);
|
||||
}, function() {
|
||||
callback([]);
|
||||
});
|
||||
};
|
||||
|
||||
$scope.addDimension = function() {
|
||||
if (!$scope.addDimensionMode) {
|
||||
$scope.addDimensionMode = true;
|
||||
return;
|
||||
}
|
||||
|
||||
if (!$scope.target.dimensions) {
|
||||
$scope.target.dimensions = {};
|
||||
}
|
||||
|
||||
$scope.target.dimensions[$scope.target.currentDimensionKey] = $scope.target.currentDimensionValue;
|
||||
$scope.target.escapedDimensions = this.escapeDimensions($scope.target.dimensions);
|
||||
$scope.target.currentDimensionKey = '';
|
||||
$scope.target.currentDimensionValue = '';
|
||||
$scope.refreshMetricData();
|
||||
|
||||
$scope.addDimensionMode = false;
|
||||
};
|
||||
|
||||
$scope.removeDimension = function(key) {
|
||||
key = key.replace(/\\\$/g, '$');
|
||||
delete $scope.target.dimensions[key];
|
||||
$scope.target.escapedDimensions = this.escapeDimensions($scope.target.dimensions);
|
||||
$scope.refreshMetricData();
|
||||
};
|
||||
|
||||
$scope.escapeDimensions = function(d) {
|
||||
var result = {};
|
||||
_.chain(d)
|
||||
.keys(d)
|
||||
.each(function(k) {
|
||||
var v = d[k];
|
||||
result[k.replace(/\$/g, '\uFF04')] = v.replace(/\$/g, '\$');
|
||||
});
|
||||
|
||||
return result;
|
||||
};
|
||||
|
||||
$scope.statisticsOptionChanged = function() {
|
||||
$scope.refreshMetricData();
|
||||
};
|
||||
|
||||
// TODO: validate target
|
||||
function validateTarget() {
|
||||
var errs = {};
|
||||
|
||||
if ($scope.target.period < 60 || ($scope.target.period % 60) !== 0) {
|
||||
errs.period = 'Period must be at least 60 seconds and must be a multiple of 60';
|
||||
}
|
||||
|
||||
return errs;
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
});
|
154
public/test/specs/cloudwatch-datasource-specs.js
Normal file
@ -0,0 +1,154 @@
|
||||
define([
|
||||
'helpers',
|
||||
'plugins/datasource/cloudwatch/datasource',
|
||||
'aws-sdk',
|
||||
], function(helpers) {
|
||||
'use strict';
|
||||
|
||||
describe('CloudWatchDatasource', function() {
|
||||
var ctx = new helpers.ServiceTestContext();
|
||||
|
||||
beforeEach(module('grafana.services'));
|
||||
beforeEach(ctx.providePhase(['templateSrv']));
|
||||
beforeEach(ctx.createService('CloudWatchDatasource'));
|
||||
beforeEach(function() {
|
||||
ctx.ds = new ctx.service({
|
||||
jsonData: {
|
||||
defaultRegion: 'us-east-1',
|
||||
access: 'proxy'
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
describe('When performing CloudWatch query', function() {
|
||||
var requestParams;
|
||||
|
||||
var query = {
|
||||
range: { from: 'now-1h', to: 'now' },
|
||||
targets: [
|
||||
{
|
||||
region: 'us-east-1',
|
||||
namespace: 'AWS/EC2',
|
||||
metricName: 'CPUUtilization',
|
||||
dimensions: {
|
||||
InstanceId: 'i-12345678'
|
||||
},
|
||||
statistics: {
|
||||
Average: true
|
||||
},
|
||||
period: 300
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
var response = {
|
||||
Datapoints: [
|
||||
{
|
||||
Average: 1,
|
||||
Timestamp: 'Wed Dec 31 1969 16:00:00 GMT-0800 (PST)'
|
||||
}
|
||||
],
|
||||
Label: 'CPUUtilization'
|
||||
};
|
||||
|
||||
beforeEach(function() {
|
||||
ctx.ds.getCloudWatchClient = function() {
|
||||
return {
|
||||
getMetricStatistics: function(params, callback) {
|
||||
setTimeout(function() {
|
||||
requestParams = params;
|
||||
callback(null, response);
|
||||
}, 0);
|
||||
}
|
||||
};
|
||||
};
|
||||
});
|
||||
|
||||
it('should generate the correct query', function() {
|
||||
ctx.ds.query(query).then(function() {
|
||||
expect(requestParams.Namespace).to.be(query.targets[0].namespace);
|
||||
expect(requestParams.MetricName).to.be(query.targets[0].metricName);
|
||||
expect(requestParams.Dimensions[0].Name).to.be(Object.keys(query.targets[0].dimensions)[0]);
|
||||
expect(requestParams.Dimensions[0].Value).to.be(query.targets[0].dimensions[Object.keys(query.targets[0].dimensions)[0]]);
|
||||
expect(requestParams.Statistics).to.eql(Object.keys(query.targets[0].statistics));
|
||||
expect(requestParams.Period).to.be(query.targets[0].period);
|
||||
});
|
||||
});
|
||||
|
||||
it('should return series list', function() {
|
||||
ctx.ds.query(query).then(function(result) {
|
||||
var s = Object.keys(query.targets[0].statistics)[0];
|
||||
expect(result.data[0].target).to.be(response.Label + s);
|
||||
expect(result.data[0].datapoints[0][0]).to.be(response.Datapoints[0][s]);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('When performing CloudWatch metricFindQuery', function() {
|
||||
var requestParams;
|
||||
|
||||
var response = {
|
||||
Metrics: [
|
||||
{
|
||||
Namespace: 'AWS/EC2',
|
||||
MetricName: 'CPUUtilization',
|
||||
Dimensions: [
|
||||
{
|
||||
Name: 'InstanceId',
|
||||
Value: 'i-12345678'
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
beforeEach(function() {
|
||||
ctx.ds.getCloudWatchClient = function() {
|
||||
return {
|
||||
listMetrics: function(params, callback) {
|
||||
setTimeout(function() {
|
||||
requestParams = params;
|
||||
callback(null, response);
|
||||
}, 0);
|
||||
}
|
||||
};
|
||||
};
|
||||
});
|
||||
|
||||
it('should return suggest list for region()', function() {
|
||||
var query = 'region()';
|
||||
ctx.ds.metricFindQuery(query).then(function(result) {
|
||||
expect(result).to.contain('us-east-1');
|
||||
});
|
||||
});
|
||||
|
||||
it('should return suggest list for namespace()', function() {
|
||||
var query = 'namespace()';
|
||||
ctx.ds.metricFindQuery(query).then(function(result) {
|
||||
expect(result).to.contain('AWS/EC2');
|
||||
});
|
||||
});
|
||||
|
||||
it('should return suggest list for metrics()', function() {
|
||||
var query = 'metrics(AWS/EC2)';
|
||||
ctx.ds.metricFindQuery(query).then(function(result) {
|
||||
expect(result).to.contain('CPUUtilization');
|
||||
});
|
||||
});
|
||||
|
||||
it('should return suggest list for dimension_keys()', function() {
|
||||
var query = 'dimension_keys(AWS/EC2)';
|
||||
ctx.ds.metricFindQuery(query).then(function(result) {
|
||||
expect(result).to.contain('InstanceId');
|
||||
});
|
||||
});
|
||||
|
||||
it('should return suggest list for dimension_values()', function() {
|
||||
var query = 'dimension_values(us-east-1,AWS/EC2,CPUUtilization)';
|
||||
ctx.ds.metricFindQuery(query).then(function(result) {
|
||||
expect(result).to.contain('InstanceId');
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
@ -48,6 +48,7 @@ require.config({
|
||||
'jquery.flot.fillbelow': '../vendor/flot/jquery.flot.fillbelow',
|
||||
|
||||
modernizr: '../vendor/modernizr-2.6.1',
|
||||
'aws-sdk': '../vendor/aws-sdk/dist/aws-sdk.min',
|
||||
},
|
||||
|
||||
shim: {
|
||||
@ -150,6 +151,7 @@ require([
|
||||
'specs/unsavedChangesSrv-specs',
|
||||
'specs/valueSelectDropdown-specs',
|
||||
'specs/opentsdbDatasource-specs',
|
||||
'specs/cloudwatch-datasource-specs',
|
||||
];
|
||||
|
||||
var pluginSpecs = (config.plugins.specs || []).map(function (spec) {
|
||||
|
33
public/vendor/aws-sdk/.bower.json
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
{
|
||||
"name": "aws-sdk",
|
||||
"ignore": [
|
||||
"apis",
|
||||
"doc-src",
|
||||
"dist-tools",
|
||||
"eslint-rules",
|
||||
"features",
|
||||
"lib",
|
||||
"scripts",
|
||||
"tasks",
|
||||
"test",
|
||||
"Gemfile*",
|
||||
"configuration*",
|
||||
"Rakefile",
|
||||
"package.json",
|
||||
"testem.json",
|
||||
".*",
|
||||
"index.js"
|
||||
],
|
||||
"main": "dist/aws-sdk.js",
|
||||
"homepage": "https://github.com/aws/aws-sdk-js",
|
||||
"version": "2.1.42",
|
||||
"_release": "2.1.42",
|
||||
"_resolution": {
|
||||
"type": "version",
|
||||
"tag": "v2.1.42",
|
||||
"commit": "6ad65d3e09a3a4531c84d12b980e6fb9af136a0a"
|
||||
},
|
||||
"_source": "git://github.com/aws/aws-sdk-js.git",
|
||||
"_target": "~2.1.41",
|
||||
"_originalSource": "aws-sdk"
|
||||
}
|
84
public/vendor/aws-sdk/CONTRIBUTING.md
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
# Contributing to the AWS SDK for JavaScript
|
||||
|
||||
We work hard to provide a high-quality and useful SDK, and we greatly value
|
||||
feedback and contributions from our community. Whether it's a bug report,
|
||||
new feature, correction, or additional documentation, we welcome your issues
|
||||
and pull requests. Please read through this document before submitting any
|
||||
issues or pull requests to ensure we have all the necessary information to
|
||||
effectively respond to your bug report or contribution.
|
||||
|
||||
|
||||
## Filing Bug Reports
|
||||
|
||||
You can file bug reports against the SDK on the [GitHub issues][issues] page.
|
||||
|
||||
If you are filing a report for a bug or regression in the SDK, it's extremely
|
||||
helpful to provide as much information as possible when opening the original
|
||||
issue. This helps us reproduce and investigate the possible bug without having
|
||||
to wait for this extra information to be provided. Please read the following
|
||||
guidelines prior to filing a bug report.
|
||||
|
||||
1. Search through existing [issues][] to ensure that your specific issue has
|
||||
not yet been reported. If it is a common issue, it is likely there is
|
||||
already a bug report for your problem.
|
||||
|
||||
2. Ensure that you have tested the latest version of the SDK. Although you
|
||||
may have an issue against an older version of the SDK, we cannot provide
|
||||
bug fixes for old versions. It's also possible that the bug may have been
|
||||
fixed in the latest release.
|
||||
|
||||
3. Provide as much information about your environment, SDK version, and
|
||||
relevant dependencies as possible. For example, let us know what version
|
||||
of Node.js you are using, or if it's a browser issue, which browser you
|
||||
are using. If the issue only occurs with a specific dependency loaded,
|
||||
please provide that dependency name and version.
|
||||
|
||||
4. Provide a minimal test case that reproduces your issue or any error
|
||||
information you related to your problem. We can provide feedback much
|
||||
more quickly if we know what operations you are calling in the SDK. If
|
||||
you cannot provide a full test case, provide as much code as you can
|
||||
to help us diagnose the problem. Any relevant information should be provided
|
||||
as well, like whether this is a persistent issue, or if it only occurs
|
||||
some of the time.
|
||||
|
||||
|
||||
## Submitting Pull Requests
|
||||
|
||||
We are always happy to receive code and documentation contributions to the SDK.
|
||||
Please be aware of the following notes prior to opening a pull request:
|
||||
|
||||
1. The SDK is released under the [Apache license][license]. Any code you submit
|
||||
will be released under that license. For substantial contributions, we may
|
||||
ask you to sign a [Contributor License Agreement (CLA)][cla].
|
||||
|
||||
2. If you would like to implement support for a significant feature that is not
|
||||
yet available in the SDK, please talk to us beforehand to avoid any
|
||||
duplication of effort.
|
||||
|
||||
### Testing
|
||||
|
||||
To run the tests locally, install `phantomjs`. You can do so using [Homebrew][homebrew]:
|
||||
|
||||
```
|
||||
brew install phantomjs
|
||||
```
|
||||
|
||||
Then, to run all tests:
|
||||
|
||||
```
|
||||
npm test
|
||||
```
|
||||
|
||||
To run a particular test subset e.g. just the unit tests:
|
||||
|
||||
```
|
||||
npm run-script unit
|
||||
```
|
||||
|
||||
See the implementation of the `test` script in `package.json` for more options.
|
||||
|
||||
[issues]: https://github.com/aws/aws-sdk-js/issues
|
||||
[pr]: https://github.com/aws/aws-sdk-js/pulls
|
||||
[license]: http://aws.amazon.com/apache2.0/
|
||||
[cla]: http://en.wikipedia.org/wiki/Contributor_License_Agreement
|
||||
[homebrew]: http://brew.sh/
|
201
public/vendor/aws-sdk/LICENSE.txt
vendored
Normal file
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
5
public/vendor/aws-sdk/NOTICE.txt
vendored
Normal file
@@ -0,0 +1,5 @@
AWS SDK for JavaScript
Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.

This product includes software developed at
Amazon Web Services, Inc. (http://aws.amazon.com/).
124
public/vendor/aws-sdk/README.md
vendored
Normal file
@@ -0,0 +1,124 @@
# AWS SDK for JavaScript

[![NPM](https://nodei.co/npm/aws-sdk.svg?downloads=true&downloadRank=true&stars=true)](https://nodei.co/npm/aws-sdk/)

[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.svg)](https://gitter.im/aws/aws-sdk-js)

[![Version](https://badge.fury.io/js/aws-sdk.svg)](http://badge.fury.io/js/aws-sdk) [![Build Status](https://travis-ci.org/aws/aws-sdk-js.svg?branch=master)](https://travis-ci.org/aws/aws-sdk-js) [![Coverage Status](https://coveralls.io/repos/aws/aws-sdk-js/badge.svg?branch=master)](https://coveralls.io/r/aws/aws-sdk-js?branch=master)

The official AWS SDK for JavaScript, available for browsers and mobile devices,
or Node.js backends.

Release notes can be found at http://aws.amazon.com/releasenotes/SDK/JavaScript

<p class="note">
If you are upgrading from 1.x to 2.0 of the SDK, please see
the {file:UPGRADING.md} notes for information on how to migrate existing code
to work with the new major version.
</p>

## Installing

### In the Browser

To use the SDK in the browser, simply add the following script tag to your
HTML pages:

    <script src="https://sdk.amazonaws.com/js/aws-sdk-2.1.42.min.js"></script>

The AWS SDK is also compatible with [browserify](http://browserify.org).

### In Node.js

The preferred way to install the AWS SDK for Node.js is to use the
[npm](http://npmjs.org) package manager for Node.js. Simply type the following
into a terminal window:

```sh
npm install aws-sdk
```

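As a quick sanity check after installing, a minimal script along these lines can fetch a CloudWatch datapoint (the region, instance ID, and time window below are placeholder values, and credentials are assumed to come from the environment or shared config):

```javascript
// Minimal illustrative example: fetch average CPU utilization for one instance.
// All identifiers here (region, instance ID) are placeholders.
var AWS = require('aws-sdk');

var cloudwatch = new AWS.CloudWatch({ region: 'us-east-1' });

cloudwatch.getMetricStatistics({
  Namespace: 'AWS/EC2',
  MetricName: 'CPUUtilization',
  Dimensions: [{ Name: 'InstanceId', Value: 'i-12345678' }],
  StartTime: new Date(Date.now() - 3600 * 1000), // one hour ago
  EndTime: new Date(),
  Period: 300,                                    // 5-minute datapoints
  Statistics: ['Average']
}, function (err, data) {
  if (err) { return console.error(err); }
  console.log(data.Datapoints);
});
```
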
### Using Bower

You can also use [Bower](http://bower.io) to install the SDK by typing the
following into a terminal window:

```sh
bower install aws-sdk-js
```

## Usage and Getting Started

You can find a getting started guide at:

http://docs.aws.amazon.com/AWSJavaScriptSDK/guide/

## Supported Services

<p class="note"><strong>Note</strong>:
Although all services are supported in the browser version of the SDK,
not all of the services are available in the default hosted build (using the
script tag provided above). A list of services in the hosted build is provided
in the "<a href="http://docs.aws.amazon.com/AWSJavaScriptSDK/guide/browser-services.html">Working With Services</a>"
section of the browser SDK guide, including instructions on how to build a
custom version of the SDK with extra services.
</p>

The SDK currently supports the following services:

<table>
  <thead>
    <th>Service Name</th>
    <th>Class Name</th>
    <th>API Version</th>
  </thead>
  <tbody>
    <tr><td>Amazon CloudFront</td><td>AWS.CloudFront</td><td>2014-10-21</td></tr>
    <tr><td>Amazon CloudSearch</td><td>AWS.CloudSearch</td><td>2013-01-01</td></tr>
    <tr><td>Amazon CloudSearch Domain</td><td>AWS.CloudSearchDomain</td><td>2013-01-01</td></tr>
    <tr><td>Amazon CloudWatch</td><td>AWS.CloudWatch</td><td>2010-08-01</td></tr>
    <tr><td>Amazon CloudWatch Logs</td><td>AWS.CloudWatchLogs</td><td>2014-03-28</td></tr>
    <tr><td>Amazon Cognito Identity</td><td>AWS.CognitoIdentity</td><td>2014-06-30</td></tr>
    <tr><td>Amazon Cognito Sync</td><td>AWS.CognitoSync</td><td>2014-06-30</td></tr>
    <tr><td>Amazon DynamoDB</td><td>AWS.DynamoDB</td><td>2012-08-10</td></tr>
    <tr><td>Amazon Elastic Compute Cloud</td><td>AWS.EC2</td><td>2014-10-01</td></tr>
    <tr><td>Amazon Elastic MapReduce</td><td>AWS.EMR</td><td>2009-03-31</td></tr>
    <tr><td>Amazon Elastic Transcoder</td><td>AWS.ElasticTranscoder</td><td>2012-09-25</td></tr>
    <tr><td>Amazon ElastiCache</td><td>AWS.ElastiCache</td><td>2014-09-30</td></tr>
    <tr><td>Amazon Glacier</td><td>AWS.Glacier</td><td>2012-06-01</td></tr>
    <tr><td>Amazon Kinesis</td><td>AWS.Kinesis</td><td>2013-12-02</td></tr>
    <tr><td>Amazon Redshift</td><td>AWS.Redshift</td><td>2012-12-01</td></tr>
    <tr><td>Amazon Relational Database Service</td><td>AWS.RDS</td><td>2014-09-01</td></tr>
    <tr><td>Amazon Route 53</td><td>AWS.Route53</td><td>2013-04-01</td></tr>
    <tr><td>Amazon Route 53 Domains</td><td>AWS.Route53Domains</td><td>2014-05-15</td></tr>
    <tr><td>Amazon Simple Email Service</td><td>AWS.SES</td><td>2010-12-01</td></tr>
    <tr><td>Amazon Simple Notification Service</td><td>AWS.SNS</td><td>2010-03-31</td></tr>
    <tr><td>Amazon Simple Queue Service</td><td>AWS.SQS</td><td>2012-11-05</td></tr>
    <tr><td>Amazon Simple Storage Service</td><td>AWS.S3</td><td>2006-03-01</td></tr>
    <tr><td>Amazon Simple Workflow Service</td><td>AWS.SWF</td><td>2012-01-25</td></tr>
    <tr><td>Amazon SimpleDB</td><td>AWS.SimpleDB</td><td>2009-04-15</td></tr>
    <tr><td>Auto Scaling</td><td>AWS.AutoScaling</td><td>2011-01-01</td></tr>
    <tr><td>AWS CloudFormation</td><td>AWS.CloudFormation</td><td>2010-05-15</td></tr>
    <tr><td>AWS CloudTrail</td><td>AWS.CloudTrail</td><td>2013-11-01</td></tr>
    <tr><td>AWS CodeDeploy</td><td>AWS.CodeDeploy</td><td>2014-10-06</td></tr>
    <tr><td>AWS Config</td><td>AWS.ConfigService</td><td>2014-11-12</td></tr>
    <tr><td>AWS Data Pipeline</td><td>AWS.DataPipeline</td><td>2012-10-29</td></tr>
    <tr><td>AWS Direct Connect</td><td>AWS.DirectConnect</td><td>2012-10-25</td></tr>
    <tr><td>AWS Elastic Beanstalk</td><td>AWS.ElasticBeanstalk</td><td>2010-12-01</td></tr>
    <tr><td>AWS Identity and Access Management</td><td>AWS.IAM</td><td>2010-05-08</td></tr>
    <tr><td>AWS Import/Export</td><td>AWS.ImportExport</td><td>2010-06-01</td></tr>
    <tr><td>AWS Key Management Service</td><td>AWS.KMS</td><td>2014-11-01</td></tr>
    <tr><td>AWS Lambda</td><td>AWS.Lambda</td><td>2014-11-11</td></tr>
    <tr><td>AWS OpsWorks</td><td>AWS.OpsWorks</td><td>2013-02-18</td></tr>
    <tr><td>AWS Security Token Service</td><td>AWS.STS</td><td>2011-06-15</td></tr>
    <tr><td>AWS Storage Gateway</td><td>AWS.StorageGateway</td><td>2013-06-30</td></tr>
    <tr><td>AWS Support</td><td>AWS.Support</td><td>2013-04-15</td></tr>
    <tr><td>Elastic Load Balancing</td><td>AWS.ELB</td><td>2012-06-01</td></tr>
  </tbody>
</table>

## License

This SDK is distributed under the
[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0),
see LICENSE.txt and NOTICE.txt for more information.
157
public/vendor/aws-sdk/UPGRADING.md
vendored
Normal file
@@ -0,0 +1,157 @@
# @!title Upgrading Notes (1.x to 2.0)

# Upgrading Notes (1.x to 2.0)

This document captures breaking changes from 1.x versions to the first
stable 2.x (non-RC) release of the AWS SDK for JavaScript.

## 1. Automatic Conversion of Base64 and Timestamp Types on Input/Output

The SDK will now automatically encode and decode base64-encoded values, as well
as timestamp values, on the user's behalf. This change affects any operation
where Base64 or Timestamp values were sent by a request or returned in a
response, i.e., `AWS.DynamoDB` and `AWS.SQS`, which allow for Base64
encoded values.

User code that previously did base64 conversion no longer requires this.
Furthermore, values encoded as base64 are now returned as Buffer objects
from server responses (and can also be passed as Buffer input). For
example, the following 1.x `SQS.sendMessage()` parameters:

```javascript
var params = {
  MessageBody: 'Some Message',
  MessageAttributes: {
    attrName: {
      DataType: 'Binary',
      BinaryValue: new Buffer('example text').toString('base64')
    }
  }
};
```

Can be rewritten as:

```javascript
var params = {
  MessageBody: 'Some Message',
  MessageAttributes: {
    attrName: {
      DataType: 'Binary',
      BinaryValue: 'example text'
    }
  }
};
```

And the message will be read as:

```javascript
sqs.receiveMessage(params, function(err, data) {
  // buf is <Buffer 65 78 61 6d 70 6c 65 20 74 65 78 74>
  var buf = data.Messages[0].MessageAttributes.attrName.BinaryValue;
  console.log(buf.toString()); // "example text"
});
```

## 2. Moved response.data.RequestId to response.requestId

The SDK now stores request IDs for all services in a consistent place on the
response object, rather than inside the response.data property. This is to
improve consistency across services that expose request IDs in different ways.
Note that this is also a breaking change that renames the
`response.data.RequestId` property to `response.requestId`
(or `this.requestId` inside of a callback).

To migrate your code, change:

```javascript
svc.operation(params, function (err, data) {
  console.log('Request ID:', data.RequestId);
});
```

To the following:

```javascript
svc.operation(params, function () {
  console.log('Request ID:', this.requestId);
});
```

## 3. Exposed Wrapper Elements

If you use {AWS.ElastiCache}, {AWS.RDS}, or {AWS.Redshift}, you must now access
the response through the top-level output property in the response for certain
operations. This change corrects the SDK to behave according to the documented
output, which already listed this wrapper element.

Example:

`RDS.describeEngineDefaultParameters()` used to return:

```javascript
{ Parameters: [ ... ] }
```

This operation now returns:

```javascript
{ EngineDefaults: { Parameters: [ ... ] } }
```

The full list of affected operations for each service is:

**AWS.ElastiCache**: authorizeCacheSecurityGroupIngress, createCacheCluster,
createCacheParameterGroup, createCacheSecurityGroup, createCacheSubnetGroup,
createReplicationGroup, deleteCacheCluster, deleteReplicationGroup,
describeEngineDefaultParameters, modifyCacheCluster, modifyCacheSubnetGroup,
modifyReplicationGroup, purchaseReservedCacheNodesOffering, rebootCacheCluster,
revokeCacheSecurityGroupIngress

**AWS.RDS**: addSourceIdentifierToSubscription, authorizeDBSecurityGroupIngress,
copyDBSnapshot, createDBInstance, createDBInstanceReadReplica,
createDBParameterGroup, createDBSecurityGroup, createDBSnapshot,
createDBSubnetGroup, createEventSubscription, createOptionGroup,
deleteDBInstance, deleteDBSnapshot, deleteEventSubscription,
describeEngineDefaultParameters, modifyDBInstance, modifyDBSubnetGroup,
modifyEventSubscription, modifyOptionGroup, promoteReadReplica,
purchaseReservedDBInstancesOffering, rebootDBInstance,
removeSourceIdentifierFromSubscription, restoreDBInstanceFromDBSnapshot,
restoreDBInstanceToPointInTime, revokeDBSecurityGroupIngress

**AWS.Redshift**: authorizeClusterSecurityGroupIngress, authorizeSnapshotAccess,
copyClusterSnapshot, createCluster, createClusterParameterGroup,
createClusterSecurityGroup, createClusterSnapshot, createClusterSubnetGroup,
createEventSubscription, createHsmClientCertificate, createHsmConfiguration,
deleteCluster, deleteClusterSnapshot, describeDefaultClusterParameters,
disableSnapshotCopy, enableSnapshotCopy, modifyCluster,
modifyClusterSubnetGroup, modifyEventSubscription,
modifySnapshotCopyRetentionPeriod, purchaseReservedNodeOffering, rebootCluster,
restoreFromClusterSnapshot, revokeClusterSecurityGroupIngress,
revokeSnapshotAccess, rotateEncryptionKey

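For instance, a caller of `describeEngineDefaultParameters()` migrating to 2.x would now read the wrapper element first. A minimal sketch (the region and parameter group family here are arbitrary example values):

```javascript
// Sketch of migrating a describeEngineDefaultParameters() caller to 2.x.
// 'us-east-1' and 'mysql5.6' are example values only.
var AWS = require('aws-sdk');
var rds = new AWS.RDS({ region: 'us-east-1' });

rds.describeEngineDefaultParameters({ DBParameterGroupFamily: 'mysql5.6' }, function (err, data) {
  if (err) { return console.error(err); }
  // 1.x code read data.Parameters; 2.x responses expose the documented wrapper.
  console.log(data.EngineDefaults.Parameters.length, 'default parameters');
});
```
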
## 4. Dropped `.Client` and `.client` Properties

The `.Client` and `.client` properties have been removed from Service objects.
If you are using the `.Client` property on a Service class or a `.client`
property on an instance of the service, remove these properties from your code.

Upgrading example:

The following 1.x code:

```
var sts = new AWS.STS.Client();
// or
var sts = new AWS.STS();

sts.client.operation(...);
```

Should be changed to the following:

```
var sts = new AWS.STS();
sts.operation(...)
```
9
public/vendor/aws-sdk/bower.json
vendored
Normal file
@@ -0,0 +1,9 @@
{
  "name": "aws-sdk",
  "ignore": [
    "apis", "doc-src", "dist-tools", "eslint-rules", "features", "lib",
    "scripts", "tasks", "test", "Gemfile*", "configuration*",
    "Rakefile", "package.json", "testem.json", ".*", "index.js"
  ],
  "main": "dist/aws-sdk.js"
}
96
public/vendor/aws-sdk/dist/BUNDLE_LICENSE.txt
vendored
Normal file
@@ -0,0 +1,96 @@
The bundled package of the AWS SDK for JavaScript is available under the
Apache License, Version 2.0:

  Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.

  Licensed under the Apache License, Version 2.0 (the "License"). You
  may not use this file except in compliance with the License. A copy of
  the License is located at

    http://aws.amazon.com/apache2.0/

  or in the "license" file accompanying this file. This file is
  distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
  ANY KIND, either express or implied. See the License for the specific
  language governing permissions and limitations under the License.

This product bundles browserify, which is available under a
"3-clause BSD" license:

  Copyright Joyent, Inc. and other Node contributors.

  Permission is hereby granted, free of charge, to any person obtaining a
  copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to permit
  persons to whom the Software is furnished to do so, subject to the
  following conditions:

  The above copyright notice and this permission notice shall be included
  in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
  NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
  DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  USE OR OTHER DEALINGS IN THE SOFTWARE.

This product bundles crypto-browserify, which is available under
the MIT license:

  Copyright (c) 2013 Dominic Tarr

  Permission is hereby granted, free of charge,
  to any person obtaining a copy of this software and
  associated documentation files (the "Software"), to
  deal in the Software without restriction, including
  without limitation the rights to use, copy, modify,
  merge, publish, distribute, sublicense, and/or sell
  copies of the Software, and to permit persons to whom
  the Software is furnished to do so,
  subject to the following conditions:

  The above copyright notice and this permission notice
  shall be included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
  OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
  ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

This product bundles MD5, SHA-1, and SHA-256 hashing algorithm components,
which are available under a BSD license:

  Copyright (c) 1998 - 2009, Paul Johnston & Contributors
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  Redistributions of source code must retain the above copyright notice,
  this list of conditions and the following disclaimer. Redistributions
  in binary form must reproduce the above copyright notice, this list of
  conditions and the following disclaimer in the documentation and/or
  other materials provided with the distribution.

  Neither the name of the author nor the names of its contributors may
  be used to endorse or promote products derived from this software
  without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  THE POSSIBILITY OF SUCH DAMAGE.
10800
public/vendor/aws-sdk/dist/aws-sdk.js
vendored
Normal file
File diff suppressed because one or more lines are too long
17
public/vendor/aws-sdk/dist/aws-sdk.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long