Mirror of https://github.com/grafana/grafana.git, synced 2025-02-25 18:55:37 -06:00
replace dep with go modules (#16017)
- guide shamelessly stolen from prometheus/prometheus
- updates local interface of oauth exchange
- updates local impl of hclogger
- bump jaeger client version

closes #16088
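For context, a dep-to-Go-modules migration of this kind usually follows the sequence sketched below. This is an illustrative sketch only, not the exact commands used for this Grafana change; the module path and cleanup step are assumptions.

```bash
# Hypothetical migration steps (not taken from this commit page):

# initialize go.mod for the repository (module path assumed)
go mod init github.com/grafana/grafana

# resolve dependencies and record them in go.mod / go.sum
go mod tidy

# re-create the vendor/ tree from go.mod; this is what produces
# the large vendored diff listed below
go mod vendor

# once the build passes, the old dep files can be removed
rm Gopkg.toml Gopkg.lock
```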
277 vendor/cloud.google.com/go/civil/civil.go generated vendored Normal file
@@ -0,0 +1,277 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package civil implements types for civil time, a time-zone-independent
// representation of time that follows the rules of the proleptic
// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second
// minutes.
//
// Because they lack location information, these types do not represent unique
// moments or intervals of time. Use time.Time for that purpose.
package civil

import (
	"fmt"
	"time"
)

// A Date represents a date (year, month, day).
//
// This type does not include location information, and therefore does not
// describe a unique 24-hour timespan.
type Date struct {
	Year  int        // Year (e.g., 2014).
	Month time.Month // Month of the year (January = 1, ...).
	Day   int        // Day of the month, starting at 1.
}

// DateOf returns the Date in which a time occurs in that time's location.
func DateOf(t time.Time) Date {
	var d Date
	d.Year, d.Month, d.Day = t.Date()
	return d
}

// ParseDate parses a string in RFC3339 full-date format and returns the date value it represents.
func ParseDate(s string) (Date, error) {
	t, err := time.Parse("2006-01-02", s)
	if err != nil {
		return Date{}, err
	}
	return DateOf(t), nil
}

// String returns the date in RFC3339 full-date format.
func (d Date) String() string {
	return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
}

// IsValid reports whether the date is valid.
func (d Date) IsValid() bool {
	return DateOf(d.In(time.UTC)) == d
}

// In returns the time corresponding to time 00:00:00 of the date in the location.
//
// In is always consistent with time.Date, even when time.Date returns a time
// on a different day. For example, if loc is America/Indiana/Vincennes, then both
//     time.Date(1955, time.May, 1, 0, 0, 0, 0, loc)
// and
//     civil.Date{Year: 1955, Month: time.May, Day: 1}.In(loc)
// return 23:00:00 on April 30, 1955.
//
// In panics if loc is nil.
func (d Date) In(loc *time.Location) time.Time {
	return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc)
}

// AddDays returns the date that is n days in the future.
// n can also be negative to go into the past.
func (d Date) AddDays(n int) Date {
	return DateOf(d.In(time.UTC).AddDate(0, 0, n))
}

// DaysSince returns the signed number of days between the date and s, not including the end day.
// This is the inverse operation to AddDays.
func (d Date) DaysSince(s Date) (days int) {
	// We convert to Unix time so we do not have to worry about leap seconds:
	// Unix time increases by exactly 86400 seconds per day.
	deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix()
	return int(deltaUnix / 86400)
}

// Before reports whether d1 occurs before d2.
func (d1 Date) Before(d2 Date) bool {
	if d1.Year != d2.Year {
		return d1.Year < d2.Year
	}
	if d1.Month != d2.Month {
		return d1.Month < d2.Month
	}
	return d1.Day < d2.Day
}

// After reports whether d1 occurs after d2.
func (d1 Date) After(d2 Date) bool {
	return d2.Before(d1)
}

// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of d.String().
func (d Date) MarshalText() ([]byte, error) {
	return []byte(d.String()), nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The date is expected to be a string in a format accepted by ParseDate.
func (d *Date) UnmarshalText(data []byte) error {
	var err error
	*d, err = ParseDate(string(data))
	return err
}

// A Time represents a time with nanosecond precision.
//
// This type does not include location information, and therefore does not
// describe a unique moment in time.
//
// This type exists to represent the TIME type in storage-based APIs like BigQuery.
// Most operations on Times are unlikely to be meaningful. Prefer the DateTime type.
type Time struct {
	Hour       int // The hour of the day in 24-hour format; range [0-23]
	Minute     int // The minute of the hour; range [0-59]
	Second     int // The second of the minute; range [0-59]
	Nanosecond int // The nanosecond of the second; range [0-999999999]
}

// TimeOf returns the Time representing the time of day in which a time occurs
// in that time's location. It ignores the date.
func TimeOf(t time.Time) Time {
	var tm Time
	tm.Hour, tm.Minute, tm.Second = t.Clock()
	tm.Nanosecond = t.Nanosecond()
	return tm
}

// ParseTime parses a string and returns the time value it represents.
// ParseTime accepts an extended form of the RFC3339 partial-time format. After
// the HH:MM:SS part of the string, an optional fractional part may appear,
// consisting of a decimal point followed by one to nine decimal digits.
// (RFC3339 admits only one digit after the decimal point).
func ParseTime(s string) (Time, error) {
	t, err := time.Parse("15:04:05.999999999", s)
	if err != nil {
		return Time{}, err
	}
	return TimeOf(t), nil
}

// String returns the date in the format described in ParseTime. If Nanoseconds
// is zero, no fractional part will be generated. Otherwise, the result will
// end with a fractional part consisting of a decimal point and nine digits.
func (t Time) String() string {
	s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second)
	if t.Nanosecond == 0 {
		return s
	}
	return s + fmt.Sprintf(".%09d", t.Nanosecond)
}

// IsValid reports whether the time is valid.
func (t Time) IsValid() bool {
	// Construct a non-zero time.
	tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC)
	return TimeOf(tm) == t
}

// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of t.String().
func (t Time) MarshalText() ([]byte, error) {
	return []byte(t.String()), nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The time is expected to be a string in a format accepted by ParseTime.
func (t *Time) UnmarshalText(data []byte) error {
	var err error
	*t, err = ParseTime(string(data))
	return err
}

// A DateTime represents a date and time.
//
// This type does not include location information, and therefore does not
// describe a unique moment in time.
type DateTime struct {
	Date Date
	Time Time
}

// Note: We deliberately do not embed Date into DateTime, to avoid promoting AddDays and Sub.

// DateTimeOf returns the DateTime in which a time occurs in that time's location.
func DateTimeOf(t time.Time) DateTime {
	return DateTime{
		Date: DateOf(t),
		Time: TimeOf(t),
	}
}

// ParseDateTime parses a string and returns the DateTime it represents.
// ParseDateTime accepts a variant of the RFC3339 date-time format that omits
// the time offset but includes an optional fractional time, as described in
// ParseTime. Informally, the accepted format is
//     YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF]
// where the 'T' may be a lower-case 't'.
func ParseDateTime(s string) (DateTime, error) {
	t, err := time.Parse("2006-01-02T15:04:05.999999999", s)
	if err != nil {
		t, err = time.Parse("2006-01-02t15:04:05.999999999", s)
		if err != nil {
			return DateTime{}, err
		}
	}
	return DateTimeOf(t), nil
}

// String returns the date in the format described in ParseDate.
func (dt DateTime) String() string {
	return dt.Date.String() + "T" + dt.Time.String()
}

// IsValid reports whether the datetime is valid.
func (dt DateTime) IsValid() bool {
	return dt.Date.IsValid() && dt.Time.IsValid()
}

// In returns the time corresponding to the DateTime in the given location.
//
// If the time is missing or ambigous at the location, In returns the same
// result as time.Date. For example, if loc is America/Indiana/Vincennes, then
// both
//     time.Date(1955, time.May, 1, 0, 30, 0, 0, loc)
// and
//     civil.DateTime{
//         civil.Date{Year: 1955, Month: time.May, Day: 1}},
//         civil.Time{Minute: 30}}.In(loc)
// return 23:30:00 on April 30, 1955.
//
// In panics if loc is nil.
func (dt DateTime) In(loc *time.Location) time.Time {
	return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
}

// Before reports whether dt1 occurs before dt2.
func (dt1 DateTime) Before(dt2 DateTime) bool {
	return dt1.In(time.UTC).Before(dt2.In(time.UTC))
}

// After reports whether dt1 occurs after dt2.
func (dt1 DateTime) After(dt2 DateTime) bool {
	return dt2.Before(dt1)
}

// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of dt.String().
func (dt DateTime) MarshalText() ([]byte, error) {
	return []byte(dt.String()), nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The datetime is expected to be a string in a format accepted by ParseDateTime
func (dt *DateTime) UnmarshalText(data []byte) error {
	var err error
	*dt, err = ParseDateTime(string(data))
	return err
}

460 vendor/cloud.google.com/go/compute/metadata/metadata.go generated vendored
@@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -20,6 +20,7 @@
package metadata // import "cloud.google.com/go/compute/metadata"

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
@@ -31,9 +32,6 @@ import (
	"strings"
	"sync"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

const (
@@ -64,7 +62,7 @@ var (
)

var (
	metaClient = &http.Client{
	defaultClient = &Client{hc: &http.Client{
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout: 2 * time.Second,
@@ -72,15 +70,15 @@ var (
			}).Dial,
			ResponseHeaderTimeout: 2 * time.Second,
		},
	}
	subscribeClient = &http.Client{
	}}
	subscribeClient = &Client{hc: &http.Client{
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout:   2 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
		},
	}
	}}
)

// NotDefinedError is returned when requested metadata is not defined.
@@ -95,74 +93,16 @@ func (suffix NotDefinedError) Error() string {
	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}

// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func Get(suffix string) (string, error) {
	val, _, err := getETag(metaClient, suffix)
	return val, err
}

// getETag returns a value from the metadata service as well as the associated
// ETag using the provided client. This func is otherwise equivalent to Get.
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
	// Using a fixed IP makes it very difficult to spoof the metadata service in
	// a container, which is an important use-case for local testing of cloud
	// deployments. To enable spoofing of the metadata service, the environment
	// variable GCE_METADATA_HOST is first inspected to decide where metadata
	// requests shall go.
	host := os.Getenv(metadataHostEnv)
	if host == "" {
		// Using 169.254.169.254 instead of "metadata" here because Go
		// binaries built with the "netgo" tag and without cgo won't
		// know the search suffix for "metadata" is
		// ".google.internal", and this IP address is documented as
		// being stable anyway.
		host = metadataIP
	}
	url := "http://" + host + "/computeMetadata/v1/" + suffix
	req, _ := http.NewRequest("GET", url, nil)
	req.Header.Set("Metadata-Flavor", "Google")
	req.Header.Set("User-Agent", userAgent)
	res, err := client.Do(req)
	if err != nil {
		return "", "", err
	}
	defer res.Body.Close()
	if res.StatusCode == http.StatusNotFound {
		return "", "", NotDefinedError(suffix)
	}
	if res.StatusCode != 200 {
		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
	}
	all, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", "", err
	}
	return string(all), res.Header.Get("Etag"), nil
}

func getTrimmed(suffix string) (s string, err error) {
	s, err = Get(suffix)
	s = strings.TrimSpace(s)
	return
}

func (c *cachedValue) get() (v string, err error) {
func (c *cachedValue) get(cl *Client) (v string, err error) {
	defer c.mu.Unlock()
	c.mu.Lock()
	if c.v != "" {
		return c.v, nil
	}
	if c.trim {
		v, err = getTrimmed(c.k)
		v, err = cl.getTrimmed(c.k)
	} else {
		v, err = Get(c.k)
		v, err = cl.Get(c.k)
	}
	if err == nil {
		c.v = v
@@ -201,7 +141,7 @@ func testOnGCE() bool {
	go func() {
		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
		req.Header.Set("User-Agent", userAgent)
		res, err := ctxhttp.Do(ctx, metaClient, req)
		res, err := defaultClient.hc.Do(req.WithContext(ctx))
		if err != nil {
			resc <- false
			return
@@ -266,6 +206,255 @@ func systemInfoSuggestsGCE() bool {
	return name == "Google" || name == "Google Compute Engine"
}

// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
// ResponseHeaderTimeout).
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
	return subscribeClient.Subscribe(suffix, fn)
}

// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }

// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }

// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }

// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) { return defaultClient.InternalIP() }

// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }

// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }

// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }

// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) { return defaultClient.InstanceID() }

// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) { return defaultClient.InstanceName() }

// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) { return defaultClient.Zone() }

// InstanceAttributes calls Client.InstanceAttributes on the default client.
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }

// ProjectAttributes calls Client.ProjectAttributes on the default client.
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }

// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
	return defaultClient.InstanceAttributeValue(attr)
}

// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
	return defaultClient.ProjectAttributeValue(attr)
}

// Scopes calls Client.Scopes on the default client.
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }

func strsContains(ss []string, s string) bool {
	for _, v := range ss {
		if v == s {
			return true
		}
	}
	return false
}

// A Client provides metadata.
type Client struct {
	hc *http.Client
}

// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
// will use the given http.Client instead of the default client.
func NewClient(c *http.Client) *Client {
	return &Client{hc: c}
}

// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
	// Using a fixed IP makes it very difficult to spoof the metadata service in
	// a container, which is an important use-case for local testing of cloud
	// deployments. To enable spoofing of the metadata service, the environment
	// variable GCE_METADATA_HOST is first inspected to decide where metadata
	// requests shall go.
	host := os.Getenv(metadataHostEnv)
	if host == "" {
		// Using 169.254.169.254 instead of "metadata" here because Go
		// binaries built with the "netgo" tag and without cgo won't
		// know the search suffix for "metadata" is
		// ".google.internal", and this IP address is documented as
		// being stable anyway.
		host = metadataIP
	}
	url := "http://" + host + "/computeMetadata/v1/" + suffix
	req, _ := http.NewRequest("GET", url, nil)
	req.Header.Set("Metadata-Flavor", "Google")
	req.Header.Set("User-Agent", userAgent)
	res, err := c.hc.Do(req)
	if err != nil {
		return "", "", err
	}
	defer res.Body.Close()
	if res.StatusCode == http.StatusNotFound {
		return "", "", NotDefinedError(suffix)
	}
	if res.StatusCode != 200 {
		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
	}
	all, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", "", err
	}
	return string(all), res.Header.Get("Etag"), nil
}

// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) Get(suffix string) (string, error) {
	val, _, err := c.getETag(suffix)
	return val, err
}

func (c *Client) getTrimmed(suffix string) (s string, err error) {
	s, err = c.Get(suffix)
	s = strings.TrimSpace(s)
	return
}

func (c *Client) lines(suffix string) ([]string, error) {
	j, err := c.Get(suffix)
	if err != nil {
		return nil, err
	}
	s := strings.Split(strings.TrimSpace(j), "\n")
	for i := range s {
		s[i] = strings.TrimSpace(s[i])
	}
	return s, nil
}

// ProjectID returns the current instance's project ID string.
func (c *Client) ProjectID() (string, error) { return projID.get(c) }

// NumericProjectID returns the current instance's numeric project ID.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }

// InstanceID returns the current VM's numeric instance ID.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }

// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
	return c.getTrimmed("instance/network-interfaces/0/ip")
}

// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
	return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}

// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
	return c.getTrimmed("instance/hostname")
}

// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
	var s []string
	j, err := c.Get("instance/tags")
	if err != nil {
		return nil, err
	}
	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
		return nil, err
	}
	return s, nil
}

// InstanceName returns the current VM's instance ID string.
func (c *Client) InstanceName() (string, error) {
	host, err := c.Hostname()
	if err != nil {
		return "", err
	}
	return strings.Split(host, ".")[0], nil
}

// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
	zone, err := c.getTrimmed("instance/zone")
	// zone is of the form "projects/<projNum>/zones/<zoneName>".
	if err != nil {
		return "", err
	}
	return zone[strings.LastIndex(zone, "/")+1:], nil
}

// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }

// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }

// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
	return c.Get("instance/attributes/" + attr)
}

// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
	return c.Get("project/attributes/" + attr)
}

// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
	if serviceAccount == "" {
		serviceAccount = "default"
	}
	return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
}

// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
@@ -275,11 +464,11 @@ func systemInfoSuggestsGCE() bool {
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
	const failedSubscribeSleep = time.Second * 5

	// First check to see if the metadata value exists at all.
	val, lastETag, err := getETag(subscribeClient, suffix)
	val, lastETag, err := c.getETag(suffix)
	if err != nil {
		return err
	}
@@ -295,7 +484,7 @@ func Subscribe(suffix string, fn func(v string, ok bool) error) error {
		suffix += "?wait_for_change=true&last_etag="
	}
	for {
		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
		val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
		if err != nil {
			if _, deleted := err.(NotDefinedError); !deleted {
				time.Sleep(failedSubscribeSleep)
@@ -310,128 +499,3 @@ func Subscribe(suffix string, fn func(v string, ok bool) error) error {
		}
	}
}

// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return projID.get() }

// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return projNum.get() }

// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) {
	return getTrimmed("instance/network-interfaces/0/ip")
}

// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) {
	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}

// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) {
	return getTrimmed("instance/hostname")
}

// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) {
	var s []string
	j, err := Get("instance/tags")
	if err != nil {
		return nil, err
	}
	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
		return nil, err
	}
	return s, nil
}

// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) {
	return instID.get()
}

// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) {
	host, err := Hostname()
	if err != nil {
		return "", err
	}
	return strings.Split(host, ".")[0], nil
}

// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) {
	zone, err := getTrimmed("instance/zone")
	// zone is of the form "projects/<projNum>/zones/<zoneName>".
	if err != nil {
		return "", err
	}
	return zone[strings.LastIndex(zone, "/")+1:], nil
}

// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }

// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }

func lines(suffix string) ([]string, error) {
	j, err := Get(suffix)
	if err != nil {
		return nil, err
	}
	s := strings.Split(strings.TrimSpace(j), "\n")
	for i := range s {
		s[i] = strings.TrimSpace(s[i])
	}
	return s, nil
}

// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func InstanceAttributeValue(attr string) (string, error) {
	return Get("instance/attributes/" + attr)
}

// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func ProjectAttributeValue(attr string) (string, error) {
	return Get("project/attributes/" + attr)
}

// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func Scopes(serviceAccount string) ([]string, error) {
	if serviceAccount == "" {
		serviceAccount = "default"
	}
	return lines("instance/service-accounts/" + serviceAccount + "/scopes")
}

func strsContains(ss []string, s string) bool {
	for _, v := range ss {
		if v == s {
			return true
		}
	}
	return false
}

5 vendor/github.com/BurntSushi/toml/.gitignore generated vendored Normal file
@@ -0,0 +1,5 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test

3 vendor/github.com/BurntSushi/toml/COMPATIBLE generated vendored Normal file
@@ -0,0 +1,3 @@
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)

27 vendor/github.com/BurntSushi/toml/COPYING generated vendored
@@ -1,14 +1,21 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
The MIT License (MIT)

Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Copyright (c) 2013 TOML authors

Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

0. You just DO WHAT THE FUCK YOU WANT TO.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

19 vendor/github.com/BurntSushi/toml/Makefile generated vendored Normal file
@@ -0,0 +1,19 @@
install:
	go install ./...

test: install
	go test -v
	toml-test toml-test-decoder
	toml-test -encoder toml-test-encoder

fmt:
	gofmt -w *.go */*.go
	colcheck *.go */*.go

tags:
	find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS

push:
	git push origin master
	git push github master

218 vendor/github.com/BurntSushi/toml/README.md generated vendored Normal file
@@ -0,0 +1,218 @@
## TOML parser and encoder for Go with reflection

TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)

Spec: https://github.com/toml-lang/toml

Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)

Documentation: https://godoc.org/github.com/BurntSushi/toml

Installation:

```bash
go get github.com/BurntSushi/toml
```

Try the toml validator:

```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```

[](https://travis-ci.org/BurntSushi/toml) [](https://godoc.org/github.com/BurntSushi/toml)

### Testing

This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.

### Examples

This package works similarly to how the Go standard library handles `XML`
and `JSON`. Namely, data is loaded into Go values via reflection.

For the simplest example, consider some TOML file as just a list of keys
and values:

```toml
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```

Which could be defined in Go as:

```go
type Config struct {
	Age int
	Cats []string
	Pi float64
	Perfection []int
	DOB time.Time // requires `import time`
}
```

And then decoded with:

```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
	// handle error
}
```

You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:

```toml
some_key_NAME = "wat"
```

```go
type TOML struct {
	ObscureKey string `toml:"some_key_NAME"`
}
```

### Using the `encoding.TextUnmarshaler` interface

Here's an example that automatically parses duration strings into
`time.Duration` values:

```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"

[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```

Which can be decoded with:

```go
type song struct {
	Name     string
	Duration duration
}
type songs struct {
	Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
	log.Fatal(err)
}

for _, s := range favorites.Song {
	fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```

And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:

```go
type duration struct {
	time.Duration
}

func (d *duration) UnmarshalText(text []byte) error {
	var err error
	d.Duration, err = time.ParseDuration(string(text))
	return err
}
```

### More complex usage

Here's an example of how to load the example from the official spec page:

```toml
# This is a TOML document. Boom.

title = "TOML Example"

[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?

[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true

[servers]

# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"

[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"

[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it

# Line breaks are OK when inside arrays
hosts = [
  "alpha",
  "omega"
]
```

And the corresponding Go types are:

```go
type tomlConfig struct {
	Title   string
	Owner   ownerInfo
	DB      database `toml:"database"`
	Servers map[string]server
	Clients clients
}

type ownerInfo struct {
	Name string
	Org  string `toml:"organization"`
	Bio  string
	DOB  time.Time
}

type database struct {
	Server  string
	Ports   []int
	ConnMax int `toml:"connection_max"`
	Enabled bool
}

type server struct {
	IP string
	DC string
}

type clients struct {
	Data  [][]interface{}
	Hosts []string
}
```

Note that a case insensitive match will be tried if an exact match can't be
found.

A working example of the above can be found in `_examples/example.{go,toml}`.

14 vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING generated vendored
@@ -1,14 +0,0 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004

Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>

Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.

DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

0. You just DO WHAT THE FUCK YOU WANT TO.

14 vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING generated vendored
@@ -1,14 +0,0 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004

Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>

Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.

DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

0. You just DO WHAT THE FUCK YOU WANT TO.

14 vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING generated vendored
@@ -1,14 +0,0 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004

Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>

Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.

DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

0. You just DO WHAT THE FUCK YOU WANT TO.

2 vendor/github.com/BurntSushi/toml/lex.go generated vendored
@@ -775,7 +775,7 @@ func lexDatetime(lx *lexer) stateFn {
		return lexDatetime
	}
	switch r {
	case '-', 'T', ':', '.', 'Z':
	case '-', 'T', ':', '.', 'Z', '+':
		return lexDatetime
	}

1 vendor/github.com/BurntSushi/toml/session.vim generated vendored Normal file
@@ -0,0 +1 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1

24 vendor/github.com/Unknwon/com/.gitignore generated vendored Normal file
@@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test
.idea

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.iml

20 vendor/github.com/Unknwon/com/README.md generated vendored Normal file
@@ -0,0 +1,20 @@
Common Functions
================

[](https://travis-ci.org/Unknwon/com) [](http://gowalker.org/github.com/Unknwon/com)

This is an open source project for commonly used functions for the Go programming language.

This package need >= **go 1.2**

Code Convention: based on [Go Code Convention](https://github.com/Unknwon/go-code-convention).

## Contribute

Your contribute is welcome, but you have to check following steps after you added some functions and commit them:

1. Make sure you wrote user-friendly comments for **all functions** .
2. Make sure you wrote test cases with any possible condition for **all functions** in file `*_test.go`.
3. Make sure you wrote benchmarks for **all functions** in file `*_test.go`.
4. Make sure you wrote useful examples for **all functions** in file `example_test.go`.
5. Make sure you ran `go test` and got **PASS** .

53 vendor/github.com/Unknwon/com/dir.go generated vendored
@@ -32,7 +32,7 @@ func IsDir(dir string) bool {
	return f.IsDir()
}

func statDir(dirPath, recPath string, includeDir, isDirOnly bool) ([]string, error) {
func statDir(dirPath, recPath string, includeDir, isDirOnly, followSymlinks bool) ([]string, error) {
	dir, err := os.Open(dirPath)
	if err != nil {
		return nil, err
@@ -56,13 +56,29 @@ func statDir(dirPath, recPath string, includeDir, isDirOnly bool) ([]string, err
			if includeDir {
				statList = append(statList, relPath+"/")
			}
			s, err := statDir(curPath, relPath, includeDir, isDirOnly)
			s, err := statDir(curPath, relPath, includeDir, isDirOnly, followSymlinks)
			if err != nil {
				return nil, err
			}
			statList = append(statList, s...)
		} else if !isDirOnly {
			statList = append(statList, relPath)
		} else if followSymlinks && fi.Mode()&os.ModeSymlink != 0 {
			link, err := os.Readlink(curPath)
			if err != nil {
				return nil, err
			}

			if IsDir(link) {
				if includeDir {
					statList = append(statList, relPath+"/")
				}
				s, err := statDir(curPath, relPath, includeDir, isDirOnly, followSymlinks)
				if err != nil {
					return nil, err
				}
				statList = append(statList, s...)
			}
		}
	}
	return statList, nil
@@ -84,7 +100,26 @@ func StatDir(rootPath string, includeDir ...bool) ([]string, error) {
	if len(includeDir) >= 1 {
		isIncludeDir = includeDir[0]
	}
	return statDir(rootPath, "", isIncludeDir, false)
	return statDir(rootPath, "", isIncludeDir, false, false)
}

// LstatDir gathers information of given directory by depth-first.
// It returns slice of file list, follows symbolic links and includes subdirectories if enabled;
// it returns error and nil slice when error occurs in underlying functions,
// or given path is not a directory or does not exist.
//
// Slice does not include given path itself.
// If subdirectories is enabled, they will have suffix '/'.
func LstatDir(rootPath string, includeDir ...bool) ([]string, error) {
	if !IsDir(rootPath) {
		return nil, errors.New("not a directory or does not exist: " + rootPath)
	}

	isIncludeDir := false
	if len(includeDir) >= 1 {
		isIncludeDir = includeDir[0]
	}
	return statDir(rootPath, "", isIncludeDir, false, true)
}

// GetAllSubDirs returns all subdirectories of given root path.
@@ -93,7 +128,17 @@ func GetAllSubDirs(rootPath string) ([]string, error) {
	if !IsDir(rootPath) {
		return nil, errors.New("not a directory or does not exist: " + rootPath)
	}
	return statDir(rootPath, "", true, true)
	return statDir(rootPath, "", true, true, false)
}

// LgetAllSubDirs returns all subdirectories of given root path, including
// following symbolic links, if any.
// Slice does not include given path itself.
func LgetAllSubDirs(rootPath string) ([]string, error) {
	if !IsDir(rootPath) {
		return nil, errors.New("not a directory or does not exist: " + rootPath)
	}
	return statDir(rootPath, "", true, true, true)
}

// GetFileListBySuffix returns an ordered list of file paths.

8 vendor/github.com/Unknwon/com/go.mod generated vendored Normal file
@@ -0,0 +1,8 @@
module github.com/Unknwon/com

require (
	github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
	github.com/jtolds/gls v4.2.1+incompatible // indirect
	github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 // indirect
	github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c
)

8 vendor/github.com/Unknwon/com/go.sum generated vendored Normal file
@@ -0,0 +1,8 @@
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c h1:Ho+uVpkel/udgjbwB5Lktg9BtvJSh2DT0Hi6LPSyI2w=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=

4 vendor/github.com/Unknwon/com/math.go generated vendored
@@ -14,12 +14,12 @@

package com

// PowInt is int type of math.Pow function.
// PowInt is int type of math.Pow function.
func PowInt(x int, y int) int {
	if y <= 0 {
		return 1
	} else {
		if y % 2 == 0 {
		if y%2 == 0 {
			sqrt := PowInt(x, y/2)
			return sqrt * sqrt
		} else {

18 vendor/github.com/VividCortex/mysqlerr/README.md generated vendored Normal file
@@ -0,0 +1,18 @@
mysqlerr
========

MySQL Server Error Constants

Covers up to MySQL 5.7.13. Notice that some constants were renamed in later
versions of MySQL, because they became obsolete. (In case you wonder: the names
here match the symbols MySQL uses in source code.) Obsolete names haven't been
changed in this package to avoid breaking code, but you should no longer be
using them in applications. Here's the full list of changes since this package's
first version:

| Code | This package | MySQL (as of 5.7.8) |
| ---: | ------------ | ------------------- |
| 1150 | ER_DELAYED_CANT_CHANGE_LOCK | ER_UNUSED1 |
| 1151 | ER_TOO_MANY_DELAYED_THREADS | ER_UNUSED2 |
| 1165 | ER_DELAYED_INSERT_TABLE_LOCKED | ER_UNUSED3 |
| 1349 | ER_VIEW_SELECT_DERIVED | ER_VIEW_SELECT_DERIVED_UNUSED |

2 vendor/github.com/aws/aws-sdk-go/NOTICE.txt generated vendored
@@ -1,3 +1,3 @@
AWS SDK for Go
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.

6 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go generated vendored
@@ -118,6 +118,12 @@ var LogHTTPResponseHandler = request.NamedHandler{
func logResponse(r *request.Request) {
	lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}

	if r.HTTPResponse == nil {
		lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
			r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil"))
		return
	}

	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
	if logBody {
		r.HTTPResponse.Body = &teeReaderCloser{

@@ -1,8 +1,8 @@
// +build !go1.9

package aws

import (
	"time"
)
import "time"

// Context is an copy of the Go v1.7 stdlib's context.Context interface.
// It is represented as a SDK interface to enable you to use the "WithContext"
@@ -35,37 +35,3 @@ type Context interface {
	// functions.
	Value(key interface{}) interface{}
}

// BackgroundContext returns a context that will never be canceled, has no
// values, and no deadline. This context is used by the SDK to provide
// backwards compatibility with non-context API operations and functionality.
//
// Go 1.6 and before:
// This context function is equivalent to context.Background in the Go stdlib.
//
// Go 1.7 and later:
// The context returned will be the value returned by context.Background()
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
	return backgroundCtx
}

// SleepWithContext will wait for the timer duration to expire, or the context
// is canceled. Which ever happens first. If the context is canceled the Context's
// error will be returned.
//
// Expects Context to always return a non-nil error if the Done channel is closed.
func SleepWithContext(ctx Context, dur time.Duration) error {
	t := time.NewTimer(dur)
	defer t.Stop()

	select {
	case <-t.C:
		break
	case <-ctx.Done():
		return ctx.Err()
	}

	return nil
}

9 vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go generated vendored
@@ -1,9 +0,0 @@
// +build go1.7

package aws

import "context"

var (
	backgroundCtx = context.Background()
)

11 vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go generated vendored Normal file
@@ -0,0 +1,11 @@
// +build go1.9

package aws

import "context"

// Context is an alias of the Go stdlib's context.Context interface.
// It can be used within the SDK's API operation "WithContext" methods.
//
// See https://golang.org/pkg/context on how to use contexts.
type Context = context.Context
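
Note on the alias above: on Go 1.9+ aws.Context is literally context.Context, so a standard-library context can be handed straight to any SDK signature that takes aws.Context. A minimal sketch (not part of the vendored diff; the doWork helper is invented for illustration):

package main

import (
    "context"
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
)

// doWork takes an aws.Context; thanks to the type alias, callers on Go 1.9+
// can pass any stdlib context.Context here without conversion.
func doWork(ctx aws.Context) error {
    select {
    case <-ctx.Done():
        return ctx.Err()
    default:
        return nil
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    fmt.Println(doWork(ctx)) // <nil>
}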
@@ -39,3 +39,18 @@ func (e *emptyCtx) String() string {
var (
    backgroundCtx = new(emptyCtx)
)

// BackgroundContext returns a context that will never be canceled, has no
// values, and no deadline. This context is used by the SDK to provide
// backwards compatibility with non-context API operations and functionality.
//
// Go 1.6 and before:
// This context function is equivalent to context.Background in the Go stdlib.
//
// Go 1.7 and later:
// The context returned will be the value returned by context.Background()
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
    return backgroundCtx
}
20 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go generated vendored Normal file
@@ -0,0 +1,20 @@
// +build go1.7

package aws

import "context"

// BackgroundContext returns a context that will never be canceled, has no
// values, and no deadline. This context is used by the SDK to provide
// backwards compatibility with non-context API operations and functionality.
//
// Go 1.6 and before:
// This context function is equivalent to context.Background in the Go stdlib.
//
// Go 1.7 and later:
// The context returned will be the value returned by context.Background()
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
    return context.Background()
}
24 vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go generated vendored Normal file
@@ -0,0 +1,24 @@
package aws

import (
    "time"
)

// SleepWithContext will wait for the timer duration to expire, or the context
// is canceled. Which ever happens first. If the context is canceled the Context's
// error will be returned.
//
// Expects Context to always return a non-nil error if the Done channel is closed.
func SleepWithContext(ctx Context, dur time.Duration) error {
    t := time.NewTimer(dur)
    defer t.Stop()

    select {
    case <-t.C:
        break
    case <-ctx.Done():
        return ctx.Err()
    }

    return nil
}
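
The new context_sleep.go gives the SDK a cancellable sleep. A minimal usage sketch (not part of the vendored diff) showing both outcomes:

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go/aws"
)

func main() {
    // Case 1: the timer expires first, so SleepWithContext returns nil.
    if err := aws.SleepWithContext(context.Background(), 10*time.Millisecond); err != nil {
        fmt.Println("unexpected:", err)
    }

    // Case 2: the context is canceled before the timer fires, so the
    // context's error is returned instead of sleeping the full duration.
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
    defer cancel()
    err := aws.SleepWithContext(ctx, time.Hour)
    fmt.Println(err) // context deadline exceeded
}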
12 vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini generated vendored Normal file
@@ -0,0 +1,12 @@
[default]
aws_access_key_id = accessKey
aws_secret_access_key = secret
aws_session_token = token

[no_token]
aws_access_key_id = accessKey
aws_secret_access_key = secret

[with_colon]
aws_access_key_id: accessKey
aws_secret_access_key: secret
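
The example.ini above documents the profile shapes the shared-credentials parser accepts, including colon-separated keys. A minimal sketch (not part of the vendored diff; the "example.ini" path is an assumption) of reading one of those profiles:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
    // Load the "no_token" profile from an INI file like the one above.
    creds := credentials.NewSharedCredentials("example.ini", "no_token")

    v, err := creds.Get()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(v.AccessKeyID) // "accessKey"
}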
9 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go generated vendored
@@ -24,8 +24,9 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {

    output := &metadataOutput{}
    req := c.NewRequest(op, nil, output)
    err := req.Send()

    return output.Content, req.Send()
    return output.Content, err
}

// GetUserData returns the userdata that was configured for the service. If
@@ -45,8 +46,9 @@ func (c *EC2Metadata) GetUserData() (string, error) {
            r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
        }
    })
    err := req.Send()

    return output.Content, req.Send()
    return output.Content, err
}

// GetDynamicData uses the path provided to request information from the EC2
@@ -61,8 +63,9 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) {

    output := &metadataOutput{}
    req := c.NewRequest(op, nil, output)
    err := req.Send()

    return output.Content, req.Send()
    return output.Content, err
}

// GetInstanceIdentityDocument retrieves an identity document describing an
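
The ec2metadata hunks replace "return output.Content, req.Send()" with an explicit Send followed by "return output.Content, err". The likely motivation: in the old form the content operand is captured before the request has actually been sent, so the metadata value can come back empty. A self-contained sketch of the evaluation-order pitfall (not SDK code; read and fill are invented stand-ins):

package main

import "fmt"

type out struct{ content string }

func read(o *out) string { return o.content }

func fill(o *out) error {
    o.content = "hello"
    return nil
}

// buggy mirrors the old shape: the content is read before the call that
// populates it, because function calls in a return statement run left to right.
func buggy() (string, error) {
    o := &out{}
    return read(o), fill(o) // read runs first, so this returns ""
}

// fixed mirrors the new shape: populate first, then return the content.
func fixed() (string, error) {
    o := &out{}
    err := fill(o)
    return read(o), err // returns "hello"
}

func main() {
    a, _ := buggy()
    b, _ := fixed()
    fmt.Printf("buggy=%q fixed=%q\n", a, b)
}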
3 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go generated vendored
@@ -92,6 +92,9 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
        svc.Handlers.Send.SwapNamed(request.NamedHandler{
            Name: corehandlers.SendHandler.Name,
            Fn: func(r *request.Request) {
                r.HTTPResponse = &http.Response{
                    Header: http.Header{},
                }
                r.Error = awserr.New(
                    request.CanceledErrorCode,
                    "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
28 vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go generated vendored
@@ -85,6 +85,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol
        custAddS3DualStack(p)
        custRmIotDataService(p)
        custFixAppAutoscalingChina(p)
        custFixAppAutoscalingUsGov(p)
    }

    return ps, nil
@@ -149,6 +150,33 @@ func custFixAppAutoscalingChina(p *partition) {
    p.Services[serviceName] = s
}

func custFixAppAutoscalingUsGov(p *partition) {
    if p.ID != "aws-us-gov" {
        return
    }

    const serviceName = "application-autoscaling"
    s, ok := p.Services[serviceName]
    if !ok {
        return
    }

    if a := s.Defaults.CredentialScope.Service; a != "" {
        fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a)
        return
    }

    if a := s.Defaults.Hostname; a != "" {
        fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a)
        return
    }

    s.Defaults.CredentialScope.Service = "application-autoscaling"
    s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com"

    p.Services[serviceName] = s
}

type decodeModelError struct {
    awsError
}
436
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
436
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
@@ -185,6 +185,107 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"api.ecr": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{
|
||||
Hostname: "api.ecr.ap-northeast-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ap-northeast-1",
|
||||
},
|
||||
},
|
||||
"ap-northeast-2": endpoint{
|
||||
Hostname: "api.ecr.ap-northeast-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ap-northeast-2",
|
||||
},
|
||||
},
|
||||
"ap-south-1": endpoint{
|
||||
Hostname: "api.ecr.ap-south-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ap-south-1",
|
||||
},
|
||||
},
|
||||
"ap-southeast-1": endpoint{
|
||||
Hostname: "api.ecr.ap-southeast-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ap-southeast-1",
|
||||
},
|
||||
},
|
||||
"ap-southeast-2": endpoint{
|
||||
Hostname: "api.ecr.ap-southeast-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ap-southeast-2",
|
||||
},
|
||||
},
|
||||
"ca-central-1": endpoint{
|
||||
Hostname: "api.ecr.ca-central-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ca-central-1",
|
||||
},
|
||||
},
|
||||
"eu-central-1": endpoint{
|
||||
Hostname: "api.ecr.eu-central-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "eu-central-1",
|
||||
},
|
||||
},
|
||||
"eu-north-1": endpoint{
|
||||
Hostname: "api.ecr.eu-north-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "eu-north-1",
|
||||
},
|
||||
},
|
||||
"eu-west-1": endpoint{
|
||||
Hostname: "api.ecr.eu-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "eu-west-1",
|
||||
},
|
||||
},
|
||||
"eu-west-2": endpoint{
|
||||
Hostname: "api.ecr.eu-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "eu-west-2",
|
||||
},
|
||||
},
|
||||
"eu-west-3": endpoint{
|
||||
Hostname: "api.ecr.eu-west-3.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "eu-west-3",
|
||||
},
|
||||
},
|
||||
"sa-east-1": endpoint{
|
||||
Hostname: "api.ecr.sa-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "sa-east-1",
|
||||
},
|
||||
},
|
||||
"us-east-1": endpoint{
|
||||
Hostname: "api.ecr.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"us-east-2": endpoint{
|
||||
Hostname: "api.ecr.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"us-west-1": endpoint{
|
||||
Hostname: "api.ecr.us-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-1",
|
||||
},
|
||||
},
|
||||
"us-west-2": endpoint{
|
||||
Hostname: "api.ecr.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"api.mediatailor": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -193,6 +294,7 @@ var awsPartition = partition{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"api.pricing": service{
|
||||
@@ -219,9 +321,33 @@ var awsPartition = partition{
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"us-east-1-fips": endpoint{
|
||||
Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"us-east-2": endpoint{},
|
||||
"us-east-2-fips": endpoint{
|
||||
Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-1-fips": endpoint{
|
||||
Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-1",
|
||||
},
|
||||
},
|
||||
"us-west-2": endpoint{},
|
||||
"us-west-2-fips": endpoint{
|
||||
Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"apigateway": service{
|
||||
@@ -281,6 +407,10 @@ var awsPartition = partition{
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
@@ -296,6 +426,7 @@ var awsPartition = partition{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
@@ -374,6 +505,7 @@ var awsPartition = partition{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
@@ -768,7 +900,9 @@ var awsPartition = partition{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
@@ -776,6 +910,15 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"comprehendmedical": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"config": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -813,6 +956,21 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"datasync": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"dax": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -820,6 +978,7 @@ var awsPartition = partition{
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
@@ -871,6 +1030,7 @@ var awsPartition = partition{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
@@ -881,6 +1041,35 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"docdb": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{
|
||||
Hostname: "rds.eu-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "eu-west-1",
|
||||
},
|
||||
},
|
||||
"us-east-1": endpoint{
|
||||
Hostname: "rds.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"us-east-2": endpoint{
|
||||
Hostname: "rds.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"us-west-2": endpoint{
|
||||
Hostname: "rds.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"ds": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -964,27 +1153,6 @@ var awsPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"ecr": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"ecs": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -1063,6 +1231,7 @@ var awsPartition = partition{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
@@ -1165,11 +1334,17 @@ var awsPartition = partition{
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"fips": endpoint{
|
||||
Hostname: "es-fips.us-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-1",
|
||||
},
|
||||
},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"events": service{
|
||||
@@ -1228,10 +1403,7 @@ var awsPartition = partition{
|
||||
},
|
||||
},
|
||||
"fsx": service{
|
||||
Defaults: endpoint{
|
||||
SSLCommonName: "fsx.us-west-2.amazonaws.com",
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
@@ -1293,6 +1465,7 @@ var awsPartition = partition{
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
@@ -1451,6 +1624,7 @@ var awsPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
@@ -1460,6 +1634,12 @@ var awsPartition = partition{
|
||||
"kms": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ProdFips": endpoint{
|
||||
Hostname: "kms-fips.ca-central-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ca-central-1",
|
||||
},
|
||||
},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
@@ -1499,6 +1679,22 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"license-manager": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"lightsail": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -1563,6 +1759,7 @@ var awsPartition = partition{
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
@@ -1686,6 +1883,22 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"mq": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"mturk-requester": service{
|
||||
IsRegionalized: boxedFalse,
|
||||
|
||||
@@ -1699,12 +1912,24 @@ var awsPartition = partition{
|
||||
"neptune": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{
|
||||
Hostname: "rds.ap-northeast-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ap-northeast-1",
|
||||
},
|
||||
},
|
||||
"ap-southeast-1": endpoint{
|
||||
Hostname: "rds.ap-southeast-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ap-southeast-1",
|
||||
},
|
||||
},
|
||||
"ap-southeast-2": endpoint{
|
||||
Hostname: "rds.ap-southeast-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ap-southeast-2",
|
||||
},
|
||||
},
|
||||
"eu-central-1": endpoint{
|
||||
Hostname: "rds.eu-central-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
@@ -1813,6 +2038,7 @@ var awsPartition = partition{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
@@ -1871,6 +2097,8 @@ var awsPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
@@ -1928,15 +2156,22 @@ var awsPartition = partition{
|
||||
},
|
||||
"route53resolver": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
@@ -2243,6 +2478,26 @@ var awsPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"securityhub": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"serverlessrepo": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
@@ -2302,6 +2557,7 @@ var awsPartition = partition{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
@@ -2684,6 +2940,7 @@ var awsPartition = partition{
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
@@ -2842,6 +3099,23 @@ var awscnPartition = partition{
|
||||
},
|
||||
},
|
||||
Services: services{
|
||||
"api.ecr": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{
|
||||
Hostname: "api.ecr.cn-north-1.amazonaws.com.cn",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "cn-north-1",
|
||||
},
|
||||
},
|
||||
"cn-northwest-1": endpoint{
|
||||
Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "cn-northwest-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"apigateway": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -2962,13 +3236,6 @@ var awscnPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"ecr": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"ecs": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -3022,6 +3289,19 @@ var awscnPartition = partition{
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"firehose": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"gamelift": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"glacier": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
@@ -3175,6 +3455,13 @@ var awscnPartition = partition{
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"states": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"storagegateway": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -3253,6 +3540,23 @@ var awsusgovPartition = partition{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"api.ecr": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-east-1": endpoint{
|
||||
Hostname: "api.ecr.us-gov-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-east-1",
|
||||
},
|
||||
},
|
||||
"us-gov-west-1": endpoint{
|
||||
Hostname: "api.ecr.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"api.sagemaker": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -3267,12 +3571,23 @@ var awsusgovPartition = partition{
|
||||
},
|
||||
},
|
||||
"application-autoscaling": service{
|
||||
|
||||
Defaults: endpoint{
|
||||
Hostname: "autoscaling.{region}.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Service: "application-autoscaling",
|
||||
},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-gov-east-1": endpoint{},
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"athena": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"autoscaling": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -3396,13 +3711,6 @@ var awsusgovPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"ecr": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-east-1": endpoint{},
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"ecs": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -3457,6 +3765,12 @@ var awsusgovPartition = partition{
|
||||
"es": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"fips": endpoint{
|
||||
Hostname: "es-fips.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
"us-gov-east-1": endpoint{},
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
@@ -3468,6 +3782,12 @@ var awsusgovPartition = partition{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"firehose": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"glacier": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -3477,6 +3797,12 @@ var awsusgovPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"glue": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"guardduty": service{
|
||||
IsRegionalized: boxedTrue,
|
||||
Defaults: endpoint{
|
||||
@@ -3526,6 +3852,12 @@ var awsusgovPartition = partition{
|
||||
"kms": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ProdFips": endpoint{
|
||||
Hostname: "kms-fips.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
"us-gov-east-1": endpoint{},
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
@@ -3761,5 +4093,11 @@ var awsusgovPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"workspaces": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
59 vendor/github.com/aws/aws-sdk-go/aws/request/request.go generated vendored
@@ -4,7 +4,6 @@ import (
    "bytes"
    "fmt"
    "io"
    "net"
    "net/http"
    "net/url"
    "reflect"
@@ -480,7 +479,7 @@ func (r *Request) Send() error {

    if err := r.sendRequest(); err == nil {
        return nil
    } else if !shouldRetryCancel(r) {
    } else if !shouldRetryCancel(r.Error) {
        return err
    } else {
        r.Handlers.Retry.Run(r)
@@ -562,30 +561,46 @@ func AddToUserAgent(r *Request, s string) {
    r.HTTPRequest.Header.Set("User-Agent", s)
}

func shouldRetryCancel(r *Request) bool {
    awsErr, ok := r.Error.(awserr.Error)
    timeoutErr := false
    errStr := r.Error.Error()
    if ok {
        if awsErr.Code() == CanceledErrorCode {
type temporary interface {
    Temporary() bool
}

func shouldRetryCancel(err error) bool {
    switch err := err.(type) {
    case awserr.Error:
        if err.Code() == CanceledErrorCode {
            return false
        }
        err := awsErr.OrigErr()
        netErr, netOK := err.(net.Error)
        timeoutErr = netOK && netErr.Temporary()
        if urlErr, ok := err.(*url.Error); !timeoutErr && ok {
            errStr = urlErr.Err.Error()
        return shouldRetryCancel(err.OrigErr())
    case *url.Error:
        if strings.Contains(err.Error(), "connection refused") {
            // Refused connections should be retried as the service may not yet
            // be running on the port. Go TCP dial considers refused
            // connections as not temporary.
            return true
        }
        // *url.Error only implements Temporary after golang 1.6 but since
        // url.Error only wraps the error:
        return shouldRetryCancel(err.Err)
    case temporary:
        // If the error is temporary, we want to allow continuation of the
        // retry process
        return err.Temporary()
    case nil:
        // `awserr.Error.OrigErr()` can be nil, meaning there was an error but
        // because we don't know the cause, it is marked as retriable. See
        // TestRequest4xxUnretryable for an example.
        return true
    default:
        switch err.Error() {
        case "net/http: request canceled",
            "net/http: request canceled while waiting for connection":
            // known 1.5 error case when an http request is cancelled
            return false
        }
        // here we don't know the error; so we allow a retry.
        return true
    }

    // There can be two types of canceled errors here.
    // The first being a net.Error and the other being an error.
    // If the request was timed out, we want to continue the retry
    // process. Otherwise, return the canceled error.
    return timeoutErr ||
        (errStr != "net/http: request canceled" &&
            errStr != "net/http: request canceled while waiting for connection")

}

// SanitizeHostForHeader removes default port from host and updates request.Host
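
The rewritten shouldRetryCancel classifies errors with a type switch and recurses through wrapped errors instead of matching strings. A simplified, standalone sketch of the same idea (not the SDK's exact code):

package main

import (
    "errors"
    "fmt"
    "net/url"
    "strings"
)

type temporary interface {
    Temporary() bool
}

// retryable mirrors the type-switch style of the new shouldRetryCancel:
// unwrap *url.Error, honor Temporary(), and treat refused connections and
// unknown errors as retryable.
func retryable(err error) bool {
    switch err := err.(type) {
    case *url.Error:
        if strings.Contains(err.Error(), "connection refused") {
            // The service may simply not be listening yet; retry.
            return true
        }
        return retryable(err.Err)
    case temporary:
        return err.Temporary()
    case nil:
        return true
    default:
        // Unknown cause: allow a retry, as the SDK comments describe.
        return true
    }
}

func main() {
    fmt.Println(retryable(&url.Error{Op: "Get", URL: "http://x", Err: errors.New("connection refused")})) // true
    fmt.Println(retryable(errors.New("some unknown failure")))                                            // true
}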
1 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go generated vendored
@@ -38,6 +38,7 @@ var throttleCodes = map[string]struct{}{
    "ThrottlingException": {},
    "RequestLimitExceeded": {},
    "RequestThrottled": {},
    "RequestThrottledException": {},
    "TooManyRequestsException": {}, // Lambda functions
    "PriorRequestNotComplete": {}, // Route53
    "TransactionInProgressException": {},
26 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go generated vendored Normal file
@@ -0,0 +1,26 @@
// +build go1.7

package session

import (
    "net"
    "net/http"
    "time"
)

// Transport that should be used when a custom CA bundle is specified with the
// SDK.
func getCABundleTransport() *http.Transport {
    return &http.Transport{
        Proxy: http.ProxyFromEnvironment,
        DialContext: (&net.Dialer{
            Timeout:   30 * time.Second,
            KeepAlive: 30 * time.Second,
            DualStack: true,
        }).DialContext,
        MaxIdleConns:          100,
        IdleConnTimeout:       90 * time.Second,
        TLSHandshakeTimeout:   10 * time.Second,
        ExpectContinueTimeout: 1 * time.Second,
    }
}
22 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go generated vendored Normal file
@@ -0,0 +1,22 @@
// +build !go1.6,go1.5

package session

import (
    "net"
    "net/http"
    "time"
)

// Transport that should be used when a custom CA bundle is specified with the
// SDK.
func getCABundleTransport() *http.Transport {
    return &http.Transport{
        Proxy: http.ProxyFromEnvironment,
        Dial: (&net.Dialer{
            Timeout:   30 * time.Second,
            KeepAlive: 30 * time.Second,
        }).Dial,
        TLSHandshakeTimeout: 10 * time.Second,
    }
}
23 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go generated vendored Normal file
@@ -0,0 +1,23 @@
// +build !go1.7,go1.6

package session

import (
    "net"
    "net/http"
    "time"
)

// Transport that should be used when a custom CA bundle is specified with the
// SDK.
func getCABundleTransport() *http.Transport {
    return &http.Transport{
        Proxy: http.ProxyFromEnvironment,
        Dial: (&net.Dialer{
            Timeout:   30 * time.Second,
            KeepAlive: 30 * time.Second,
        }).Dial,
        TLSHandshakeTimeout:   10 * time.Second,
        ExpectContinueTimeout: 1 * time.Second,
    }
}
5 vendor/github.com/aws/aws-sdk-go/aws/session/session.go generated vendored
@@ -407,7 +407,10 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error {
        }
    }
    if t == nil {
        t = &http.Transport{}
        // Nil transport implies `http.DefaultTransport` should be used. Since
        // the SDK cannot modify, nor copy the `DefaultTransport` specifying
        // the values the next closest behavior.
        t = getCABundleTransport()
    }

    p, err := loadCertPool(bundle)
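
loadCustomCABundle now falls back to getCABundleTransport() rather than a zero-value http.Transport, so proxy-from-environment and the timeout defaults shown above survive when a caller supplies a CA bundle. A minimal sketch (not part of the vendored diff; the "ca-bundle.pem" path is a placeholder) of the caller-side path that triggers it:

package main

import (
    "log"
    "os"

    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    bundle, err := os.Open("ca-bundle.pem")
    if err != nil {
        log.Fatal(err)
    }
    defer bundle.Close()

    // With a custom CA bundle and no explicit HTTP client, the session builds
    // its transport via getCABundleTransport() instead of an empty Transport.
    sess, err := session.NewSessionWithOptions(session.Options{
        CustomCABundle: bundle,
    })
    if err != nil {
        log.Fatal(err)
    }
    _ = sess
}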
2 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go generated vendored
@@ -182,7 +182,7 @@ type Signer struct {
    // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
    DisableURIPathEscaping bool

    // Disales the automatical setting of the HTTP request's Body field with the
    // Disables the automatical setting of the HTTP request's Body field with the
    // io.ReadSeeker passed in to the signer. This is useful if you're using a
    // custom wrapper around the body for the io.ReadSeeker and want to preserve
    // the Body value on the Request.Body.
2 vendor/github.com/aws/aws-sdk-go/aws/version.go generated vendored
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.16.15"
const SDKVersion = "1.18.5"
7 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go generated vendored
@@ -155,6 +155,9 @@ func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.
        return awserr.New("SerializationError", "failed to encode REST request", err)
    }

    name = strings.TrimSpace(name)
    str = strings.TrimSpace(str)

    header.Add(name, str)

    return nil
@@ -170,8 +173,10 @@ func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag)
            return awserr.New("SerializationError", "failed to encode REST request", err)

        }
        keyStr := strings.TrimSpace(key.String())
        str = strings.TrimSpace(str)

        header.Add(prefix+key.String(), str)
        header.Add(prefix+keyStr, str)
    }
    return nil
}
149
vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go
generated
vendored
149
vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go
generated
vendored
@@ -802,6 +802,12 @@ func (c *CloudWatch) GetMetricDataRequest(input *GetMetricDataInput) (req *reque
|
||||
Name: opGetMetricData,
|
||||
HTTPMethod: "POST",
|
||||
HTTPPath: "/",
|
||||
Paginator: &request.Paginator{
|
||||
InputTokens: []string{"NextToken"},
|
||||
OutputTokens: []string{"NextToken"},
|
||||
LimitToken: "MaxDatapoints",
|
||||
TruncationToken: "",
|
||||
},
|
||||
}
|
||||
|
||||
if input == nil {
|
||||
@@ -821,7 +827,7 @@ func (c *CloudWatch) GetMetricDataRequest(input *GetMetricDataInput) (req *reque
|
||||
// to create new time series that represent new insights into your data. For
|
||||
// example, using Lambda metrics, you could divide the Errors metric by the
|
||||
// Invocations metric to get an error rate time series. For more information
|
||||
// about metric math expressions, see Metric Math Syntax and Functions (http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax)
|
||||
// about metric math expressions, see Metric Math Syntax and Functions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax)
|
||||
// in the Amazon CloudWatch User Guide.
|
||||
//
|
||||
// Calls to the GetMetricData API have a different pricing structure than calls
|
||||
@@ -884,6 +890,56 @@ func (c *CloudWatch) GetMetricDataWithContext(ctx aws.Context, input *GetMetricD
|
||||
return out, req.Send()
|
||||
}
|
||||
|
||||
// GetMetricDataPages iterates over the pages of a GetMetricData operation,
|
||||
// calling the "fn" function with the response data for each page. To stop
|
||||
// iterating, return false from the fn function.
|
||||
//
|
||||
// See GetMetricData method for more information on how to use this operation.
|
||||
//
|
||||
// Note: This operation can generate multiple requests to a service.
|
||||
//
|
||||
// // Example iterating over at most 3 pages of a GetMetricData operation.
|
||||
// pageNum := 0
|
||||
// err := client.GetMetricDataPages(params,
|
||||
// func(page *GetMetricDataOutput, lastPage bool) bool {
|
||||
// pageNum++
|
||||
// fmt.Println(page)
|
||||
// return pageNum <= 3
|
||||
// })
|
||||
//
|
||||
func (c *CloudWatch) GetMetricDataPages(input *GetMetricDataInput, fn func(*GetMetricDataOutput, bool) bool) error {
|
||||
return c.GetMetricDataPagesWithContext(aws.BackgroundContext(), input, fn)
|
||||
}
|
||||
|
||||
// GetMetricDataPagesWithContext same as GetMetricDataPages except
|
||||
// it takes a Context and allows setting request options on the pages.
|
||||
//
|
||||
// The context must be non-nil and will be used for request cancellation. If
|
||||
// the context is nil a panic will occur. In the future the SDK may create
|
||||
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
||||
// for more information on using Contexts.
|
||||
func (c *CloudWatch) GetMetricDataPagesWithContext(ctx aws.Context, input *GetMetricDataInput, fn func(*GetMetricDataOutput, bool) bool, opts ...request.Option) error {
|
||||
p := request.Pagination{
|
||||
NewRequest: func() (*request.Request, error) {
|
||||
var inCpy *GetMetricDataInput
|
||||
if input != nil {
|
||||
tmp := *input
|
||||
inCpy = &tmp
|
||||
}
|
||||
req, _ := c.GetMetricDataRequest(inCpy)
|
||||
req.SetContext(ctx)
|
||||
req.ApplyOptions(opts...)
|
||||
return req, nil
|
||||
},
|
||||
}
|
||||
|
||||
cont := true
|
||||
for p.Next() && cont {
|
||||
cont = fn(p.Page().(*GetMetricDataOutput), !p.HasNextPage())
|
||||
}
|
||||
return p.Err()
|
||||
}
|
||||
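
The generated GetMetricDataPages / GetMetricDataPagesWithContext helpers above wrap the NextToken paginator declared in GetMetricDataRequest. A minimal sketch (not part of the vendored diff; query parameters are elided, a real call needs MetricDataQueries, StartTime and EndTime) of consuming the pages:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatch"
)

func main() {
    sess, err := session.NewSession()
    if err != nil {
        log.Fatal(err)
    }
    svc := cloudwatch.New(sess)

    params := &cloudwatch.GetMetricDataInput{}

    total := 0
    err = svc.GetMetricDataPages(params,
        func(page *cloudwatch.GetMetricDataOutput, lastPage bool) bool {
            total += len(page.MetricDataResults)
            return !lastPage // keep paging until the final page
        })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("results:", total)
}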
|
||||
const opGetMetricStatistics = "GetMetricStatistics"
|
||||
|
||||
// GetMetricStatisticsRequest generates a "aws/request.Request" representing the
|
||||
@@ -980,7 +1036,7 @@ func (c *CloudWatch) GetMetricStatisticsRequest(input *GetMetricStatisticsInput)
|
||||
// 2016.
|
||||
//
|
||||
// For information about metrics and dimensions supported by AWS services, see
|
||||
// the Amazon CloudWatch Metrics and Dimensions Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CW_Support_For_AWS.html)
|
||||
// the Amazon CloudWatch Metrics and Dimensions Reference (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CW_Support_For_AWS.html)
|
||||
// in the Amazon CloudWatch User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
@@ -1144,6 +1200,12 @@ func (c *CloudWatch) ListDashboardsRequest(input *ListDashboardsInput) (req *req
|
||||
Name: opListDashboards,
|
||||
HTTPMethod: "POST",
|
||||
HTTPPath: "/",
|
||||
Paginator: &request.Paginator{
|
||||
InputTokens: []string{"NextToken"},
|
||||
OutputTokens: []string{"NextToken"},
|
||||
LimitToken: "",
|
||||
TruncationToken: "",
|
||||
},
|
||||
}
|
||||
|
||||
if input == nil {
|
||||
@@ -1201,6 +1263,56 @@ func (c *CloudWatch) ListDashboardsWithContext(ctx aws.Context, input *ListDashb
|
||||
return out, req.Send()
|
||||
}
|
||||
|
||||
// ListDashboardsPages iterates over the pages of a ListDashboards operation,
|
||||
// calling the "fn" function with the response data for each page. To stop
|
||||
// iterating, return false from the fn function.
|
||||
//
|
||||
// See ListDashboards method for more information on how to use this operation.
|
||||
//
|
||||
// Note: This operation can generate multiple requests to a service.
|
||||
//
|
||||
// // Example iterating over at most 3 pages of a ListDashboards operation.
|
||||
// pageNum := 0
|
||||
// err := client.ListDashboardsPages(params,
|
||||
// func(page *ListDashboardsOutput, lastPage bool) bool {
|
||||
// pageNum++
|
||||
// fmt.Println(page)
|
||||
// return pageNum <= 3
|
||||
// })
|
||||
//
|
||||
func (c *CloudWatch) ListDashboardsPages(input *ListDashboardsInput, fn func(*ListDashboardsOutput, bool) bool) error {
|
||||
return c.ListDashboardsPagesWithContext(aws.BackgroundContext(), input, fn)
|
||||
}
|
||||
|
||||
// ListDashboardsPagesWithContext same as ListDashboardsPages except
|
||||
// it takes a Context and allows setting request options on the pages.
|
||||
//
|
||||
// The context must be non-nil and will be used for request cancellation. If
|
||||
// the context is nil a panic will occur. In the future the SDK may create
|
||||
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
||||
// for more information on using Contexts.
|
||||
func (c *CloudWatch) ListDashboardsPagesWithContext(ctx aws.Context, input *ListDashboardsInput, fn func(*ListDashboardsOutput, bool) bool, opts ...request.Option) error {
|
||||
p := request.Pagination{
|
||||
NewRequest: func() (*request.Request, error) {
|
||||
var inCpy *ListDashboardsInput
|
||||
if input != nil {
|
||||
tmp := *input
|
||||
inCpy = &tmp
|
||||
}
|
||||
req, _ := c.ListDashboardsRequest(inCpy)
|
||||
req.SetContext(ctx)
|
||||
req.ApplyOptions(opts...)
|
||||
return req, nil
|
||||
},
|
||||
}
|
||||
|
||||
cont := true
|
||||
for p.Next() && cont {
|
||||
cont = fn(p.Page().(*ListDashboardsOutput), !p.HasNextPage())
|
||||
}
|
||||
return p.Err()
|
||||
}
|
||||
|
||||
const opListMetrics = "ListMetrics"
|
||||
|
||||
// ListMetricsRequest generates a "aws/request.Request" representing the
|
||||
@@ -1533,7 +1645,7 @@ func (c *CloudWatch) PutMetricAlarmRequest(input *PutMetricAlarmInput) (req *req
|
||||
// The first time you create an alarm in the AWS Management Console, the CLI,
|
||||
// or by using the PutMetricAlarm API, CloudWatch creates the necessary service-linked
|
||||
// role for you. The service-linked role is called AWSServiceRoleForCloudWatchEvents.
|
||||
// For more information, see AWS service-linked role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-service-linked-role).
|
||||
// For more information, see AWS service-linked role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-service-linked-role).
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
@@ -1637,7 +1749,7 @@ func (c *CloudWatch) PutMetricDataRequest(input *PutMetricDataInput) (req *reque
|
||||
//
|
||||
// You can use up to 10 dimensions per metric to further clarify what data the
|
||||
// metric collects. For more information about specifying dimensions, see Publishing
|
||||
// Metrics (http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html)
|
||||
// Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html)
|
||||
// in the Amazon CloudWatch User Guide.
|
||||
//
|
||||
// Data points with time stamps from 24 hours ago or longer can take at least
|
||||
@@ -2927,6 +3039,13 @@ func (s *GetMetricDataInput) SetStartTime(v time.Time) *GetMetricDataInput {
|
||||
type GetMetricDataOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Contains a message about the operation or the results, if the operation results
|
||||
// in such a message. Examples of messages that may be returned include Maximum
|
||||
// number of allowed metrics exceeded and You are not authorized to search one
|
||||
// or more metrics. If there is a message, as much of the operation as possible
|
||||
// is still executed.
|
||||
Messages []*MessageData `type:"list"`
|
||||
|
||||
// The metrics that are returned, including the metric name, namespace, and
|
||||
// dimensions.
|
||||
MetricDataResults []*MetricDataResult `type:"list"`
|
||||
@@ -2945,6 +3064,12 @@ func (s GetMetricDataOutput) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// SetMessages sets the Messages field's value.
|
||||
func (s *GetMetricDataOutput) SetMessages(v []*MessageData) *GetMetricDataOutput {
|
||||
s.Messages = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetMetricDataResults sets the MetricDataResults field's value.
|
||||
func (s *GetMetricDataOutput) SetMetricDataResults(v []*MetricDataResult) *GetMetricDataOutput {
|
||||
s.MetricDataResults = v
|
||||
@@ -2965,9 +3090,9 @@ type GetMetricStatisticsInput struct {
|
||||
// dimensions as a separate metric. If a specific combination of dimensions
|
||||
// was not published, you can't retrieve statistics for it. You must specify
|
||||
// the same dimensions that were used when the metrics were created. For an
|
||||
// example, see Dimension Combinations (http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#dimension-combinations)
|
||||
// example, see Dimension Combinations (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#dimension-combinations)
|
||||
// in the Amazon CloudWatch User Guide. For more information about specifying
|
||||
// dimensions, see Publishing Metrics (http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html)
|
||||
// dimensions, see Publishing Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html)
|
||||
// in the Amazon CloudWatch User Guide.
|
||||
Dimensions []*Dimension `type:"list"`
|
||||
|
||||
@@ -3865,7 +3990,7 @@ func (s *MetricAlarm) SetUnit(v string) *MetricAlarm {
|
||||
// contain a MetricStat parameter to retrieve a metric, and as many as 10 structures
|
||||
// that contain the Expression parameter to perform a math expression. Any expression
|
||||
// used in a PutMetricAlarm operation must return a single time series. For
|
||||
// more information, see Metric Math Syntax and Functions (http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax)
|
||||
// more information, see Metric Math Syntax and Functions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax)
|
||||
// in the Amazon CloudWatch User Guide.
|
||||
//
|
||||
// Some of the parameters of this structure also have different uses whether
|
||||
@@ -3878,7 +4003,7 @@ type MetricDataQuery struct {
|
||||
// is performing a math expression. This expression can use the Id of the other
|
||||
// metrics to refer to those metrics, and can also use the Id of other expressions
|
||||
// to use the result of those expressions. For more information about metric
|
||||
// math expressions, see Metric Math Syntax and Functions (http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax)
|
||||
// math expressions, see Metric Math Syntax and Functions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax)
|
||||
// in the Amazon CloudWatch User Guide.
|
||||
//
|
||||
// Within each MetricDataQuery object, you must specify either Expression or
|
||||
@@ -4094,7 +4219,7 @@ type MetricDatum struct {
|
||||
// to one second. Setting this to 60 specifies this metric as a regular-resolution
|
||||
// metric, which CloudWatch stores at 1-minute resolution. Currently, high resolution
|
||||
// is available only for custom metrics. For more information about high-resolution
|
||||
// metrics, see High-Resolution Metrics (http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html#high-resolution-metrics)
|
||||
// metrics, see High-Resolution Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html#high-resolution-metrics)
|
||||
// in the Amazon CloudWatch User Guide.
|
||||
//
|
||||
// This field is optional, if you do not specify it the default of 60 is used.
|
||||
@@ -4439,7 +4564,7 @@ type PutMetricAlarmInput struct {
|
||||
|
||||
// The number of datapoints that must be breaching to trigger the alarm. This
|
||||
// is used only if you are setting an "M out of N" alarm. In that case, this
|
||||
// value is the M. For more information, see Evaluating an Alarm (http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarm-evaluation)
|
||||
// value is the M. For more information, see Evaluating an Alarm (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarm-evaluation)
|
||||
// in the Amazon CloudWatch User Guide.
|
||||
DatapointsToAlarm *int64 `min:"1" type:"integer"`
|
||||
|
||||
@@ -4451,7 +4576,7 @@ type PutMetricAlarmInput struct {
|
||||
// significant. If you specify evaluate or omit this parameter, the alarm is
|
||||
// always evaluated and possibly changes state no matter how many data points
|
||||
// are available. For more information, see Percentile-Based CloudWatch Alarms
|
||||
// and Low Data Samples (http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#percentiles-with-low-samples).
|
||||
// and Low Data Samples (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#percentiles-with-low-samples).
|
||||
//
|
||||
// Valid Values: evaluate | ignore
|
||||
EvaluateLowSampleCountPercentile *string `min:"1" type:"string"`
|
||||
@@ -4549,7 +4674,7 @@ type PutMetricAlarmInput struct {
|
||||
|
||||
// Sets how this alarm is to handle missing data points. If TreatMissingData
|
||||
// is omitted, the default behavior of missing is used. For more information,
|
||||
// see Configuring How CloudWatch Alarms Treats Missing Data (http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data).
|
||||
// see Configuring How CloudWatch Alarms Treats Missing Data (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data).
|
||||
//
|
||||
// Valid Values: breaching | notBreaching | ignore | missing
|
||||
TreatMissingData *string `min:"1" type:"string"`
|
||||
|
||||
1843 vendor/github.com/aws/aws-sdk-go/service/ec2/api.go generated vendored
File diff suppressed because it is too large
16
vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go
generated
vendored
@@ -718,6 +718,9 @@ type EC2API interface {
DescribeInternetGatewaysWithContext(aws.Context, *ec2.DescribeInternetGatewaysInput, ...request.Option) (*ec2.DescribeInternetGatewaysOutput, error)
DescribeInternetGatewaysRequest(*ec2.DescribeInternetGatewaysInput) (*request.Request, *ec2.DescribeInternetGatewaysOutput)

DescribeInternetGatewaysPages(*ec2.DescribeInternetGatewaysInput, func(*ec2.DescribeInternetGatewaysOutput, bool) bool) error
DescribeInternetGatewaysPagesWithContext(aws.Context, *ec2.DescribeInternetGatewaysInput, func(*ec2.DescribeInternetGatewaysOutput, bool) bool, ...request.Option) error

DescribeKeyPairs(*ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error)
DescribeKeyPairsWithContext(aws.Context, *ec2.DescribeKeyPairsInput, ...request.Option) (*ec2.DescribeKeyPairsOutput, error)
DescribeKeyPairsRequest(*ec2.DescribeKeyPairsInput) (*request.Request, *ec2.DescribeKeyPairsOutput)
@@ -745,6 +748,9 @@ type EC2API interface {
DescribeNetworkAclsWithContext(aws.Context, *ec2.DescribeNetworkAclsInput, ...request.Option) (*ec2.DescribeNetworkAclsOutput, error)
DescribeNetworkAclsRequest(*ec2.DescribeNetworkAclsInput) (*request.Request, *ec2.DescribeNetworkAclsOutput)

DescribeNetworkAclsPages(*ec2.DescribeNetworkAclsInput, func(*ec2.DescribeNetworkAclsOutput, bool) bool) error
DescribeNetworkAclsPagesWithContext(aws.Context, *ec2.DescribeNetworkAclsInput, func(*ec2.DescribeNetworkAclsOutput, bool) bool, ...request.Option) error

DescribeNetworkInterfaceAttribute(*ec2.DescribeNetworkInterfaceAttributeInput) (*ec2.DescribeNetworkInterfaceAttributeOutput, error)
DescribeNetworkInterfaceAttributeWithContext(aws.Context, *ec2.DescribeNetworkInterfaceAttributeInput, ...request.Option) (*ec2.DescribeNetworkInterfaceAttributeOutput, error)
DescribeNetworkInterfaceAttributeRequest(*ec2.DescribeNetworkInterfaceAttributeInput) (*request.Request, *ec2.DescribeNetworkInterfaceAttributeOutput)
@@ -962,10 +968,16 @@ type EC2API interface {
DescribeVpcPeeringConnectionsWithContext(aws.Context, *ec2.DescribeVpcPeeringConnectionsInput, ...request.Option) (*ec2.DescribeVpcPeeringConnectionsOutput, error)
DescribeVpcPeeringConnectionsRequest(*ec2.DescribeVpcPeeringConnectionsInput) (*request.Request, *ec2.DescribeVpcPeeringConnectionsOutput)

DescribeVpcPeeringConnectionsPages(*ec2.DescribeVpcPeeringConnectionsInput, func(*ec2.DescribeVpcPeeringConnectionsOutput, bool) bool) error
DescribeVpcPeeringConnectionsPagesWithContext(aws.Context, *ec2.DescribeVpcPeeringConnectionsInput, func(*ec2.DescribeVpcPeeringConnectionsOutput, bool) bool, ...request.Option) error

DescribeVpcs(*ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error)
DescribeVpcsWithContext(aws.Context, *ec2.DescribeVpcsInput, ...request.Option) (*ec2.DescribeVpcsOutput, error)
DescribeVpcsRequest(*ec2.DescribeVpcsInput) (*request.Request, *ec2.DescribeVpcsOutput)

DescribeVpcsPages(*ec2.DescribeVpcsInput, func(*ec2.DescribeVpcsOutput, bool) bool) error
DescribeVpcsPagesWithContext(aws.Context, *ec2.DescribeVpcsInput, func(*ec2.DescribeVpcsOutput, bool) bool, ...request.Option) error

DescribeVpnConnections(*ec2.DescribeVpnConnectionsInput) (*ec2.DescribeVpnConnectionsOutput, error)
DescribeVpnConnectionsWithContext(aws.Context, *ec2.DescribeVpnConnectionsInput, ...request.Option) (*ec2.DescribeVpnConnectionsOutput, error)
DescribeVpnConnectionsRequest(*ec2.DescribeVpnConnectionsInput) (*request.Request, *ec2.DescribeVpnConnectionsOutput)
@@ -1174,6 +1186,10 @@ type EC2API interface {
ModifyInstanceCreditSpecificationWithContext(aws.Context, *ec2.ModifyInstanceCreditSpecificationInput, ...request.Option) (*ec2.ModifyInstanceCreditSpecificationOutput, error)
ModifyInstanceCreditSpecificationRequest(*ec2.ModifyInstanceCreditSpecificationInput) (*request.Request, *ec2.ModifyInstanceCreditSpecificationOutput)

ModifyInstanceEventStartTime(*ec2.ModifyInstanceEventStartTimeInput) (*ec2.ModifyInstanceEventStartTimeOutput, error)
ModifyInstanceEventStartTimeWithContext(aws.Context, *ec2.ModifyInstanceEventStartTimeInput, ...request.Option) (*ec2.ModifyInstanceEventStartTimeOutput, error)
ModifyInstanceEventStartTimeRequest(*ec2.ModifyInstanceEventStartTimeInput) (*request.Request, *ec2.ModifyInstanceEventStartTimeOutput)

ModifyInstancePlacement(*ec2.ModifyInstancePlacementInput) (*ec2.ModifyInstancePlacementOutput, error)
ModifyInstancePlacementWithContext(aws.Context, *ec2.ModifyInstancePlacementInput, ...request.Option) (*ec2.ModifyInstancePlacementOutput, error)
ModifyInstancePlacementRequest(*ec2.ModifyInstancePlacementInput) (*request.Request, *ec2.ModifyInstancePlacementOutput)
44
vendor/github.com/aws/aws-sdk-go/service/s3/api.go
generated
vendored
@@ -2047,7 +2047,7 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req

// GetBucketLifecycle API operation for Amazon Simple Storage Service.
//
// Deprecated, see the GetBucketLifecycleConfiguration operation.
// No longer used, see the GetBucketLifecycleConfiguration operation.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2428,7 +2428,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat

// GetBucketNotification API operation for Amazon Simple Storage Service.
//
// Deprecated, see the GetBucketNotificationConfiguration operation.
// No longer used, see the GetBucketNotificationConfiguration operation.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5287,7 +5287,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req

// PutBucketLifecycle API operation for Amazon Simple Storage Service.
//
// Deprecated, see the PutBucketLifecycleConfiguration operation.
// No longer used, see the PutBucketLifecycleConfiguration operation.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5600,7 +5600,7 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re

// PutBucketNotification API operation for Amazon Simple Storage Service.
//
// Deprecated, see the PutBucketNotificationConfiguraiton operation.
// No longer used, see the PutBucketNotificationConfiguration operation.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -8937,7 +8937,7 @@ type CreateBucketConfiguration struct {
_ struct{} `type:"structure"`

// Specifies the region where the bucket will be created. If you don't specify
// a region, the bucket will be created in US Standard.
// a region, the bucket is created in US East (N. Virginia) Region (us-east-1).
LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
}

@@ -11215,7 +11215,7 @@ type FilterRule struct {
// The object key name prefix or suffix identifying one or more objects to which
// the filtering rule applies. The maximum prefix length is 1,024 characters.
// Overlapping prefixes and suffixes are not supported. For more information,
// see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
// see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
// in the Amazon Simple Storage Service Developer Guide.
Name *string `type:"string" enum:"FilterRuleName"`

@@ -15149,7 +15149,7 @@ type LambdaFunctionConfiguration struct {
Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`

// A container for object key name filtering rules. For information about key
// name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
// name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
// in the Amazon Simple Storage Service Developer Guide.
Filter *NotificationConfigurationFilter `type:"structure"`

@@ -15332,7 +15332,7 @@ type LifecycleRule struct {
NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`

// Prefix identifying one or more objects to which the rule applies. This is
// deprecated; use Filter instead.
// No longer used; use Filter instead.
//
// Deprecated: Prefix has been deprecated
Prefix *string `deprecated:"true" type:"string"`
@@ -17624,8 +17624,8 @@ type NoncurrentVersionExpiration struct {
// Specifies the number of days an object is noncurrent before Amazon S3 can
// perform the associated action. For information about the noncurrent days
// calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in
// the Amazon Simple Storage Service Developer Guide.
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
// in the Amazon Simple Storage Service Developer Guide.
NoncurrentDays *int64 `type:"integer"`
}

@@ -17657,8 +17657,8 @@ type NoncurrentVersionTransition struct {
// Specifies the number of days an object is noncurrent before Amazon S3 can
// perform the associated action. For information about the noncurrent days
// calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in
// the Amazon Simple Storage Service Developer Guide.
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
// in the Amazon Simple Storage Service Developer Guide.
NoncurrentDays *int64 `type:"integer"`

// The class of storage used to store the object.
@@ -17806,7 +17806,7 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf
}

// A container for object key name filtering rules. For information about key
// name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
// name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
// in the Amazon Simple Storage Service Developer Guide.
type NotificationConfigurationFilter struct {
_ struct{} `type:"structure"`
@@ -20199,7 +20199,8 @@ type PutObjectInput struct {
// body cannot be determined automatically.
ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`

// The base64-encoded 128-bit MD5 digest of the part data.
// The base64-encoded 128-bit MD5 digest of the part data. This parameter is
// auto-populated when using the command from the CLI
ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`

// A standard MIME type describing the format of the object data.
@@ -20638,6 +20639,7 @@ type PutObjectLockConfigurationInput struct {
// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

// A token to allow Object Lock to be enabled for an existing bucket.
Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"`
}

@@ -21146,7 +21148,7 @@ type QueueConfiguration struct {
Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`

// A container for object key name filtering rules. For information about key
// name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
// name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
// in the Amazon Simple Storage Service Developer Guide.
Filter *NotificationConfigurationFilter `type:"structure"`

@@ -22496,7 +22498,7 @@ func (r *readSelectObjectContentEventStream) unmarshalerForEventType(
// Amazon S3 uses this to parse object data into records. It returns only records
// that match the specified SQL expression. You must also specify the data serialization
// format for the response. For more information, see S3Select API Documentation
// (http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html).
// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html).
type SelectObjectContentInput struct {
_ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`

@@ -22534,15 +22536,15 @@ type SelectObjectContentInput struct {
RequestProgress *RequestProgress `type:"structure"`

// The SSE Algorithm used to encrypt the object. For more information, see
// Server-Side Encryption (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
// Server-Side Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

// The SSE Customer Key. For more information, see Server-Side Encryption (Using
// Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
// Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`

// The SSE Customer Key MD5. For more information, see Server-Side Encryption
// (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
// (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
}

@@ -23347,7 +23349,7 @@ type TopicConfiguration struct {
Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`

// A container for object key name filtering rules. For information about key
// name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
// name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
// in the Amazon Simple Storage Service Developer Guide.
Filter *NotificationConfigurationFilter `type:"structure"`

@@ -23537,7 +23539,7 @@ type UploadPartCopyInput struct {
// the form bytes=first-last, where the first and last are the zero-based byte
// offsets to copy. For example, bytes=0-9 indicates that you want to copy the
// first ten bytes of the source. You can copy a range only if the source object
// is greater than 5 GB.
// is greater than 5 MB.
CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`

// Specifies the algorithm to use when decrypting the source object (e.g., AES256).
13
vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
generated
vendored
@@ -26,11 +26,16 @@ func unmarshalError(r *request.Request) {
// Bucket exists in a different region, and request needs
// to be made to the correct region.
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
msg := fmt.Sprintf(
"incorrect region, the bucket is not in '%s' region at endpoint '%s'",
aws.StringValue(r.Config.Region),
aws.StringValue(r.Config.Endpoint),
)
if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 {
msg += fmt.Sprintf(", bucket is in '%s' region", v)
}
r.Error = awserr.NewRequestFailure(
awserr.New("BucketRegionError",
fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
aws.StringValue(r.Config.Region)),
nil),
awserr.New("BucketRegionError", msg, nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
3
vendor/github.com/aws/aws-sdk-go/service/sts/api.go
generated
vendored
@@ -7,6 +7,7 @@ import (

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
)

@@ -243,6 +244,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re

output = &AssumeRoleWithSAMLOutput{}
req = c.newRequest(op, input, output)
req.Config.Credentials = credentials.AnonymousCredentials
return
}

@@ -425,6 +427,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI

output = &AssumeRoleWithWebIdentityOutput{}
req = c.newRequest(op, input, output)
req.Config.Credentials = credentials.AnonymousCredentials
return
}
12
vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
generated
vendored
@@ -1,12 +0,0 @@
package sts

import "github.com/aws/aws-sdk-go/aws/request"

func init() {
initRequest = func(r *request.Request) {
switch r.Operation.Name {
case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity:
r.Handlers.Sign.Clear() // these operations are unsigned
}
}
}
104
vendor/github.com/benbjohnson/clock/README.md
generated
vendored
Normal file
@@ -0,0 +1,104 @@
clock [](https://drone.io/github.com/benbjohnson/clock/latest) [](https://coveralls.io/r/benbjohnson/clock?branch=master) [](https://godoc.org/github.com/benbjohnson/clock)
=====

Clock is a small library for mocking time in Go. It provides an interface
around the standard library's [`time`][time] package so that the application
can use the realtime clock while tests can use the mock clock.

[time]: http://golang.org/pkg/time/


## Usage

### Realtime Clock

Your application can maintain a `Clock` variable that will allow realtime and
mock clocks to be interchangable. For example, if you had an `Application` type:

```go
import "github.com/benbjohnson/clock"

type Application struct {
Clock clock.Clock
}
```

You could initialize it to use the realtime clock like this:

```go
var app Application
app.Clock = clock.New()
...
```

Then all timers and time-related functionality should be performed from the
`Clock` variable.


### Mocking time

In your tests, you will want to use a `Mock` clock:

```go
import (
"testing"

"github.com/benbjohnson/clock"
)

func TestApplication_DoSomething(t *testing.T) {
mock := clock.NewMock()
app := Application{Clock: mock}
...
}
```

Now that you've initialized your application to use the mock clock, you can
adjust the time programmatically. The mock clock always starts from the Unix
epoch (midnight, Jan 1, 1970 UTC).


### Controlling time

The mock clock provides the same functions that the standard library's `time`
package provides. For example, to find the current time, you use the `Now()`
function:

```go
mock := clock.NewMock()

// Find the current time.
mock.Now().UTC() // 1970-01-01 00:00:00 +0000 UTC

// Move the clock forward.
mock.Add(2 * time.Hour)

// Check the time again. It's 2 hours later!
mock.Now().UTC() // 1970-01-01 02:00:00 +0000 UTC
```

Timers and Tickers are also controlled by this same mock clock. They will only
execute when the clock is moved forward:

```
mock := clock.NewMock()
count := 0

// Kick off a timer to increment every 1 mock second.
go func() {
ticker := clock.Ticker(1 * time.Second)
for {
<-ticker.C
count++
}
}()
runtime.Gosched()

// Move the clock forward 10 second.
mock.Add(10 * time.Second)

// This prints 10.
fmt.Println(count)
```
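The README added above describes the mock clock's model: time only advances when Add is called. Below is a minimal test-shaped sketch of that behavior, using only the NewMock, Now, and Add calls shown in the README; the test name and durations are illustrative, not part of the vendored code.

```go
package app_test

import (
	"testing"
	"time"

	"github.com/benbjohnson/clock"
)

func TestMockClockAdvancesOnlyWhenAsked(t *testing.T) {
	mock := clock.NewMock() // the mock starts at the Unix epoch
	start := mock.Now()

	// Wall-clock time passing has no effect on the mock; only Add moves it.
	mock.Add(90 * time.Minute)

	if elapsed := mock.Now().Sub(start); elapsed != 90*time.Minute {
		t.Fatalf("expected 90m of mock time, got %v", elapsed)
	}
}
```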
2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
Normal file
File diff suppressed because it is too large
7
vendor/github.com/bmizerany/assert/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,7 @@
_go_.*
_gotest_.*
_obj
_test
_testmain.go
*.out
*.[568]
47
vendor/github.com/bmizerany/assert/README.md
generated
vendored
Normal file
@@ -0,0 +1,47 @@
# NO LONGER MAINTAINED - Just use Go's testing package.

# Assert (c) Blake Mizerany and Keith Rarick -- MIT LICENCE

## Assertions for Go tests

## Install

$ go get github.com/bmizerany/assert

## Use

**point.go**

package point

type Point struct {
x, y int
}

**point_test.go**

package point

import (
"testing"
"github.com/bmizerany/assert"
)

func TestAsserts(t *testing.T) {
p1 := Point{1, 1}
p2 := Point{2, 1}

assert.Equal(t, p1, p2)
}

**output**
$ go test
--- FAIL: TestAsserts (0.00 seconds)
assert.go:15: /Users/flavio.barbosa/dev/stewie/src/point_test.go:12
assert.go:24: ! X: 1 != 2
FAIL

## Docs

http://github.com/bmizerany/assert
13
vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
generated
vendored
@@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"

"strconv"
@@ -33,7 +32,7 @@ import (
)

// Similar to:
// http://code.google.com/appengine/docs/go/memcache/reference.html
// https://godoc.org/google.golang.org/appengine/memcache

var (
// ErrCacheMiss means that a Get failed because the item wasn't present.
@@ -326,8 +325,9 @@ func (c *Client) Get(key string) (item *Item, err error) {

// Touch updates the expiry for the given key. The seconds parameter is either
// a Unix timestamp or, if seconds is less than 1 month, the number of seconds
// into the future at which time the item will expire. ErrCacheMiss is returned if the
// key is not in the cache. The key must be at most 250 bytes in length.
// into the future at which time the item will expire. Zero means the item has
// no expiration time. ErrCacheMiss is returned if the key is not in the cache.
// The key must be at most 250 bytes in length.
func (c *Client) Touch(key string, seconds int32) (err error) {
return c.withKeyAddr(key, func(addr net.Addr) error {
return c.touchFromAddr(addr, []string{key}, seconds)
@@ -481,11 +481,14 @@ func parseGetResponse(r *bufio.Reader, cb func(*Item)) error {
if err != nil {
return err
}
it.Value, err = ioutil.ReadAll(io.LimitReader(r, int64(size)+2))
it.Value = make([]byte, size+2)
_, err = io.ReadFull(r, it.Value)
if err != nil {
it.Value = nil
return err
}
if !bytes.HasSuffix(it.Value, crlf) {
it.Value = nil
return fmt.Errorf("memcache: corrupt get result read")
}
it.Value = it.Value[:size]
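The parseGetResponse hunk above swaps ioutil.ReadAll over a LimitReader for a pre-sized buffer filled with io.ReadFull. Below is a small standalone sketch of that read pattern using only the standard library; the readValue helper and its error message are illustrative, not part of the vendored code.

```go
package main

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"strings"
)

var crlf = []byte("\r\n")

// readValue reads exactly size bytes plus the trailing CRLF from r into a
// pre-sized buffer, mirroring the io.ReadFull-based approach in the diff above.
func readValue(r *bufio.Reader, size int) ([]byte, error) {
	buf := make([]byte, size+2)
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, err
	}
	if !bytes.HasSuffix(buf, crlf) {
		return nil, errors.New("corrupt get result read")
	}
	return buf[:size], nil // strip the CRLF terminator
}

func main() {
	r := bufio.NewReader(strings.NewReader("hello\r\n"))
	v, err := readValue(r, 5)
	fmt.Println(string(v), err) // hello <nil>
}
```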
21
vendor/github.com/codahale/hdrhistogram/LICENSE
generated
vendored
@@ -1,21 +0,0 @@
The MIT License (MIT)

Copyright (c) 2014 Coda Hale

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
564
vendor/github.com/codahale/hdrhistogram/hdr.go
generated
vendored
@@ -1,564 +0,0 @@
// Package hdrhistogram provides an implementation of Gil Tene's HDR Histogram
// data structure. The HDR Histogram allows for fast and accurate analysis of
// the extreme ranges of data with non-normal distributions, like latency.
package hdrhistogram

import (
"fmt"
"math"
)

// A Bracket is a part of a cumulative distribution.
type Bracket struct {
Quantile float64
Count, ValueAt int64
}

// A Snapshot is an exported view of a Histogram, useful for serializing them.
// A Histogram can be constructed from it by passing it to Import.
type Snapshot struct {
LowestTrackableValue int64
HighestTrackableValue int64
SignificantFigures int64
Counts []int64
}

// A Histogram is a lossy data structure used to record the distribution of
// non-normally distributed data (like latency) with a high degree of accuracy
// and a bounded degree of precision.
type Histogram struct {
lowestTrackableValue int64
highestTrackableValue int64
unitMagnitude int64
significantFigures int64
subBucketHalfCountMagnitude int32
subBucketHalfCount int32
subBucketMask int64
subBucketCount int32
bucketCount int32
countsLen int32
totalCount int64
counts []int64
}

// New returns a new Histogram instance capable of tracking values in the given
// range and with the given amount of precision.
func New(minValue, maxValue int64, sigfigs int) *Histogram {
if sigfigs < 1 || 5 < sigfigs {
panic(fmt.Errorf("sigfigs must be [1,5] (was %d)", sigfigs))
}

largestValueWithSingleUnitResolution := 2 * math.Pow10(sigfigs)
subBucketCountMagnitude := int32(math.Ceil(math.Log2(float64(largestValueWithSingleUnitResolution))))

subBucketHalfCountMagnitude := subBucketCountMagnitude
if subBucketHalfCountMagnitude < 1 {
subBucketHalfCountMagnitude = 1
}
subBucketHalfCountMagnitude--

unitMagnitude := int32(math.Floor(math.Log2(float64(minValue))))
if unitMagnitude < 0 {
unitMagnitude = 0
}

subBucketCount := int32(math.Pow(2, float64(subBucketHalfCountMagnitude)+1))

subBucketHalfCount := subBucketCount / 2
subBucketMask := int64(subBucketCount-1) << uint(unitMagnitude)

// determine exponent range needed to support the trackable value with no
// overflow:
smallestUntrackableValue := int64(subBucketCount) << uint(unitMagnitude)
bucketsNeeded := int32(1)
for smallestUntrackableValue < maxValue {
smallestUntrackableValue <<= 1
bucketsNeeded++
}

bucketCount := bucketsNeeded
countsLen := (bucketCount + 1) * (subBucketCount / 2)

return &Histogram{
lowestTrackableValue: minValue,
highestTrackableValue: maxValue,
unitMagnitude: int64(unitMagnitude),
significantFigures: int64(sigfigs),
subBucketHalfCountMagnitude: subBucketHalfCountMagnitude,
subBucketHalfCount: subBucketHalfCount,
subBucketMask: subBucketMask,
subBucketCount: subBucketCount,
bucketCount: bucketCount,
countsLen: countsLen,
totalCount: 0,
counts: make([]int64, countsLen),
}
}

// ByteSize returns an estimate of the amount of memory allocated to the
// histogram in bytes.
//
// N.B.: This does not take into account the overhead for slices, which are
// small, constant, and specific to the compiler version.
func (h *Histogram) ByteSize() int {
return 6*8 + 5*4 + len(h.counts)*8
}

// Merge merges the data stored in the given histogram with the receiver,
// returning the number of recorded values which had to be dropped.
func (h *Histogram) Merge(from *Histogram) (dropped int64) {
i := from.rIterator()
for i.next() {
v := i.valueFromIdx
c := i.countAtIdx

if h.RecordValues(v, c) != nil {
dropped += c
}
}

return
}

// TotalCount returns total number of values recorded.
func (h *Histogram) TotalCount() int64 {
return h.totalCount
}

// Max returns the approximate maximum recorded value.
func (h *Histogram) Max() int64 {
var max int64
i := h.iterator()
for i.next() {
if i.countAtIdx != 0 {
max = i.highestEquivalentValue
}
}
return h.highestEquivalentValue(max)
}

// Min returns the approximate minimum recorded value.
func (h *Histogram) Min() int64 {
var min int64
i := h.iterator()
for i.next() {
if i.countAtIdx != 0 && min == 0 {
min = i.highestEquivalentValue
break
}
}
return h.lowestEquivalentValue(min)
}

// Mean returns the approximate arithmetic mean of the recorded values.
func (h *Histogram) Mean() float64 {
if h.totalCount == 0 {
return 0
}
var total int64
i := h.iterator()
for i.next() {
if i.countAtIdx != 0 {
total += i.countAtIdx * h.medianEquivalentValue(i.valueFromIdx)
}
}
return float64(total) / float64(h.totalCount)
}

// StdDev returns the approximate standard deviation of the recorded values.
func (h *Histogram) StdDev() float64 {
if h.totalCount == 0 {
return 0
}

mean := h.Mean()
geometricDevTotal := 0.0

i := h.iterator()
for i.next() {
if i.countAtIdx != 0 {
dev := float64(h.medianEquivalentValue(i.valueFromIdx)) - mean
geometricDevTotal += (dev * dev) * float64(i.countAtIdx)
}
}

return math.Sqrt(geometricDevTotal / float64(h.totalCount))
}

// Reset deletes all recorded values and restores the histogram to its original
// state.
func (h *Histogram) Reset() {
h.totalCount = 0
for i := range h.counts {
h.counts[i] = 0
}
}

// RecordValue records the given value, returning an error if the value is out
// of range.
func (h *Histogram) RecordValue(v int64) error {
return h.RecordValues(v, 1)
}

// RecordCorrectedValue records the given value, correcting for stalls in the
// recording process. This only works for processes which are recording values
// at an expected interval (e.g., doing jitter analysis). Processes which are
// recording ad-hoc values (e.g., latency for incoming requests) can't take
// advantage of this.
func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error {
if err := h.RecordValue(v); err != nil {
return err
}

if expectedInterval <= 0 || v <= expectedInterval {
return nil
}

missingValue := v - expectedInterval
for missingValue >= expectedInterval {
if err := h.RecordValue(missingValue); err != nil {
return err
}
missingValue -= expectedInterval
}

return nil
}

// RecordValues records n occurrences of the given value, returning an error if
// the value is out of range.
func (h *Histogram) RecordValues(v, n int64) error {
idx := h.countsIndexFor(v)
if idx < 0 || int(h.countsLen) <= idx {
return fmt.Errorf("value %d is too large to be recorded", v)
}
h.counts[idx] += n
h.totalCount += n

return nil
}

// ValueAtQuantile returns the recorded value at the given quantile (0..100).
func (h *Histogram) ValueAtQuantile(q float64) int64 {
if q > 100 {
q = 100
}

total := int64(0)
countAtPercentile := int64(((q / 100) * float64(h.totalCount)) + 0.5)

i := h.iterator()
for i.next() {
total += i.countAtIdx
if total >= countAtPercentile {
return h.highestEquivalentValue(i.valueFromIdx)
}
}

return 0
}

// CumulativeDistribution returns an ordered list of brackets of the
// distribution of recorded values.
func (h *Histogram) CumulativeDistribution() []Bracket {
var result []Bracket

i := h.pIterator(1)
for i.next() {
result = append(result, Bracket{
Quantile: i.percentile,
Count: i.countToIdx,
ValueAt: i.highestEquivalentValue,
})
}

return result
}

// SignificantFigures returns the significant figures used to create the
// histogram
func (h *Histogram) SignificantFigures() int64 {
return h.significantFigures
}

// LowestTrackableValue returns the lower bound on values that will be added
// to the histogram
func (h *Histogram) LowestTrackableValue() int64 {
return h.lowestTrackableValue
}

// HighestTrackableValue returns the upper bound on values that will be added
// to the histogram
func (h *Histogram) HighestTrackableValue() int64 {
return h.highestTrackableValue
}

// Histogram bar for plotting
type Bar struct {
From, To, Count int64
}

// Pretty print as csv for easy plotting
func (b Bar) String() string {
return fmt.Sprintf("%v, %v, %v\n", b.From, b.To, b.Count)
}

// Distribution returns an ordered list of bars of the
// distribution of recorded values, counts can be normalized to a probability
func (h *Histogram) Distribution() (result []Bar) {
i := h.iterator()
for i.next() {
result = append(result, Bar{
Count: i.countAtIdx,
From: h.lowestEquivalentValue(i.valueFromIdx),
To: i.highestEquivalentValue,
})
}

return result
}

// Equals returns true if the two Histograms are equivalent, false if not.
func (h *Histogram) Equals(other *Histogram) bool {
switch {
case
h.lowestTrackableValue != other.lowestTrackableValue,
h.highestTrackableValue != other.highestTrackableValue,
h.unitMagnitude != other.unitMagnitude,
h.significantFigures != other.significantFigures,
h.subBucketHalfCountMagnitude != other.subBucketHalfCountMagnitude,
h.subBucketHalfCount != other.subBucketHalfCount,
h.subBucketMask != other.subBucketMask,
h.subBucketCount != other.subBucketCount,
h.bucketCount != other.bucketCount,
h.countsLen != other.countsLen,
h.totalCount != other.totalCount:
return false
default:
for i, c := range h.counts {
if c != other.counts[i] {
return false
}
}
}
return true
}

// Export returns a snapshot view of the Histogram. This can be later passed to
// Import to construct a new Histogram with the same state.
func (h *Histogram) Export() *Snapshot {
return &Snapshot{
LowestTrackableValue: h.lowestTrackableValue,
HighestTrackableValue: h.highestTrackableValue,
SignificantFigures: h.significantFigures,
Counts: append([]int64(nil), h.counts...), // copy
}
}

// Import returns a new Histogram populated from the Snapshot data (which the
// caller must stop accessing).
func Import(s *Snapshot) *Histogram {
h := New(s.LowestTrackableValue, s.HighestTrackableValue, int(s.SignificantFigures))
h.counts = s.Counts
totalCount := int64(0)
for i := int32(0); i < h.countsLen; i++ {
countAtIndex := h.counts[i]
if countAtIndex > 0 {
totalCount += countAtIndex
}
}
h.totalCount = totalCount
return h
}

func (h *Histogram) iterator() *iterator {
return &iterator{
h: h,
subBucketIdx: -1,
}
}

func (h *Histogram) rIterator() *rIterator {
return &rIterator{
iterator: iterator{
h: h,
subBucketIdx: -1,
},
}
}

func (h *Histogram) pIterator(ticksPerHalfDistance int32) *pIterator {
return &pIterator{
iterator: iterator{
h: h,
subBucketIdx: -1,
},
ticksPerHalfDistance: ticksPerHalfDistance,
}
}

func (h *Histogram) sizeOfEquivalentValueRange(v int64) int64 {
bucketIdx := h.getBucketIndex(v)
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
adjustedBucket := bucketIdx
if subBucketIdx >= h.subBucketCount {
adjustedBucket++
}
return int64(1) << uint(h.unitMagnitude+int64(adjustedBucket))
}

func (h *Histogram) valueFromIndex(bucketIdx, subBucketIdx int32) int64 {
return int64(subBucketIdx) << uint(int64(bucketIdx)+h.unitMagnitude)
}

func (h *Histogram) lowestEquivalentValue(v int64) int64 {
bucketIdx := h.getBucketIndex(v)
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
return h.valueFromIndex(bucketIdx, subBucketIdx)
}

func (h *Histogram) nextNonEquivalentValue(v int64) int64 {
return h.lowestEquivalentValue(v) + h.sizeOfEquivalentValueRange(v)
}

func (h *Histogram) highestEquivalentValue(v int64) int64 {
return h.nextNonEquivalentValue(v) - 1
}

func (h *Histogram) medianEquivalentValue(v int64) int64 {
return h.lowestEquivalentValue(v) + (h.sizeOfEquivalentValueRange(v) >> 1)
}

func (h *Histogram) getCountAtIndex(bucketIdx, subBucketIdx int32) int64 {
return h.counts[h.countsIndex(bucketIdx, subBucketIdx)]
}

func (h *Histogram) countsIndex(bucketIdx, subBucketIdx int32) int32 {
bucketBaseIdx := (bucketIdx + 1) << uint(h.subBucketHalfCountMagnitude)
offsetInBucket := subBucketIdx - h.subBucketHalfCount
return bucketBaseIdx + offsetInBucket
}

func (h *Histogram) getBucketIndex(v int64) int32 {
pow2Ceiling := bitLen(v | h.subBucketMask)
return int32(pow2Ceiling - int64(h.unitMagnitude) -
int64(h.subBucketHalfCountMagnitude+1))
}

func (h *Histogram) getSubBucketIdx(v int64, idx int32) int32 {
return int32(v >> uint(int64(idx)+int64(h.unitMagnitude)))
}

func (h *Histogram) countsIndexFor(v int64) int {
bucketIdx := h.getBucketIndex(v)
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
return int(h.countsIndex(bucketIdx, subBucketIdx))
}

type iterator struct {
h *Histogram
bucketIdx, subBucketIdx int32
countAtIdx, countToIdx, valueFromIdx int64
highestEquivalentValue int64
}

func (i *iterator) next() bool {
if i.countToIdx >= i.h.totalCount {
return false
}

// increment bucket
i.subBucketIdx++
if i.subBucketIdx >= i.h.subBucketCount {
i.subBucketIdx = i.h.subBucketHalfCount
i.bucketIdx++
}

if i.bucketIdx >= i.h.bucketCount {
return false
}

i.countAtIdx = i.h.getCountAtIndex(i.bucketIdx, i.subBucketIdx)
i.countToIdx += i.countAtIdx
i.valueFromIdx = i.h.valueFromIndex(i.bucketIdx, i.subBucketIdx)
i.highestEquivalentValue = i.h.highestEquivalentValue(i.valueFromIdx)

return true
}

type rIterator struct {
iterator
countAddedThisStep int64
}

func (r *rIterator) next() bool {
for r.iterator.next() {
if r.countAtIdx != 0 {
r.countAddedThisStep = r.countAtIdx
return true
}
}
return false
}

type pIterator struct {
iterator
seenLastValue bool
ticksPerHalfDistance int32
percentileToIteratorTo float64
percentile float64
}

func (p *pIterator) next() bool {
if !(p.countToIdx < p.h.totalCount) {
if p.seenLastValue {
return false
}

p.seenLastValue = true
p.percentile = 100

return true
}

if p.subBucketIdx == -1 && !p.iterator.next() {
return false
}

var done = false
for !done {
currentPercentile := (100.0 * float64(p.countToIdx)) / float64(p.h.totalCount)
if p.countAtIdx != 0 && p.percentileToIteratorTo <= currentPercentile {
p.percentile = p.percentileToIteratorTo
halfDistance := math.Trunc(math.Pow(2, math.Trunc(math.Log2(100.0/(100.0-p.percentileToIteratorTo)))+1))
percentileReportingTicks := float64(p.ticksPerHalfDistance) * halfDistance
p.percentileToIteratorTo += 100.0 / percentileReportingTicks
return true
}
done = !p.iterator.next()
}

return true
}

func bitLen(x int64) (n int64) {
for ; x >= 0x8000; x >>= 16 {
n += 16
}
if x >= 0x80 {
x >>= 8
n += 8
}
if x >= 0x8 {
x >>= 4
n += 4
}
if x >= 0x2 {
x >>= 2
n += 2
}
if x >= 0x1 {
n++
}
return
}
45
vendor/github.com/codahale/hdrhistogram/window.go
generated
vendored
@@ -1,45 +0,0 @@
package hdrhistogram

// A WindowedHistogram combines histograms to provide windowed statistics.
type WindowedHistogram struct {
idx int
h []Histogram
m *Histogram

Current *Histogram
}

// NewWindowed creates a new WindowedHistogram with N underlying histograms with
// the given parameters.
func NewWindowed(n int, minValue, maxValue int64, sigfigs int) *WindowedHistogram {
w := WindowedHistogram{
idx: -1,
h: make([]Histogram, n),
m: New(minValue, maxValue, sigfigs),
}

for i := range w.h {
w.h[i] = *New(minValue, maxValue, sigfigs)
}
w.Rotate()

return &w
}

// Merge returns a histogram which includes the recorded values from all the
// sections of the window.
func (w *WindowedHistogram) Merge() *Histogram {
w.m.Reset()
for _, h := range w.h {
w.m.Merge(&h)
}
return w.m
}

// Rotate resets the oldest histogram and rotates it to be used as the current
// histogram.
func (w *WindowedHistogram) Rotate() {
w.idx++
w.Current = &w.h[w.idx%len(w.h)]
w.Current.Reset()
}
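Both codahale/hdrhistogram files above are removed from the vendor tree in this change. For reference, here is a minimal sketch of how the removed API was typically used, based only on the functions shown above; the sample values are illustrative.

```go
package main

import (
	"fmt"

	"github.com/codahale/hdrhistogram"
)

func main() {
	// Track latencies between 1 and 3,600,000 ms with 3 significant figures.
	h := hdrhistogram.New(1, 3600000, 3)

	for _, ms := range []int64{12, 45, 300, 950, 1200} {
		if err := h.RecordValue(ms); err != nil {
			fmt.Println("value out of range:", err)
		}
	}

	fmt.Println("p99:", h.ValueAtQuantile(99))
	fmt.Println("mean:", h.Mean())
	fmt.Println("max:", h.Max())
}
```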
2
vendor/github.com/codegangsta/cli/.flake8
generated
vendored
Normal file
@@ -0,0 +1,2 @@
[flake8]
max-line-length = 120
2
vendor/github.com/codegangsta/cli/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,2 @@
*.coverprofile
node_modules/
435
vendor/github.com/codegangsta/cli/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,435 @@
# Change Log

**ATTN**: This project uses [semantic versioning](http://semver.org/).

## [Unreleased]

## 1.20.0 - 2017-08-10

### Fixed

* `HandleExitCoder` is now correctly iterates over all errors in
a `MultiError`. The exit code is the exit code of the last error or `1` if
there are no `ExitCoder`s in the `MultiError`.
* Fixed YAML file loading on Windows (previously would fail validate the file path)
* Subcommand `Usage`, `Description`, `ArgsUsage`, `OnUsageError` correctly
propogated
* `ErrWriter` is now passed downwards through command structure to avoid the
need to redefine it
* Pass `Command` context into `OnUsageError` rather than parent context so that
all fields are avaiable
* Errors occuring in `Before` funcs are no longer double printed
* Use `UsageText` in the help templates for commands and subcommands if
defined; otherwise build the usage as before (was previously ignoring this
field)
* `IsSet` and `GlobalIsSet` now correctly return whether a flag is set if
a program calls `Set` or `GlobalSet` directly after flag parsing (would
previously only return `true` if the flag was set during parsing)

### Changed

* No longer exit the program on command/subcommand error if the error raised is
not an `OsExiter`. This exiting behavior was introduced in 1.19.0, but was
determined to be a regression in functionality. See [the
PR](https://github.com/urfave/cli/pull/595) for discussion.

### Added

* `CommandsByName` type was added to make it easy to sort `Command`s by name,
alphabetically
* `altsrc` now handles loading of string and int arrays from TOML
* Support for definition of custom help templates for `App` via
`CustomAppHelpTemplate`
* Support for arbitrary key/value fields on `App` to be used with
`CustomAppHelpTemplate` via `ExtraInfo`
* `HelpFlag`, `VersionFlag`, and `BashCompletionFlag` changed to explictly be
`cli.Flag`s allowing for the use of custom flags satisfying the `cli.Flag`
interface to be used.


## [1.19.1] - 2016-11-21

### Fixed

- Fixes regression introduced in 1.19.0 where using an `ActionFunc` as
the `Action` for a command would cause it to error rather than calling the
function. Should not have a affected declarative cases using `func(c
*cli.Context) err)`.
- Shell completion now handles the case where the user specifies
`--generate-bash-completion` immediately after a flag that takes an argument.
Previously it call the application with `--generate-bash-completion` as the
flag value.

## [1.19.0] - 2016-11-19
### Added
- `FlagsByName` was added to make it easy to sort flags (e.g. `sort.Sort(cli.FlagsByName(app.Flags))`)
- A `Description` field was added to `App` for a more detailed description of
the application (similar to the existing `Description` field on `Command`)
- Flag type code generation via `go generate`
- Write to stderr and exit 1 if action returns non-nil error
- Added support for TOML to the `altsrc` loader
- `SkipArgReorder` was added to allow users to skip the argument reordering.
This is useful if you want to consider all "flags" after an argument as
arguments rather than flags (the default behavior of the stdlib `flag`
library). This is backported functionality from the [removal of the flag
reordering](https://github.com/urfave/cli/pull/398) in the unreleased version
2
- For formatted errors (those implementing `ErrorFormatter`), the errors will
be formatted during output. Compatible with `pkg/errors`.

### Changed
- Raise minimum tested/supported Go version to 1.2+

### Fixed
- Consider empty environment variables as set (previously environment variables
with the equivalent of `""` would be skipped rather than their value used).
- Return an error if the value in a given environment variable cannot be parsed
as the flag type. Previously these errors were silently swallowed.
- Print full error when an invalid flag is specified (which includes the invalid flag)
- `App.Writer` defaults to `stdout` when `nil`
- If no action is specified on a command or app, the help is now printed instead of `panic`ing
- `App.Metadata` is initialized automatically now (previously was `nil` unless initialized)
- Correctly show help message if `-h` is provided to a subcommand
- `context.(Global)IsSet` now respects environment variables. Previously it
would return `false` if a flag was specified in the environment rather than
as an argument
- Removed deprecation warnings to STDERR to avoid them leaking to the end-user
- `altsrc`s import paths were updated to use `gopkg.in/urfave/cli.v1`. This
fixes issues that occurred when `gopkg.in/urfave/cli.v1` was imported as well
as `altsrc` where Go would complain that the types didn't match

## [1.18.1] - 2016-08-28
### Fixed
- Removed deprecation warnings to STDERR to avoid them leaking to the end-user (backported)

## [1.18.0] - 2016-06-27
### Added
- `./runtests` test runner with coverage tracking by default
- testing on OS X
- testing on Windows
- `UintFlag`, `Uint64Flag`, and `Int64Flag` types and supporting code

### Changed
- Use spaces for alignment in help/usage output instead of tabs, making the
output alignment consistent regardless of tab width

### Fixed
- Printing of command aliases in help text
- Printing of visible flags for both struct and struct pointer flags
- Display the `help` subcommand when using `CommandCategories`
- No longer swallows `panic`s that occur within the `Action`s themselves when
detecting the signature of the `Action` field

## [1.17.1] - 2016-08-28
### Fixed
- Removed deprecation warnings to STDERR to avoid them leaking to the end-user

## [1.17.0] - 2016-05-09
### Added
- Pluggable flag-level help text rendering via `cli.DefaultFlagStringFunc`
- `context.GlobalBoolT` was added as an analogue to `context.GlobalBool`
- Support for hiding commands by setting `Hidden: true` -- this will hide the
commands in help output

### Changed
- `Float64Flag`, `IntFlag`, and `DurationFlag` default values are no longer
quoted in help text output.
- All flag types now include `(default: {value})` strings following usage when a
default value can be (reasonably) detected.
- `IntSliceFlag` and `StringSliceFlag` usage strings are now more consistent
with non-slice flag types
- Apps now exit with a code of 3 if an unknown subcommand is specified
(previously they printed "No help topic for...", but still exited 0. This
makes it easier to script around apps built using `cli` since they can trust
that a 0 exit code indicated a successful execution.
- cleanups based on [Go Report Card
feedback](https://goreportcard.com/report/github.com/urfave/cli)

## [1.16.1] - 2016-08-28
### Fixed
- Removed deprecation warnings to STDERR to avoid them leaking to the end-user

## [1.16.0] - 2016-05-02
### Added
- `Hidden` field on all flag struct types to omit from generated help text

### Changed
- `BashCompletionFlag` (`--enable-bash-completion`) is now omitted from
generated help text via the `Hidden` field

### Fixed
- handling of error values in `HandleAction` and `HandleExitCoder`

## [1.15.0] - 2016-04-30
### Added
- This file!
- Support for placeholders in flag usage strings
- `App.Metadata` map for arbitrary data/state management
- `Set` and `GlobalSet` methods on `*cli.Context` for altering values after
parsing.
- Support for nested lookup of dot-delimited keys in structures loaded from
YAML.

### Changed
- The `App.Action` and `Command.Action` now prefer a return signature of
`func(*cli.Context) error`, as defined by `cli.ActionFunc`. If a non-nil
`error` is returned, there may be two outcomes:
- If the error fulfills `cli.ExitCoder`, then `os.Exit` will be called
automatically
- Else the error is bubbled up and returned from `App.Run`
- Specifying an `Action` with the legacy return signature of
`func(*cli.Context)` will produce a deprecation message to stderr
- Specifying an `Action` that is not a `func` type will produce a non-zero exit
from `App.Run`
- Specifying an `Action` func that has an invalid (input) signature will
produce a non-zero exit from `App.Run`

### Deprecated
- <a name="deprecated-cli-app-runandexitonerror"></a>
`cli.App.RunAndExitOnError`, which should now be done by returning an error
that fulfills `cli.ExitCoder` to `cli.App.Run`.
- <a name="deprecated-cli-app-action-signature"></a> the legacy signature for
`cli.App.Action` of `func(*cli.Context)`, which should now have a return
signature of `func(*cli.Context) error`, as defined by `cli.ActionFunc`.

### Fixed
- Added missing `*cli.Context.GlobalFloat64` method

## [1.14.0] - 2016-04-03 (backfilled 2016-04-25)
### Added
- Codebeat badge
- Support for categorization via `CategorizedHelp` and `Categories` on app.

### Changed
- Use `filepath.Base` instead of `path.Base` in `Name` and `HelpName`.

### Fixed
- Ensure version is not shown in help text when `HideVersion` set.

## [1.13.0] - 2016-03-06 (backfilled 2016-04-25)
### Added
- YAML file input support.
- `NArg` method on context.

## [1.12.0] - 2016-02-17 (backfilled 2016-04-25)
### Added
- Custom usage error handling.
- Custom text support in `USAGE` section of help output.
- Improved help messages for empty strings.
- AppVeyor CI configuration.

### Changed
- Removed `panic` from default help printer func.
- De-duping and optimizations.

### Fixed
- Correctly handle `Before`/`After` at command level when no subcommands.
- Case of literal `-` argument causing flag reordering.
- Environment variable hints on Windows.
- Docs updates.

## [1.11.1] - 2015-12-21 (backfilled 2016-04-25)
### Changed
- Use `path.Base` in `Name` and `HelpName`
- Export `GetName` on flag types.

### Fixed
- Flag parsing when skipping is enabled.
- Test output cleanup.
- Move completion check to account for empty input case.

## [1.11.0] - 2015-11-15 (backfilled 2016-04-25)
### Added
- Destination scan support for flags.
- Testing against `tip` in Travis CI config.

### Changed
- Go version in Travis CI config.

### Fixed
- Removed redundant tests.
- Use correct example naming in tests.

## [1.10.2] - 2015-10-29 (backfilled 2016-04-25)
### Fixed
- Remove unused var in bash completion.

## [1.10.1] - 2015-10-21 (backfilled 2016-04-25)
### Added
- Coverage and reference logos in README.

### Fixed
- Use specified values in help and version parsing.
- Only display app version and help message once.

## [1.10.0] - 2015-10-06 (backfilled 2016-04-25)
### Added
- More tests for existing functionality.
- `ArgsUsage` at app and command level for help text flexibility.

### Fixed
- Honor `HideHelp` and `HideVersion` in `App.Run`.
- Remove juvenile word from README.

## [1.9.0] - 2015-09-08 (backfilled 2016-04-25)
### Added
- `FullName` on command with accompanying help output update.
- Set default `$PROG` in bash completion.

### Changed
- Docs formatting.

### Fixed
- Removed self-referential imports in tests.

## [1.8.0] - 2015-06-30 (backfilled 2016-04-25)
### Added
- Support for `Copyright` at app level.
- `Parent` func at context level to walk up context lineage.

### Fixed
- Global flag processing at top level.

## [1.7.1] - 2015-06-11 (backfilled 2016-04-25)
### Added
- Aggregate errors from `Before`/`After` funcs.
- Doc comments on flag structs.
- Include non-global flags when checking version and help.
- Travis CI config updates.

### Fixed
- Ensure slice type flags have non-nil values.
- Collect global flags from the full command hierarchy.
- Docs prose.

## [1.7.0] - 2015-05-03 (backfilled 2016-04-25)
### Changed
- `HelpPrinter` signature includes output writer.

### Fixed
- Specify go 1.1+ in docs.
- Set `Writer` when running command as app.

## [1.6.0] - 2015-03-23 (backfilled 2016-04-25)
### Added
- Multiple author support.
- `NumFlags` at context level.
- `Aliases` at command level.

### Deprecated
- `ShortName` at command level.

### Fixed
- Subcommand help output.
- Backward compatible support for deprecated `Author` and `Email` fields.
- Docs regarding `Names`/`Aliases`.

## [1.5.0] - 2015-02-20 (backfilled 2016-04-25)
### Added
- `After` hook func support at app and command level.

### Fixed
- Use parsed context when running command as subcommand.
- Docs prose.

## [1.4.1] - 2015-01-09 (backfilled 2016-04-25)
### Added
- Support for hiding `-h / --help` flags, but not `help` subcommand.
- Stop flag parsing after `--`.

### Fixed
- Help text for generic flags to specify single value.
- Use double quotes in output for defaults.
- Use `ParseInt` instead of `ParseUint` for int environment var values.
- Use `0` as base when parsing int environment var values.

## [1.4.0] - 2014-12-12 (backfilled 2016-04-25)
### Added
- Support for environment variable lookup "cascade".
- Support for `Stdout` on app for output redirection.

### Fixed
- Print command help instead of app help in `ShowCommandHelp`.

## [1.3.1] - 2014-11-13 (backfilled 2016-04-25)
### Added
- Docs and example code updates.

### Changed
- Default `-v / --version` flag made optional.

## [1.3.0] - 2014-08-10 (backfilled 2016-04-25)
### Added
- `FlagNames` at context level.
- Exposed `VersionPrinter` var for more control over version output.
- Zsh completion hook.
- `AUTHOR` section in default app help template.
- Contribution guidelines.
- `DurationFlag` type.

## [1.2.0] - 2014-08-02
### Added
- Support for environment variable defaults on flags plus tests.

## [1.1.0] - 2014-07-15
### Added
- Bash completion.
- Optional hiding of built-in help command.
- Optional skipping of flag parsing at command level.
|
||||
- `Author`, `Email`, and `Compiled` metadata on app.
|
||||
- `Before` hook func support at app and command level.
|
||||
- `CommandNotFound` func support at app level.
|
||||
- Command reference available on context.
|
||||
- `GenericFlag` type.
|
||||
- `Float64Flag` type.
|
||||
- `BoolTFlag` type.
|
||||
- `IsSet` flag helper on context.
|
||||
- More flag lookup funcs at context level.
|
||||
- More tests & docs.
|
||||
|
||||
### Changed
|
||||
- Help template updates to account for presence/absence of flags.
|
||||
- Separated subcommand help template.
|
||||
- Exposed `HelpPrinter` var for more control over help output.
|
||||
|
||||
## [1.0.0] - 2013-11-01
|
||||
### Added
|
||||
- `help` flag in default app flag set and each command flag set.
|
||||
- Custom handling of argument parsing errors.
|
||||
- Command lookup by name at app level.
|
||||
- `StringSliceFlag` type and supporting `StringSlice` type.
|
||||
- `IntSliceFlag` type and supporting `IntSlice` type.
|
||||
- Slice type flag lookups by name at context level.
|
||||
- Export of app and command help functions.
|
||||
- More tests & docs.
|
||||
|
||||
## 0.1.0 - 2013-07-22
|
||||
### Added
|
||||
- Initial implementation.
|
||||
|
||||
[Unreleased]: https://github.com/urfave/cli/compare/v1.18.0...HEAD
|
||||
[1.18.0]: https://github.com/urfave/cli/compare/v1.17.0...v1.18.0
|
||||
[1.17.0]: https://github.com/urfave/cli/compare/v1.16.0...v1.17.0
|
||||
[1.16.0]: https://github.com/urfave/cli/compare/v1.15.0...v1.16.0
|
||||
[1.15.0]: https://github.com/urfave/cli/compare/v1.14.0...v1.15.0
|
||||
[1.14.0]: https://github.com/urfave/cli/compare/v1.13.0...v1.14.0
|
||||
[1.13.0]: https://github.com/urfave/cli/compare/v1.12.0...v1.13.0
|
||||
[1.12.0]: https://github.com/urfave/cli/compare/v1.11.1...v1.12.0
|
||||
[1.11.1]: https://github.com/urfave/cli/compare/v1.11.0...v1.11.1
|
||||
[1.11.0]: https://github.com/urfave/cli/compare/v1.10.2...v1.11.0
|
||||
[1.10.2]: https://github.com/urfave/cli/compare/v1.10.1...v1.10.2
|
||||
[1.10.1]: https://github.com/urfave/cli/compare/v1.10.0...v1.10.1
|
||||
[1.10.0]: https://github.com/urfave/cli/compare/v1.9.0...v1.10.0
|
||||
[1.9.0]: https://github.com/urfave/cli/compare/v1.8.0...v1.9.0
|
||||
[1.8.0]: https://github.com/urfave/cli/compare/v1.7.1...v1.8.0
|
||||
[1.7.1]: https://github.com/urfave/cli/compare/v1.7.0...v1.7.1
|
||||
[1.7.0]: https://github.com/urfave/cli/compare/v1.6.0...v1.7.0
|
||||
[1.6.0]: https://github.com/urfave/cli/compare/v1.5.0...v1.6.0
|
||||
[1.5.0]: https://github.com/urfave/cli/compare/v1.4.1...v1.5.0
|
||||
[1.4.1]: https://github.com/urfave/cli/compare/v1.4.0...v1.4.1
|
||||
[1.4.0]: https://github.com/urfave/cli/compare/v1.3.1...v1.4.0
|
||||
[1.3.1]: https://github.com/urfave/cli/compare/v1.3.0...v1.3.1
|
||||
[1.3.0]: https://github.com/urfave/cli/compare/v1.2.0...v1.3.0
|
||||
[1.2.0]: https://github.com/urfave/cli/compare/v1.1.0...v1.2.0
|
||||
[1.1.0]: https://github.com/urfave/cli/compare/v1.0.0...v1.1.0
|
||||
[1.0.0]: https://github.com/urfave/cli/compare/v0.1.0...v1.0.0
|
||||
1381
vendor/github.com/codegangsta/cli/README.md
generated
vendored
Normal file
File diff suppressed because it is too large
93
vendor/github.com/codegangsta/cli/flag-types.json
generated
vendored
Normal file
@@ -0,0 +1,93 @@
|
||||
[
|
||||
{
|
||||
"name": "Bool",
|
||||
"type": "bool",
|
||||
"value": false,
|
||||
"context_default": "false",
|
||||
"parser": "strconv.ParseBool(f.Value.String())"
|
||||
},
|
||||
{
|
||||
"name": "BoolT",
|
||||
"type": "bool",
|
||||
"value": false,
|
||||
"doctail": " that is true by default",
|
||||
"context_default": "false",
|
||||
"parser": "strconv.ParseBool(f.Value.String())"
|
||||
},
|
||||
{
|
||||
"name": "Duration",
|
||||
"type": "time.Duration",
|
||||
"doctail": " (see https://golang.org/pkg/time/#ParseDuration)",
|
||||
"context_default": "0",
|
||||
"parser": "time.ParseDuration(f.Value.String())"
|
||||
},
|
||||
{
|
||||
"name": "Float64",
|
||||
"type": "float64",
|
||||
"context_default": "0",
|
||||
"parser": "strconv.ParseFloat(f.Value.String(), 64)"
|
||||
},
|
||||
{
|
||||
"name": "Generic",
|
||||
"type": "Generic",
|
||||
"dest": false,
|
||||
"context_default": "nil",
|
||||
"context_type": "interface{}"
|
||||
},
|
||||
{
|
||||
"name": "Int64",
|
||||
"type": "int64",
|
||||
"context_default": "0",
|
||||
"parser": "strconv.ParseInt(f.Value.String(), 0, 64)"
|
||||
},
|
||||
{
|
||||
"name": "Int",
|
||||
"type": "int",
|
||||
"context_default": "0",
|
||||
"parser": "strconv.ParseInt(f.Value.String(), 0, 64)",
|
||||
"parser_cast": "int(parsed)"
|
||||
},
|
||||
{
|
||||
"name": "IntSlice",
|
||||
"type": "*IntSlice",
|
||||
"dest": false,
|
||||
"context_default": "nil",
|
||||
"context_type": "[]int",
|
||||
"parser": "(f.Value.(*IntSlice)).Value(), error(nil)"
|
||||
},
|
||||
{
|
||||
"name": "Int64Slice",
|
||||
"type": "*Int64Slice",
|
||||
"dest": false,
|
||||
"context_default": "nil",
|
||||
"context_type": "[]int64",
|
||||
"parser": "(f.Value.(*Int64Slice)).Value(), error(nil)"
|
||||
},
|
||||
{
|
||||
"name": "String",
|
||||
"type": "string",
|
||||
"context_default": "\"\"",
|
||||
"parser": "f.Value.String(), error(nil)"
|
||||
},
|
||||
{
|
||||
"name": "StringSlice",
|
||||
"type": "*StringSlice",
|
||||
"dest": false,
|
||||
"context_default": "nil",
|
||||
"context_type": "[]string",
|
||||
"parser": "(f.Value.(*StringSlice)).Value(), error(nil)"
|
||||
},
|
||||
{
|
||||
"name": "Uint64",
|
||||
"type": "uint64",
|
||||
"context_default": "0",
|
||||
"parser": "strconv.ParseUint(f.Value.String(), 0, 64)"
|
||||
},
|
||||
{
|
||||
"name": "Uint",
|
||||
"type": "uint",
|
||||
"context_default": "0",
|
||||
"parser": "strconv.ParseUint(f.Value.String(), 0, 64)",
|
||||
"parser_cast": "uint(parsed)"
|
||||
}
|
||||
]
|
||||
255
vendor/github.com/codegangsta/cli/generate-flag-types
generated
vendored
Normal file
@@ -0,0 +1,255 @@
|
||||
#!/usr/bin/env python
|
||||
"""
|
||||
The flag types that ship with the cli library have many things in common, and
|
||||
so we can take advantage of the `go generate` command to create much of the
|
||||
source code from a list of definitions. These definitions attempt to cover
|
||||
the parts that vary between flag types, and should evolve as needed.
|
||||
|
||||
An example of the minimum definition needed is:
|
||||
|
||||
{
|
||||
"name": "SomeType",
|
||||
"type": "sometype",
|
||||
"context_default": "nil"
|
||||
}
|
||||
|
||||
In this example, the code generated for the `cli` package will include a type
|
||||
named `SomeTypeFlag` that is expected to wrap a value of type `sometype`.
|
||||
Fetching values by name via `*cli.Context` will default to a value of `nil`.
|
||||
|
||||
A more complete, albeit somewhat redundant, example showing all available
|
||||
definition keys is:
|
||||
|
||||
{
|
||||
"name": "VeryMuchType",
|
||||
"type": "*VeryMuchType",
|
||||
"value": true,
|
||||
"dest": false,
|
||||
"doctail": " which really only wraps a []float64, oh well!",
|
||||
"context_type": "[]float64",
|
||||
"context_default": "nil",
|
||||
"parser": "parseVeryMuchType(f.Value.String())",
|
||||
"parser_cast": "[]float64(parsed)"
|
||||
}
|
||||
|
||||
The meaning of each field is as follows:
|
||||
|
||||
name (string) - The type "name", which will be suffixed with
|
||||
`Flag` when generating the type definition
|
||||
for `cli` and the wrapper type for `altsrc`
|
||||
type (string) - The type that the generated `Flag` type for `cli`
|
||||
is expected to "contain" as its `.Value` member
|
||||
value (bool) - Should the generated `cli` type have a `Value`
|
||||
member?
|
||||
dest (bool) - Should the generated `cli` type support a
|
||||
destination pointer?
|
||||
doctail (string) - Additional docs for the `cli` flag type comment
|
||||
context_type (string) - The literal type used in the `*cli.Context`
|
||||
reader func signature
|
||||
context_default (string) - The literal value used as the default by the
|
||||
`*cli.Context` reader funcs when no value is
|
||||
present
|
||||
parser (string) - Literal code used to parse the flag `f`,
|
||||
expected to have a return signature of
|
||||
(value, error)
|
||||
parser_cast (string) - Literal code used to cast the `parsed` value
|
||||
returned from the `parser` code
|
||||
"""
|
||||
|
||||
from __future__ import print_function, unicode_literals
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import textwrap
|
||||
|
||||
|
||||
class _FancyFormatter(argparse.ArgumentDefaultsHelpFormatter,
|
||||
argparse.RawDescriptionHelpFormatter):
|
||||
pass
|
||||
|
||||
|
||||
def main(sysargs=sys.argv[:]):
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Generate flag type code!',
|
||||
formatter_class=_FancyFormatter)
|
||||
parser.add_argument(
|
||||
'package',
|
||||
type=str, default='cli', choices=_WRITEFUNCS.keys(),
|
||||
help='Package for which flag types will be generated'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-i', '--in-json',
|
||||
type=argparse.FileType('r'),
|
||||
default=sys.stdin,
|
||||
help='Input JSON file which defines each type to be generated'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-o', '--out-go',
|
||||
type=argparse.FileType('w'),
|
||||
default=sys.stdout,
|
||||
help='Output file/stream to which generated source will be written'
|
||||
)
|
||||
parser.epilog = __doc__
|
||||
|
||||
args = parser.parse_args(sysargs[1:])
|
||||
_generate_flag_types(_WRITEFUNCS[args.package], args.out_go, args.in_json)
|
||||
return 0
|
||||
|
||||
|
||||
def _generate_flag_types(writefunc, output_go, input_json):
|
||||
types = json.load(input_json)
|
||||
|
||||
tmp = tempfile.NamedTemporaryFile(suffix='.go', delete=False)
|
||||
writefunc(tmp, types)
|
||||
tmp.close()
|
||||
|
||||
new_content = subprocess.check_output(
|
||||
['goimports', tmp.name]
|
||||
).decode('utf-8')
|
||||
|
||||
print(new_content, file=output_go, end='')
|
||||
output_go.flush()
|
||||
os.remove(tmp.name)
|
||||
|
||||
|
||||
def _set_typedef_defaults(typedef):
|
||||
typedef.setdefault('doctail', '')
|
||||
typedef.setdefault('context_type', typedef['type'])
|
||||
typedef.setdefault('dest', True)
|
||||
typedef.setdefault('value', True)
|
||||
typedef.setdefault('parser', 'f.Value, error(nil)')
|
||||
typedef.setdefault('parser_cast', 'parsed')
|
||||
|
||||
|
||||
def _write_cli_flag_types(outfile, types):
|
||||
_fwrite(outfile, """\
|
||||
package cli
|
||||
|
||||
// WARNING: This file is generated!
|
||||
|
||||
""")
|
||||
|
||||
for typedef in types:
|
||||
_set_typedef_defaults(typedef)
|
||||
|
||||
_fwrite(outfile, """\
|
||||
// {name}Flag is a flag with type {type}{doctail}
|
||||
type {name}Flag struct {{
|
||||
Name string
|
||||
Usage string
|
||||
EnvVar string
|
||||
Hidden bool
|
||||
""".format(**typedef))
|
||||
|
||||
if typedef['value']:
|
||||
_fwrite(outfile, """\
|
||||
Value {type}
|
||||
""".format(**typedef))
|
||||
|
||||
if typedef['dest']:
|
||||
_fwrite(outfile, """\
|
||||
Destination *{type}
|
||||
""".format(**typedef))
|
||||
|
||||
_fwrite(outfile, "\n}\n\n")
|
||||
|
||||
_fwrite(outfile, """\
|
||||
// String returns a readable representation of this value
|
||||
// (for usage defaults)
|
||||
func (f {name}Flag) String() string {{
|
||||
return FlagStringer(f)
|
||||
}}
|
||||
|
||||
// GetName returns the name of the flag
|
||||
func (f {name}Flag) GetName() string {{
|
||||
return f.Name
|
||||
}}
|
||||
|
||||
// {name} looks up the value of a local {name}Flag, returns
|
||||
// {context_default} if not found
|
||||
func (c *Context) {name}(name string) {context_type} {{
|
||||
return lookup{name}(name, c.flagSet)
|
||||
}}
|
||||
|
||||
// Global{name} looks up the value of a global {name}Flag, returns
|
||||
// {context_default} if not found
|
||||
func (c *Context) Global{name}(name string) {context_type} {{
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {{
|
||||
return lookup{name}(name, fs)
|
||||
}}
|
||||
return {context_default}
|
||||
}}
|
||||
|
||||
func lookup{name}(name string, set *flag.FlagSet) {context_type} {{
|
||||
f := set.Lookup(name)
|
||||
if f != nil {{
|
||||
parsed, err := {parser}
|
||||
if err != nil {{
|
||||
return {context_default}
|
||||
}}
|
||||
return {parser_cast}
|
||||
}}
|
||||
return {context_default}
|
||||
}}
|
||||
""".format(**typedef))
|
||||
|
||||
|
||||
def _write_altsrc_flag_types(outfile, types):
|
||||
_fwrite(outfile, """\
|
||||
package altsrc
|
||||
|
||||
import (
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
// WARNING: This file is generated!
|
||||
|
||||
""")
|
||||
|
||||
for typedef in types:
|
||||
_set_typedef_defaults(typedef)
|
||||
|
||||
_fwrite(outfile, """\
|
||||
// {name}Flag is the flag type that wraps cli.{name}Flag to allow
|
||||
// for other values to be specified
|
||||
type {name}Flag struct {{
|
||||
cli.{name}Flag
|
||||
set *flag.FlagSet
|
||||
}}
|
||||
|
||||
// New{name}Flag creates a new {name}Flag
|
||||
func New{name}Flag(fl cli.{name}Flag) *{name}Flag {{
|
||||
return &{name}Flag{{{name}Flag: fl, set: nil}}
|
||||
}}
|
||||
|
||||
// Apply saves the flagSet for later usage calls, then calls the
|
||||
// wrapped {name}Flag.Apply
|
||||
func (f *{name}Flag) Apply(set *flag.FlagSet) {{
|
||||
f.set = set
|
||||
f.{name}Flag.Apply(set)
|
||||
}}
|
||||
|
||||
// ApplyWithError saves the flagSet for later usage calls, then calls the
|
||||
// wrapped {name}Flag.ApplyWithError
|
||||
func (f *{name}Flag) ApplyWithError(set *flag.FlagSet) error {{
|
||||
f.set = set
|
||||
return f.{name}Flag.ApplyWithError(set)
|
||||
}}
|
||||
""".format(**typedef))
|
||||
|
||||
|
||||
def _fwrite(outfile, text):
|
||||
print(textwrap.dedent(text), end='', file=outfile)
|
||||
|
||||
|
||||
_WRITEFUNCS = {
|
||||
'cli': _write_cli_flag_types,
|
||||
'altsrc': _write_altsrc_flag_types
|
||||
}
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
||||
122
vendor/github.com/codegangsta/cli/runtests
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
#!/usr/bin/env python
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
from subprocess import check_call, check_output
|
||||
|
||||
|
||||
PACKAGE_NAME = os.environ.get(
|
||||
'CLI_PACKAGE_NAME', 'github.com/urfave/cli'
|
||||
)
|
||||
|
||||
|
||||
def main(sysargs=sys.argv[:]):
|
||||
targets = {
|
||||
'vet': _vet,
|
||||
'test': _test,
|
||||
'gfmrun': _gfmrun,
|
||||
'toc': _toc,
|
||||
'gen': _gen,
|
||||
}
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
'target', nargs='?', choices=tuple(targets.keys()), default='test'
|
||||
)
|
||||
args = parser.parse_args(sysargs[1:])
|
||||
|
||||
targets[args.target]()
|
||||
return 0
|
||||
|
||||
|
||||
def _test():
|
||||
if check_output('go version'.split()).split()[2] < 'go1.2':
|
||||
_run('go test -v .')
|
||||
return
|
||||
|
||||
coverprofiles = []
|
||||
for subpackage in ['', 'altsrc']:
|
||||
coverprofile = 'cli.coverprofile'
|
||||
if subpackage != '':
|
||||
coverprofile = '{}.coverprofile'.format(subpackage)
|
||||
|
||||
coverprofiles.append(coverprofile)
|
||||
|
||||
_run('go test -v'.split() + [
|
||||
'-coverprofile={}'.format(coverprofile),
|
||||
('{}/{}'.format(PACKAGE_NAME, subpackage)).rstrip('/')
|
||||
])
|
||||
|
||||
combined_name = _combine_coverprofiles(coverprofiles)
|
||||
_run('go tool cover -func={}'.format(combined_name))
|
||||
os.remove(combined_name)
|
||||
|
||||
|
||||
def _gfmrun():
|
||||
go_version = check_output('go version'.split()).split()[2]
|
||||
if go_version < 'go1.3':
|
||||
print('runtests: skip on {}'.format(go_version), file=sys.stderr)
|
||||
return
|
||||
_run(['gfmrun', '-c', str(_gfmrun_count()), '-s', 'README.md'])
|
||||
|
||||
|
||||
def _vet():
|
||||
_run('go vet ./...')
|
||||
|
||||
|
||||
def _toc():
|
||||
_run('node_modules/.bin/markdown-toc -i README.md')
|
||||
_run('git diff --exit-code')
|
||||
|
||||
|
||||
def _gen():
|
||||
go_version = check_output('go version'.split()).split()[2]
|
||||
if go_version < 'go1.5':
|
||||
print('runtests: skip on {}'.format(go_version), file=sys.stderr)
|
||||
return
|
||||
|
||||
_run('go generate ./...')
|
||||
_run('git diff --exit-code')
|
||||
|
||||
|
||||
def _run(command):
|
||||
if hasattr(command, 'split'):
|
||||
command = command.split()
|
||||
print('runtests: {}'.format(' '.join(command)), file=sys.stderr)
|
||||
check_call(command)
|
||||
|
||||
|
||||
def _gfmrun_count():
|
||||
with open('README.md') as infile:
|
||||
lines = infile.read().splitlines()
|
||||
return len(filter(_is_go_runnable, lines))
|
||||
|
||||
|
||||
def _is_go_runnable(line):
|
||||
return line.startswith('package main')
|
||||
|
||||
|
||||
def _combine_coverprofiles(coverprofiles):
|
||||
combined = tempfile.NamedTemporaryFile(
|
||||
suffix='.coverprofile', delete=False
|
||||
)
|
||||
combined.write('mode: set\n')
|
||||
|
||||
for coverprofile in coverprofiles:
|
||||
with open(coverprofile, 'r') as infile:
|
||||
for line in infile.readlines():
|
||||
if not line.startswith('mode: '):
|
||||
combined.write(line)
|
||||
|
||||
combined.flush()
|
||||
name = combined.name
|
||||
combined.close()
|
||||
return name
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
||||
2
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
@@ -2,7 +2,7 @@ ISC License
|
||||
|
||||
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
|
||||
187
vendor/github.com/davecgh/go-spew/spew/bypass.go
generated
vendored
@@ -16,7 +16,9 @@
|
||||
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
||||
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// +build !js,!appengine,!safe,!disableunsafe
|
||||
// Go versions prior to 1.4 are disabled because they use a different layout
|
||||
// for interfaces which make the implementation of unsafeReflectValue more complex.
|
||||
// +build !js,!appengine,!safe,!disableunsafe,go1.4
|
||||
|
||||
package spew
|
||||
|
||||
@@ -34,80 +36,49 @@ const (
|
||||
ptrSize = unsafe.Sizeof((*byte)(nil))
|
||||
)
|
||||
|
||||
var (
|
||||
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
|
||||
// internal reflect.Value fields. These values are valid before golang
|
||||
// commit ecccf07e7f9d which changed the format. The are also valid
|
||||
// after commit 82f48826c6c7 which changed the format again to mirror
|
||||
// the original format. Code in the init function updates these offsets
|
||||
// as necessary.
|
||||
offsetPtr = uintptr(ptrSize)
|
||||
offsetScalar = uintptr(0)
|
||||
offsetFlag = uintptr(ptrSize * 2)
|
||||
type flag uintptr
|
||||
|
||||
// flagKindWidth and flagKindShift indicate various bits that the
|
||||
// reflect package uses internally to track kind information.
|
||||
//
|
||||
// flagRO indicates whether or not the value field of a reflect.Value is
|
||||
// read-only.
|
||||
//
|
||||
// flagIndir indicates whether the value field of a reflect.Value is
|
||||
// the actual data or a pointer to the data.
|
||||
//
|
||||
// These values are valid before golang commit 90a7c3c86944 which
|
||||
// changed their positions. Code in the init function updates these
|
||||
// flags as necessary.
|
||||
flagKindWidth = uintptr(5)
|
||||
flagKindShift = uintptr(flagKindWidth - 1)
|
||||
flagRO = uintptr(1 << 0)
|
||||
flagIndir = uintptr(1 << 1)
|
||||
var (
|
||||
// flagRO indicates whether the value field of a reflect.Value
|
||||
// is read-only.
|
||||
flagRO flag
|
||||
|
||||
// flagAddr indicates whether the address of the reflect.Value's
|
||||
// value may be taken.
|
||||
flagAddr flag
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Older versions of reflect.Value stored small integers directly in the
|
||||
// ptr field (which is named val in the older versions). Versions
|
||||
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
|
||||
// scalar for this purpose which unfortunately came before the flag
|
||||
// field, so the offset of the flag field is different for those
|
||||
// versions.
|
||||
//
|
||||
// This code constructs a new reflect.Value from a known small integer
|
||||
// and checks if the size of the reflect.Value struct indicates it has
|
||||
// the scalar field. When it does, the offsets are updated accordingly.
|
||||
vv := reflect.ValueOf(0xf00)
|
||||
if unsafe.Sizeof(vv) == (ptrSize * 4) {
|
||||
offsetScalar = ptrSize * 2
|
||||
offsetFlag = ptrSize * 3
|
||||
}
|
||||
// flagKindMask holds the bits that make up the kind
|
||||
// part of the flags field. In all the supported versions,
|
||||
// it is in the lower 5 bits.
|
||||
const flagKindMask = flag(0x1f)
|
||||
|
||||
// Commit 90a7c3c86944 changed the flag positions such that the low
|
||||
// order bits are the kind. This code extracts the kind from the flags
|
||||
// field and ensures it's the correct type. When it's not, the flag
|
||||
// order has been changed to the newer format, so the flags are updated
|
||||
// accordingly.
|
||||
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
|
||||
upfv := *(*uintptr)(upf)
|
||||
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
|
||||
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
|
||||
flagKindShift = 0
|
||||
flagRO = 1 << 5
|
||||
flagIndir = 1 << 6
|
||||
// Different versions of Go have used different
|
||||
// bit layouts for the flags type. This table
|
||||
// records the known combinations.
|
||||
var okFlags = []struct {
|
||||
ro, addr flag
|
||||
}{{
|
||||
// From Go 1.4 to 1.5
|
||||
ro: 1 << 5,
|
||||
addr: 1 << 7,
|
||||
}, {
|
||||
// Up to Go tip.
|
||||
ro: 1<<5 | 1<<6,
|
||||
addr: 1 << 8,
|
||||
}}
|
||||
|
||||
// Commit adf9b30e5594 modified the flags to separate the
|
||||
// flagRO flag into two bits which specifies whether or not the
|
||||
// field is embedded. This causes flagIndir to move over a bit
|
||||
// and means that flagRO is the combination of either of the
|
||||
// original flagRO bit and the new bit.
|
||||
//
|
||||
// This code detects the change by extracting what used to be
|
||||
// the indirect bit to ensure it's set. When it's not, the flag
|
||||
// order has been changed to the newer format, so the flags are
|
||||
// updated accordingly.
|
||||
if upfv&flagIndir == 0 {
|
||||
flagRO = 3 << 5
|
||||
flagIndir = 1 << 7
|
||||
}
|
||||
var flagValOffset = func() uintptr {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
return field.Offset
|
||||
}()
|
||||
|
||||
// flagField returns a pointer to the flag field of a reflect.Value.
|
||||
func flagField(v *reflect.Value) *flag {
|
||||
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
|
||||
}
|
||||
|
||||
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
|
||||
@@ -119,34 +90,56 @@ func init() {
|
||||
// This allows us to check for implementations of the Stringer and error
|
||||
// interfaces to be used for pretty printing ordinarily unaddressable and
|
||||
// inaccessible values such as unexported struct fields.
|
||||
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
|
||||
indirects := 1
|
||||
vt := v.Type()
|
||||
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
|
||||
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
|
||||
if rvf&flagIndir != 0 {
|
||||
vt = reflect.PtrTo(v.Type())
|
||||
indirects++
|
||||
} else if offsetScalar != 0 {
|
||||
// The value is in the scalar field when it's not one of the
|
||||
// reference types.
|
||||
switch vt.Kind() {
|
||||
case reflect.Uintptr:
|
||||
case reflect.Chan:
|
||||
case reflect.Func:
|
||||
case reflect.Map:
|
||||
case reflect.Ptr:
|
||||
case reflect.UnsafePointer:
|
||||
default:
|
||||
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
|
||||
offsetScalar)
|
||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
|
||||
return v
|
||||
}
|
||||
flagFieldPtr := flagField(&v)
|
||||
*flagFieldPtr &^= flagRO
|
||||
*flagFieldPtr |= flagAddr
|
||||
return v
|
||||
}
|
||||
|
||||
// Sanity checks against future reflect package changes
|
||||
// to the type or semantics of the Value.flag field.
|
||||
func init() {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
|
||||
panic("reflect.Value flag field has changed kind")
|
||||
}
|
||||
type t0 int
|
||||
var t struct {
|
||||
A t0
|
||||
// t0 will have flagEmbedRO set.
|
||||
t0
|
||||
// a will have flagStickyRO set
|
||||
a t0
|
||||
}
|
||||
vA := reflect.ValueOf(t).FieldByName("A")
|
||||
va := reflect.ValueOf(t).FieldByName("a")
|
||||
vt0 := reflect.ValueOf(t).FieldByName("t0")
|
||||
|
||||
// Infer flagRO from the difference between the flags
|
||||
// for the (otherwise identical) fields in t.
|
||||
flagPublic := *flagField(&vA)
|
||||
flagWithRO := *flagField(&va) | *flagField(&vt0)
|
||||
flagRO = flagPublic ^ flagWithRO
|
||||
|
||||
// Infer flagAddr from the difference between a value
|
||||
// taken from a pointer and not.
|
||||
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
|
||||
flagNoPtr := *flagField(&vA)
|
||||
flagPtr := *flagField(&vPtrA)
|
||||
flagAddr = flagNoPtr ^ flagPtr
|
||||
|
||||
// Check that the inferred flags tally with one of the known versions.
|
||||
for _, f := range okFlags {
|
||||
if flagRO == f.ro && flagAddr == f.addr {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
pv := reflect.NewAt(vt, upv)
|
||||
rv = pv
|
||||
for i := 0; i < indirects; i++ {
|
||||
rv = rv.Elem()
|
||||
}
|
||||
return rv
|
||||
panic("reflect.Value read-only flag has changed semantics")
|
||||
}
|
||||
|
||||
2
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
generated
vendored
@@ -16,7 +16,7 @@
|
||||
// when the code is running on Google App Engine, compiled by GopherJS, or
|
||||
// "-tags safe" is added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// +build js appengine safe disableunsafe
|
||||
// +build js appengine safe disableunsafe !go1.4
|
||||
|
||||
package spew
|
||||
|
||||
|
||||
2
vendor/github.com/davecgh/go-spew/spew/common.go
generated
vendored
@@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
|
||||
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
|
||||
// prefix to Writer w.
|
||||
func printHexPtr(w io.Writer, p uintptr) {
|
||||
// Null pointer.
|
||||
|
||||
10
vendor/github.com/davecgh/go-spew/spew/dump.go
generated
vendored
@@ -35,16 +35,16 @@ var (
|
||||
|
||||
// cCharRE is a regular expression that matches a cgo char.
|
||||
// It is used to detect character arrays to hexdump them.
|
||||
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
|
||||
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
|
||||
|
||||
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
||||
// char. It is used to detect unsigned character arrays to hexdump
|
||||
// them.
|
||||
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
|
||||
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
|
||||
|
||||
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
||||
// It is used to detect uint8_t arrays to hexdump them.
|
||||
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
|
||||
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
|
||||
)
|
||||
|
||||
// dumpState contains information about the state of a dump operation.
|
||||
@@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
|
||||
// Display dereferenced value.
|
||||
d.w.Write(openParenBytes)
|
||||
switch {
|
||||
case nilFound == true:
|
||||
case nilFound:
|
||||
d.w.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound == true:
|
||||
case cycleFound:
|
||||
d.w.Write(circularBytes)
|
||||
|
||||
default:
|
||||
|
||||
4
vendor/github.com/davecgh/go-spew/spew/format.go
generated
vendored
@@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {
|
||||
|
||||
// Display dereferenced value.
|
||||
switch {
|
||||
case nilFound == true:
|
||||
case nilFound:
|
||||
f.fs.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound == true:
|
||||
case cycleFound:
|
||||
f.fs.Write(circularShortBytes)
|
||||
|
||||
default:
|
||||
|
||||
255
vendor/github.com/denisenkom/go-mssqldb/README.md
generated
vendored
Normal file
@@ -0,0 +1,255 @@
|
||||
# A pure Go MSSQL driver for Go's database/sql package
|
||||
|
||||
[](http://godoc.org/github.com/denisenkom/go-mssqldb)
|
||||
[](https://ci.appveyor.com/project/denisenkom/go-mssqldb)
|
||||
[](https://codecov.io/gh/denisenkom/go-mssqldb)
|
||||
|
||||
## Install
|
||||
|
||||
Requires Go 1.8 or above.
|
||||
|
||||
Install with `go get github.com/denisenkom/go-mssqldb`.
|
||||
|
||||
## Connection Parameters and DSN
|
||||
|
||||
The recommended connection string uses a URL format:
|
||||
`sqlserver://username:password@host/instance?param1=value¶m2=value`
|
||||
Other supported formats are listed below.
|
||||
|
||||
### Common parameters:
|
||||
|
||||
* `user id` - enter the SQL Server Authentication user id or the Windows Authentication user id in the DOMAIN\User format. On Windows, if user id is empty or missing, Single-Sign-On is used.
|
||||
* `password`
|
||||
* `database`
|
||||
* `connection timeout` - in seconds (default is 0, meaning no timeout). It is recommended to leave this at 0 and use context to manage query and connection timeouts.
|
||||
* `dial timeout` - in seconds (default is 15), set to 0 for no timeout
|
||||
* `encrypt`
|
||||
* `disable` - Data sent between client and server is not encrypted.
|
||||
* `false` - Data sent between client and server is not encrypted beyond the login packet. (Default)
|
||||
* `true` - Data sent between client and server is encrypted.
|
||||
* `app name` - The application name (default is go-mssqldb)
|
||||
|
||||
### Connection parameters for ODBC and ADO style connection strings:
|
||||
|
||||
* `server` - host or host\instance (default localhost)
|
||||
* `port` - used only when there is no instance in server (default 1433)
|
||||
|
||||
### Less common parameters:
|
||||
|
||||
* `keepAlive` - in seconds; 0 to disable (default is 30)
|
||||
* `failoverpartner` - host or host\instance (default is no partner).
|
||||
* `failoverport` - used only when there is no instance in failoverpartner (default 1433)
|
||||
* `packet size` - in bytes; 512 to 32767 (default is 4096)
|
||||
* Encrypted connections have a maximum packet size of 16383 bytes
|
||||
* Further information on usage: https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-network-packet-size-server-configuration-option
|
||||
* `log` - logging flags (default 0/no logging, 63 for full logging)
|
||||
* 1 log errors
|
||||
* 2 log messages
|
||||
* 4 log rows affected
|
||||
* 8 trace sql statements
|
||||
* 16 log statement parameters
|
||||
* 32 log transaction begin/end
|
||||
* `TrustServerCertificate`
|
||||
* false - Server certificate is checked. Default is false if encrypt is specified.
|
||||
* true - Server certificate is not checked. Default is true if encrypt is not specified. If trust server certificate is true, driver accepts any certificate presented by the server and any host name in that certificate. In this mode, TLS is susceptible to man-in-the-middle attacks. This should be used only for testing.
|
||||
* `certificate` - The file that contains the public key certificate of the CA that signed the SQL Server certificate. The specified certificate overrides the go platform specific CA certificates.
|
||||
* `hostNameInCertificate` - Specifies the Common Name (CN) in the server certificate. Default value is the server host.
|
||||
* `ServerSPN` - The kerberos SPN (Service Principal Name) for the server. Default is MSSQLSvc/host:port.
|
||||
* `Workstation ID` - The workstation name (default is the host name)
|
||||
* `ApplicationIntent` - Can be given the value `ReadOnly` to initiate a read-only connection to an Availability Group listener.
|
||||
|
||||
### The connection string can be specified in one of three formats:
|
||||
|
||||
|
||||
1. URL: with `sqlserver` scheme. Username and password appear before the host. Any instance appears as
|
||||
the first segment in the path. All other options are query parameters. Examples:
|
||||
|
||||
* `sqlserver://username:password@host/instance?param1=value¶m2=value`
|
||||
* `sqlserver://username:password@host:port?param1=value¶m2=value`
|
||||
* `sqlserver://sa@localhost/SQLExpress?database=master&connection+timeout=30` // SQLExpress instance.
|
||||
* `sqlserver://sa:mypass@localhost?database=master&connection+timeout=30` // username=sa, password=mypass.
|
||||
* `sqlserver://sa:mypass@localhost:1234?database=master&connection+timeout=30` // port 1234 on localhost.
|
||||
* `sqlserver://sa:my%7Bpass@somehost?connection+timeout=30` // password is "my{pass"
|
||||
|
||||
A string of this format can be constructed using the `URL` type in the `net/url` package.
|
||||
|
||||
```go
|
||||
query := url.Values{}
|
||||
query.Add("app name", "MyAppName")
|
||||
|
||||
u := &url.URL{
|
||||
Scheme: "sqlserver",
|
||||
User: url.UserPassword(username, password),
|
||||
Host: fmt.Sprintf("%s:%d", hostname, port),
|
||||
// Path: instance, // if connecting to an instance instead of a port
|
||||
RawQuery: query.Encode(),
|
||||
}
|
||||
db, err := sql.Open("sqlserver", u.String())
|
||||
```
|
||||
|
||||
2. ADO: `key=value` pairs separated by `;`. Values may not contain `;`; leading and trailing whitespace is ignored.
|
||||
Examples:
|
||||
|
||||
* `server=localhost\\SQLExpress;user id=sa;database=master;app name=MyAppName`
|
||||
* `server=localhost;user id=sa;database=master;app name=MyAppName`
|
||||
|
||||
3. ODBC: Prefix with `odbc`, `key=value` pairs separated by `;`. Allow `;` by wrapping
|
||||
values in `{}`. Examples:
|
||||
|
||||
* `odbc:server=localhost\\SQLExpress;user id=sa;database=master;app name=MyAppName`
|
||||
* `odbc:server=localhost;user id=sa;database=master;app name=MyAppName`
|
||||
* `odbc:server=localhost;user id=sa;password={foo;bar}` // Value marked with `{}`, password is "foo;bar"
|
||||
* `odbc:server=localhost;user id=sa;password={foo{bar}` // Value marked with `{}`, password is "foo{bar"
|
||||
* `odbc:server=localhost;user id=sa;password={foobar }` // Value marked with `{}`, password is "foobar "
|
||||
* `odbc:server=localhost;user id=sa;password=foo{bar` // Literal `{`, password is "foo{bar"
|
||||
* `odbc:server=localhost;user id=sa;password=foo}bar` // Literal `}`, password is "foo}bar"
|
||||
* `odbc:server=localhost;user id=sa;password={foo{bar}` // Literal `{`, password is "foo{bar"
|
||||
* `odbc:server=localhost;user id=sa;password={foo}}bar}` // Escaped `}` with `}}`, password is "foo}bar"
|
||||
|
||||
## Executing Stored Procedures
|
||||
|
||||
To run a stored procedure, set the query text to the procedure name:
|
||||
```go
|
||||
var account = "abc"
|
||||
_, err := db.ExecContext(ctx, "sp_RunMe",
|
||||
sql.Named("ID", 123),
|
||||
sql.Named("Account", sql.Out{Dest: &account}),
|
||||
)
|
||||
```
|
||||
|
||||
## Caveat for local temporary tables
|
||||
|
||||
Due to protocol limitations, temporary tables will only be allocated on the connection
|
||||
as a result of executing a query with zero parameters. The following query
|
||||
will, due to the use of a parameter, execute in its own session,
|
||||
and `#mytemp` will be de-allocated right away:
|
||||
|
||||
```go
|
||||
conn, err := pool.Conn(ctx)
|
||||
defer conn.Close()
|
||||
_, err := conn.ExecContext(ctx, "select @p1 as x into #mytemp", 1)
|
||||
// at this point #mytemp is already dropped again as the session of the ExecContext is over
|
||||
```
|
||||
|
||||
To work around this, always explicitly create the local temporary
|
||||
table in a query without any parameters. As a special case, the driver
|
||||
will then be able to execute the query directly on the
|
||||
connection-scoped session. The following example works:
|
||||
|
||||
```go
|
||||
conn, err := pool.Conn(ctx)
|
||||
|
||||
// Set us up so that temp table is always cleaned up, since conn.Close()
|
||||
// merely returns conn to pool, rather than actually closing the connection.
|
||||
defer func() {
|
||||
_, _ = conn.ExecContext(ctx, "drop table #mytemp") // always clean up
|
||||
conn.Close() // merely returns conn to pool
|
||||
}()
|
||||
|
||||
|
||||
// Since we do not pass any parameters below, the query will execute on the scope of
|
||||
// the connection and succeed in creating the table.
|
||||
_, err := conn.ExecContext(ctx, "create table #mytemp ( x int )")
|
||||
|
||||
// #mytemp is now available even if you pass parameters
|
||||
_, err := conn.ExecContext(ctx, "insert into #mytemp (x) values (@p1)", 1)
|
||||
|
||||
```
|
||||
|
||||
## Return Status
|
||||
|
||||
To get the procedure return status, pass into the parameters a
|
||||
`*mssql.ReturnStatus`. For example:
|
||||
```go
|
||||
var rs mssql.ReturnStatus
|
||||
_, err := db.ExecContext(ctx, "theproc", &rs)
|
||||
log.Printf("status=%d", rs)
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
The `sqlserver` driver uses normal MS SQL Server syntax and expects parameters in
|
||||
the sql query to be in the form of either `@Name` or `@p1` to `@pN` (ordinal position).
|
||||
|
||||
```go
|
||||
db.QueryContext(ctx, `select * from t where ID = @ID and Name = @p2;`, sql.Named("ID", 6), "Bob")
|
||||
```
|
||||
|
||||
### Parameter Types
|
||||
|
||||
To pass specific types to the query parameters, say `varchar` or `date` types,
|
||||
you must convert the value to the matching driver type before passing it in (see the sketch after this list). The following types
|
||||
are supported:
|
||||
|
||||
* string -> nvarchar
|
||||
* mssql.VarChar -> varchar
|
||||
* time.Time -> datetimeoffset or datetime (TDS version dependent)
|
||||
* mssql.DateTime1 -> datetime
|
||||
* mssql.DateTimeOffset -> datetimeoffset
|
||||
* "cloud.google.com/go/civil".Date -> date
|
||||
* "cloud.google.com/go/civil".DateTime -> datetime2
|
||||
* "cloud.google.com/go/civil".Time -> time
|
||||
* mssql.TVPType -> Table Value Parameter (TDS version dependent)
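A short sketch of forcing specific parameter types; the table and column names below are hypothetical, and only the `mssql.VarChar` and `civil.Date` conversions from the list above are used:

```go
package example // standalone sketch, not part of the driver

import (
	"context"
	"database/sql"

	"cloud.google.com/go/civil"
	mssql "github.com/denisenkom/go-mssqldb"
)

// InsertPerson sends name as varchar (rather than the default nvarchar)
// and born as a SQL date. The table "people" and its columns are hypothetical.
func InsertPerson(ctx context.Context, db *sql.DB) error {
	_, err := db.ExecContext(ctx,
		"insert into people (name, born) values (@name, @born)",
		sql.Named("name", mssql.VarChar("Bob")),
		sql.Named("born", civil.Date{Year: 1990, Month: 1, Day: 2}),
	)
	return err
}
```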
|
||||
|
||||
## Important Notes
|
||||
|
||||
* [LastInsertId](https://golang.org/pkg/database/sql/#Result.LastInsertId) should
|
||||
not be used with this driver (or SQL Server) due to how the TDS protocol
|
||||
works. Please use the [OUTPUT Clause](https://docs.microsoft.com/en-us/sql/t-sql/queries/output-clause-transact-sql)
|
||||
or add a `select ID = convert(bigint, SCOPE_IDENTITY());` to the end of your
|
||||
query (ref [SCOPE_IDENTITY](https://docs.microsoft.com/en-us/sql/t-sql/functions/scope-identity-transact-sql)).
|
||||
This will ensure you are getting the correct ID and will prevent a network round trip (see the sketch after this list).
|
||||
* [NewConnector](https://godoc.org/github.com/denisenkom/go-mssqldb#NewConnector)
|
||||
may be used with [OpenDB](https://golang.org/pkg/database/sql/#OpenDB).
|
||||
* [Connector.SessionInitSQL](https://godoc.org/github.com/denisenkom/go-mssqldb#Connector.SessionInitSQL)
|
||||
may be set to set any driver specific session settings after the session
|
||||
has been reset. If empty the session will still be reset but use the database
|
||||
defaults in Go1.10+.
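A minimal sketch of the `SCOPE_IDENTITY()` pattern recommended above, assuming an open `db *sql.DB`, a `ctx` context, and a hypothetical table `t`:

```go
// Insert a row and read back its identity in the same batch,
// rather than calling Result.LastInsertId.
var id int64
err := db.QueryRowContext(ctx,
	"insert into t (name) values (@name); select ID = convert(bigint, SCOPE_IDENTITY());",
	sql.Named("name", "Bob"),
).Scan(&id)
if err != nil {
	log.Fatal(err)
}
log.Printf("inserted id=%d", id)
```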
|
||||
|
||||
## Features
|
||||
|
||||
* Can be used with SQL Server 2005 or newer
|
||||
* Can be used with Microsoft Azure SQL Database
|
||||
* Can be used on all go supported platforms (e.g. Linux, Mac OS X and Windows)
|
||||
* Supports new date/time types: date, time, datetime2, datetimeoffset
|
||||
* Supports string parameters longer than 8000 characters
|
||||
* Supports encryption using SSL/TLS
|
||||
* Supports SQL Server and Windows Authentication
|
||||
* Supports Single-Sign-On on Windows
|
||||
* Supports connections to AlwaysOn Availability Group listeners, including re-direction to read-only replicas.
|
||||
* Supports query notifications
|
||||
|
||||
## Tests
|
||||
|
||||
`go test` is used for testing. A running instance of MSSQL server is required.
|
||||
Environment variables are used to pass login information.
|
||||
|
||||
Example:
|
||||
|
||||
env SQLSERVER_DSN=sqlserver://user:pass@hostname/instance?database=test1 go test
|
||||
|
||||
## Deprecated
|
||||
|
||||
These features still exist in the driver, but they are deprecated.
|
||||
|
||||
### Query Parameter Token Replace (driver "mssql")
|
||||
|
||||
If you use the driver name "mssql" (rather than "sqlserver"), the SQL text
|
||||
will be loosely parsed and an attempt to extract identifiers using one of
|
||||
|
||||
* ?
|
||||
* ?nnn
|
||||
* :nnn
|
||||
* $nnn
|
||||
|
||||
will be made. This is not recommended with SQL Server.
|
||||
There is at least one existing `won't fix` issue with the query parsing.
|
||||
|
||||
Use the native "@Name" parameters instead with the "sqlserver" driver name.
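A hedged sketch contrasting the two driver names; `dsn` and `ctx` are assumed to already exist, and the query text is illustrative only:

```go
// Deprecated style: driver name "mssql", text is loosely parsed and
// ordinal placeholders such as ? are rewritten by the driver.
legacyDB, _ := sql.Open("mssql", dsn)
_, legacyErr := legacyDB.QueryContext(ctx, "select name from t where ID = ?", 6)

// Recommended style: driver name "sqlserver" with native @Name parameters.
db, _ := sql.Open("sqlserver", dsn)
_, err := db.QueryContext(ctx, "select name from t where ID = @ID", sql.Named("ID", 6))
```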
|
||||
|
||||
## Known Issues
|
||||
|
||||
* The SQL Server 2008 and 2008 R2 engines cannot handle login records when SSL encryption is not disabled.
|
||||
To fix SQL Server 2008 R2 issue, install SQL Server 2008 R2 Service Pack 2.
|
||||
To fix SQL Server 2008 issue, install Microsoft SQL Server 2008 Service Pack 3 and Cumulative update package 3 for SQL Server 2008 SP3.
|
||||
More information: http://support.microsoft.com/kb/2653857
|
||||
5
vendor/github.com/denisenkom/go-mssqldb/buf.go
generated
vendored
@@ -101,11 +101,10 @@ func (w *tdsBuffer) Write(p []byte) (total int, err error) {
|
||||
}
|
||||
p = p[copied:]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *tdsBuffer) WriteByte(b byte) error {
|
||||
if int(w.wpos) == len(w.wbuf) {
|
||||
if int(w.wpos) == len(w.wbuf) || w.wpos == w.packetSize {
|
||||
if err := w.flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -144,7 +143,7 @@ func (r *tdsBuffer) readNextPacket() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if int(h.Size) > len(r.rbuf) {
|
||||
if int(h.Size) > r.packetSize {
|
||||
return errors.New("Invalid packet size, it is longer than buffer size")
|
||||
}
|
||||
if headerSize > int(h.Size) {
|
||||
|
||||
92
vendor/github.com/denisenkom/go-mssqldb/bulkcopy.go
generated
vendored
@@ -312,7 +312,7 @@ func (b *Bulk) getMetadata(ctx context.Context) (err error) {
|
||||
return rows.Close()
|
||||
}
|
||||
|
||||
func (b *Bulk) makeParam(val DataValue, col columnStruct) (res Param, err error) {
|
||||
func (b *Bulk) makeParam(val DataValue, col columnStruct) (res param, err error) {
|
||||
res.ti.Size = col.ti.Size
|
||||
res.ti.TypeId = col.ti.TypeId
|
||||
|
||||
@@ -408,60 +408,30 @@ func (b *Bulk) makeParam(val DataValue, col columnStruct) (res Param, err error)
|
||||
if val.(bool) {
|
||||
res.buffer[0] = 1
|
||||
}
|
||||
|
||||
case typeDateTime2N, typeDateTimeOffsetN:
|
||||
case typeDateTime2N:
|
||||
switch val := val.(type) {
|
||||
case time.Time:
|
||||
days, ns := dateTime2(val)
|
||||
ns /= int64(math.Pow10(int(col.ti.Scale)*-1) * 1000000000)
|
||||
|
||||
var data = make([]byte, 5)
|
||||
|
||||
data[0] = byte(ns)
|
||||
data[1] = byte(ns >> 8)
|
||||
data[2] = byte(ns >> 16)
|
||||
data[3] = byte(ns >> 24)
|
||||
data[4] = byte(ns >> 32)
|
||||
|
||||
if col.ti.Scale <= 2 {
|
||||
res.ti.Size = 6
|
||||
} else if col.ti.Scale <= 4 {
|
||||
res.ti.Size = 7
|
||||
} else {
|
||||
res.ti.Size = 8
|
||||
}
|
||||
var buf []byte
|
||||
buf = make([]byte, res.ti.Size)
|
||||
copy(buf, data[0:res.ti.Size-3])
|
||||
|
||||
buf[res.ti.Size-3] = byte(days)
|
||||
buf[res.ti.Size-2] = byte(days >> 8)
|
||||
buf[res.ti.Size-1] = byte(days >> 16)
|
||||
|
||||
if col.ti.TypeId == typeDateTimeOffsetN {
|
||||
_, offset := val.Zone()
|
||||
var offsetMinute = uint16(offset / 60)
|
||||
buf = append(buf, byte(offsetMinute))
|
||||
buf = append(buf, byte(offsetMinute>>8))
|
||||
res.ti.Size = res.ti.Size + 2
|
||||
}
|
||||
|
||||
res.buffer = buf
|
||||
|
||||
res.buffer = encodeDateTime2(val, int(col.ti.Scale))
|
||||
res.ti.Size = len(res.buffer)
|
||||
default:
|
||||
err = fmt.Errorf("mssql: invalid type for datetime2 column: %s", val)
|
||||
return
|
||||
}
|
||||
case typeDateTimeOffsetN:
|
||||
switch val := val.(type) {
|
||||
case time.Time:
|
||||
res.buffer = encodeDateTimeOffset(val, int(res.ti.Scale))
|
||||
res.ti.Size = len(res.buffer)
|
||||
|
||||
default:
|
||||
err = fmt.Errorf("mssql: invalid type for datetimeoffset column: %s", val)
|
||||
return
|
||||
}
|
||||
case typeDateN:
|
||||
switch val := val.(type) {
|
||||
case time.Time:
|
||||
days, _ := dateTime2(val)
|
||||
|
||||
res.ti.Size = 3
|
||||
res.buffer = make([]byte, 3)
|
||||
res.buffer[0] = byte(days)
|
||||
res.buffer[1] = byte(days >> 8)
|
||||
res.buffer[2] = byte(days >> 16)
|
||||
res.buffer = encodeDate(val)
|
||||
res.ti.Size = len(res.buffer)
|
||||
default:
|
||||
err = fmt.Errorf("mssql: invalid type for date column: %s", val)
|
||||
return
|
||||
@@ -470,31 +440,11 @@ func (b *Bulk) makeParam(val DataValue, col columnStruct) (res Param, err error)
|
||||
switch val := val.(type) {
|
||||
case time.Time:
|
||||
if col.ti.Size == 4 {
|
||||
res.ti.Size = 4
|
||||
res.buffer = make([]byte, 4)
|
||||
|
||||
ref := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
dur := val.Sub(ref)
|
||||
days := dur / (24 * time.Hour)
|
||||
if days < 0 {
|
||||
err = fmt.Errorf("mssql: Date %s is out of range", val)
|
||||
return
|
||||
}
|
||||
mins := val.Hour()*60 + val.Minute()
|
||||
|
||||
binary.LittleEndian.PutUint16(res.buffer[0:2], uint16(days))
|
||||
binary.LittleEndian.PutUint16(res.buffer[2:4], uint16(mins))
|
||||
res.buffer = encodeDateTim4(val)
|
||||
res.ti.Size = len(res.buffer)
|
||||
} else if col.ti.Size == 8 {
|
||||
res.ti.Size = 8
|
||||
res.buffer = make([]byte, 8)
|
||||
|
||||
days := divFloor(val.Unix(), 24*60*60)
|
||||
//25567 - number of days since Jan 1 1900 UTC to Jan 1 1970
|
||||
days = days + 25567
|
||||
tm := (val.Hour()*60*60+val.Minute()*60+val.Second())*300 + int(val.Nanosecond()/10000000*3)
|
||||
|
||||
binary.LittleEndian.PutUint32(res.buffer[0:4], uint32(days))
|
||||
binary.LittleEndian.PutUint32(res.buffer[4:8], uint32(tm))
|
||||
res.buffer = encodeDateTime(val)
|
||||
res.ti.Size = len(res.buffer)
|
||||
} else {
|
||||
err = fmt.Errorf("mssql: invalid size of column")
|
||||
}
|
||||
@@ -571,7 +521,7 @@ func (b *Bulk) makeParam(val DataValue, col columnStruct) (res Param, err error)
|
||||
buf[i] = ub[j]
|
||||
}
|
||||
res.buffer = buf
|
||||
case typeBigVarBin:
|
||||
case typeBigVarBin, typeBigBinary:
|
||||
switch val := val.(type) {
|
||||
case []byte:
|
||||
res.ti.Size = len(val)
|
||||
|
||||
2
vendor/github.com/denisenkom/go-mssqldb/bulkcopy_sql.go
generated
vendored
@@ -66,7 +66,7 @@ func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
|
||||
|
||||
func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
|
||||
if ci.closed {
|
||||
return nil, errors.New("errCopyInClosed")
|
||||
return nil, errors.New("copyin query is closed")
|
||||
}
|
||||
|
||||
if len(v) == 0 {
|
||||
|
||||
306
vendor/github.com/denisenkom/go-mssqldb/convert.go
generated
vendored
Normal file
@@ -0,0 +1,306 @@
|
||||
package mssql
|
||||
|
||||
import "errors"
|
||||
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Type conversions for Scan.
|
||||
|
||||
// This file was imported from database.sql.convert for go 1.10.3 with minor modifications to get
|
||||
// convertAssign function
|
||||
// This function is used internally by sql to convert values during call to Scan, we need same
|
||||
// logic to return values for OUTPUT parameters.
|
||||
// TODO: sql library should instead expose function defaultCheckNamedValue to be callable by drivers
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
var errNilPtr = errors.New("destination pointer is nil") // embedded in descriptive error
|
||||
|
||||
// convertAssign copies to dest the value in src, converting it if possible.
|
||||
// An error is returned if the copy would result in loss of information.
|
||||
// dest should be a pointer type.
|
||||
func convertAssign(dest, src interface{}) error {
|
||||
// Common cases, without reflect.
|
||||
switch s := src.(type) {
|
||||
case string:
|
||||
switch d := dest.(type) {
|
||||
case *string:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = s
|
||||
return nil
|
||||
case *[]byte:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = []byte(s)
|
||||
return nil
|
||||
case *sql.RawBytes:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = append((*d)[:0], s...)
|
||||
return nil
|
||||
}
|
||||
case []byte:
|
||||
switch d := dest.(type) {
|
||||
case *string:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = string(s)
|
||||
return nil
|
||||
case *interface{}:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = cloneBytes(s)
|
||||
return nil
|
||||
case *[]byte:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = cloneBytes(s)
|
||||
return nil
|
||||
case *sql.RawBytes:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = s
|
||||
return nil
|
||||
}
|
||||
case time.Time:
|
||||
switch d := dest.(type) {
|
||||
case *time.Time:
|
||||
*d = s
|
||||
return nil
|
||||
case *string:
|
||||
*d = s.Format(time.RFC3339Nano)
|
||||
return nil
|
||||
case *[]byte:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = []byte(s.Format(time.RFC3339Nano))
|
||||
return nil
|
||||
case *sql.RawBytes:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = s.AppendFormat((*d)[:0], time.RFC3339Nano)
|
||||
return nil
|
||||
}
|
||||
case nil:
|
||||
switch d := dest.(type) {
|
||||
case *interface{}:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = nil
|
||||
return nil
|
||||
case *[]byte:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = nil
|
||||
return nil
|
||||
case *sql.RawBytes:
|
||||
if d == nil {
|
||||
return errNilPtr
|
||||
}
|
||||
*d = nil
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
var sv reflect.Value
|
||||
|
||||
switch d := dest.(type) {
|
||||
case *string:
|
||||
sv = reflect.ValueOf(src)
|
||||
switch sv.Kind() {
|
||||
case reflect.Bool,
|
||||
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
|
||||
reflect.Float32, reflect.Float64:
|
||||
*d = asString(src)
|
||||
return nil
|
||||
}
|
||||
case *[]byte:
|
||||
sv = reflect.ValueOf(src)
|
||||
if b, ok := asBytes(nil, sv); ok {
|
||||
*d = b
|
||||
return nil
|
||||
}
|
||||
case *sql.RawBytes:
|
||||
sv = reflect.ValueOf(src)
|
||||
if b, ok := asBytes([]byte(*d)[:0], sv); ok {
|
||||
*d = sql.RawBytes(b)
|
||||
return nil
|
||||
}
|
||||
case *bool:
|
||||
bv, err := driver.Bool.ConvertValue(src)
|
||||
if err == nil {
|
||||
*d = bv.(bool)
|
||||
}
|
||||
return err
|
||||
case *interface{}:
|
||||
*d = src
|
||||
return nil
|
||||
}
|
||||
|
||||
if scanner, ok := dest.(sql.Scanner); ok {
|
||||
return scanner.Scan(src)
|
||||
}
|
||||
|
||||
dpv := reflect.ValueOf(dest)
|
||||
if dpv.Kind() != reflect.Ptr {
|
||||
return errors.New("destination not a pointer")
|
||||
}
|
||||
if dpv.IsNil() {
|
||||
return errNilPtr
|
||||
}
|
||||
|
||||
if !sv.IsValid() {
|
||||
sv = reflect.ValueOf(src)
|
||||
}
|
||||
|
||||
dv := reflect.Indirect(dpv)
|
||||
if sv.IsValid() && sv.Type().AssignableTo(dv.Type()) {
|
||||
switch b := src.(type) {
|
||||
case []byte:
|
||||
dv.Set(reflect.ValueOf(cloneBytes(b)))
|
||||
default:
|
||||
dv.Set(sv)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if dv.Kind() == sv.Kind() && sv.Type().ConvertibleTo(dv.Type()) {
|
||||
dv.Set(sv.Convert(dv.Type()))
|
||||
return nil
|
||||
}
|
||||
|
||||
// The following conversions use a string value as an intermediate representation
|
||||
// to convert between various numeric types.
|
||||
//
|
||||
// This also allows scanning into user defined types such as "type Int int64".
|
||||
// For symmetry, also check for string destination types.
|
||||
switch dv.Kind() {
|
||||
case reflect.Ptr:
|
||||
if src == nil {
|
||||
dv.Set(reflect.Zero(dv.Type()))
|
||||
return nil
|
||||
} else {
|
||||
dv.Set(reflect.New(dv.Type().Elem()))
|
||||
return convertAssign(dv.Interface(), src)
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
s := asString(src)
|
||||
i64, err := strconv.ParseInt(s, 10, dv.Type().Bits())
|
||||
if err != nil {
|
||||
err = strconvErr(err)
|
||||
return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
|
||||
}
|
||||
dv.SetInt(i64)
|
||||
return nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
s := asString(src)
|
||||
u64, err := strconv.ParseUint(s, 10, dv.Type().Bits())
|
||||
if err != nil {
|
||||
err = strconvErr(err)
|
||||
return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
|
||||
}
|
||||
dv.SetUint(u64)
|
||||
return nil
|
||||
case reflect.Float32, reflect.Float64:
|
||||
s := asString(src)
|
||||
f64, err := strconv.ParseFloat(s, dv.Type().Bits())
|
||||
if err != nil {
|
||||
err = strconvErr(err)
|
||||
return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
|
||||
}
|
||||
dv.SetFloat(f64)
|
||||
return nil
|
||||
case reflect.String:
|
||||
switch v := src.(type) {
|
||||
case string:
|
||||
dv.SetString(v)
|
||||
return nil
|
||||
case []byte:
|
||||
dv.SetString(string(v))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, dest)
|
||||
}
|
||||
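For orientation, here is a minimal in-package sketch (not part of the vendored file) showing the kind of conversions convertAssign performs when OUTPUT parameter values are scanned back; the destinations and values are illustrative.

// Hypothetical in-package example of convertAssign, mirroring how scanIntoOut uses it.
func exampleConvertAssign() error {
	// A []byte from the server into a *string destination.
	var s string
	if err := convertAssign(&s, []byte("hello")); err != nil {
		return err
	}
	// A string into an *int64 destination goes through the reflect path and strconv.ParseInt.
	var n int64
	if err := convertAssign(&n, "42"); err != nil {
		return err
	}
	return nil
}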
|
||||
func strconvErr(err error) error {
|
||||
if ne, ok := err.(*strconv.NumError); ok {
|
||||
return ne.Err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func cloneBytes(b []byte) []byte {
|
||||
if b == nil {
|
||||
return nil
|
||||
} else {
|
||||
c := make([]byte, len(b))
|
||||
copy(c, b)
|
||||
return c
|
||||
}
|
||||
}
|
||||
|
||||
func asString(src interface{}) string {
|
||||
switch v := src.(type) {
|
||||
case string:
|
||||
return v
|
||||
case []byte:
|
||||
return string(v)
|
||||
}
|
||||
rv := reflect.ValueOf(src)
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return strconv.FormatInt(rv.Int(), 10)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return strconv.FormatUint(rv.Uint(), 10)
|
||||
case reflect.Float64:
|
||||
return strconv.FormatFloat(rv.Float(), 'g', -1, 64)
|
||||
case reflect.Float32:
|
||||
return strconv.FormatFloat(rv.Float(), 'g', -1, 32)
|
||||
case reflect.Bool:
|
||||
return strconv.FormatBool(rv.Bool())
|
||||
}
|
||||
return fmt.Sprintf("%v", src)
|
||||
}
|
||||
|
||||
func asBytes(buf []byte, rv reflect.Value) (b []byte, ok bool) {
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return strconv.AppendInt(buf, rv.Int(), 10), true
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return strconv.AppendUint(buf, rv.Uint(), 10), true
|
||||
case reflect.Float32:
|
||||
return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 32), true
|
||||
case reflect.Float64:
|
||||
return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 64), true
|
||||
case reflect.Bool:
|
||||
return strconv.AppendBool(buf, rv.Bool()), true
|
||||
case reflect.String:
|
||||
s := rv.String()
|
||||
return append(buf, s...), true
|
||||
}
|
||||
return
|
||||
}
|
||||
8
vendor/github.com/denisenkom/go-mssqldb/doc.go
generated
vendored
@@ -1,12 +1,14 @@
|
||||
// package mssql implements the TDS protocol used to connect to MS SQL Server (sqlserver)
|
||||
// database servers.
|
||||
//
|
||||
// This package registers two drivers:
|
||||
// This package registers the driver:
|
||||
// sqlserver: uses native "@" parameter placeholder names and does no pre-processing.
|
||||
// mssql: expects identifiers to be prefixed with ":" and pre-processes queries.
|
||||
//
|
||||
// If the ordinal position is used for query parameters, identifiers will be named
|
||||
// "@p1", "@p2", ... "@pN".
|
||||
//
|
||||
// Please refer to the README for the format of the DSN.
|
||||
// Please refer to the README for the format of the DSN. There are multiple DSN
|
||||
// formats accepted: ADO style, ODBC style, and URL style. The following is an
|
||||
// example of a URL style DSN:
|
||||
// sqlserver://sa:mypass@localhost:1234?database=master&connection+timeout=30
|
||||
package mssql
|
||||
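A minimal, self-contained sketch of opening a connection with a URL-style DSN as described above (host, credentials, and database are placeholders):

package main

import (
	"database/sql"
	"log"

	_ "github.com/denisenkom/go-mssqldb" // registers the "sqlserver" and "mssql" drivers
)

func main() {
	// URL-style DSN; adjust the values for your server.
	dsn := "sqlserver://sa:mypass@localhost:1433?database=master&connection+timeout=30"
	db, err := sql.Open("sqlserver", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}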
|
||||
292
vendor/github.com/denisenkom/go-mssqldb/mssql.go
generated
vendored
@@ -13,32 +13,35 @@ import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// ReturnStatus may be used to return the return value from a proc.
|
||||
//
|
||||
// var rs mssql.ReturnStatus
|
||||
// _, err := db.Exec("theproc", &rs)
|
||||
// log.Printf("return status = %d", rs)
|
||||
type ReturnStatus int32
|
||||
|
||||
var driverInstance = &Driver{processQueryText: true}
|
||||
var driverInstanceNoProcess = &Driver{processQueryText: false}
|
||||
|
||||
func init() {
|
||||
sql.Register("mssql", driverInstance)
|
||||
sql.Register("sqlserver", driverInstanceNoProcess)
|
||||
createDialer = func(p *connectParams) dialer {
|
||||
return tcpDialer{&net.Dialer{KeepAlive: p.keepAlive}}
|
||||
createDialer = func(p *connectParams) Dialer {
|
||||
return netDialer{&net.Dialer{KeepAlive: p.keepAlive}}
|
||||
}
|
||||
}
|
||||
|
||||
// Abstract the dialer for testing and for non-TCP based connections.
|
||||
type dialer interface {
|
||||
Dial(ctx context.Context, addr string) (net.Conn, error)
|
||||
}
|
||||
var createDialer func(p *connectParams) Dialer
|
||||
|
||||
var createDialer func(p *connectParams) dialer
|
||||
|
||||
type tcpDialer struct {
|
||||
type netDialer struct {
|
||||
nd *net.Dialer
|
||||
}
|
||||
|
||||
func (d tcpDialer) Dial(ctx context.Context, addr string) (net.Conn, error) {
|
||||
return d.nd.DialContext(ctx, "tcp", addr)
|
||||
func (d netDialer) DialContext(ctx context.Context, network string, addr string) (net.Conn, error) {
|
||||
return d.nd.DialContext(ctx, network, addr)
|
||||
}
|
||||
|
||||
type Driver struct {
|
||||
@@ -72,6 +75,20 @@ func (d *Driver) SetLogger(logger Logger) {
|
||||
d.log = optionalLogger{logger}
|
||||
}
|
||||
|
||||
// NewConnector creates a new connector from a DSN.
|
||||
// The returned connector may be used with sql.OpenDB.
|
||||
func NewConnector(dsn string) (*Connector, error) {
|
||||
params, err := parseConnectParams(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &Connector{
|
||||
params: params,
|
||||
driver: driverInstanceNoProcess,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
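A short sketch of pairing NewConnector with sql.OpenDB (Go 1.10+); the DSN is a placeholder, error handling is abbreviated, and imports of database/sql, log, and the mssql package are assumed:

connector, err := mssql.NewConnector("sqlserver://user:pass@localhost?database=master")
if err != nil {
	log.Fatal(err)
}
db := sql.OpenDB(connector) // behaves like the "sqlserver" driver: no query pre-processing
defer db.Close()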
|
||||
// Connector holds the parsed DSN and is ready to make a new connection
|
||||
// at any time.
|
||||
//
|
||||
@@ -81,11 +98,12 @@ type Connector struct {
|
||||
params connectParams
|
||||
driver *Driver
|
||||
|
||||
// ResetSQL is executed after marking a given connection to be reset.
|
||||
// When not present, the next query will be reset to the database
|
||||
// defaults.
|
||||
// When present the connection will immediately mark the connection to
|
||||
// be reset, then execute the ResetSQL text to setup the session
|
||||
// SessionInitSQL is executed after marking a given session to be reset.
|
||||
// When not present, the next query will still reset the session to the
|
||||
// database defaults.
|
||||
//
|
||||
// When present the connection will immediately mark the session to
|
||||
// be reset, then execute the SessionInitSQL text to setup the session
|
||||
// that may be different from the base database defaults.
|
||||
//
|
||||
// For Example, the application relies on the following defaults
|
||||
@@ -96,9 +114,27 @@ type Connector struct {
|
||||
// SET ANSI_NULLS ON;
|
||||
// SET LOCK_TIMEOUT 10000;
|
||||
//
|
||||
// ResetSQL should not attempt to manually call sp_reset_connection.
|
||||
// SessionInitSQL should not attempt to manually call sp_reset_connection.
|
||||
// This will happen at the TDS layer.
|
||||
ResetSQL string
|
||||
//
|
||||
// SessionInitSQL is optional. The session will be reset even if
|
||||
// SessionInitSQL is empty.
|
||||
SessionInitSQL string
|
||||
|
||||
// Dialer sets a custom dialer for all network operations.
|
||||
// If Dialer is not set, normal net dialers are used.
|
||||
Dialer Dialer
|
||||
}
|
||||
|
||||
type Dialer interface {
|
||||
DialContext(ctx context.Context, network string, addr string) (net.Conn, error)
|
||||
}
|
||||
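A sketch of configuring SessionInitSQL and a custom Dialer on a Connector; *net.Dialer already provides a DialContext method matching the Dialer interface above, the DSN is a placeholder, and imports of database/sql, log, net, time, and the mssql package are assumed:

c, err := mssql.NewConnector("sqlserver://user:pass@localhost?database=master")
if err != nil {
	log.Fatal(err)
}
c.SessionInitSQL = "SET ANSI_NULLS ON; SET LOCK_TIMEOUT 10000;" // re-applied after each session reset
c.Dialer = &net.Dialer{KeepAlive: 30 * time.Second}             // custom dialer for all network operations
db := sql.OpenDB(c)
defer db.Close()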
|
||||
func (c *Connector) getDialer(p *connectParams) Dialer {
|
||||
if c != nil && c.Dialer != nil {
|
||||
return c.Dialer
|
||||
}
|
||||
return createDialer(p)
|
||||
}
|
||||
|
||||
type Conn struct {
|
||||
@@ -110,7 +146,15 @@ type Conn struct {
|
||||
processQueryText bool
|
||||
connectionGood bool
|
||||
|
||||
outs map[string]interface{}
|
||||
outs map[string]interface{}
|
||||
returnStatus *ReturnStatus
|
||||
}
|
||||
|
||||
func (c *Conn) setReturnStatus(s ReturnStatus) {
|
||||
if c.returnStatus == nil {
|
||||
return
|
||||
}
|
||||
*c.returnStatus = s
|
||||
}
|
||||
|
||||
func (c *Conn) checkBadConn(err error) error {
|
||||
@@ -257,7 +301,7 @@ func (c *Conn) sendBeginRequest(ctx context.Context, tdsIsolation isoLevel) erro
|
||||
c.sess.log.Printf("Failed to send BeginXact with %v", err)
|
||||
}
|
||||
c.connectionGood = false
|
||||
return fmt.Errorf("Failed to send BiginXant: %v", err)
|
||||
return fmt.Errorf("Failed to send BeginXact: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -276,12 +320,12 @@ func (d *Driver) open(ctx context.Context, dsn string) (*Conn, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.connect(ctx, params)
|
||||
return d.connect(ctx, nil, params)
|
||||
}
|
||||
|
||||
// connect to the server, using the provided context for dialing only.
|
||||
func (d *Driver) connect(ctx context.Context, params connectParams) (*Conn, error) {
|
||||
sess, err := connect(ctx, d.log, params)
|
||||
func (d *Driver) connect(ctx context.Context, c *Connector, params connectParams) (*Conn, error) {
|
||||
sess, err := connect(ctx, c, d.log, params)
|
||||
if err != nil {
|
||||
// main server failed, try fail-over partner
|
||||
if params.failOverPartner == "" {
|
||||
@@ -293,7 +337,7 @@ func (d *Driver) connect(ctx context.Context, params connectParams) (*Conn, erro
|
||||
params.port = params.failOverPort
|
||||
}
|
||||
|
||||
sess, err = connect(ctx, d.log, params)
|
||||
sess, err = connect(ctx, c, d.log, params)
|
||||
if err != nil {
|
||||
// fail-over partner also failed, now fail
|
||||
return nil, err
|
||||
@@ -301,6 +345,7 @@ func (d *Driver) connect(ctx context.Context, params connectParams) (*Conn, erro
|
||||
}
|
||||
|
||||
conn := &Conn{
|
||||
connector: c,
|
||||
sess: sess,
|
||||
transactionCtx: context.Background(),
|
||||
processQueryText: d.processQueryText,
|
||||
@@ -407,11 +452,14 @@ func (s *Stmt) sendQuery(args []namedValue) (err error) {
|
||||
return fmt.Errorf("failed to send SQL Batch: %v", err)
|
||||
}
|
||||
} else {
|
||||
proc := Sp_ExecuteSql
|
||||
var params []Param
|
||||
proc := sp_ExecuteSql
|
||||
var params []param
|
||||
if isProc(s.query) {
|
||||
proc.name = s.query
|
||||
params, _, err = s.makeRPCParams(args, 0)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
var decls []string
|
||||
params, decls, err = s.makeRPCParams(args, 2)
|
||||
@@ -438,15 +486,57 @@ func isProc(s string) bool {
|
||||
if len(s) == 0 {
|
||||
return false
|
||||
}
|
||||
if s[0] == '[' && s[len(s)-1] == ']' && strings.ContainsAny(s, "\n\r") == false {
|
||||
return true
|
||||
const (
|
||||
outside = iota
|
||||
text
|
||||
escaped
|
||||
)
|
||||
st := outside
|
||||
var rn1, rPrev rune
|
||||
for _, r := range s {
|
||||
rPrev = rn1
|
||||
rn1 = r
|
||||
switch r {
|
||||
// No newlines or string sequences.
|
||||
case '\n', '\r', '\'', ';':
|
||||
return false
|
||||
}
|
||||
switch st {
|
||||
case outside:
|
||||
switch {
|
||||
case unicode.IsSpace(r):
|
||||
return false
|
||||
case r == '[':
|
||||
st = escaped
|
||||
continue
|
||||
case r == ']' && rPrev == ']':
|
||||
st = escaped
|
||||
continue
|
||||
case unicode.IsLetter(r):
|
||||
st = text
|
||||
}
|
||||
case text:
|
||||
switch {
|
||||
case r == '.':
|
||||
st = outside
|
||||
continue
|
||||
case unicode.IsSpace(r):
|
||||
return false
|
||||
}
|
||||
case escaped:
|
||||
switch {
|
||||
case r == ']':
|
||||
st = outside
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
return !strings.ContainsAny(s, " \t\n\r;")
|
||||
return true
|
||||
}
|
||||
|
||||
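An in-package sketch (hypothetical, not part of the vendored file) of what the rewritten isProc accepts: a bare procedure name, possibly schema-qualified or bracket-escaped, but not a query containing whitespace, quotes, or semicolons outside brackets.

// Illustrative expectations for isProc after the rewrite above.
func exampleIsProc() {
	_ = isProc("dbo.MyProc")      // true: schema-qualified identifier
	_ = isProc("[My Proc]")       // true: bracket-escaped names may contain spaces
	_ = isProc("select 1 from t") // false: whitespace outside brackets marks a query
}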
func (s *Stmt) makeRPCParams(args []namedValue, offset int) ([]Param, []string, error) {
|
||||
func (s *Stmt) makeRPCParams(args []namedValue, offset int) ([]param, []string, error) {
|
||||
var err error
|
||||
params := make([]Param, len(args)+offset)
|
||||
params := make([]param, len(args)+offset)
|
||||
decls := make([]string, len(args))
|
||||
for i, val := range args {
|
||||
params[i+offset], err = s.makeParam(val.Value)
|
||||
@@ -520,6 +610,8 @@ loop:
|
||||
if token.isError() {
|
||||
return nil, s.c.checkBadConn(token.getError())
|
||||
}
|
||||
case ReturnStatus:
|
||||
s.c.setReturnStatus(token)
|
||||
case error:
|
||||
return nil, s.c.checkBadConn(token)
|
||||
}
|
||||
@@ -563,6 +655,8 @@ func (s *Stmt) processExec(ctx context.Context) (res driver.Result, err error) {
|
||||
if token.isError() {
|
||||
return nil, token.getError()
|
||||
}
|
||||
case ReturnStatus:
|
||||
s.c.setReturnStatus(token)
|
||||
case error:
|
||||
return nil, token
|
||||
}
|
||||
@@ -688,14 +782,14 @@ func (r *Rows) ColumnTypeNullable(index int) (nullable, ok bool) {
|
||||
return
|
||||
}
|
||||
|
||||
func makeStrParam(val string) (res Param) {
|
||||
func makeStrParam(val string) (res param) {
|
||||
res.ti.TypeId = typeNVarChar
|
||||
res.buffer = str2ucs2(val)
|
||||
res.ti.Size = len(res.buffer)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Stmt) makeParam(val driver.Value) (res Param, err error) {
|
||||
func (s *Stmt) makeParam(val driver.Value) (res param, err error) {
|
||||
if val == nil {
|
||||
res.ti.TypeId = typeNull
|
||||
res.buffer = nil
|
||||
@@ -708,17 +802,34 @@ func (s *Stmt) makeParam(val driver.Value) (res Param, err error) {
|
||||
res.buffer = make([]byte, 8)
|
||||
res.ti.Size = 8
|
||||
binary.LittleEndian.PutUint64(res.buffer, uint64(val))
|
||||
case sql.NullInt64:
|
||||
// only null values should be getting here
|
||||
res.ti.TypeId = typeIntN
|
||||
res.ti.Size = 8
|
||||
res.buffer = []byte{}
|
||||
|
||||
case float64:
|
||||
res.ti.TypeId = typeFltN
|
||||
res.ti.Size = 8
|
||||
res.buffer = make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(res.buffer, math.Float64bits(val))
|
||||
case sql.NullFloat64:
|
||||
// only null values should be getting here
|
||||
res.ti.TypeId = typeFltN
|
||||
res.ti.Size = 8
|
||||
res.buffer = []byte{}
|
||||
|
||||
case []byte:
|
||||
res.ti.TypeId = typeBigVarBin
|
||||
res.ti.Size = len(val)
|
||||
res.buffer = val
|
||||
case string:
|
||||
res = makeStrParam(val)
|
||||
case sql.NullString:
|
||||
// only null values should be getting here
|
||||
res.ti.TypeId = typeNVarChar
|
||||
res.buffer = nil
|
||||
res.ti.Size = 8000
|
||||
case bool:
|
||||
res.ti.TypeId = typeBitN
|
||||
res.ti.Size = 1
|
||||
@@ -726,37 +837,22 @@ func (s *Stmt) makeParam(val driver.Value) (res Param, err error) {
|
||||
if val {
|
||||
res.buffer[0] = 1
|
||||
}
|
||||
case sql.NullBool:
|
||||
// only null values should be getting here
|
||||
res.ti.TypeId = typeBitN
|
||||
res.ti.Size = 1
|
||||
res.buffer = []byte{}
|
||||
|
||||
case time.Time:
|
||||
if s.c.sess.loginAck.TDSVersion >= verTDS73 {
|
||||
res.ti.TypeId = typeDateTimeOffsetN
|
||||
res.ti.Scale = 7
|
||||
res.ti.Size = 10
|
||||
buf := make([]byte, 10)
|
||||
res.buffer = buf
|
||||
days, ns := dateTime2(val)
|
||||
ns /= 100
|
||||
buf[0] = byte(ns)
|
||||
buf[1] = byte(ns >> 8)
|
||||
buf[2] = byte(ns >> 16)
|
||||
buf[3] = byte(ns >> 24)
|
||||
buf[4] = byte(ns >> 32)
|
||||
buf[5] = byte(days)
|
||||
buf[6] = byte(days >> 8)
|
||||
buf[7] = byte(days >> 16)
|
||||
_, offset := val.Zone()
|
||||
offset /= 60
|
||||
buf[8] = byte(offset)
|
||||
buf[9] = byte(offset >> 8)
|
||||
res.buffer = encodeDateTimeOffset(val, int(res.ti.Scale))
|
||||
res.ti.Size = len(res.buffer)
|
||||
} else {
|
||||
res.ti.TypeId = typeDateTimeN
|
||||
res.ti.Size = 8
|
||||
res.buffer = make([]byte, 8)
|
||||
ref := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
dur := val.Sub(ref)
|
||||
days := dur / (24 * time.Hour)
|
||||
tm := (300 * (dur % (24 * time.Hour))) / time.Second
|
||||
binary.LittleEndian.PutUint32(res.buffer[0:4], uint32(days))
|
||||
binary.LittleEndian.PutUint32(res.buffer[4:8], uint32(tm))
|
||||
res.buffer = encodeDateTime(val)
|
||||
res.ti.Size = len(res.buffer)
|
||||
}
|
||||
default:
|
||||
return s.makeParamExtra(val)
|
||||
@@ -795,3 +891,83 @@ func (r *Result) LastInsertId() (int64, error) {
|
||||
lastInsertId := dest[0].(int64)
|
||||
return lastInsertId, nil
|
||||
}
|
||||
|
||||
var _ driver.Pinger = &Conn{}
|
||||
|
||||
// Ping is used to check if the remote server is available and satisfies the Pinger interface.
|
||||
func (c *Conn) Ping(ctx context.Context) error {
|
||||
if !c.connectionGood {
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
stmt := &Stmt{c, `select 1;`, 0, nil}
|
||||
_, err := stmt.ExecContext(ctx, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
var _ driver.ConnBeginTx = &Conn{}
|
||||
|
||||
// BeginTx satisfies ConnBeginTx.
|
||||
func (c *Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
|
||||
if !c.connectionGood {
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
if opts.ReadOnly {
|
||||
return nil, errors.New("Read-only transactions are not supported")
|
||||
}
|
||||
|
||||
var tdsIsolation isoLevel
|
||||
switch sql.IsolationLevel(opts.Isolation) {
|
||||
case sql.LevelDefault:
|
||||
tdsIsolation = isolationUseCurrent
|
||||
case sql.LevelReadUncommitted:
|
||||
tdsIsolation = isolationReadUncommited
|
||||
case sql.LevelReadCommitted:
|
||||
tdsIsolation = isolationReadCommited
|
||||
case sql.LevelWriteCommitted:
|
||||
return nil, errors.New("LevelWriteCommitted isolation level is not supported")
|
||||
case sql.LevelRepeatableRead:
|
||||
tdsIsolation = isolationRepeatableRead
|
||||
case sql.LevelSnapshot:
|
||||
tdsIsolation = isolationSnapshot
|
||||
case sql.LevelSerializable:
|
||||
tdsIsolation = isolationSerializable
|
||||
case sql.LevelLinearizable:
|
||||
return nil, errors.New("LevelLinearizable isolation level is not supported")
|
||||
default:
|
||||
return nil, errors.New("Isolation level is not supported or unknown")
|
||||
}
|
||||
return c.begin(ctx, tdsIsolation)
|
||||
}
|
||||
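A usage sketch of the isolation-level mapping above; snapshot isolation is assumed to be enabled on the target database, and db/ctx come from the surrounding application code:

tx, err := db.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSnapshot})
if err != nil {
	log.Fatal(err)
}
defer tx.Rollback()
// ... queries on tx ...
if err := tx.Commit(); err != nil {
	log.Fatal(err)
}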
|
||||
func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
|
||||
if !c.connectionGood {
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
if len(query) > 10 && strings.EqualFold(query[:10], "INSERTBULK") {
|
||||
return c.prepareCopyIn(ctx, query)
|
||||
}
|
||||
|
||||
return c.prepareContext(ctx, query)
|
||||
}
|
||||
|
||||
func (s *Stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
|
||||
if !s.c.connectionGood {
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
list := make([]namedValue, len(args))
|
||||
for i, nv := range args {
|
||||
list[i] = namedValue(nv)
|
||||
}
|
||||
return s.queryContext(ctx, list)
|
||||
}
|
||||
|
||||
func (s *Stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
|
||||
if !s.c.connectionGood {
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
list := make([]namedValue, len(args))
|
||||
for i, nv := range args {
|
||||
list[i] = namedValue(nv)
|
||||
}
|
||||
return s.exec(ctx, list)
|
||||
}
|
||||
|
||||
9
vendor/github.com/denisenkom/go-mssqldb/mssql_go110.go
generated
vendored
@@ -16,11 +16,11 @@ func (c *Conn) ResetSession(ctx context.Context) error {
|
||||
}
|
||||
c.resetSession = true
|
||||
|
||||
if c.connector == nil || len(c.connector.ResetSQL) == 0 {
|
||||
if c.connector == nil || len(c.connector.SessionInitSQL) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
s, err := c.prepareContext(ctx, c.connector.ResetSQL)
|
||||
s, err := c.prepareContext(ctx, c.connector.SessionInitSQL)
|
||||
if err != nil {
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
@@ -34,10 +34,7 @@ func (c *Conn) ResetSession(ctx context.Context) error {
|
||||
|
||||
// Connect to the server and return a TDS connection.
|
||||
func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) {
|
||||
conn, err := c.driver.connect(ctx, c.params)
|
||||
if conn != nil {
|
||||
conn.connector = c
|
||||
}
|
||||
conn, err := c.driver.connect(ctx, c, c.params)
|
||||
if err == nil {
|
||||
err = conn.ResetSession(ctx)
|
||||
}
|
||||
|
||||
91
vendor/github.com/denisenkom/go-mssqldb/mssql_go18.go
generated
vendored
@@ -1,91 +0,0 @@
|
||||
// +build go1.8
|
||||
|
||||
package mssql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var _ driver.Pinger = &Conn{}
|
||||
|
||||
// Ping is used to check if the remote server is available and satisfies the Pinger interface.
|
||||
func (c *Conn) Ping(ctx context.Context) error {
|
||||
if !c.connectionGood {
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
stmt := &Stmt{c, `select 1;`, 0, nil}
|
||||
_, err := stmt.ExecContext(ctx, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
var _ driver.ConnBeginTx = &Conn{}
|
||||
|
||||
// BeginTx satisfies ConnBeginTx.
|
||||
func (c *Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
|
||||
if !c.connectionGood {
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
if opts.ReadOnly {
|
||||
return nil, errors.New("Read-only transactions are not supported")
|
||||
}
|
||||
|
||||
var tdsIsolation isoLevel
|
||||
switch sql.IsolationLevel(opts.Isolation) {
|
||||
case sql.LevelDefault:
|
||||
tdsIsolation = isolationUseCurrent
|
||||
case sql.LevelReadUncommitted:
|
||||
tdsIsolation = isolationReadUncommited
|
||||
case sql.LevelReadCommitted:
|
||||
tdsIsolation = isolationReadCommited
|
||||
case sql.LevelWriteCommitted:
|
||||
return nil, errors.New("LevelWriteCommitted isolation level is not supported")
|
||||
case sql.LevelRepeatableRead:
|
||||
tdsIsolation = isolationRepeatableRead
|
||||
case sql.LevelSnapshot:
|
||||
tdsIsolation = isolationSnapshot
|
||||
case sql.LevelSerializable:
|
||||
tdsIsolation = isolationSerializable
|
||||
case sql.LevelLinearizable:
|
||||
return nil, errors.New("LevelLinearizable isolation level is not supported")
|
||||
default:
|
||||
return nil, errors.New("Isolation level is not supported or unknown")
|
||||
}
|
||||
return c.begin(ctx, tdsIsolation)
|
||||
}
|
||||
|
||||
func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
|
||||
if !c.connectionGood {
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
if len(query) > 10 && strings.EqualFold(query[:10], "INSERTBULK") {
|
||||
return c.prepareCopyIn(ctx, query)
|
||||
}
|
||||
|
||||
return c.prepareContext(ctx, query)
|
||||
}
|
||||
|
||||
func (s *Stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
|
||||
if !s.c.connectionGood {
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
list := make([]namedValue, len(args))
|
||||
for i, nv := range args {
|
||||
list[i] = namedValue(nv)
|
||||
}
|
||||
return s.queryContext(ctx, list)
|
||||
}
|
||||
|
||||
func (s *Stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
|
||||
if !s.c.connectionGood {
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
list := make([]namedValue, len(args))
|
||||
for i, nv := range args {
|
||||
list[i] = namedValue(nv)
|
||||
}
|
||||
return s.exec(ctx, list)
|
||||
}
|
||||
174
vendor/github.com/denisenkom/go-mssqldb/mssql_go19.go
generated
vendored
@@ -5,23 +5,64 @@ package mssql
|
||||
import (
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
// "github.com/cockroachdb/apd"
|
||||
"cloud.google.com/go/civil"
|
||||
)
|
||||
|
||||
// Type alias provided for compibility.
|
||||
//
|
||||
// Deprecated: users should transition to the new names when possible.
|
||||
type MssqlDriver = Driver
|
||||
type MssqlBulk = Bulk
|
||||
type MssqlBulkOptions = BulkOptions
|
||||
type MssqlConn = Conn
|
||||
type MssqlResult = Result
|
||||
type MssqlRows = Rows
|
||||
type MssqlStmt = Stmt
|
||||
// Type alias provided for compatibility.
|
||||
|
||||
type MssqlDriver = Driver // Deprecated: users should transition to the new name when possible.
|
||||
type MssqlBulk = Bulk // Deprecated: users should transition to the new name when possible.
|
||||
type MssqlBulkOptions = BulkOptions // Deprecated: users should transition to the new name when possible.
|
||||
type MssqlConn = Conn // Deprecated: users should transition to the new name when possible.
|
||||
type MssqlResult = Result // Deprecated: users should transition to the new name when possible.
|
||||
type MssqlRows = Rows // Deprecated: users should transition to the new name when possible.
|
||||
type MssqlStmt = Stmt // Deprecated: users should transition to the new name when possible.
|
||||
|
||||
var _ driver.NamedValueChecker = &Conn{}
|
||||
|
||||
// VarChar parameter types.
|
||||
type VarChar string
|
||||
|
||||
type NVarCharMax string
|
||||
type VarCharMax string
|
||||
|
||||
// DateTime1 encodes parameters to original DateTime SQL types.
|
||||
type DateTime1 time.Time
|
||||
|
||||
// DateTimeOffset encodes parameters to DateTimeOffset, preserving the UTC offset.
|
||||
type DateTimeOffset time.Time
|
||||
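A sketch of passing the parameter types above through database/sql; the table and column names are hypothetical, and imports of time and the mssql package are assumed:

_, err := db.ExecContext(ctx,
	"INSERT INTO dbo.events (name, occurred_at) VALUES (@p1, @p2)",
	mssql.VarChar("deploy"),          // sent as VARCHAR instead of NVARCHAR
	mssql.DateTimeOffset(time.Now()), // preserves the time's UTC offset
)
if err != nil {
	log.Fatal(err)
}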
|
||||
func convertInputParameter(val interface{}) (interface{}, error) {
|
||||
switch v := val.(type) {
|
||||
case VarChar:
|
||||
return val, nil
|
||||
case NVarCharMax:
|
||||
return val, nil
|
||||
case VarCharMax:
|
||||
return val, nil
|
||||
case DateTime1:
|
||||
return val, nil
|
||||
case DateTimeOffset:
|
||||
return val, nil
|
||||
case civil.Date:
|
||||
return val, nil
|
||||
case civil.DateTime:
|
||||
return val, nil
|
||||
case civil.Time:
|
||||
return val, nil
|
||||
// case *apd.Decimal:
|
||||
// return nil
|
||||
default:
|
||||
return driver.DefaultParameterConverter.ConvertValue(v)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Conn) CheckNamedValue(nv *driver.NamedValue) error {
|
||||
switch v := nv.Value.(type) {
|
||||
case sql.Out:
|
||||
@@ -30,35 +71,114 @@ func (c *Conn) CheckNamedValue(nv *driver.NamedValue) error {
|
||||
}
|
||||
c.outs[nv.Name] = v.Dest
|
||||
|
||||
// Unwrap the Out value and check the inner value.
|
||||
lnv := *nv
|
||||
lnv.Value = v.Dest
|
||||
err := c.CheckNamedValue(&lnv)
|
||||
if err != nil {
|
||||
if err != driver.ErrSkip {
|
||||
return err
|
||||
}
|
||||
lnv.Value, err = driver.DefaultParameterConverter.ConvertValue(lnv.Value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if v.Dest == nil {
|
||||
return errors.New("destination is a nil pointer")
|
||||
}
|
||||
|
||||
dest_info := reflect.ValueOf(v.Dest)
|
||||
if dest_info.Kind() != reflect.Ptr {
|
||||
return errors.New("destination not a pointer")
|
||||
}
|
||||
|
||||
if dest_info.IsNil() {
|
||||
return errors.New("destination is a nil pointer")
|
||||
}
|
||||
|
||||
pointed_value := reflect.Indirect(dest_info)
|
||||
|
||||
// don't allow pointer to a pointer, only pointer to a value can be handled
|
||||
// correctly
|
||||
if pointed_value.Kind() == reflect.Ptr {
|
||||
return errors.New("destination is a pointer to a pointer")
|
||||
}
|
||||
|
||||
// Unwrap the Out value and check the inner value.
|
||||
val := pointed_value.Interface()
|
||||
if val == nil {
|
||||
return errors.New("MSSQL does not allow NULL value without type for OUTPUT parameters")
|
||||
}
|
||||
conv, err := convertInputParameter(val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if conv == nil {
|
||||
// if we replace with nil we would lose type information
|
||||
nv.Value = sql.Out{Dest: val}
|
||||
} else {
|
||||
nv.Value = sql.Out{Dest: conv}
|
||||
}
|
||||
nv.Value = sql.Out{Dest: lnv.Value}
|
||||
return nil
|
||||
// case *apd.Decimal:
|
||||
// return nil
|
||||
case *ReturnStatus:
|
||||
*v = 0 // By default the return value should be zero.
|
||||
c.returnStatus = v
|
||||
return driver.ErrRemoveArgument
|
||||
case TVPType:
|
||||
return nil
|
||||
default:
|
||||
return driver.ErrSkip
|
||||
var err error
|
||||
nv.Value, err = convertInputParameter(nv.Value)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Stmt) makeParamExtra(val driver.Value) (res Param, err error) {
|
||||
func (s *Stmt) makeParamExtra(val driver.Value) (res param, err error) {
|
||||
switch val := val.(type) {
|
||||
case VarChar:
|
||||
res.ti.TypeId = typeBigVarChar
|
||||
res.buffer = []byte(val)
|
||||
res.ti.Size = len(res.buffer)
|
||||
case VarCharMax:
|
||||
res.ti.TypeId = typeBigVarChar
|
||||
res.buffer = []byte(val)
|
||||
res.ti.Size = 0 // currently zero forces varchar(max)
|
||||
case NVarCharMax:
|
||||
res.ti.TypeId = typeNVarChar
|
||||
res.buffer = str2ucs2(string(val))
|
||||
res.ti.Size = 0 // currently zero forces nvarchar(max)
|
||||
case DateTime1:
|
||||
t := time.Time(val)
|
||||
res.ti.TypeId = typeDateTimeN
|
||||
res.buffer = encodeDateTime(t)
|
||||
res.ti.Size = len(res.buffer)
|
||||
case DateTimeOffset:
|
||||
res.ti.TypeId = typeDateTimeOffsetN
|
||||
res.ti.Scale = 7
|
||||
res.buffer = encodeDateTimeOffset(time.Time(val), int(res.ti.Scale))
|
||||
res.ti.Size = len(res.buffer)
|
||||
case civil.Date:
|
||||
res.ti.TypeId = typeDateN
|
||||
res.buffer = encodeDate(val.In(time.UTC))
|
||||
res.ti.Size = len(res.buffer)
|
||||
case civil.DateTime:
|
||||
res.ti.TypeId = typeDateTime2N
|
||||
res.ti.Scale = 7
|
||||
res.buffer = encodeDateTime2(val.In(time.UTC), int(res.ti.Scale))
|
||||
res.ti.Size = len(res.buffer)
|
||||
case civil.Time:
|
||||
res.ti.TypeId = typeTimeN
|
||||
res.ti.Scale = 7
|
||||
res.buffer = encodeTime(val.Hour, val.Minute, val.Second, val.Nanosecond, int(res.ti.Scale))
|
||||
res.ti.Size = len(res.buffer)
|
||||
case sql.Out:
|
||||
res, err = s.makeParam(val.Dest)
|
||||
res.Flags = fByRevValue
|
||||
case TVPType:
|
||||
err = val.check()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
res.ti.UdtInfo.TypeName = val.TVPTypeName
|
||||
res.ti.UdtInfo.SchemaName = val.TVPScheme
|
||||
res.ti.TypeId = typeTvp
|
||||
res.buffer, err = val.encode()
|
||||
res.ti.Size = len(res.buffer)
|
||||
|
||||
default:
|
||||
err = fmt.Errorf("mssql: unknown type for %T", val)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func scanIntoOut(name string, fromServer, scanInto interface{}) error {
|
||||
return convertAssign(scanInto, fromServer)
|
||||
}
|
||||
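A sketch of how OUTPUT parameters and the return status flow through CheckNamedValue and scanIntoOut above; the stored procedure and parameter names are hypothetical:

var total int64
var rs mssql.ReturnStatus
_, err := db.ExecContext(ctx, "dbo.usp_GetTotal", // executed as an RPC because isProc matches
	sql.Named("customerID", int64(42)),
	sql.Named("total", sql.Out{Dest: &total}), // scanned back via convertAssign
	&rs, // captures the procedure's return status
)
if err != nil {
	log.Fatal(err)
}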
|
||||
8
vendor/github.com/denisenkom/go-mssqldb/mssql_go19pre.go
generated
vendored
@@ -7,6 +7,10 @@ import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func (s *Stmt) makeParamExtra(val driver.Value) (Param, error) {
|
||||
return Param{}, fmt.Errorf("mssql: unknown type for %T", val)
|
||||
func (s *Stmt) makeParamExtra(val driver.Value) (param, error) {
|
||||
return param{}, fmt.Errorf("mssql: unknown type for %T", val)
|
||||
}
|
||||
|
||||
func scanIntoOut(name string, fromServer, scanInto interface{}) error {
|
||||
return fmt.Errorf("mssql: unsupported OUTPUT type, use a newer Go version")
|
||||
}
|
||||
|
||||
2
vendor/github.com/denisenkom/go-mssqldb/net.go
generated
vendored
@@ -14,7 +14,7 @@ type timeoutConn struct {
|
||||
continueRead bool
|
||||
}
|
||||
|
||||
func NewTimeoutConn(conn net.Conn, timeout time.Duration) *timeoutConn {
|
||||
func newTimeoutConn(conn net.Conn, timeout time.Duration) *timeoutConn {
|
||||
return &timeoutConn{
|
||||
c: conn,
|
||||
timeout: timeout,
|
||||
|
||||
82
vendor/github.com/denisenkom/go-mssqldb/ntlm.go
generated
vendored
@@ -15,44 +15,44 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
NEGOTIATE_MESSAGE = 1
|
||||
CHALLENGE_MESSAGE = 2
|
||||
AUTHENTICATE_MESSAGE = 3
|
||||
_NEGOTIATE_MESSAGE = 1
|
||||
_CHALLENGE_MESSAGE = 2
|
||||
_AUTHENTICATE_MESSAGE = 3
|
||||
)
|
||||
|
||||
const (
|
||||
NEGOTIATE_UNICODE = 0x00000001
|
||||
NEGOTIATE_OEM = 0x00000002
|
||||
NEGOTIATE_TARGET = 0x00000004
|
||||
NEGOTIATE_SIGN = 0x00000010
|
||||
NEGOTIATE_SEAL = 0x00000020
|
||||
NEGOTIATE_DATAGRAM = 0x00000040
|
||||
NEGOTIATE_LMKEY = 0x00000080
|
||||
NEGOTIATE_NTLM = 0x00000200
|
||||
NEGOTIATE_ANONYMOUS = 0x00000800
|
||||
NEGOTIATE_OEM_DOMAIN_SUPPLIED = 0x00001000
|
||||
NEGOTIATE_OEM_WORKSTATION_SUPPLIED = 0x00002000
|
||||
NEGOTIATE_ALWAYS_SIGN = 0x00008000
|
||||
NEGOTIATE_TARGET_TYPE_DOMAIN = 0x00010000
|
||||
NEGOTIATE_TARGET_TYPE_SERVER = 0x00020000
|
||||
NEGOTIATE_EXTENDED_SESSIONSECURITY = 0x00080000
|
||||
NEGOTIATE_IDENTIFY = 0x00100000
|
||||
REQUEST_NON_NT_SESSION_KEY = 0x00400000
|
||||
NEGOTIATE_TARGET_INFO = 0x00800000
|
||||
NEGOTIATE_VERSION = 0x02000000
|
||||
NEGOTIATE_128 = 0x20000000
|
||||
NEGOTIATE_KEY_EXCH = 0x40000000
|
||||
NEGOTIATE_56 = 0x80000000
|
||||
_NEGOTIATE_UNICODE = 0x00000001
|
||||
_NEGOTIATE_OEM = 0x00000002
|
||||
_NEGOTIATE_TARGET = 0x00000004
|
||||
_NEGOTIATE_SIGN = 0x00000010
|
||||
_NEGOTIATE_SEAL = 0x00000020
|
||||
_NEGOTIATE_DATAGRAM = 0x00000040
|
||||
_NEGOTIATE_LMKEY = 0x00000080
|
||||
_NEGOTIATE_NTLM = 0x00000200
|
||||
_NEGOTIATE_ANONYMOUS = 0x00000800
|
||||
_NEGOTIATE_OEM_DOMAIN_SUPPLIED = 0x00001000
|
||||
_NEGOTIATE_OEM_WORKSTATION_SUPPLIED = 0x00002000
|
||||
_NEGOTIATE_ALWAYS_SIGN = 0x00008000
|
||||
_NEGOTIATE_TARGET_TYPE_DOMAIN = 0x00010000
|
||||
_NEGOTIATE_TARGET_TYPE_SERVER = 0x00020000
|
||||
_NEGOTIATE_EXTENDED_SESSIONSECURITY = 0x00080000
|
||||
_NEGOTIATE_IDENTIFY = 0x00100000
|
||||
_REQUEST_NON_NT_SESSION_KEY = 0x00400000
|
||||
_NEGOTIATE_TARGET_INFO = 0x00800000
|
||||
_NEGOTIATE_VERSION = 0x02000000
|
||||
_NEGOTIATE_128 = 0x20000000
|
||||
_NEGOTIATE_KEY_EXCH = 0x40000000
|
||||
_NEGOTIATE_56 = 0x80000000
|
||||
)
|
||||
|
||||
const NEGOTIATE_FLAGS = NEGOTIATE_UNICODE |
|
||||
NEGOTIATE_NTLM |
|
||||
NEGOTIATE_OEM_DOMAIN_SUPPLIED |
|
||||
NEGOTIATE_OEM_WORKSTATION_SUPPLIED |
|
||||
NEGOTIATE_ALWAYS_SIGN |
|
||||
NEGOTIATE_EXTENDED_SESSIONSECURITY
|
||||
const _NEGOTIATE_FLAGS = _NEGOTIATE_UNICODE |
|
||||
_NEGOTIATE_NTLM |
|
||||
_NEGOTIATE_OEM_DOMAIN_SUPPLIED |
|
||||
_NEGOTIATE_OEM_WORKSTATION_SUPPLIED |
|
||||
_NEGOTIATE_ALWAYS_SIGN |
|
||||
_NEGOTIATE_EXTENDED_SESSIONSECURITY
|
||||
|
||||
type NTLMAuth struct {
|
||||
type ntlmAuth struct {
|
||||
Domain string
|
||||
UserName string
|
||||
Password string
|
||||
@@ -64,7 +64,7 @@ func getAuth(user, password, service, workstation string) (auth, bool) {
|
||||
return nil, false
|
||||
}
|
||||
domain_user := strings.SplitN(user, "\\", 2)
|
||||
return &NTLMAuth{
|
||||
return &ntlmAuth{
|
||||
Domain: domain_user[0],
|
||||
UserName: domain_user[1],
|
||||
Password: password,
|
||||
@@ -86,13 +86,13 @@ func utf16le(val string) []byte {
|
||||
return v
|
||||
}
|
||||
|
||||
func (auth *NTLMAuth) InitialBytes() ([]byte, error) {
|
||||
func (auth *ntlmAuth) InitialBytes() ([]byte, error) {
|
||||
domain_len := len(auth.Domain)
|
||||
workstation_len := len(auth.Workstation)
|
||||
msg := make([]byte, 40+domain_len+workstation_len)
|
||||
copy(msg, []byte("NTLMSSP\x00"))
|
||||
binary.LittleEndian.PutUint32(msg[8:], NEGOTIATE_MESSAGE)
|
||||
binary.LittleEndian.PutUint32(msg[12:], NEGOTIATE_FLAGS)
|
||||
binary.LittleEndian.PutUint32(msg[8:], _NEGOTIATE_MESSAGE)
|
||||
binary.LittleEndian.PutUint32(msg[12:], _NEGOTIATE_FLAGS)
|
||||
// Domain Name Fields
|
||||
binary.LittleEndian.PutUint16(msg[16:], uint16(domain_len))
|
||||
binary.LittleEndian.PutUint16(msg[18:], uint16(domain_len))
|
||||
@@ -198,11 +198,11 @@ func ntlmSessionResponse(clientNonce [8]byte, serverChallenge [8]byte, password
|
||||
return response(hash, passwordHash)
|
||||
}
|
||||
|
||||
func (auth *NTLMAuth) NextBytes(bytes []byte) ([]byte, error) {
|
||||
func (auth *ntlmAuth) NextBytes(bytes []byte) ([]byte, error) {
|
||||
if string(bytes[0:8]) != "NTLMSSP\x00" {
|
||||
return nil, errorNTLM
|
||||
}
|
||||
if binary.LittleEndian.Uint32(bytes[8:12]) != CHALLENGE_MESSAGE {
|
||||
if binary.LittleEndian.Uint32(bytes[8:12]) != _CHALLENGE_MESSAGE {
|
||||
return nil, errorNTLM
|
||||
}
|
||||
flags := binary.LittleEndian.Uint32(bytes[20:24])
|
||||
@@ -210,7 +210,7 @@ func (auth *NTLMAuth) NextBytes(bytes []byte) ([]byte, error) {
|
||||
copy(challenge[:], bytes[24:32])
|
||||
|
||||
var lm, nt []byte
|
||||
if (flags & NEGOTIATE_EXTENDED_SESSIONSECURITY) != 0 {
|
||||
if (flags & _NEGOTIATE_EXTENDED_SESSIONSECURITY) != 0 {
|
||||
nonce := clientChallenge()
|
||||
var lm_bytes [24]byte
|
||||
copy(lm_bytes[:8], nonce[:])
|
||||
@@ -235,7 +235,7 @@ func (auth *NTLMAuth) NextBytes(bytes []byte) ([]byte, error) {
|
||||
|
||||
msg := make([]byte, 88+lm_len+nt_len+domain_len+user_len+workstation_len)
|
||||
copy(msg, []byte("NTLMSSP\x00"))
|
||||
binary.LittleEndian.PutUint32(msg[8:], AUTHENTICATE_MESSAGE)
|
||||
binary.LittleEndian.PutUint32(msg[8:], _AUTHENTICATE_MESSAGE)
|
||||
// Lm Challenge Response Fields
|
||||
binary.LittleEndian.PutUint16(msg[12:], uint16(lm_len))
|
||||
binary.LittleEndian.PutUint16(msg[14:], uint16(lm_len))
|
||||
@@ -279,5 +279,5 @@ func (auth *NTLMAuth) NextBytes(bytes []byte) ([]byte, error) {
|
||||
return msg, nil
|
||||
}
|
||||
|
||||
func (auth *NTLMAuth) Free() {
|
||||
func (auth *ntlmAuth) Free() {
|
||||
}
|
||||
|
||||
45
vendor/github.com/denisenkom/go-mssqldb/rpc.go
generated
vendored
@@ -4,7 +4,7 @@ import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
type ProcId struct {
|
||||
type procId struct {
|
||||
id uint16
|
||||
name string
|
||||
}
|
||||
@@ -15,24 +15,13 @@ const (
|
||||
fDefaultValue = 2
|
||||
)
|
||||
|
||||
type Param struct {
|
||||
type param struct {
|
||||
Name string
|
||||
Flags uint8
|
||||
ti typeInfo
|
||||
buffer []byte
|
||||
}
|
||||
|
||||
func MakeProcId(name string) (res ProcId) {
|
||||
res.name = name
|
||||
if len(name) == 0 {
|
||||
panic("Proc name shouln't be empty")
|
||||
}
|
||||
if len(name) >= 0xffff {
|
||||
panic("Invalid length of procedure name, should be less than 0xffff")
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
const (
|
||||
fWithRecomp = 1
|
||||
fNoMetaData = 2
|
||||
@@ -40,24 +29,24 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
Sp_Cursor = ProcId{1, ""}
|
||||
Sp_CursorOpen = ProcId{2, ""}
|
||||
Sp_CursorPrepare = ProcId{3, ""}
|
||||
Sp_CursorExecute = ProcId{4, ""}
|
||||
Sp_CursorPrepExec = ProcId{5, ""}
|
||||
Sp_CursorUnprepare = ProcId{6, ""}
|
||||
Sp_CursorFetch = ProcId{7, ""}
|
||||
Sp_CursorOption = ProcId{8, ""}
|
||||
Sp_CursorClose = ProcId{9, ""}
|
||||
Sp_ExecuteSql = ProcId{10, ""}
|
||||
Sp_Prepare = ProcId{11, ""}
|
||||
Sp_PrepExec = ProcId{13, ""}
|
||||
Sp_PrepExecRpc = ProcId{14, ""}
|
||||
Sp_Unprepare = ProcId{15, ""}
|
||||
sp_Cursor = procId{1, ""}
|
||||
sp_CursorOpen = procId{2, ""}
|
||||
sp_CursorPrepare = procId{3, ""}
|
||||
sp_CursorExecute = procId{4, ""}
|
||||
sp_CursorPrepExec = procId{5, ""}
|
||||
sp_CursorUnprepare = procId{6, ""}
|
||||
sp_CursorFetch = procId{7, ""}
|
||||
sp_CursorOption = procId{8, ""}
|
||||
sp_CursorClose = procId{9, ""}
|
||||
sp_ExecuteSql = procId{10, ""}
|
||||
sp_Prepare = procId{11, ""}
|
||||
sp_PrepExec = procId{13, ""}
|
||||
sp_PrepExecRpc = procId{14, ""}
|
||||
sp_Unprepare = procId{15, ""}
|
||||
)
|
||||
|
||||
// http://msdn.microsoft.com/en-us/library/dd357576.aspx
|
||||
func sendRpc(buf *tdsBuffer, headers []headerStruct, proc ProcId, flags uint16, params []Param, resetSession bool) (err error) {
|
||||
func sendRpc(buf *tdsBuffer, headers []headerStruct, proc procId, flags uint16, params []param, resetSession bool) (err error) {
|
||||
buf.BeginPacket(packRPCRequest, resetSession)
|
||||
writeAllHeaders(buf, headers)
|
||||
if len(proc.name) == 0 {
|
||||
|
||||
51
vendor/github.com/denisenkom/go-mssqldb/tds.go
generated
vendored
@@ -50,12 +50,11 @@ func parseInstances(msg []byte) map[string]map[string]string {
|
||||
return results
|
||||
}
|
||||
|
||||
func getInstances(ctx context.Context, address string) (map[string]map[string]string, error) {
|
||||
func getInstances(ctx context.Context, d Dialer, address string) (map[string]map[string]string, error) {
|
||||
maxTime := 5 * time.Second
|
||||
dialer := &net.Dialer{
|
||||
Timeout: maxTime,
|
||||
}
|
||||
conn, err := dialer.DialContext(ctx, "udp", address+":1434")
|
||||
ctx, cancel := context.WithTimeout(ctx, maxTime)
|
||||
defer cancel()
|
||||
conn, err := d.DialContext(ctx, "udp", address+":1434")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -153,11 +152,11 @@ type columnStruct struct {
|
||||
ti typeInfo
|
||||
}
|
||||
|
||||
type KeySlice []uint8
|
||||
type keySlice []uint8
|
||||
|
||||
func (p KeySlice) Len() int { return len(p) }
|
||||
func (p KeySlice) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p KeySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
func (p keySlice) Len() int { return len(p) }
|
||||
func (p keySlice) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p keySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
// http://msdn.microsoft.com/en-us/library/dd357559.aspx
|
||||
func writePrelogin(w *tdsBuffer, fields map[uint8][]byte) error {
|
||||
@@ -165,7 +164,7 @@ func writePrelogin(w *tdsBuffer, fields map[uint8][]byte) error {
|
||||
|
||||
w.BeginPacket(packPrelogin, false)
|
||||
offset := uint16(5*len(fields) + 1)
|
||||
keys := make(KeySlice, 0, len(fields))
|
||||
keys := make(keySlice, 0, len(fields))
|
||||
for k, _ := range fields {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
@@ -993,10 +992,10 @@ func parseConnectParams(dsn string) (connectParams, error) {
|
||||
}
|
||||
|
||||
// https://msdn.microsoft.com/en-us/library/dd341108.aspx
|
||||
p.dial_timeout = 15 * time.Second
|
||||
p.conn_timeout = 30 * time.Second
|
||||
strconntimeout, ok := params["connection timeout"]
|
||||
if ok {
|
||||
//
|
||||
// Do not set a connection timeout. Use Context to manage such things.
|
||||
// Default to zero, but still allow it to be set.
|
||||
if strconntimeout, ok := params["connection timeout"]; ok {
|
||||
timeout, err := strconv.ParseUint(strconntimeout, 10, 64)
|
||||
if err != nil {
|
||||
f := "Invalid connection timeout '%v': %v"
|
||||
@@ -1004,8 +1003,8 @@ func parseConnectParams(dsn string) (connectParams, error) {
|
||||
}
|
||||
p.conn_timeout = time.Duration(timeout) * time.Second
|
||||
}
|
||||
strdialtimeout, ok := params["dial timeout"]
|
||||
if ok {
|
||||
p.dial_timeout = 15 * time.Second
|
||||
if strdialtimeout, ok := params["dial timeout"]; ok {
|
||||
timeout, err := strconv.ParseUint(strdialtimeout, 10, 64)
|
||||
if err != nil {
|
||||
f := "Invalid dial timeout '%v': %v"
|
||||
@@ -1017,7 +1016,6 @@ func parseConnectParams(dsn string) (connectParams, error) {
|
||||
// default keep alive should be 30 seconds according to spec:
|
||||
// https://msdn.microsoft.com/en-us/library/dd341108.aspx
|
||||
p.keepAlive = 30 * time.Second
|
||||
|
||||
if keepAlive, ok := params["keepalive"]; ok {
|
||||
timeout, err := strconv.ParseUint(keepAlive, 10, 64)
|
||||
if err != nil {
|
||||
@@ -1113,7 +1111,7 @@ type auth interface {
|
||||
// SQL Server AlwaysOn Availability Group Listeners are bound by DNS to a
|
||||
// list of IP addresses. So if there is more than one, try them all and
|
||||
// use the first one that allows a connection.
|
||||
func dialConnection(ctx context.Context, p connectParams) (conn net.Conn, err error) {
|
||||
func dialConnection(ctx context.Context, c *Connector, p connectParams) (conn net.Conn, err error) {
|
||||
var ips []net.IP
|
||||
ips, err = net.LookupIP(p.host)
|
||||
if err != nil {
|
||||
@@ -1124,9 +1122,9 @@ func dialConnection(ctx context.Context, p connectParams) (conn net.Conn, err er
|
||||
ips = []net.IP{ip}
|
||||
}
|
||||
if len(ips) == 1 {
|
||||
d := createDialer(&p)
|
||||
d := c.getDialer(&p)
|
||||
addr := net.JoinHostPort(ips[0].String(), strconv.Itoa(int(p.port)))
|
||||
conn, err = d.Dial(ctx, addr)
|
||||
conn, err = d.DialContext(ctx, "tcp", addr)
|
||||
|
||||
} else {
|
||||
//Try Dials in parallel to avoid waiting for timeouts.
|
||||
@@ -1135,9 +1133,9 @@ func dialConnection(ctx context.Context, p connectParams) (conn net.Conn, err er
|
||||
portStr := strconv.Itoa(int(p.port))
|
||||
for _, ip := range ips {
|
||||
go func(ip net.IP) {
|
||||
d := createDialer(&p)
|
||||
d := c.getDialer(&p)
|
||||
addr := net.JoinHostPort(ip.String(), portStr)
|
||||
conn, err := d.Dial(ctx, addr)
|
||||
conn, err := d.DialContext(ctx, "tcp", addr)
|
||||
if err == nil {
|
||||
connChan <- conn
|
||||
} else {
|
||||
@@ -1175,7 +1173,7 @@ func dialConnection(ctx context.Context, p connectParams) (conn net.Conn, err er
|
||||
return conn, err
|
||||
}
|
||||
|
||||
func connect(ctx context.Context, log optionalLogger, p connectParams) (res *tdsSession, err error) {
|
||||
func connect(ctx context.Context, c *Connector, log optionalLogger, p connectParams) (res *tdsSession, err error) {
|
||||
dialCtx := ctx
|
||||
if p.dial_timeout > 0 {
|
||||
var cancel func()
|
||||
@@ -1185,7 +1183,8 @@ func connect(ctx context.Context, log optionalLogger, p connectParams) (res *tds
|
||||
// if instance is specified use instance resolution service
|
||||
if p.instance != "" {
|
||||
p.instance = strings.ToUpper(p.instance)
|
||||
instances, err := getInstances(dialCtx, p.host)
|
||||
d := c.getDialer(&p)
|
||||
instances, err := getInstances(dialCtx, d, p.host)
|
||||
if err != nil {
|
||||
f := "Unable to get instances from Sql Server Browser on host %v: %v"
|
||||
return nil, fmt.Errorf(f, p.host, err.Error())
|
||||
@@ -1203,12 +1202,12 @@ func connect(ctx context.Context, log optionalLogger, p connectParams) (res *tds
|
||||
}
|
||||
|
||||
initiate_connection:
|
||||
conn, err := dialConnection(dialCtx, p)
|
||||
conn, err := dialConnection(dialCtx, c, p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
toconn := NewTimeoutConn(conn, p.conn_timeout)
|
||||
toconn := newTimeoutConn(conn, p.conn_timeout)
|
||||
|
||||
outbuf := newTdsBuffer(p.packetSize, toconn)
|
||||
sess := tdsSession{
|
||||
|
||||
32
vendor/github.com/denisenkom/go-mssqldb/token.go
generated
vendored
@@ -213,7 +213,7 @@ func processEnvChg(sess *tdsSession) {
|
||||
|
||||
// SQL Collation data should contain 5 bytes in length
|
||||
if collationSize != 5 {
|
||||
badStreamPanicf("Invalid SQL Collation size value returned from server: %s", collationSize)
|
||||
badStreamPanicf("Invalid SQL Collation size value returned from server: %d", collationSize)
|
||||
}
|
||||
|
||||
// 4 bytes, contains: LCID ColFlags Version
|
||||
@@ -385,11 +385,9 @@ func processEnvChg(sess *tdsSession) {
|
||||
}
|
||||
}
|
||||
|
||||
type returnStatus int32
|
||||
|
||||
// http://msdn.microsoft.com/en-us/library/dd358180.aspx
|
||||
func parseReturnStatus(r *tdsBuffer) returnStatus {
|
||||
return returnStatus(r.int32())
|
||||
func parseReturnStatus(r *tdsBuffer) ReturnStatus {
|
||||
return ReturnStatus(r.int32())
|
||||
}
|
||||
|
||||
func parseOrder(r *tdsBuffer) (res orderStruct) {
|
||||
@@ -640,7 +638,7 @@ func processSingleResponse(sess *tdsSession, ch chan tokenStruct, outs map[strin
|
||||
if len(nv.Name) > 0 {
|
||||
name := nv.Name[1:] // Remove the leading "@".
|
||||
if ov, has := outs[name]; has {
|
||||
err = scanIntoOut(nv.Value, ov)
|
||||
err = scanIntoOut(name, nv.Value, ov)
|
||||
if err != nil {
|
||||
fmt.Println("scan error", err)
|
||||
ch <- err
|
||||
@@ -653,28 +651,6 @@ func processSingleResponse(sess *tdsSession, ch chan tokenStruct, outs map[strin
|
||||
}
|
||||
}
|
||||
|
||||
func scanIntoOut(fromServer, scanInto interface{}) error {
|
||||
switch fs := fromServer.(type) {
|
||||
case int64:
|
||||
switch si := scanInto.(type) {
|
||||
case *int64:
|
||||
*si = fs
|
||||
default:
|
||||
return fmt.Errorf("unsupported scan into type %[1]T for server type %[2]T", scanInto, fromServer)
|
||||
}
|
||||
return nil
|
||||
case string:
|
||||
switch si := scanInto.(type) {
|
||||
case *string:
|
||||
*si = fs
|
||||
default:
|
||||
return fmt.Errorf("unsupported scan into type %[1]T for server type %[2]T", scanInto, fromServer)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unsupported type from server %[1]T=%[1]v", fromServer)
|
||||
}
|
||||
|
||||
type parseRespIter byte
|
||||
|
||||
const (
|
||||
|
||||
167
vendor/github.com/denisenkom/go-mssqldb/tvp_go19.go
generated
vendored
Normal file
@@ -0,0 +1,167 @@
|
||||
// +build go1.9
|
||||
|
||||
package mssql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrorEmptyTVPName = errors.New("TVPTypeName must not be empty")
|
||||
ErrorTVPTypeSlice = errors.New("TVPType must be slice type")
|
||||
ErrorTVPTypeSliceIsEmpty = errors.New("TVPType mustn't be null value")
|
||||
)
|
||||
|
||||
// TVPType is a driver type that allows passing Table Valued Parameters (TVP) to SQL Server.
|
||||
type TVPType struct {
|
||||
// TVPTypeName is the name of the table type; it must not be empty.
|
||||
TVPTypeName string
|
||||
// TVPScheme is the schema name of the table type.
|
||||
TVPScheme string
|
||||
// TVPValue holds the rows; it must be a non-nil slice of structs.
|
||||
TVPValue interface{}
|
||||
}
|
||||
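A sketch of passing a TVPType value as a parameter; the row struct, user-defined table type name, and stored procedure are hypothetical, and imports of context, database/sql, and the mssql package are assumed:

type locationRow struct {
	Name      string
	CostRate  int64
	Available bool
}

func insertLocations(ctx context.Context, db *sql.DB) error {
	tvp := mssql.TVPType{
		TVPTypeName: "LocationTableType", // user-defined table type on the server (assumed)
		TVPValue: []locationRow{
			{Name: "Earth", CostRate: 10, Available: true},
		},
	}
	_, err := db.ExecContext(ctx, "dbo.usp_InsertLocations", sql.Named("TVP", tvp))
	return err
}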
|
||||
func (tvp TVPType) check() error {
|
||||
if len(tvp.TVPTypeName) == 0 {
|
||||
return ErrorEmptyTVPName
|
||||
}
|
||||
valueOf := reflect.ValueOf(tvp.TVPValue)
|
||||
if valueOf.Kind() != reflect.Slice {
|
||||
return ErrorTVPTypeSlice
|
||||
}
|
||||
if valueOf.IsNil() {
|
||||
return ErrorTVPTypeSliceIsEmpty
|
||||
}
|
||||
if reflect.TypeOf(tvp.TVPValue).Elem().Kind() != reflect.Struct {
|
||||
return ErrorTVPTypeSlice
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tvp TVPType) encode() ([]byte, error) {
|
||||
columnStr, err := tvp.columnTypes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
preparedBuffer := make([]byte, 0, 20+(10*len(columnStr)))
|
||||
buf := bytes.NewBuffer(preparedBuffer)
|
||||
err = writeBVarChar(buf, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
writeBVarChar(buf, tvp.TVPScheme)
|
||||
writeBVarChar(buf, tvp.TVPTypeName)
|
||||
|
||||
binary.Write(buf, binary.LittleEndian, uint16(len(columnStr)))
|
||||
|
||||
for i, column := range columnStr {
|
||||
binary.Write(buf, binary.LittleEndian, uint32(column.UserType))
|
||||
binary.Write(buf, binary.LittleEndian, uint16(column.Flags))
|
||||
writeTypeInfo(buf, &columnStr[i].ti)
|
||||
writeBVarChar(buf, "")
|
||||
}
|
||||
buf.WriteByte(_TVP_END_TOKEN)
|
||||
conn := new(Conn)
|
||||
conn.sess = new(tdsSession)
|
||||
conn.sess.loginAck = loginAckStruct{TDSVersion: verTDS73}
|
||||
stmt := &Stmt{
|
||||
c: conn,
|
||||
}
|
||||
|
||||
val := reflect.ValueOf(tvp.TVPValue)
|
||||
for i := 0; i < val.Len(); i++ {
|
||||
refStr := reflect.ValueOf(val.Index(i).Interface())
|
||||
buf.WriteByte(_TVP_ROW_TOKEN)
|
||||
for j := 0; j < refStr.NumField(); j++ {
|
||||
field := refStr.Field(j)
|
||||
tvpVal := field.Interface()
|
||||
valOf := reflect.ValueOf(tvpVal)
|
||||
elemKind := field.Kind()
|
||||
if elemKind == reflect.Ptr && valOf.IsNil() {
|
||||
switch tvpVal.(type) {
|
||||
case *bool, *time.Time, *int8, *int16, *int32, *int64, *float32, *float64:
|
||||
binary.Write(buf, binary.LittleEndian, uint8(0))
|
||||
continue
|
||||
default:
|
||||
binary.Write(buf, binary.LittleEndian, uint64(_PLP_NULL))
|
||||
continue
|
||||
}
|
||||
}
|
||||
if elemKind == reflect.Slice && valOf.IsNil() {
|
||||
binary.Write(buf, binary.LittleEndian, uint64(_PLP_NULL))
|
||||
continue
|
||||
}
|
||||
|
||||
cval, err := convertInputParameter(tvpVal)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert tvp parameter row col: %s", err)
|
||||
}
|
||||
param, err := stmt.makeParam(cval)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to make tvp parameter row col: %s", err)
|
||||
}
|
||||
columnStr[j].ti.Writer(buf, param.ti, param.buffer)
|
||||
}
|
||||
}
|
||||
buf.WriteByte(_TVP_END_TOKEN)
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (tvp TVPType) columnTypes() ([]columnStruct, error) {
|
||||
val := reflect.ValueOf(tvp.TVPValue)
|
||||
var firstRow interface{}
|
||||
if val.Len() != 0 {
|
||||
firstRow = val.Index(0).Interface()
|
||||
} else {
|
||||
firstRow = reflect.New(reflect.TypeOf(tvp.TVPValue).Elem()).Elem().Interface()
|
||||
}
|
||||
|
||||
tvpRow := reflect.TypeOf(firstRow)
|
||||
columnCount := tvpRow.NumField()
|
||||
defaultValues := make([]interface{}, 0, columnCount)
|
||||
|
||||
for i := 0; i < columnCount; i++ {
|
||||
typeField := tvpRow.Field(i).Type
|
||||
if typeField.Kind() == reflect.Ptr {
|
||||
v := reflect.New(typeField.Elem())
|
||||
defaultValues = append(defaultValues, v.Interface())
|
||||
continue
|
||||
}
|
||||
defaultValues = append(defaultValues, reflect.Zero(typeField).Interface())
|
||||
}
|
||||
|
||||
conn := new(Conn)
|
||||
conn.sess = new(tdsSession)
|
||||
conn.sess.loginAck = loginAckStruct{TDSVersion: verTDS73}
|
||||
stmt := &Stmt{
|
||||
c: conn,
|
||||
}
|
||||
|
||||
columnConfiguration := make([]columnStruct, 0, columnCount)
|
||||
for index, val := range defaultValues {
|
||||
cval, err := convertInputParameter(val)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert tvp parameter row %d col %d: %s", index, val, err)
|
||||
}
|
||||
param, err := stmt.makeParam(cval)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
column := columnStruct{
|
||||
ti: param.ti,
|
||||
}
|
||||
switch param.ti.TypeId {
|
||||
case typeNVarChar, typeBigVarBin:
|
||||
column.ti.Size = 0
|
||||
}
|
||||
columnConfiguration = append(columnConfiguration, column)
|
||||
}
|
||||
|
||||
return columnConfiguration, nil
|
||||
}
|
||||
194
vendor/github.com/denisenkom/go-mssqldb/types.go
generated
vendored
@@ -62,6 +62,7 @@ const (
	typeNChar = 0xef
	typeXml = 0xf1
	typeUdt = 0xf0
	typeTvp = 0xf3

	// long length types
	typeText = 0x23
@@ -69,9 +70,17 @@ const (
	typeNText = 0x63
	typeVariant = 0x62
)
const PLP_NULL = 0xFFFFFFFFFFFFFFFF
const UNKNOWN_PLP_LEN = 0xFFFFFFFFFFFFFFFE
const PLP_TERMINATOR = 0x00000000
const _PLP_NULL = 0xFFFFFFFFFFFFFFFF
const _UNKNOWN_PLP_LEN = 0xFFFFFFFFFFFFFFFE
const _PLP_TERMINATOR = 0x00000000
const _TVP_NULL_TOKEN = 0xffff

// TVP COLUMN FLAGS
const _TVP_COLUMN_DEFAULT_FLAG = 0x200
const _TVP_END_TOKEN = 0x00
const _TVP_ROW_TOKEN = 0x01
const _TVP_ORDER_UNIQUE_TOKEN = 0x10
const _TVP_COLUMN_ORDERING_TOKEN = 0x11

// TYPE_INFO rule
// http://msdn.microsoft.com/en-us/library/dd358284.aspx
@@ -133,6 +142,7 @@ func readTypeInfo(r *tdsBuffer) (res typeInfo) {
	return
}

// https://msdn.microsoft.com/en-us/library/dd358284.aspx
func writeTypeInfo(w io.Writer, ti *typeInfo) (err error) {
	err = binary.Write(w, binary.LittleEndian, ti.TypeId)
	if err != nil {
@@ -142,6 +152,9 @@ func writeTypeInfo(w io.Writer, ti *typeInfo) (err error) {
	case typeNull, typeInt1, typeBit, typeInt2, typeInt4, typeDateTim4,
		typeFlt4, typeMoney, typeDateTime, typeFlt8, typeMoney4, typeInt8:
		// those are fixed length
		// https://msdn.microsoft.com/en-us/library/dd341171.aspx
		ti.Writer = writeFixedType
	case typeTvp:
		ti.Writer = writeFixedType
	default: // all others are VARLENTYPE
		err = writeVarLen(w, ti)
@@ -157,8 +170,10 @@ func writeFixedType(w io.Writer, ti typeInfo, buf []byte) (err error) {
	return
}

// https://msdn.microsoft.com/en-us/library/dd358341.aspx
func writeVarLen(w io.Writer, ti *typeInfo) (err error) {
	switch ti.TypeId {

	case typeDateN:
		ti.Writer = writeByteLenType
	case typeTimeN, typeDateTime2N, typeDateTimeOffsetN:
@@ -200,6 +215,7 @@ func writeVarLen(w io.Writer, ti *typeInfo) (err error) {
		ti.Writer = writeByteLenType
	case typeBigVarBin, typeBigVarChar, typeBigBinary, typeBigChar,
		typeNVarChar, typeNChar, typeXml, typeUdt:

		// short len types
		if ti.Size > 8000 || ti.Size == 0 {
			if err = binary.Write(w, binary.LittleEndian, uint16(0xffff)); err != nil {
@@ -245,6 +261,48 @@ func decodeDateTim4(buf []byte) time.Time {
		0, int(mins), 0, 0, time.UTC)
}

func encodeDateTim4(val time.Time) (buf []byte) {
	buf = make([]byte, 4)

	ref := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC)
	dur := val.Sub(ref)
	days := dur / (24 * time.Hour)
	mins := val.Hour()*60 + val.Minute()
	if days < 0 {
		days = 0
		mins = 0
	}

	binary.LittleEndian.PutUint16(buf[:2], uint16(days))
	binary.LittleEndian.PutUint16(buf[2:], uint16(mins))
	return
}

// encodes datetime value
// type identifier is typeDateTimeN
func encodeDateTime(t time.Time) (res []byte) {
	// base date in days since Jan 1st 1900
	basedays := gregorianDays(1900, 1)
	// days since Jan 1st 1900 (same TZ as t)
	days := gregorianDays(t.Year(), t.YearDay()) - basedays
	tm := 300*(t.Second()+t.Minute()*60+t.Hour()*60*60) + t.Nanosecond()*300/1e9
	// minimum and maximum possible
	mindays := gregorianDays(1753, 1) - basedays
	maxdays := gregorianDays(9999, 365) - basedays
	if days < mindays {
		days = mindays
		tm = 0
	}
	if days > maxdays {
		days = maxdays
		tm = (23*60*60+59*60+59)*300 + 299
	}
	res = make([]byte, 8)
	binary.LittleEndian.PutUint32(res[0:4], uint32(days))
	binary.LittleEndian.PutUint32(res[4:8], uint32(tm))
	return
}

func decodeDateTime(buf []byte) time.Time {
	days := int32(binary.LittleEndian.Uint32(buf))
	tm := binary.LittleEndian.Uint32(buf[4:])
@@ -320,7 +378,7 @@ func readByteLenType(ti *typeInfo, r *tdsBuffer) interface{} {
		case 8:
			return int64(binary.LittleEndian.Uint64(buf))
		default:
			badStreamPanicf("Invalid size for INTNTYPE")
			badStreamPanicf("Invalid size for INTNTYPE: %d", len(buf))
		}
	case typeDecimal, typeNumeric, typeDecimalN, typeNumericN:
		return decodeDecimal(ti.Prec, ti.Scale, buf)
@@ -379,7 +437,7 @@ func writeByteLenType(w io.Writer, ti typeInfo, buf []byte) (err error) {
	if ti.Size > 0xff {
		panic("Invalid size for BYTELEN_TYPE")
	}
	err = binary.Write(w, binary.LittleEndian, uint8(ti.Size))
	err = binary.Write(w, binary.LittleEndian, uint8(len(buf)))
	if err != nil {
		return
	}
@@ -601,10 +659,10 @@ func readPLPType(ti *typeInfo, r *tdsBuffer) interface{} {
	size := r.uint64()
	var buf *bytes.Buffer
	switch size {
	case PLP_NULL:
	case _PLP_NULL:
		// null
		return nil
	case UNKNOWN_PLP_LEN:
	case _UNKNOWN_PLP_LEN:
		// size unknown
		buf = bytes.NewBuffer(make([]byte, 0, 1000))
	default:
@@ -635,13 +693,13 @@ func readPLPType(ti *typeInfo, r *tdsBuffer) interface{} {
}

func writePLPType(w io.Writer, ti typeInfo, buf []byte) (err error) {
	if err = binary.Write(w, binary.LittleEndian, uint64(UNKNOWN_PLP_LEN)); err != nil {
	if err = binary.Write(w, binary.LittleEndian, uint64(_UNKNOWN_PLP_LEN)); err != nil {
		return
	}
	for {
		chunksize := uint32(len(buf))
		if chunksize == 0 {
			err = binary.Write(w, binary.LittleEndian, uint32(PLP_TERMINATOR))
			err = binary.Write(w, binary.LittleEndian, uint32(_PLP_TERMINATOR))
			return
		}
		if err = binary.Write(w, binary.LittleEndian, chunksize); err != nil {
@@ -805,6 +863,15 @@ func decodeDate(buf []byte) time.Time {
	return time.Date(1, 1, 1+decodeDateInt(buf), 0, 0, 0, 0, time.UTC)
}

func encodeDate(val time.Time) (buf []byte) {
	days, _, _ := dateTime2(val)
	buf = make([]byte, 3)
	buf[0] = byte(days)
	buf[1] = byte(days >> 8)
	buf[2] = byte(days >> 16)
	return
}

func decodeTimeInt(scale uint8, buf []byte) (sec int, ns int) {
	var acc uint64 = 0
	for i := len(buf) - 1; i >= 0; i-- {
@@ -820,11 +887,41 @@ func decodeTimeInt(scale uint8, buf []byte) (sec int, ns int) {
	return
}

// calculate size of time field in bytes
func calcTimeSize(scale int) int {
	if scale <= 2 {
		return 3
	} else if scale <= 4 {
		return 4
	} else {
		return 5
	}
}

// writes time value into a field buffer
// buffer should be at least calcTimeSize long
func encodeTimeInt(seconds, ns, scale int, buf []byte) {
	ns_total := int64(seconds)*1000*1000*1000 + int64(ns)
	t := ns_total / int64(math.Pow10(int(scale)*-1)*1e9)
	buf[0] = byte(t)
	buf[1] = byte(t >> 8)
	buf[2] = byte(t >> 16)
	buf[3] = byte(t >> 24)
	buf[4] = byte(t >> 32)
}

func decodeTime(scale uint8, buf []byte) time.Time {
	sec, ns := decodeTimeInt(scale, buf)
	return time.Date(1, 1, 1, 0, 0, sec, ns, time.UTC)
}

func encodeTime(hour, minute, second, ns, scale int) (buf []byte) {
	seconds := hour*3600 + minute*60 + second
	buf = make([]byte, calcTimeSize(scale))
	encodeTimeInt(seconds, ns, scale, buf)
	return
}

func decodeDateTime2(scale uint8, buf []byte) time.Time {
	timesize := len(buf) - 3
	sec, ns := decodeTimeInt(scale, buf[:timesize])
@@ -832,6 +929,17 @@ func decodeDateTime2(scale uint8, buf []byte) time.Time {
	return time.Date(1, 1, 1+days, 0, 0, sec, ns, time.UTC)
}

func encodeDateTime2(val time.Time, scale int) (buf []byte) {
	days, seconds, ns := dateTime2(val)
	timesize := calcTimeSize(scale)
	buf = make([]byte, 3+timesize)
	encodeTimeInt(seconds, ns, scale, buf)
	buf[timesize] = byte(days)
	buf[timesize+1] = byte(days >> 8)
	buf[timesize+2] = byte(days >> 16)
	return
}

func decodeDateTimeOffset(scale uint8, buf []byte) time.Time {
	timesize := len(buf) - 3 - 2
	sec, ns := decodeTimeInt(scale, buf[:timesize])
@@ -843,24 +951,43 @@ func decodeDateTimeOffset(scale uint8, buf []byte) time.Time {
		time.FixedZone("", offset*60))
}

func divFloor(x int64, y int64) int64 {
	q := x / y
	r := x % y
	if r != 0 && ((r < 0) != (y < 0)) {
		q--
	}
	return q
func encodeDateTimeOffset(val time.Time, scale int) (buf []byte) {
	timesize := calcTimeSize(scale)
	buf = make([]byte, timesize+2+3)
	days, seconds, ns := dateTime2(val.In(time.UTC))
	encodeTimeInt(seconds, ns, scale, buf)
	buf[timesize] = byte(days)
	buf[timesize+1] = byte(days >> 8)
	buf[timesize+2] = byte(days >> 16)
	_, offset := val.Zone()
	offset /= 60
	buf[timesize+3] = byte(offset)
	buf[timesize+4] = byte(offset >> 8)
	return
}

func dateTime2(t time.Time) (days int32, ns int64) {
	// number of days since Jan 1 1970 UTC
	days64 := divFloor(t.Unix(), 24*60*60)
	// number of days since Jan 1 1 UTC
	days = int32(days64) + 1969*365 + 1969/4 - 1969/100 + 1969/400
	// number of seconds within day
	secs := t.Unix() - days64*24*60*60
	// number of nanoseconds within day
	ns = secs*1e9 + int64(t.Nanosecond())
// returns days since Jan 1st 0001 in Gregorian calendar
func gregorianDays(year, yearday int) int {
	year0 := year - 1
	return year0*365 + year0/4 - year0/100 + year0/400 + yearday - 1
}

func dateTime2(t time.Time) (days int, seconds int, ns int) {
	// days since Jan 1 1 (in same TZ as t)
	days = gregorianDays(t.Year(), t.YearDay())
	seconds = t.Second() + t.Minute()*60 + t.Hour()*60*60
	ns = t.Nanosecond()
	if days < 0 {
		days = 0
		seconds = 0
		ns = 0
	}
	max := gregorianDays(9999, 365)
	if days > max {
		days = max
		seconds = 59 + 59*60 + 23*60*60
		ns = 999999900
	}
	return
}
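As a standalone sanity check of the day-count formula in gregorianDays above (the snippet below re-states the helper locally; it is not part of the vendored file): gregorianDays(1900, 1) should give 693595, the number of days between 0001-01-01 and 1900-01-01 in the proleptic Gregorian calendar, which is the base that encodeDateTime subtracts as basedays.

```go
package main

import "fmt"

// gregorianDays mirrors the vendored helper: days since Jan 1st 0001.
func gregorianDays(year, yearday int) int {
	year0 := year - 1
	return year0*365 + year0/4 - year0/100 + year0/400 + yearday - 1
}

func main() {
	// 1899*365 + 474 - 18 + 4 = 693595 days from 0001-01-01 to 1900-01-01.
	fmt.Println(gregorianDays(1900, 1)) // 693595
	// SQL Server's datetime range starts at 1753-01-01, 53690 days before the 1900 base.
	fmt.Println(gregorianDays(1753, 1) - gregorianDays(1900, 1)) // -53690
}
```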

@@ -989,7 +1116,7 @@ func makeGoLangScanType(ti typeInfo) reflect.Type {
	case typeVariant:
		return reflect.TypeOf(nil)
	default:
		panic(fmt.Sprintf("not implemented makeDecl for type %d", ti.TypeId))
		panic(fmt.Sprintf("not implemented makeGoLangScanType for type %d", ti.TypeId))
	}
}

@@ -1001,6 +1128,8 @@ func makeDecl(ti typeInfo) string {
		return "nvarchar(1)"
	case typeInt1:
		return "tinyint"
	case typeBigBinary:
		return fmt.Sprintf("binary(%d)", ti.Size)
	case typeInt2:
		return "smallint"
	case typeInt4:
@@ -1089,6 +1218,8 @@ func makeDecl(ti typeInfo) string {
		default:
			panic("invalid size of DATETIMNTYPE")
		}
	case typeTimeN:
		return "time"
	case typeDateTime2N:
		return fmt.Sprintf("datetime2(%d)", ti.Scale)
	case typeDateTimeOffsetN:
@@ -1101,6 +1232,11 @@ func makeDecl(ti typeInfo) string {
		return ti.UdtInfo.TypeName
	case typeGuid:
		return "uniqueidentifier"
	case typeTvp:
		if ti.UdtInfo.SchemaName != "" {
			return fmt.Sprintf("%s.%s READONLY", ti.UdtInfo.SchemaName, ti.UdtInfo.TypeName)
		}
		return fmt.Sprintf("%s READONLY", ti.UdtInfo.TypeName)
	default:
		panic(fmt.Sprintf("not implemented makeDecl for type %#x", ti.TypeId))
	}
@@ -1209,7 +1345,7 @@ func makeGoLangTypeName(ti typeInfo) string {
	case typeBigBinary:
		return "BINARY"
	default:
		panic(fmt.Sprintf("not implemented makeDecl for type %d", ti.TypeId))
		panic(fmt.Sprintf("not implemented makeGoLangTypeName for type %d", ti.TypeId))
	}
}

@@ -1332,7 +1468,7 @@ func makeGoLangTypeLength(ti typeInfo) (int64, bool) {
	case typeBigBinary:
		return 0, false
	default:
		panic(fmt.Sprintf("not implemented makeDecl for type %d", ti.TypeId))
		panic(fmt.Sprintf("not implemented makeGoLangTypeLength for type %d", ti.TypeId))
	}
}

@@ -1443,6 +1579,6 @@ func makeGoLangTypePrecisionScale(ti typeInfo) (int64, int64, bool) {
	case typeBigBinary:
		return 0, 0, false
	default:
		panic(fmt.Sprintf("not implemented makeDecl for type %d", ti.TypeId))
		panic(fmt.Sprintf("not implemented makeGoLangTypePrecisionScale for type %d", ti.TypeId))
	}
}
43
vendor/github.com/facebookgo/inject/license
generated
vendored
@@ -1,30 +1,21 @@
BSD License
MIT License

For inject software
Copyright (c) 2013-present, Facebook, Inc.

Copyright (c) 2015, Facebook, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

* Neither the name Facebook nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
33
vendor/github.com/facebookgo/inject/patents
generated
vendored
@@ -1,33 +0,0 @@
Additional Grant of Patent Rights Version 2

"Software" means the inject software distributed by Facebook, Inc.

Facebook, Inc. ("Facebook") hereby grants to each recipient of the Software
("you") a perpetual, worldwide, royalty-free, non-exclusive, irrevocable
(subject to the termination provision below) license under any Necessary
Claims, to make, have made, use, sell, offer to sell, import, and otherwise
transfer the Software. For avoidance of doubt, no license is granted under
Facebook’s rights in any patent claims that are infringed by (i) modifications
to the Software made by you or any third party or (ii) the Software in
combination with any software or other technology.

The license granted hereunder will terminate, automatically and without notice,
if you (or any of your subsidiaries, corporate affiliates or agents) initiate
directly or indirectly, or take a direct financial interest in, any Patent
Assertion: (i) against Facebook or any of its subsidiaries or corporate
affiliates, (ii) against any party if such Patent Assertion arises in whole or
in part from any software, technology, product or service of Facebook or any of
its subsidiaries or corporate affiliates, or (iii) against any party relating
to the Software. Notwithstanding the foregoing, if Facebook or any of its
subsidiaries or corporate affiliates files a lawsuit alleging patent
infringement against you in the first instance, and you respond by filing a
patent infringement counterclaim in that lawsuit against that party that is
unrelated to the Software, the license granted hereunder will not terminate
under section (i) of this paragraph due to such counterclaim.

A "Necessary Claim" is a claim of a patent owned by Facebook that is
necessarily infringed by the Software standing alone.

A "Patent Assertion" is any lawsuit or other action alleging direct, indirect,
or contributory infringement or inducement to infringe any patent, including a
cross-claim or counterclaim.
4
vendor/github.com/facebookgo/inject/readme.md
generated
vendored
Normal file
@@ -0,0 +1,4 @@
inject [](https://travis-ci.org/facebookgo/inject)
======

Documentation: https://godoc.org/github.com/facebookgo/inject
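For context, this is the reflection-based dependency injection package Grafana's backend is wired with. A minimal, self-contained sketch of its Populate style follows; the struct and field names are illustrative, not taken from this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/facebookgo/inject"
)

type Cache struct{}

type HTTPServer struct {
	// Fields tagged `inject:""` are filled in from the object graph.
	Cache *Cache `inject:""`
}

func main() {
	var s HTTPServer
	var c Cache
	// Populate builds a graph from the given objects and wires the tagged fields.
	if err := inject.Populate(&s, &c); err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.Cache == &c) // true
}
```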
1
vendor/github.com/facebookgo/structtag/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
/go.structtag.test
10
vendor/github.com/facebookgo/structtag/readme.md
generated
vendored
Normal file
@@ -0,0 +1,10 @@
structtag [](https://travis-ci.org/facebookgo/structtag)
=========

This is taken from the Go standard library but modified to return a boolean
indicating if a struct tag was found or not to allow differentiating an empty
struct tag from a non existing struct tag.

License: http://golang.org/LICENSE

Documentation: https://godoc.org/github.com/facebookgo/structtag
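A quick illustration of the distinction this package makes. The vendored package's own function names are not shown in this diff, so the sketch below uses the standard library's equivalent, reflect.StructTag.Lookup, which also reports whether a tag key was present.

```go
package main

import (
	"fmt"
	"reflect"
)

type example struct {
	A int `json:""` // tag present but empty
	B int // no tag at all
}

func main() {
	t := reflect.TypeOf(example{})
	for _, name := range []string{"A", "B"} {
		f, _ := t.FieldByName(name)
		// found distinguishes an empty tag value from a missing tag.
		val, found := f.Tag.Lookup("json")
		fmt.Printf("%s: value=%q found=%v\n", name, val, found)
	}
}
```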
27
vendor/github.com/fatih/color/Gopkg.lock
generated
vendored
Normal file
@@ -0,0 +1,27 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
  name = "github.com/mattn/go-colorable"
  packages = ["."]
  revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072"
  version = "v0.0.9"

[[projects]]
  name = "github.com/mattn/go-isatty"
  packages = ["."]
  revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39"
  version = "v0.0.3"

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = ["unix"]
  revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "e8a50671c3cb93ea935bf210b1cd20702876b9d9226129be581ef646d1565cdc"
  solver-name = "gps-cdcl"
  solver-version = 1
30
vendor/github.com/fatih/color/Gopkg.toml
generated
vendored
Normal file
@@ -0,0 +1,30 @@

# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[constraint]]
#   name = "github.com/user/project2"
#   branch = "dev"
#   source = "github.com/myfork/project2"
#
# [[override]]
#  name = "github.com/x/y"
#  version = "2.4.0"


[[constraint]]
  name = "github.com/mattn/go-colorable"
  version = "0.0.9"

[[constraint]]
  name = "github.com/mattn/go-isatty"
  version = "0.0.3"
179
vendor/github.com/fatih/color/README.md
generated
vendored
Normal file
@@ -0,0 +1,179 @@
# Color [](https://godoc.org/github.com/fatih/color) [](https://travis-ci.org/fatih/color)

Color lets you use colorized outputs in terms of [ANSI Escape
Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
has support for Windows too! The API can be used in several ways, pick one that
suits you.

## Install

```bash
go get github.com/fatih/color
```

Note that the `vendor` folder is here for stability. Remove the folder if you
already have the dependencies in your GOPATH.

## Examples

### Standard colors

```go
// Print with default helper functions
color.Cyan("Prints text in cyan.")

// A newline will be appended automatically
color.Blue("Prints %s in blue.", "text")

// These are using the default foreground colors
color.Red("We have red")
color.Magenta("And many others ..")

```

### Mix and reuse colors

```go
// Create a new color object
c := color.New(color.FgCyan).Add(color.Underline)
c.Println("Prints cyan text with an underline.")

// Or just add them to New()
d := color.New(color.FgCyan, color.Bold)
d.Printf("This prints bold cyan %s\n", "too!.")

// Mix up foreground and background colors, create new mixes!
red := color.New(color.FgRed)

boldRed := red.Add(color.Bold)
boldRed.Println("This will print text in bold red.")

whiteBackground := red.Add(color.BgWhite)
whiteBackground.Println("Red text with white background.")
```

### Use your own output (io.Writer)

```go
// Use your own io.Writer output
color.New(color.FgBlue).Fprintln(myWriter, "blue color!")

blue := color.New(color.FgBlue)
blue.Fprint(writer, "This will print text in blue.")
```

### Custom print functions (PrintFunc)

```go
// Create a custom print function for convenience
red := color.New(color.FgRed).PrintfFunc()
red("Warning")
red("Error: %s", err)

// Mix up multiple attributes
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
notice("Don't forget this...")
```

### Custom fprint functions (FprintFunc)

```go
blue := color.New(FgBlue).FprintfFunc()
blue(myWriter, "important notice: %s", stars)

// Mix up with multiple attributes
success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
success(myWriter, "Don't forget this...")
```

### Insert into noncolor strings (SprintFunc)

```go
// Create SprintXxx functions to mix strings with other non-colorized strings:
yellow := color.New(color.FgYellow).SprintFunc()
red := color.New(color.FgRed).SprintFunc()
fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))

info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
fmt.Printf("This %s rocks!\n", info("package"))

// Use helper functions
fmt.Println("This", color.RedString("warning"), "should be not neglected.")
fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.")

// Windows supported too! Just don't forget to change the output to color.Output
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
```

### Plug into existing code

```go
// Use handy standard colors
color.Set(color.FgYellow)

fmt.Println("Existing text will now be in yellow")
fmt.Printf("This one %s\n", "too")

color.Unset() // Don't forget to unset

// You can mix up parameters
color.Set(color.FgMagenta, color.Bold)
defer color.Unset() // Use it in your function

fmt.Println("All text will now be bold magenta.")
```

### Disable/Enable color

There might be a case where you want to explicitly disable/enable color output. the
`go-isatty` package will automatically disable color output for non-tty output streams
(for example if the output were piped directly to `less`)

`Color` has support to disable/enable colors both globally and for single color
definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You
can easily disable the color output with:

```go

var flagNoColor = flag.Bool("no-color", false, "Disable color output")

if *flagNoColor {
	color.NoColor = true // disables colorized output
}
```

It also has support for single color definitions (local). You can
disable/enable color output on the fly:

```go
c := color.New(color.FgCyan)
c.Println("Prints cyan text")

c.DisableColor()
c.Println("This is printed without any color")

c.EnableColor()
c.Println("This prints again cyan...")
```

## Todo

* Save/Return previous values
* Evaluate fmt.Formatter interface

## Credits

* [Fatih Arslan](https://github.com/fatih)
* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable)

## License

The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details
1
vendor/github.com/go-macaron/binding/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
.idea
20
vendor/github.com/go-macaron/binding/README.md
generated
vendored
Normal file
@@ -0,0 +1,20 @@
# binding [](https://travis-ci.org/go-macaron/binding) [](https://sourcegraph.com/github.com/go-macaron/binding?badge)

Middleware binding provides request data binding and validation for [Macaron](https://github.com/go-macaron/macaron).

### Installation

	go get github.com/go-macaron/binding

## Getting Help

- [API Reference](https://gowalker.org/github.com/go-macaron/binding)
- [Documentation](http://go-macaron.com/docs/middlewares/binding)

## Credits

This package is a modified version of [martini-contrib/binding](https://github.com/martini-contrib/binding).

## License

This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text.
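A minimal sketch of the middleware in use. The route, handler, and ContactForm struct below are illustrative; binding.Bind is the entry point the package documents for mapping a request body onto a struct and validating it.

```go
package main

import (
	"github.com/go-macaron/binding"
	"gopkg.in/macaron.v1"
)

// ContactForm is an illustrative form; `binding:"Required"` triggers validation.
type ContactForm struct {
	Name    string `form:"name" binding:"Required"`
	Message string `form:"message"`
}

func main() {
	m := macaron.Classic()
	// binding.Bind deserializes the request into ContactForm and injects it into the handler.
	m.Post("/contact", binding.Bind(ContactForm{}), func(form ContactForm) string {
		return "Hello " + form.Name
	})
	m.Run()
}
```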
20
vendor/github.com/go-macaron/gzip/README.md
generated
vendored
Normal file
@@ -0,0 +1,20 @@
# gzip [](https://travis-ci.org/go-macaron/gzip) [](http://gocover.io/github.com/go-macaron/gzip)

Middleware gzip provides compress to responses for [Macaron](https://github.com/go-macaron/macaron).

### Installation

	go get github.com/go-macaron/gzip

## Getting Help

- [API Reference](https://gowalker.org/github.com/go-macaron/gzip)
- [Documentation](http://go-macaron.com/docs/middlewares/gzip)

## Credits

This package is a modified version of [martini-contrib/gzip](https://github.com/martini-contrib/gzip).

## License

This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text.
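A minimal sketch of wiring the middleware into a Macaron app, assuming the package's Gziper handler; the route and response text are illustrative.

```go
package main

import (
	"github.com/go-macaron/gzip"
	"gopkg.in/macaron.v1"
)

func main() {
	m := macaron.Classic()
	// Gziper compresses responses for clients that advertise gzip support.
	m.Use(gzip.Gziper())
	m.Get("/", func() string {
		return "compressed when the client sends Accept-Encoding: gzip"
	})
	m.Run()
}
```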
11
vendor/github.com/go-macaron/inject/README.md
generated
vendored
Normal file
@@ -0,0 +1,11 @@
# inject [](https://travis-ci.org/go-macaron/inject) [](http://gocover.io/github.com/go-macaron/inject)

Package inject provides utilities for mapping and injecting dependencies in various ways.

**This a modified version of [codegangsta/inject](https://github.com/codegangsta/inject) for special purpose of Macaron**

**Please use the original version if you need dependency injection feature**

## License

This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text.
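A minimal sketch of the injector on its own, assuming the New, Map and Invoke calls of the codegangsta-style API this fork keeps; the mapped value and handler are illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-macaron/inject"
)

func main() {
	inj := inject.New()
	inj.Map("a mapped string") // register a value, keyed by its type

	// Invoke resolves the function's arguments from the mapped values.
	if _, err := inj.Invoke(func(s string) {
		fmt.Println(s)
	}); err != nil {
		log.Fatal(err)
	}
}
```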
2
vendor/github.com/go-macaron/session/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,2 @@
ledis/tmp.db
nodb/tmp.db
Some files were not shown because too many files have changed in this diff