diff --git a/.floo b/.floo
new file mode 100644
index 00000000000..1c2038f98cc
--- /dev/null
+++ b/.floo
@@ -0,0 +1,3 @@
+{
+ "url": "https://floobits.com/raintank/grafana"
+}
diff --git a/.flooignore b/.flooignore
new file mode 100644
index 00000000000..43cddf93bdf
--- /dev/null
+++ b/.flooignore
@@ -0,0 +1,12 @@
+#*
+*.o
+*.pyc
+*.pyo
+*~
+extern/
+node_modules/
+tmp/
+data/
+vendor/
+public_gen/
+dist/
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index b7d73592ef4..f4b0efdf14f 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,20 +1,17 @@
-Thank you! For helping us make Grafana even better.
+* **I'm submitting a ...**
+- [ ] Bug report
+- [ ] Feature request
+- [ ] Question / Support request: **Please do not** open a github issue. [Support Options](http://grafana.org/support/)
-To help us respond to your issues faster, please make sure to add as much information as possible.
-
-If this issue is about a plugin, please open the issue in that repository.
-
-Start your issues title with [Feature Request] / [Bug] / [Question] or no tag if your unsure. Also, please be aware that GitHub now supports uploading of screenshots; look at the bottom of this input field.
-
-Please include some basic information:
-- What grafana version are you using?
+Please include this information:
+- What Grafana version are you using?
- What datasource are you using?
- What OS are you running grafana on?
- What did you do?
- What was the expected result?
-- What happenend instead?
+- What happened instead?
-If you question/bug relates to a metric query / unexpected data visualization, please include:
+**IMPORTANT** If it relates to metric data viz:
- An image or text representation of your metric query
-- The raw query and response from your data source (check this in chrome dev tools network tab)
+- The raw query and response for the network request (check this in chrome dev tools network tab, here you can see metric requests and other requests, please include the request body and request response)
diff --git a/.jscs.json b/.jscs.json
index dcf694dcc63..8fdad332de5 100644
--- a/.jscs.json
+++ b/.jscs.json
@@ -10,4 +10,4 @@
"disallowSpacesInsideArrayBrackets": true,
"disallowSpacesInsideParentheses": true,
"validateIndentation": 2
-}
\ No newline at end of file
+}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 930a91af1da..368a8da5b34 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,73 @@
-# 3.0.0 stable (unreleased)
+# 4.0-pre (unreleased)
+### Enhancements
+* **Login**: Adds option to disable username/password logins, closes [#4674](https://github.com/grafana/grafana/issues/4674)
+* **SingleStat**: Add series name as option in singlestat panel, closes [#4740](https://github.com/grafana/grafana/issues/4740)
+* **Localization**: Week start day now dependent on browser locale setting, closes [#3003](https://github.com/grafana/grafana/issues/3003)
+* **Templating**: Update panel repeats for variables that change on time refresh, closes [#5021](https://github.com/grafana/grafana/issues/5021)
+
+# 3.1.0 stable (unreleased)
+
+### Bugfixes
+* **User Alert Notices**: Backend error alert popups did not show properly, fixes [#5435](https://github.com/grafana/grafana/issues/5435)
+
+# 3.1.0-beta1 (2016-06-23)
+
+### Enhancements
+* **Dashboard Export/Import**: Dashboard export now templatizes data sources and constant variables, users pick these on import, closes [#5084](https://github.com/grafana/grafana/issues/5084)
+* **Dashboard Url**: Time range changes updates url, closes [#458](https://github.com/grafana/grafana/issues/458)
+* **Dashboard Url**: Template variable change updates url, closes [#5002](https://github.com/grafana/grafana/issues/5002)
+* **Singlestat**: Add support for range to text mappings, closes [#1319](https://github.com/grafana/grafana/issues/1319)
+* **Graph**: Adds sort order options for graph tooltip, closes [#1189](https://github.com/grafana/grafana/issues/1189)
+* **Theme**: Add default theme to config file [#5011](https://github.com/grafana/grafana/pull/5011)
+* **Page Footer**: Added page footer with links to docs, shows Grafana version and info if new version is available, closes [#4889](https://github.com/grafana/grafana/pull/4889)
+* **InfluxDB**: Add spread function, closes [#5211](https://github.com/grafana/grafana/issues/5211)
+* **Scripts**: Use restart instead of start for deb package script, closes [#5282](https://github.com/grafana/grafana/pull/5282)
+* **Logging**: Moved to structured logging lib, and moved to component specific level filters via config file, closes [#4590](https://github.com/grafana/grafana/issues/4590)
+* **OpenTSDB**: Support nested template variables in tag_values function, closes [#4398](https://github.com/grafana/grafana/issues/4398)
+* **Datasource**: Pending data source requests are cancelled before new ones are issued (Graphite & Prometheus), closes [#5321](https://github.com/grafana/grafana/issues/5321)
+
+### Breaking changes
+* **Logging**: Changed default logging output format (now structured into message, and key value pairs, with logger key acting as component). You can also now change in config to JSON log output.
+* **Graphite**: The Graph panel no longer has a Graphite PNG option. closes [#5367](https://github.com/grafana/grafana/issues/5367)
+
+### Bug fixes
+* **PNG rendering**: Fixed phantomjs rendering and y-axis label rotation. fixes [#5220](https://github.com/grafana/grafana/issues/5220)
+* **CLI**: The cli tool now supports reading plugin.json from dist/plugin.json. fixes [#5410](https://github.com/grafana/grafana/issues/5410)
+
+# 3.0.4 Patch release (2016-05-25)
+* **Panel**: Fixed blank dashboard issue when switching to other dashboard while in fullscreen edit mode, fixes [#5163](https://github.com/grafana/grafana/pull/5163)
+* **Templating**: Fixed issue with nested multi select variables and cascading and updating child variable selection state, fixes [#4861](https://github.com/grafana/grafana/pull/4861)
+* **Templating**: Fixed issue with using templated data source in another template variable query, fixes [#5165](https://github.com/grafana/grafana/pull/5165)
+* **Singlestat gauge**: Fixed issue with gauge render position, fixes [#5143](https://github.com/grafana/grafana/pull/5143)
+* **Home dashboard**: Fixes broken home dashboard api, fixes [#5167](https://github.com/grafana/grafana/issues/5167)
+
+# 3.0.3 Patch release (2016-05-23)
+* **Annotations**: Annotations can now use a template variable as data source, closes [#5054](https://github.com/grafana/grafana/issues/5054)
+* **Time picker**: Fixed issue timepicker and UTC when reading time from URL, fixes [#5078](https://github.com/grafana/grafana/issues/5078)
+* **CloudWatch**: Support for Multiple Account by AssumeRole, closes [#3522](https://github.com/grafana/grafana/issues/3522)
+* **Singlestat**: Fixed alignment and minimum height issue, fixes [#5113](https://github.com/grafana/grafana/issues/5113), fixes [#4679](https://github.com/grafana/grafana/issues/4679)
+* **Share modal**: Fixed link when using grafana under dashboard sub url, fixes [#5109](https://github.com/grafana/grafana/issues/5109)
+* **Prometheus**: Fixed bug in query editor that caused it not to load when reloading page, fixes [#5107](https://github.com/grafana/grafana/issues/5107)
+* **Elasticsearch**: Fixed bug when template variable query returns numeric values, fixes [#5097](https://github.com/grafana/grafana/issues/5097), fixes [#5088](https://github.com/grafana/grafana/issues/5088)
+* **Logging**: Fixed issue with reading logging level value, fixes [#5079](https://github.com/grafana/grafana/issues/5079)
+* **Timepicker**: Fixed issue with timepicker and UTC when reading time from URL, fixes [#5078](https://github.com/grafana/grafana/issues/5078)
+* **Docs**: Added docs for org & user preferences HTTP API, closes [#5069](https://github.com/grafana/grafana/issues/5069)
+* **Plugin list panel**: Now shows correct enable state for apps when not enabled, fixes [#5068](https://github.com/grafana/grafana/issues/5068)
+* **Elasticsearch**: Templating & Annotation queries that use template variables are now formatted correctly, fixes [#5135](https://github.com/grafana/grafana/issues/5135)
+
+# 3.0.2 Patch release (2016-05-16)
+
+* **Templating**: Fixed issue mixing row repeat and panel repeats, fixes [#4988](https://github.com/grafana/grafana/issues/4988)
+* **Templating**: Fixed issue detecting dependencies in nested variables, fixes [#4987](https://github.com/grafana/grafana/issues/4987), fixes [#4986](https://github.com/grafana/grafana/issues/4986)
+* **Graph**: Fixed broken PNG rendering in graph panel, fixes [#5025](https://github.com/grafana/grafana/issues/5025)
+* **Graph**: Fixed broken xaxis on graph panel, fixes [#5024](https://github.com/grafana/grafana/issues/5024)
+
+* **Influxdb**: Fixes crash when hiding middle series, fixes [#5005](https://github.com/grafana/grafana/issues/5005)
+
+# 3.0.1 Stable (2016-05-11)
+
+### Bug fixes
* **Templating**: Fixed issue with new data source variable not persisting current selected value, fixes [#4934](https://github.com/grafana/grafana/issues/4934)
# 3.0.0-beta7 (2016-05-02)
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index b48d5189e94..c9afb49850b 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -1,6 +1,7 @@
{
"ImportPath": "github.com/grafana/grafana",
"GoVersion": "go1.5.1",
+ "GodepVersion": "v60",
"Packages": [
"./pkg/..."
],
@@ -124,6 +125,11 @@
"Comment": "v1.0.0",
"Rev": "abb928e07c4108683d6b4d0b6ca08fe6bc0eee5f"
},
+ {
+ "ImportPath": "github.com/aws/aws-sdk-go/service/sts",
+ "Comment": "v1.0.0",
+ "Rev": "abb928e07c4108683d6b4d0b6ca08fe6bc0eee5f"
+ },
{
"ImportPath": "github.com/bmizerany/assert",
"Comment": "release.r60-6-ge17e998",
@@ -199,6 +205,11 @@
"Comment": "v1.2-171-g267b128",
"Rev": "267b128680c46286b9ca13475c3cca5de8f79bd7"
},
+ {
+ "ImportPath": "github.com/go-stack/stack",
+ "Comment": "v1.5.2",
+ "Rev": "100eb0c0a9c5b306ca2fb4f165df21d80ada4b82"
+ },
{
"ImportPath": "github.com/go-xorm/core",
"Comment": "v0.4.4-7-g9e608f7",
@@ -221,6 +232,16 @@
"ImportPath": "github.com/hashicorp/go-version",
"Rev": "7e3c02b30806fa5779d3bdfc152ce4c6f40e7b38"
},
+ {
+ "ImportPath": "github.com/inconshreveable/log15",
+ "Comment": "v2.3-61-g20bca5a",
+ "Rev": "20bca5a7a57282e241fac83ec9ea42538027f1c1"
+ },
+ {
+ "ImportPath": "github.com/inconshreveable/log15/term",
+ "Comment": "v2.3-61-g20bca5a",
+ "Rev": "20bca5a7a57282e241fac83ec9ea42538027f1c1"
+ },
{
"ImportPath": "github.com/jmespath/go-jmespath",
"Comment": "0.2.2",
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/api.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/api.go
new file mode 100644
index 00000000000..52755c9e061
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -0,0 +1,1127 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package sts provides a client for AWS Security Token Service.
+package sts
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAssumeRole = "AssumeRole"
+
+// AssumeRoleRequest generates a request for the AssumeRole operation.
+func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
+ op := &request.Operation{
+ Name: opAssumeRole,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &AssumeRoleOutput{}
+ req.Data = output
+ return
+}
+
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) that you can use to access
+// AWS resources that you might not normally have access to. Typically, you
+// use AssumeRole for cross-account access or federation.
+//
+// Important: You cannot call AssumeRole by using AWS account credentials;
+// access will be denied. You must use IAM user credentials or temporary security
+// credentials to call AssumeRole.
+//
+// For cross-account access, imagine that you own multiple accounts and need
+// to access resources in each account. You could create long-term credentials
+// in each account to access those resources. However, managing all those credentials
+// and remembering which one can access which account can be time consuming.
+// Instead, you can create one set of long-term credentials in one account and
+// then use temporary security credentials to access all the other accounts
+// by assuming roles in those accounts. For more information about roles, see
+// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html)
+// in the Using IAM.
+//
+// For federation, you can, for example, grant single sign-on access to the
+// AWS Management Console. If you already have an identity and authentication
+// system in your corporate network, you don't have to recreate user identities
+// in AWS in order to grant those user identities access to AWS. Instead, after
+// a user has been authenticated, you call AssumeRole (and specify the role
+// with the appropriate permissions) to get temporary security credentials for
+// that user. With those temporary security credentials, you construct a sign-in
+// URL that users can use to access the console. For more information, see Common
+// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
+// in the Using IAM.
+//
+// The temporary security credentials are valid for the duration that you specified
+// when calling AssumeRole, which can be from 900 seconds (15 minutes) to 3600
+// seconds (1 hour). The default is 1 hour.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you
+// choose not to pass a policy, the temporary security credentials that are
+// returned by the operation have the permissions that are defined in the access
+// policy of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by both the access policy of the role that
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
+// You cannot use the passed policy to grant permissions that are in excess
+// of those allowed by the access policy of the role that is being assumed.
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the Using IAM.
+//
+// To assume a role, your AWS account must be trusted by the role. The trust
+// relationship is defined in the role's trust policy when the role is created.
+// You must also have a policy that allows you to call sts:AssumeRole.
+//
+// Using MFA with AssumeRole
+//
+// You can optionally include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios in which
+// you want to make sure that the user who is assuming the role has been authenticated
+// using an AWS MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication; if the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
+//
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the Using IAM guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA devices produces.
+func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
+ req, out := c.AssumeRoleRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest generates a request for the AssumeRoleWithSAML operation.
+func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
+ op := &request.Operation{
+ Name: opAssumeRoleWithSAML,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleWithSAMLInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &AssumeRoleWithSAMLOutput{}
+ req.Data = output
+ return
+}
+
+// Returns a set of temporary security credentials for users who have been authenticated
+// via a SAML authentication response. This operation provides a mechanism for
+// tying an enterprise identity store or directory to role-based AWS access
+// without user-specific credentials or configuration.
+//
+// The temporary security credentials returned by this operation consist of
+// an access key ID, a secret access key, and a security token. Applications
+// can use these temporary security credentials to sign calls to AWS services.
+// The credentials are valid for the duration that you specified when calling
+// AssumeRoleWithSAML, which can be up to 3600 seconds (1 hour) or until the
+// time specified in the SAML authentication response's SessionNotOnOrAfter
+// value, whichever is shorter.
+//
+// The maximum duration for a session is 1 hour, and the minimum duration is
+// 15 minutes, even if values outside this range are specified. Optionally,
+// you can pass an IAM access policy to this operation. If you choose not to
+// pass a policy, the temporary security credentials that are returned by the
+// operation have the permissions that are defined in the access policy of the
+// role that is being assumed. If you pass a policy to this operation, the temporary
+// security credentials that are returned by the operation have the permissions
+// that are allowed by both the access policy of the role that is being assumed,
+// and the policy that you pass. This gives you a way to further restrict the
+// permissions for the resulting temporary security credentials. You cannot
+// use the passed policy to grant permissions that are in excess of those allowed
+// by the access policy of the role that is being assumed. For more information,
+// see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the Using IAM.
+//
+// Before your application can call AssumeRoleWithSAML, you must configure
+// your SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
+// you must use AWS Identity and Access Management (IAM) to create a SAML provider
+// entity in your AWS account that represents your identity provider, and create
+// an IAM role that specifies this SAML provider in its trust policy.
+//
+// Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
+// The identity of the caller is validated by using keys in the metadata document
+// that is uploaded for the SAML provider entity for your identity provider.
+//
+// For more information, see the following resources:
+//
+// About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
+// in the Using IAM. Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
+// in the Using IAM. Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
+// in the Using IAM. Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
+// in the Using IAM.
+func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
+ req, out := c.AssumeRoleWithSAMLRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
+
+// AssumeRoleWithWebIdentityRequest generates a request for the AssumeRoleWithWebIdentity operation.
+func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
+ op := &request.Operation{
+ Name: opAssumeRoleWithWebIdentity,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleWithWebIdentityInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &AssumeRoleWithWebIdentityOutput{}
+ req.Data = output
+ return
+}
+
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider, such as Amazon
+// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
+// identity provider.
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You
+// can use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
+// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely
+// identify a user and supply the user with a consistent identity throughout
+// the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview
+// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the AWS SDK for iOS Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
+// credentials. Therefore, you can distribute an application (for example, on
+// mobile devices) that requests temporary security credentials without including
+// long-term AWS credentials in the application, and without deploying server-based
+// proxy services that use long-term AWS credentials. Instead, the identity
+// of the caller is validated by using a token from the web identity provider.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to AWS service APIs. The credentials
+// are valid for the duration that you specified when calling AssumeRoleWithWebIdentity,
+// which can be from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default,
+// the temporary security credentials are valid for 1 hour.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you
+// choose not to pass a policy, the temporary security credentials that are
+// returned by the operation have the permissions that are defined in the access
+// policy of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by both the access policy of the role that
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
+// You cannot use the passed policy to grant permissions that are in excess
+// of those allowed by the access policy of the role that is being assumed.
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the Using IAM.
+//
+// Before your application can call AssumeRoleWithWebIdentity, you must have
+// an identity token from a supported identity provider and create a role that
+// the application can assume. The role that your application assumes must trust
+// the identity provider that is associated with the identity token. In other
+// words, the identity provider must be specified in the role's trust policy.
+//
+// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
+// API, see the following resources:
+//
+// Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual)
+// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+// Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
+// This interactive website lets you walk through the process of authenticating
+// via Login with Amazon, Facebook, or Google, getting temporary security credentials,
+// and then using those credentials to make a request to AWS. AWS SDK for
+// iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android (http://aws.amazon.com/sdkforandroid/).
+// These toolkits contain sample apps that show how to invoke the identity providers,
+// and then how to use the information from these providers to get and use temporary
+// security credentials. Web Identity Federation with Mobile Applications
+// (http://aws.amazon.com/articles/4617974389850313). This article discusses
+// web identity federation and shows an example of how to use web identity federation
+// to get access to content in Amazon S3.
+func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
+ req, out := c.AssumeRoleWithWebIdentityRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
+
+// DecodeAuthorizationMessageRequest generates a request for the DecodeAuthorizationMessage operation.
+func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
+ op := &request.Operation{
+ Name: opDecodeAuthorizationMessage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DecodeAuthorizationMessageInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DecodeAuthorizationMessageOutput{}
+ req.Data = output
+ return
+}
+
+// Decodes additional information about the authorization status of a request
+// from an encoded message returned in response to an AWS request.
+//
+// For example, if a user is not authorized to perform an action that he or
+// she has requested, the request returns a Client.UnauthorizedOperation response
+// (an HTTP 403 response). Some AWS actions additionally return an encoded message
+// that can provide details about this authorization failure.
+//
+// Only certain AWS actions return an encoded authorization message. The documentation
+// for an individual action indicates whether that action returns an encoded
+// message in addition to returning an HTTP code. The message is encoded because
+// the details of the authorization status can constitute privileged information
+// that the user who requested the action should not see. To decode an authorization
+// status message, a user must be granted permissions via an IAM policy to request
+// the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) action.
+//
+// The decoded message includes the following type of information:
+//
+// Whether the request was denied due to an explicit deny or due to the absence
+// of an explicit allow. For more information, see Determining Whether a Request
+// is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+// in the Using IAM. The principal who made the request. The requested action.
+// The requested resource. The values of condition keys in the context of the
+// user's request.
+func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
+ req, out := c.DecodeAuthorizationMessageRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates a request for the GetFederationToken operation.
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+ op := &request.Operation{
+ Name: opGetFederationToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetFederationTokenInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetFederationTokenOutput{}
+ req.Data = output
+ return
+}
+
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. Because
+// you must call the GetFederationToken action using the long-term security
+// credentials of an IAM user, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+//
+// If you are creating a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider, we recommend that you
+// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// The GetFederationToken action must be called by using the long-term AWS
+// security credentials of an IAM user. You can also call GetFederationToken
+// using the security credentials of an AWS account (root), but this is not
+// recommended. Instead, we recommend that you create an IAM user for the purpose
+// of the proxy application and then attach a policy to the IAM user that limits
+// federated users to only the actions and resources they need access to. For
+// more information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the Using IAM.
+//
+// The temporary security credentials that are obtained by using the long-term
+// credentials of an IAM user are valid for the specified duration, between
+// 900 seconds (15 minutes) and 129600 seconds (36 hours). Temporary credentials
+// that are obtained by using AWS account (root) credentials have a maximum
+// duration of 3600 seconds (1 hour)
+//
+// Permissions
+//
+// The permissions for the temporary security credentials returned by GetFederationToken
+// are determined by a combination of the following:
+//
+// The policy or policies that are attached to the IAM user whose credentials
+// are used to call GetFederationToken. The policy that is passed as a parameter
+// in the call. The passed policy is attached to the temporary security credentials
+// that result from the GetFederationToken API call--that is, to the federated
+// user. When the federated user makes an AWS request, AWS evaluates the policy
+// attached to the federated user in combination with the policy or policies
+// attached to the IAM user whose credentials were used to call GetFederationToken.
+// AWS allows the federated user's request only when both the federated user
+// and the IAM user are explicitly allowed to perform the requested action.
+// The passed policy cannot grant more permissions than those that are defined
+// in the IAM user policy.
+//
+// A typical use case is that the permissions of the IAM user whose credentials
+// are used to call GetFederationToken are designed to allow access to all the
+// actions and resources that any federated user will need. Then, for individual
+// users, you pass a policy to the operation that scopes down the permissions
+// to a level that's appropriate to that individual user, using a policy that
+// allows only a subset of permissions that are granted to the IAM user.
+//
+// If you do not pass a policy, the resulting temporary security credentials
+// have no effective permissions. The only exception is when the temporary security
+// credentials are used to access a resource that has a resource-based policy
+// that specifically allows the federated user to access the resource.
+//
+// For more information about how permissions work, see Permissions for GetFederationToken
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+// For information about using GetFederationToken to create temporary security
+// credentials, see GetFederationToken—Federation Through a Custom Identity
+// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
+	request, output := c.GetFederationTokenRequest(input)
+	sendErr := request.Send()
+	return output, sendErr
+}
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest generates a request for the GetSessionToken operation.
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetSessionToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetSessionTokenInput{}
+	}
+
+	output = &GetSessionTokenOutput{} // allocate before newRequest so the request never carries a nil data value
+	req = c.newRequest(op, input, output)
+	req.Data = output // unmarshal target for the response
+	return
+}
+
+// Returns a set of temporary credentials for an AWS account or IAM user. The
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use GetSessionToken if you want to use MFA to protect
+// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled
+// IAM users would need to call GetSessionToken and submit an MFA code that
+// is associated with their MFA device. Using the temporary security credentials
+// that are returned from the call, IAM users can then make programmatic calls
+// to APIs that require MFA authentication. If you do not supply a correct MFA
+// code, then the API returns an access denied error.
+//
+// The GetSessionToken action must be called by using the long-term AWS security
+// credentials of the AWS account or an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify, between 900 seconds
+// (15 minutes) and 129600 seconds (36 hours); credentials that are created
+// by using account credentials have a maximum duration of 3600 seconds (1 hour).
+//
+// We recommend that you do not call GetSessionToken with root account credentials.
+// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with AWS.
+//
+// The permissions associated with the temporary security credentials returned
+// by GetSessionToken are based on the permissions associated with account or
+// IAM user whose credentials are used to call the action. If GetSessionToken
+// is called using root account credentials, the temporary credentials have
+// root account permissions. Similarly, if GetSessionToken is called using the
+// credentials of an IAM user, the temporary credentials have the same permissions
+// as the IAM user.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the Using IAM guide.
+func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+type AssumeRoleInput struct {
+	// The duration, in seconds, of the role session. The value can range from 900
+	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+	// to 3600 seconds.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// A unique identifier that is used by third parties when assuming roles in
+	// their customers' accounts. For each role that the third party can assume,
+	// they should instruct their customers to ensure the role's trust policy checks
+	// for the external ID that the third party generated. Each time the third party
+	// assumes the role, they should pass the customer's external ID. The external
+	// ID is useful in order to help third parties bind a role to the customer who
+	// created it. For more information about the external ID, see How to Use an
+	// External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+	// in the Using IAM guide.
+	ExternalId *string `min:"2" type:"string"`
+
+	// An IAM policy in JSON format.
+	//
+	// This parameter is optional. If you pass a policy, the temporary security
+	// credentials that are returned by the operation have the permissions that
+	// are allowed by both (the intersection of) the access policy of the role that
+	// is being assumed, and the policy that you pass. This gives you a way to further
+	// restrict the permissions for the resulting temporary security credentials.
+	// You cannot use the passed policy to grant permissions that are in excess
+	// of those allowed by the access policy of the role that is being assumed.
+	// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+	// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+	// in the Using IAM guide.
+	//
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the role to assume.
+	RoleArn *string `min:"20" type:"string" required:"true"`
+
+	// An identifier for the assumed role session.
+	//
+	// Use the role session name to uniquely identify a session when the same role
+	// is assumed by different principals or for different reasons. In cross-account
+	// scenarios, the role session name is visible to, and can be logged by the
+	// account that owns the role. The role session name is also used in the ARN
+	// of the assumed role principal. This means that subsequent cross-account API
+	// requests using the temporary security credentials will expose the role session
+	// name to the external account in their CloudTrail logs.
+	RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	SerialNumber *string `min:"9" type:"string"`
+
+	// The value provided by the MFA device, if the trust policy of the role being
+	// assumed requires MFA (that is, if the policy includes a condition that tests
+	// for MFA). If the role being assumed requires MFA and if the TokenCode value
+	// is missing or expired, the AssumeRole call returns an "access denied" error.
+	TokenCode *string `min:"6" type:"string"`
+
+	metadataAssumeRoleInput `json:"-" xml:"-"`
+}
+
+type metadataAssumeRoleInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation.
+func (s AssumeRoleInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s AssumeRoleInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a successful AssumeRole request, including temporary
+// AWS credentials that can be used to make AWS requests.
+type AssumeRoleOutput struct {
+	// The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+	// that you can use to refer to the resulting temporary security credentials.
+	// For example, you can reference these credentials as a principal in a resource-based
+	// policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+	// that you specified when you called AssumeRole.
+	AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+	// The temporary security credentials, which include an access key ID, a secret
+	// access key, and a security (or session) token.
+	Credentials *Credentials `type:"structure"`
+
+	// A percentage value that indicates the size of the policy in packed form.
+	// The service rejects any policy with a packed size greater than 100 percent,
+	// which means the policy exceeded the allowed space.
+	PackedPolicySize *int64 `type:"integer"`
+
+	metadataAssumeRoleOutput `json:"-" xml:"-"`
+}
+
+type metadataAssumeRoleOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation.
+func (s AssumeRoleOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s AssumeRoleOutput) GoString() string {
+	return s.String()
+}
+
+type AssumeRoleWithSAMLInput struct {
+	// The duration, in seconds, of the role session. The value can range from 900
+	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+	// to 3600 seconds. An expiration can also be specified in the SAML authentication
+	// response's SessionNotOnOrAfter value. The actual expiration time is whichever
+	// value is shorter.
+	//
+	// The maximum duration for a session is 1 hour, and the minimum duration is
+	// 15 minutes, even if values outside this range are specified.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// An IAM policy in JSON format.
+	//
+	// The policy parameter is optional. If you pass a policy, the temporary security
+	// credentials that are returned by the operation have the permissions that
+	// are allowed by both the access policy of the role that is being assumed,
+	// and the policy that you pass. This gives you a way to further restrict the
+	// permissions for the resulting temporary security credentials. You cannot
+	// use the passed policy to grant permissions that are in excess of those allowed
+	// by the access policy of the role that is being assumed. For more information,
+	// see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+	// in the Using IAM guide.
+	//
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
+	// the IdP.
+	PrincipalArn *string `min:"20" type:"string" required:"true"`
+
+	// The Amazon Resource Name (ARN) of the role that the caller is assuming.
+	RoleArn *string `min:"20" type:"string" required:"true"`
+
+	// The base-64 encoded SAML authentication response provided by the IdP.
+	//
+	// For more information, see Configuring a Relying Party and Adding Claims
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+	// in the Using IAM guide.
+	SAMLAssertion *string `min:"4" type:"string" required:"true"`
+
+	metadataAssumeRoleWithSAMLInput `json:"-" xml:"-"`
+}
+
+type metadataAssumeRoleWithSAMLInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation.
+func (s AssumeRoleWithSAMLInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s AssumeRoleWithSAMLInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a successful AssumeRoleWithSAML request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type AssumeRoleWithSAMLOutput struct {
+	// The identifiers for the temporary security credentials that the operation
+	// returns.
+	AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+	// The value of the Recipient attribute of the SubjectConfirmationData element
+	// of the SAML assertion.
+	Audience *string `type:"string"`
+
+	// AWS credentials for API authentication.
+	Credentials *Credentials `type:"structure"`
+
+	// The value of the Issuer element of the SAML assertion.
+	Issuer *string `type:"string"`
+
+	// A hash value based on the concatenation of the Issuer response value, the
+	// AWS account ID, and the friendly name (the last part of the ARN) of the SAML
+	// provider in IAM. The combination of NameQualifier and Subject can be used
+	// to uniquely identify a federated user.
+	//
+	// The following pseudocode shows how the hash value is calculated:
+	//
+	// BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP"
+	// ) )
+	NameQualifier *string `type:"string"`
+
+	// A percentage value that indicates the size of the policy in packed form.
+	// The service rejects any policy with a packed size greater than 100 percent,
+	// which means the policy exceeded the allowed space.
+	PackedPolicySize *int64 `type:"integer"`
+
+	// The value of the NameID element in the Subject element of the SAML assertion.
+	Subject *string `type:"string"`
+
+	// The format of the name ID, as defined by the Format attribute in the NameID
+	// element of the SAML assertion. Typical examples of the format are transient
+	// or persistent.
+	//
+	// If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format,
+	// that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient
+	// is returned as transient. If the format includes any other prefix, the format
+	// is returned with no modifications.
+	SubjectType *string `type:"string"`
+
+	metadataAssumeRoleWithSAMLOutput `json:"-" xml:"-"`
+}
+
+type metadataAssumeRoleWithSAMLOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation.
+func (s AssumeRoleWithSAMLOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s AssumeRoleWithSAMLOutput) GoString() string {
+	return s.String()
+}
+
+type AssumeRoleWithWebIdentityInput struct {
+	// The duration, in seconds, of the role session. The value can range from 900
+	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+	// to 3600 seconds.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// An IAM policy in JSON format.
+	//
+	// The policy parameter is optional. If you pass a policy, the temporary security
+	// credentials that are returned by the operation have the permissions that
+	// are allowed by both the access policy of the role that is being assumed,
+	// and the policy that you pass. This gives you a way to further restrict the
+	// permissions for the resulting temporary security credentials. You cannot
+	// use the passed policy to grant permissions that are in excess of those allowed
+	// by the access policy of the role that is being assumed. For more information,
+	// see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+	// in the Using IAM guide.
+	//
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string `min:"1" type:"string"`
+
+	// The fully qualified host component of the domain name of the identity provider.
+	//
+	// Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com
+	// and graph.facebook.com are the only supported identity providers for OAuth
+	// 2.0 access tokens. Do not include URL schemes and port numbers.
+	//
+	// Do not specify this value for OpenID Connect ID tokens.
+	ProviderId *string `min:"4" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the role that the caller is assuming.
+	RoleArn *string `min:"20" type:"string" required:"true"`
+
+	// An identifier for the assumed role session. Typically, you pass the name
+	// or identifier that is associated with the user who is using your application.
+	// That way, the temporary security credentials that your application will use
+	// are associated with that user. This session name is included as part of the
+	// ARN and assumed role ID in the AssumedRoleUser response element.
+	RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+	// The OAuth 2.0 access token or OpenID Connect ID token that is provided by
+	// the identity provider. Your application must get this token by authenticating
+	// the user who is using your application with a web identity provider before
+	// the application makes an AssumeRoleWithWebIdentity call.
+	WebIdentityToken *string `min:"4" type:"string" required:"true"`
+
+	metadataAssumeRoleWithWebIdentityInput `json:"-" xml:"-"`
+}
+
+type metadataAssumeRoleWithWebIdentityInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation.
+func (s AssumeRoleWithWebIdentityInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s AssumeRoleWithWebIdentityInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a successful AssumeRoleWithWebIdentity request,
+// including temporary AWS credentials that can be used to make AWS requests.
+type AssumeRoleWithWebIdentityOutput struct {
+	// The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+	// that you can use to refer to the resulting temporary security credentials.
+	// For example, you can reference these credentials as a principal in a resource-based
+	// policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+	// that you specified when you called AssumeRole.
+	AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+	// The intended audience (also known as client ID) of the web identity token.
+	// This is traditionally the client identifier issued to the application that
+	// requested the web identity token.
+	Audience *string `type:"string"`
+
+	// The temporary security credentials, which include an access key ID, a secret
+	// access key, and a security token.
+	Credentials *Credentials `type:"structure"`
+
+	// A percentage value that indicates the size of the policy in packed form.
+	// The service rejects any policy with a packed size greater than 100 percent,
+	// which means the policy exceeded the allowed space.
+	PackedPolicySize *int64 `type:"integer"`
+
+	// The issuing authority of the web identity token presented. For OpenID Connect
+	// ID Tokens this contains the value of the iss field. For OAuth 2.0 access
+	// tokens, this contains the value of the ProviderId parameter that was passed
+	// in the AssumeRoleWithWebIdentity request.
+	Provider *string `type:"string"`
+
+	// The unique user identifier that is returned by the identity provider. This
+	// identifier is associated with the WebIdentityToken that was submitted with
+	// the AssumeRoleWithWebIdentity call. The identifier is typically unique to
+	// the user and the application that acquired the WebIdentityToken (pairwise
+	// identifier). For OpenID Connect ID tokens, this field contains the value
+	// returned by the identity provider as the token's sub (Subject) claim.
+	SubjectFromWebIdentityToken *string `min:"6" type:"string"`
+
+	metadataAssumeRoleWithWebIdentityOutput `json:"-" xml:"-"`
+}
+
+type metadataAssumeRoleWithWebIdentityOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation.
+func (s AssumeRoleWithWebIdentityOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s AssumeRoleWithWebIdentityOutput) GoString() string {
+	return s.String()
+}
+
+// The identifiers for the temporary security credentials that the operation
+// returns.
+type AssumedRoleUser struct {
+	// The ARN of the temporary security credentials that are returned from the
+	// AssumeRole action. For more information about ARNs and how to use them in
+	// policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+	// in the Using IAM guide.
+	Arn *string `min:"20" type:"string" required:"true"`
+
+	// A unique identifier that contains the role ID and the role session name of
+	// the role that is being assumed. The role ID is generated by AWS when the
+	// role is created.
+	AssumedRoleId *string `min:"2" type:"string" required:"true"`
+
+	metadataAssumedRoleUser `json:"-" xml:"-"`
+}
+
+type metadataAssumedRoleUser struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation.
+func (s AssumedRoleUser) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s AssumedRoleUser) GoString() string {
+	return s.String()
+}
+
+// AWS credentials for API authentication.
+type Credentials struct {
+	// The access key ID that identifies the temporary security credentials.
+	AccessKeyId *string `min:"16" type:"string" required:"true"`
+
+	// The date on which the current credentials expire.
+	Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// The secret access key that can be used to sign requests.
+	SecretAccessKey *string `type:"string" required:"true"`
+
+	// The token that users must pass to the service API to use the temporary credentials.
+	SessionToken *string `type:"string" required:"true"`
+
+	metadataCredentials `json:"-" xml:"-"`
+}
+
+type metadataCredentials struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation.
+func (s Credentials) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s Credentials) GoString() string {
+	return s.String()
+}
+
+type DecodeAuthorizationMessageInput struct {
+	// The encoded message that was returned with the response.
+	EncodedMessage *string `min:"1" type:"string" required:"true"`
+
+	metadataDecodeAuthorizationMessageInput `json:"-" xml:"-"`
+}
+
+type metadataDecodeAuthorizationMessageInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation.
+func (s DecodeAuthorizationMessageInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s DecodeAuthorizationMessageInput) GoString() string {
+	return s.String()
+}
+
+// A document that contains additional information about the authorization status
+// of a request from an encoded message that is returned in response to an AWS
+// request.
+type DecodeAuthorizationMessageOutput struct {
+	// An XML document that contains the decoded message. For more information,
+	// see DecodeAuthorizationMessage.
+	DecodedMessage *string `type:"string"`
+
+	metadataDecodeAuthorizationMessageOutput `json:"-" xml:"-"`
+}
+
+type metadataDecodeAuthorizationMessageOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation.
+func (s DecodeAuthorizationMessageOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s DecodeAuthorizationMessageOutput) GoString() string {
+	return s.String()
+}
+
+// Identifiers for the federated user that is associated with the credentials.
+type FederatedUser struct {
+	// The ARN that specifies the federated user that is associated with the credentials.
+	// For more information about ARNs and how to use them in policies, see IAM
+	// Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+	// in the Using IAM guide.
+	Arn *string `min:"20" type:"string" required:"true"`
+
+	// The string that identifies the federated user associated with the credentials,
+	// similar to the unique ID of an IAM user.
+	FederatedUserId *string `min:"2" type:"string" required:"true"`
+
+	metadataFederatedUser `json:"-" xml:"-"`
+}
+
+type metadataFederatedUser struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation.
+func (s FederatedUser) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s FederatedUser) GoString() string {
+	return s.String()
+}
+
+type GetFederationTokenInput struct {
+	// The duration, in seconds, that the session should last. Acceptable durations
+	// for federation sessions range from 900 seconds (15 minutes) to 129600 seconds
+	// (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained
+	// using AWS account (root) credentials are restricted to a maximum of 3600
+	// seconds (one hour). If the specified duration is longer than one hour, the
+	// session obtained by using AWS account (root) credentials defaults to one
+	// hour.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// The name of the federated user. The name is used as an identifier for the
+	// temporary security credentials (such as Bob). For example, you can reference
+	// the federated user name in a resource-based policy, such as in an Amazon
+	// S3 bucket policy.
+	Name *string `min:"2" type:"string" required:"true"`
+
+	// An IAM policy in JSON format that is passed with the GetFederationToken call
+	// and evaluated along with the policy or policies that are attached to the
+	// IAM user whose credentials are used to call GetFederationToken. The passed
+	// policy is used to scope down the permissions that are available to the IAM
+	// user, by allowing only a subset of the permissions that are granted to the
+	// IAM user. The passed policy cannot grant more permissions than those granted
+	// to the IAM user. The final permissions for the federated user are the most
+	// restrictive set based on the intersection of the passed policy and the IAM
+	// user policy.
+	//
+	// If you do not pass a policy, the resulting temporary security credentials
+	// have no effective permissions. The only exception is when the temporary security
+	// credentials are used to access a resource that has a resource-based policy
+	// that specifically allows the federated user to access the resource.
+	//
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size. For more information about how permissions work, see Permissions for
+	// GetFederationToken (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+	Policy *string `min:"1" type:"string"`
+
+	metadataGetFederationTokenInput `json:"-" xml:"-"`
+}
+
+type metadataGetFederationTokenInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation.
+func (s GetFederationTokenInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetFederationTokenInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a successful GetFederationToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type GetFederationTokenOutput struct {
+	// Credentials for the service API authentication.
+	Credentials *Credentials `type:"structure"`
+
+	// Identifiers for the federated user associated with the credentials (such
+	// as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You
+	// can use the federated user's ARN in your resource-based policies, such as
+	// an Amazon S3 bucket policy.
+	FederatedUser *FederatedUser `type:"structure"`
+
+	// A percentage value indicating the size of the policy in packed form. The
+	// service rejects policies for which the packed size is greater than 100 percent
+	// of the allowed value.
+	PackedPolicySize *int64 `type:"integer"`
+
+	metadataGetFederationTokenOutput `json:"-" xml:"-"`
+}
+
+type metadataGetFederationTokenOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation.
+func (s GetFederationTokenOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetFederationTokenOutput) GoString() string {
+	return s.String()
+}
+
+type GetSessionTokenInput struct {
+	// The duration, in seconds, that the credentials should remain valid. Acceptable
+	// durations for IAM user sessions range from 900 seconds (15 minutes) to 129600
+	// seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions
+	// for AWS account owners are restricted to a maximum of 3600 seconds (one hour).
+	// If the duration is longer than one hour, the session for AWS account owners
+	// defaults to one hour.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// The identification number of the MFA device that is associated with the IAM
+	// user who is making the GetSessionToken call. Specify this value if the IAM
+	// user has a policy that requires MFA authentication. The value is either the
+	// serial number for a hardware device (such as GAHT12345678) or an Amazon Resource
+	// Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	// You can find the device for an IAM user by going to the AWS Management Console
+	// and viewing the user's security credentials.
+	SerialNumber *string `min:"9" type:"string"`
+
+	// The value provided by the MFA device, if MFA is required. If any policy requires
+	// the IAM user to submit an MFA code, specify this value. If MFA authentication
+	// is required, and the user does not provide a code when requesting a set of
+	// temporary security credentials, the user will receive an "access denied"
+	// response when requesting resources that require MFA authentication.
+	TokenCode *string `min:"6" type:"string"`
+
+	metadataGetSessionTokenInput `json:"-" xml:"-"`
+}
+
+type metadataGetSessionTokenInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation.
+func (s GetSessionTokenInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetSessionTokenInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a successful GetSessionToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type GetSessionTokenOutput struct {
+	// The session credentials for API authentication.
+	Credentials *Credentials `type:"structure"`
+
+	metadataGetSessionTokenOutput `json:"-" xml:"-"`
+}
+
+type metadataGetSessionTokenOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation.
+func (s GetSessionTokenOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetSessionTokenOutput) GoString() string {
+	return s.String()
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/customizations.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/customizations.go
new file mode 100644
index 00000000000..4010cc7fa14
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/customizations.go
@@ -0,0 +1,12 @@
+package sts
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+func init() {
+ initRequest = func(r *request.Request) {
+ switch r.Operation.Name {
+ case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity:
+ r.Handlers.Sign.Clear() // these operations are unsigned
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/customizations_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/customizations_test.go
new file mode 100644
index 00000000000..6f870d35e27
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/customizations_test.go
@@ -0,0 +1,39 @@
+package sts_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/awstesting/unit"
+ "github.com/aws/aws-sdk-go/service/sts"
+)
+
+var svc = sts.New(unit.Session, &aws.Config{
+ Region: aws.String("mock-region"),
+})
+
+func TestUnsignedRequest_AssumeRoleWithSAML(t *testing.T) {
+ req, _ := svc.AssumeRoleWithSAMLRequest(&sts.AssumeRoleWithSAMLInput{
+ PrincipalArn: aws.String("ARN01234567890123456789"),
+ RoleArn: aws.String("ARN01234567890123456789"),
+ SAMLAssertion: aws.String("ASSERT"),
+ })
+
+ err := req.Sign()
+ assert.NoError(t, err)
+ assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization"))
+}
+
+func TestUnsignedRequest_AssumeRoleWithWebIdentity(t *testing.T) {
+ req, _ := svc.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
+ RoleArn: aws.String("ARN01234567890123456789"),
+ RoleSessionName: aws.String("SESSION"),
+ WebIdentityToken: aws.String("TOKEN"),
+ })
+
+ err := req.Sign()
+ assert.NoError(t, err)
+ assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization"))
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/examples_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/examples_test.go
new file mode 100644
index 00000000000..083bcbd6877
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/examples_test.go
@@ -0,0 +1,149 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package sts_test
+
+import (
+ "bytes"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/sts"
+)
+
+var _ time.Duration
+var _ bytes.Buffer
+
+func ExampleSTS_AssumeRole() {
+ svc := sts.New(session.New())
+
+ params := &sts.AssumeRoleInput{
+ RoleArn: aws.String("arnType"), // Required
+ RoleSessionName: aws.String("roleSessionNameType"), // Required
+ DurationSeconds: aws.Int64(1),
+ ExternalId: aws.String("externalIdType"),
+ Policy: aws.String("sessionPolicyDocumentType"),
+ SerialNumber: aws.String("serialNumberType"),
+ TokenCode: aws.String("tokenCodeType"),
+ }
+ resp, err := svc.AssumeRole(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleSTS_AssumeRoleWithSAML() {
+ svc := sts.New(session.New())
+
+ params := &sts.AssumeRoleWithSAMLInput{
+ PrincipalArn: aws.String("arnType"), // Required
+ RoleArn: aws.String("arnType"), // Required
+ SAMLAssertion: aws.String("SAMLAssertionType"), // Required
+ DurationSeconds: aws.Int64(1),
+ Policy: aws.String("sessionPolicyDocumentType"),
+ }
+ resp, err := svc.AssumeRoleWithSAML(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleSTS_AssumeRoleWithWebIdentity() {
+ svc := sts.New(session.New())
+
+ params := &sts.AssumeRoleWithWebIdentityInput{
+ RoleArn: aws.String("arnType"), // Required
+ RoleSessionName: aws.String("roleSessionNameType"), // Required
+ WebIdentityToken: aws.String("clientTokenType"), // Required
+ DurationSeconds: aws.Int64(1),
+ Policy: aws.String("sessionPolicyDocumentType"),
+ ProviderId: aws.String("urlType"),
+ }
+ resp, err := svc.AssumeRoleWithWebIdentity(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleSTS_DecodeAuthorizationMessage() {
+ svc := sts.New(session.New())
+
+ params := &sts.DecodeAuthorizationMessageInput{
+ EncodedMessage: aws.String("encodedMessageType"), // Required
+ }
+ resp, err := svc.DecodeAuthorizationMessage(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleSTS_GetFederationToken() {
+ svc := sts.New(session.New())
+
+ params := &sts.GetFederationTokenInput{
+ Name: aws.String("userNameType"), // Required
+ DurationSeconds: aws.Int64(1),
+ Policy: aws.String("sessionPolicyDocumentType"),
+ }
+ resp, err := svc.GetFederationToken(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
+
+func ExampleSTS_GetSessionToken() {
+ svc := sts.New(session.New())
+
+ params := &sts.GetSessionTokenInput{
+ DurationSeconds: aws.Int64(1),
+ SerialNumber: aws.String("serialNumberType"),
+ TokenCode: aws.String("tokenCodeType"),
+ }
+ resp, err := svc.GetSessionToken(params)
+
+ if err != nil {
+ // Print the error, cast err to awserr.Error to get the Code and
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+ }
+
+ // Pretty-print the response data.
+ fmt.Println(resp)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 00000000000..33f49001fa1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,130 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package sts
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+ "github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// The AWS Security Token Service (STS) is a web service that enables you to
+// request temporary, limited-privilege credentials for AWS Identity and Access
+// Management (IAM) users or for users that you authenticate (federated users).
+// This guide provides descriptions of the STS API. For more detailed information
+// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+//
+// As an alternative to using the API, you can use one of the AWS SDKs, which
+// consist of libraries and sample code for various programming languages and
+// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
+// way to create programmatic access to STS. For example, the SDKs take care
+// of cryptographically signing requests, managing errors, and retrying requests
+// automatically. For information about the AWS SDKs, including how to download
+// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
+// For information about setting up signatures and authorization through the
+// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html"
+// target="_blank) in the AWS General Reference. For general information about
+// the Query API, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html"
+// target="_blank) in Using IAM. For information about using security tokens
+// with other AWS products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
+// in the Using IAM.
+//
+// If you're new to AWS and need additional technical information about a specific
+// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
+// (http://aws.amazon.com/documentation/" target="_blank).
+//
+// Endpoints
+//
+// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
+// that maps to the US East (N. Virginia) region. Additional regions are available,
+// but must first be activated in the AWS Management Console before you can
+// use a different region's endpoint. For more information about activating
+// a region for STS see Activating STS in a New Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the Using IAM.
+//
+// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
+// in the AWS General Reference.
+//
+// Recording API requests
+//
+// STS supports AWS CloudTrail, which is a service that records AWS calls for
+// your AWS account and delivers log files to an Amazon S3 bucket. By using
+// information collected by CloudTrail, you can determine what requests were
+// successfully made to STS, who made the request, when it was made, and so
+// on. To learn more about CloudTrail, including how to turn it on and find
+// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+//The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type STS struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "sts"
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a STS client from just a session.
+// svc := sts.New(mySession)
+//
+// // Create a STS client with additional configuration
+// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *STS {
+ svc := &STS{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2011-06-15",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBack(query.Build)
+ svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+ svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+ svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
new file mode 100644
index 00000000000..09dae0c9dc3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
@@ -0,0 +1,38 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package stsiface provides an interface for the AWS Security Token Service.
+package stsiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/sts"
+)
+
+// STSAPI is the interface type for sts.STS.
+type STSAPI interface {
+ AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)
+
+ AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+
+ AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)
+
+ AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)
+
+ AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)
+
+ AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)
+
+ DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
+
+ DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
+
+ GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput)
+
+ GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error)
+
+ GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput)
+
+ GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error)
+}
+
+var _ STSAPI = (*sts.STS)(nil)
diff --git a/Godeps/_workspace/src/github.com/go-stack/stack/.travis.yml b/Godeps/_workspace/src/github.com/go-stack/stack/.travis.yml
new file mode 100644
index 00000000000..d5e5dd52da0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-stack/stack/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+sudo: false
+go:
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+ - 1.6
+ - tip
+
+before_install:
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+
+script:
+ - goveralls -service=travis-ci
diff --git a/Godeps/_workspace/src/github.com/go-stack/stack/LICENSE.md b/Godeps/_workspace/src/github.com/go-stack/stack/LICENSE.md
new file mode 100644
index 00000000000..c8ca66c5ede
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-stack/stack/LICENSE.md
@@ -0,0 +1,13 @@
+Copyright 2014 Chris Hines
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/go-stack/stack/README.md b/Godeps/_workspace/src/github.com/go-stack/stack/README.md
new file mode 100644
index 00000000000..f11ccccaa43
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-stack/stack/README.md
@@ -0,0 +1,38 @@
+[](https://godoc.org/github.com/go-stack/stack)
+[](https://goreportcard.com/report/go-stack/stack)
+[](https://travis-ci.org/go-stack/stack)
+[](https://coveralls.io/github/go-stack/stack?branch=master)
+
+# stack
+
+Package stack implements utilities to capture, manipulate, and format call
+stacks. It provides a simpler API than package runtime.
+
+The implementation takes care of the minutia and special cases of interpreting
+the program counter (pc) values returned by runtime.Callers.
+
+## Versioning
+
+Package stack publishes releases via [semver](http://semver.org/) compatible Git
+tags prefixed with a single 'v'. The master branch always contains the latest
+release. The develop branch contains unreleased commits.
+
+## Formatting
+
+Package stack's types implement fmt.Formatter, which provides a simple and
+flexible way to declaratively configure formatting when used with logging or
+error tracking packages.
+
+```go
+func DoTheThing() {
+ c := stack.Caller(0)
+ log.Print(c) // "source.go:10"
+ log.Printf("%+v", c) // "pkg/path/source.go:10"
+ log.Printf("%n", c) // "DoTheThing"
+
+ s := stack.Trace().TrimRuntime()
+ log.Print(s) // "[source.go:15 caller.go:42 main.go:14]"
+}
+```
+
+See the docs for all of the supported formatting options.
diff --git a/Godeps/_workspace/src/github.com/go-stack/stack/stack.go b/Godeps/_workspace/src/github.com/go-stack/stack/stack.go
new file mode 100644
index 00000000000..a614eeebf16
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-stack/stack/stack.go
@@ -0,0 +1,349 @@
+// Package stack implements utilities to capture, manipulate, and format call
+// stacks. It provides a simpler API than package runtime.
+//
+// The implementation takes care of the minutia and special cases of
+// interpreting the program counter (pc) values returned by runtime.Callers.
+//
+// Package stack's types implement fmt.Formatter, which provides a simple and
+// flexible way to declaratively configure formatting when used with logging
+// or error tracking packages.
+package stack
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// Call records a single function invocation from a goroutine stack.
+type Call struct {
+ fn *runtime.Func
+ pc uintptr
+}
+
+// Caller returns a Call from the stack of the current goroutine. The argument
+// skip is the number of stack frames to ascend, with 0 identifying the
+// calling function.
+func Caller(skip int) Call {
+ var pcs [2]uintptr
+ n := runtime.Callers(skip+1, pcs[:])
+
+ var c Call
+
+ if n < 2 {
+ return c
+ }
+
+ c.pc = pcs[1]
+ if runtime.FuncForPC(pcs[0]) != sigpanic {
+ c.pc--
+ }
+ c.fn = runtime.FuncForPC(c.pc)
+ return c
+}
+
+// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", c).
+func (c Call) String() string {
+ return fmt.Sprint(c)
+}
+
+// MarshalText implements encoding.TextMarshaler. It formats the Call the same
+// as fmt.Sprintf("%v", c).
+func (c Call) MarshalText() ([]byte, error) {
+ if c.fn == nil {
+ return nil, ErrNoFunc
+ }
+ buf := bytes.Buffer{}
+ fmt.Fprint(&buf, c)
+ return buf.Bytes(), nil
+}
+
+// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely
+// cause is a Call with the zero value.
+var ErrNoFunc = errors.New("no call stack information")
+
+// Format implements fmt.Formatter with support for the following verbs.
+//
+// %s source file
+// %d line number
+// %n function name
+// %v equivalent to %s:%d
+//
+// It accepts the '+' and '#' flags for most of the verbs as follows.
+//
+// %+s path of source file relative to the compile time GOPATH
+// %#s full path of source file
+// %+n import path qualified function name
+// %+v equivalent to %+s:%d
+// %#v equivalent to %#s:%d
+func (c Call) Format(s fmt.State, verb rune) {
+ if c.fn == nil {
+ fmt.Fprintf(s, "%%!%c(NOFUNC)", verb)
+ return
+ }
+
+ switch verb {
+ case 's', 'v':
+ file, line := c.fn.FileLine(c.pc)
+ switch {
+ case s.Flag('#'):
+ // done
+ case s.Flag('+'):
+ file = file[pkgIndex(file, c.fn.Name()):]
+ default:
+ const sep = "/"
+ if i := strings.LastIndex(file, sep); i != -1 {
+ file = file[i+len(sep):]
+ }
+ }
+ io.WriteString(s, file)
+ if verb == 'v' {
+ buf := [7]byte{':'}
+ s.Write(strconv.AppendInt(buf[:1], int64(line), 10))
+ }
+
+ case 'd':
+ _, line := c.fn.FileLine(c.pc)
+ buf := [6]byte{}
+ s.Write(strconv.AppendInt(buf[:0], int64(line), 10))
+
+ case 'n':
+ name := c.fn.Name()
+ if !s.Flag('+') {
+ const pathSep = "/"
+ if i := strings.LastIndex(name, pathSep); i != -1 {
+ name = name[i+len(pathSep):]
+ }
+ const pkgSep = "."
+ if i := strings.Index(name, pkgSep); i != -1 {
+ name = name[i+len(pkgSep):]
+ }
+ }
+ io.WriteString(s, name)
+ }
+}
+
+// PC returns the program counter for this call frame; multiple frames may
+// have the same PC value.
+func (c Call) PC() uintptr {
+ return c.pc
+}
+
+// name returns the import path qualified name of the function containing the
+// call.
+func (c Call) name() string {
+ if c.fn == nil {
+ return "???"
+ }
+ return c.fn.Name()
+}
+
+func (c Call) file() string {
+ if c.fn == nil {
+ return "???"
+ }
+ file, _ := c.fn.FileLine(c.pc)
+ return file
+}
+
+func (c Call) line() int {
+ if c.fn == nil {
+ return 0
+ }
+ _, line := c.fn.FileLine(c.pc)
+ return line
+}
+
+// CallStack records a sequence of function invocations from a goroutine
+// stack.
+type CallStack []Call
+
+// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", cs).
+func (cs CallStack) String() string {
+ return fmt.Sprint(cs)
+}
+
+var (
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ spaceBytes = []byte(" ")
+)
+
+// MarshalText implements encoding.TextMarshaler. It formats the CallStack the
+// same as fmt.Sprintf("%v", cs).
+func (cs CallStack) MarshalText() ([]byte, error) {
+ buf := bytes.Buffer{}
+ buf.Write(openBracketBytes)
+ for i, pc := range cs {
+ if pc.fn == nil {
+ return nil, ErrNoFunc
+ }
+ if i > 0 {
+ buf.Write(spaceBytes)
+ }
+ fmt.Fprint(&buf, pc)
+ }
+ buf.Write(closeBracketBytes)
+ return buf.Bytes(), nil
+}
+
+// Format implements fmt.Formatter by printing the CallStack as square brackets
+// ([, ]) surrounding a space separated list of Calls each formatted with the
+// supplied verb and options.
+func (cs CallStack) Format(s fmt.State, verb rune) {
+ s.Write(openBracketBytes)
+ for i, pc := range cs {
+ if i > 0 {
+ s.Write(spaceBytes)
+ }
+ pc.Format(s, verb)
+ }
+ s.Write(closeBracketBytes)
+}
+
+// findSigpanic intentionally executes faulting code to generate a stack trace
+// containing an entry for runtime.sigpanic.
+func findSigpanic() *runtime.Func {
+ var fn *runtime.Func
+ var p *int
+ func() int {
+ defer func() {
+ if p := recover(); p != nil {
+ var pcs [512]uintptr
+ n := runtime.Callers(2, pcs[:])
+ for _, pc := range pcs[:n] {
+ f := runtime.FuncForPC(pc)
+ if f.Name() == "runtime.sigpanic" {
+ fn = f
+ break
+ }
+ }
+ }
+ }()
+ // intentional nil pointer dereference to trigger sigpanic
+ return *p
+ }()
+ return fn
+}
+
+var sigpanic = findSigpanic()
+
+// Trace returns a CallStack for the current goroutine with element 0
+// identifying the calling function.
+func Trace() CallStack {
+ var pcs [512]uintptr
+ n := runtime.Callers(2, pcs[:])
+ cs := make([]Call, n)
+
+ for i, pc := range pcs[:n] {
+ pcFix := pc
+ if i > 0 && cs[i-1].fn != sigpanic {
+ pcFix--
+ }
+ cs[i] = Call{
+ fn: runtime.FuncForPC(pcFix),
+ pc: pcFix,
+ }
+ }
+
+ return cs
+}
+
+// TrimBelow returns a slice of the CallStack with all entries below c
+// removed.
+func (cs CallStack) TrimBelow(c Call) CallStack {
+ for len(cs) > 0 && cs[0].pc != c.pc {
+ cs = cs[1:]
+ }
+ return cs
+}
+
+// TrimAbove returns a slice of the CallStack with all entries above c
+// removed.
+func (cs CallStack) TrimAbove(c Call) CallStack {
+ for len(cs) > 0 && cs[len(cs)-1].pc != c.pc {
+ cs = cs[:len(cs)-1]
+ }
+ return cs
+}
+
+// pkgIndex returns the index that results in file[index:] being the path of
+// file relative to the compile time GOPATH, and file[:index] being the
+// $GOPATH/src/ portion of file. funcName must be the name of a function in
+// file as returned by runtime.Func.Name.
+func pkgIndex(file, funcName string) int {
+ // As of Go 1.6.2 there is no direct way to know the compile time GOPATH
+ // at runtime, but we can infer the number of path segments in the GOPATH.
+ // We note that runtime.Func.Name() returns the function name qualified by
+ // the import path, which does not include the GOPATH. Thus we can trim
+ // segments from the beginning of the file path until the number of path
+ // separators remaining is one more than the number of path separators in
+ // the function name. For example, given:
+ //
+ // GOPATH /home/user
+ // file /home/user/src/pkg/sub/file.go
+ // fn.Name() pkg/sub.Type.Method
+ //
+ // We want to produce:
+ //
+ // file[:idx] == /home/user/src/
+ // file[idx:] == pkg/sub/file.go
+ //
+ // From this we can easily see that fn.Name() has one less path separator
+ // than our desired result for file[idx:]. We count separators from the
+ // end of the file path until it finds two more than in the function name
+ // and then move one character forward to preserve the initial path
+ // segment without a leading separator.
+ const sep = "/"
+ i := len(file)
+ for n := strings.Count(funcName, sep) + 2; n > 0; n-- {
+ i = strings.LastIndex(file[:i], sep)
+ if i == -1 {
+ i = -len(sep)
+ break
+ }
+ }
+ // get back to 0 or trim the leading separator
+ return i + len(sep)
+}
+
+var runtimePath string
+
+func init() {
+ var pcs [1]uintptr
+ runtime.Callers(0, pcs[:])
+ fn := runtime.FuncForPC(pcs[0])
+ file, _ := fn.FileLine(pcs[0])
+
+ idx := pkgIndex(file, fn.Name())
+
+ runtimePath = file[:idx]
+ if runtime.GOOS == "windows" {
+ runtimePath = strings.ToLower(runtimePath)
+ }
+}
+
+func inGoroot(c Call) bool {
+ file := c.file()
+ if len(file) == 0 || file[0] == '?' {
+ return true
+ }
+ if runtime.GOOS == "windows" {
+ file = strings.ToLower(file)
+ }
+ return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go")
+}
+
+// TrimRuntime returns a slice of the CallStack with the topmost entries from
+// the go runtime removed. It considers any calls originating from unknown
+// files, files under GOROOT, or _testmain.go as part of the runtime.
+func (cs CallStack) TrimRuntime() CallStack {
+ for len(cs) > 0 && inGoroot(cs[len(cs)-1]) {
+ cs = cs[:len(cs)-1]
+ }
+ return cs
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/.travis.yml b/Godeps/_workspace/src/github.com/inconshreveable/log15/.travis.yml
new file mode 100644
index 00000000000..ff5d75e72b9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+
+go:
+ - 1.1
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+ - 1.6
+ - tip
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/CONTRIBUTORS b/Godeps/_workspace/src/github.com/inconshreveable/log15/CONTRIBUTORS
new file mode 100644
index 00000000000..a0866713be0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/CONTRIBUTORS
@@ -0,0 +1,11 @@
+Contributors to log15:
+
+- Aaron L
+- Alan Shreve
+- Chris Hines
+- Ciaran Downey
+- Dmitry Chestnykh
+- Evan Shaw
+- Péter Szilágyi
+- Trevor Gattis
+- Vincent Vanackere
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/LICENSE b/Godeps/_workspace/src/github.com/inconshreveable/log15/LICENSE
new file mode 100644
index 00000000000..5f0d1fb6a7b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2014 Alan Shreve
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/README.md b/Godeps/_workspace/src/github.com/inconshreveable/log15/README.md
new file mode 100644
index 00000000000..8ccd5a38d05
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/README.md
@@ -0,0 +1,70 @@
+
+
+# log15 [](https://godoc.org/github.com/inconshreveable/log15) [](https://travis-ci.org/inconshreveable/log15)
+
+Package log15 provides an opinionated, simple toolkit for best-practice logging in Go (golang) that is both human and machine readable. It is modeled after the Go standard library's [`io`](http://golang.org/pkg/io/) and [`net/http`](http://golang.org/pkg/net/http/) packages and is an alternative to the standard library's [`log`](http://golang.org/pkg/log/) package.
+
+## Features
+- A simple, easy-to-understand API
+- Promotes structured logging by encouraging use of key/value pairs
+- Child loggers which inherit and add their own private context
+- Lazy evaluation of expensive operations
+- Simple Handler interface allowing for construction of flexible, custom logging configurations with a tiny API.
+- Color terminal support
+- Built-in support for logging to files, streams, syslog, and the network
+- Support for forking records to multiple handlers, buffering records for output, failing over from failed handler writes, + more
+
+## Versioning
+The API of the master branch of log15 should always be considered unstable. If you want to rely on a stable API,
+you must vendor the library.
+
+## Importing
+
+```go
+import log "github.com/inconshreveable/log15"
+```
+
+## Examples
+
+```go
+// all loggers can have key/value context
+srvlog := log.New("module", "app/server")
+
+// all log messages can have key/value context
+srvlog.Warn("abnormal conn rate", "rate", curRate, "low", lowRate, "high", highRate)
+
+// child loggers with inherited context
+connlog := srvlog.New("raddr", c.RemoteAddr())
+connlog.Info("connection open")
+
+// lazy evaluation
+connlog.Debug("ping remote", "latency", log.Lazy{pingRemote})
+
+// flexible configuration
+srvlog.SetHandler(log.MultiHandler(
+ log.StreamHandler(os.Stderr, log.LogfmtFormat()),
+ log.LvlFilterHandler(
+ log.LvlError,
+ log.Must.FileHandler("errors.json", log.JsonFormat())))
+```
+
+## Breaking API Changes
+The following commits broke API stability. This reference is intended to help you understand the consequences of updating to a newer version
+of log15.
+
+- 57a084d014d4150152b19e4e531399a7145d1540 - Added a `Get()` method to the `Logger` interface to retrieve the current handler
+- 93404652ee366648fa622b64d1e2b67d75a3094a - `Record` field `Call` changed to `stack.Call` with switch to `github.com/go-stack/stack`
+- a5e7613673c73281f58e15a87d2cf0cf111e8152 - Restored `syslog.Priority` argument to the `SyslogXxx` handler constructors
+
+## FAQ
+
+### The varargs style is brittle and error prone! Can I have type safety please?
+Yes. Use `log.Ctx`:
+
+```go
+srvlog := log.New(log.Ctx{"module": "app/server"})
+srvlog.Warn("abnormal conn rate", log.Ctx{"rate": curRate, "low": lowRate, "high": highRate})
+```
+
+## License
+Apache
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/doc.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/doc.go
new file mode 100644
index 00000000000..a5cc87419c4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/doc.go
@@ -0,0 +1,333 @@
+/*
+Package log15 provides an opinionated, simple toolkit for best-practice logging that is
+both human and machine readable. It is modeled after the standard library's io and net/http
+packages.
+
+This package enforces you to only log key/value pairs. Keys must be strings. Values may be
+any type that you like. The default output format is logfmt, but you may also choose to use
+JSON instead if that suits you. Here's how you log:
+
+ log.Info("page accessed", "path", r.URL.Path, "user_id", user.id)
+
+This will output a line that looks like:
+
+ lvl=info t=2014-05-02T16:07:23-0700 msg="page accessed" path=/org/71/profile user_id=9
+
+Getting Started
+
+To get started, you'll want to import the library:
+
+ import log "github.com/inconshreveable/log15"
+
+
+Now you're ready to start logging:
+
+ func main() {
+ log.Info("Program starting", "args", os.Args())
+ }
+
+
+Convention
+
+Because recording a human-meaningful message is common and good practice, the first argument to every
+logging method is the value to the *implicit* key 'msg'.
+
+Additionally, the level you choose for a message will be automatically added with the key 'lvl', and so
+will the current timestamp with key 't'.
+
+You may supply any additional context as a set of key/value pairs to the logging function. log15 allows
+you to favor terseness, ordering, and speed over safety. This is a reasonable tradeoff for
+logging functions. You don't need to explicitly state keys/values, log15 understands that they alternate
+in the variadic argument list:
+
+ log.Warn("size out of bounds", "low", lowBound, "high", highBound, "val", val)
+
+If you really do favor your type-safety, you may choose to pass a log.Ctx instead:
+
+ log.Warn("size out of bounds", log.Ctx{"low": lowBound, "high": highBound, "val": val})
+
+
+Context loggers
+
+Frequently, you want to add context to a logger so that you can track actions associated with it. An http
+request is a good example. You can easily create new loggers that have context that is automatically included
+with each log line:
+
+ requestlogger := log.New("path", r.URL.Path)
+
+ // later
+ requestlogger.Debug("db txn commit", "duration", txnTimer.Finish())
+
+This will output a log line that includes the path context that is attached to the logger:
+
+ lvl=dbug t=2014-05-02T16:07:23-0700 path=/repo/12/add_hook msg="db txn commit" duration=0.12
+
+
+Handlers
+
+The Handler interface defines where log lines are printed to and how they are formatted. Handler is a
+single interface that is inspired by net/http's handler interface:
+
+ type Handler interface {
+ Log(r *Record) error
+ }
+
+
+Handlers can filter records, format them, or dispatch to multiple other Handlers.
+This package implements a number of Handlers for common logging patterns that are
+easily composed to create flexible, custom logging structures.
+
+Here's an example handler that prints logfmt output to Stdout:
+
+ handler := log.StreamHandler(os.Stdout, log.LogfmtFormat())
+
+Here's an example handler that defers to two other handlers. One handler only prints records
+from the rpc package in logfmt to standard out. The other prints records at Error level
+or above in JSON formatted output to the file /var/log/service.json
+
+ handler := log.MultiHandler(
+ log.LvlFilterHandler(log.LvlError, log.Must.FileHandler("/var/log/service.json", log.JsonFormat())),
+ log.MatchFilterHandler("pkg", "app/rpc", log.StdoutHandler())
+ )
+
+Logging File Names and Line Numbers
+
+This package implements three Handlers that add debugging information to the
+context, CallerFileHandler, CallerFuncHandler and CallerStackHandler. Here's
+an example that adds the source file and line number of each logging call to
+the context.
+
+ h := log.CallerFileHandler(log.StdoutHandler())
+ log.Root().SetHandler(h)
+ ...
+ log.Error("open file", "err", err)
+
+This will output a line that looks like:
+
+ lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" caller=data.go:42
+
+Here's an example that logs the call stack rather than just the call site.
+
+ h := log.CallerStackHandler("%+v", log.StdoutHandler())
+ log.Root().SetHandler(h)
+ ...
+ log.Error("open file", "err", err)
+
+This will output a line that looks like:
+
+ lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" stack="[pkg/data.go:42 pkg/cmd/main.go]"
+
+The "%+v" format instructs the handler to include the path of the source file
+relative to the compile time GOPATH. The github.com/go-stack/stack package
+documents the full list of formatting verbs and modifiers available.
+
+Custom Handlers
+
+The Handler interface is so simple that it's also trivial to write your own. Let's create an
+example handler which tries to write to one handler, but if that fails it falls back to
+writing to another handler and includes the error that it encountered when trying to write
+to the primary. This might be useful when trying to log over a network socket, but if that
+fails you want to log those records to a file on disk.
+
+ type BackupHandler struct {
+ Primary Handler
+ Secondary Handler
+ }
+
+ func (h *BackupHandler) Log (r *Record) error {
+ err := h.Primary.Log(r)
+ if err != nil {
+ r.Ctx = append(r.Ctx, "primary_err", err)
+ return h.Secondary.Log(r)
+ }
+ return nil
+ }
+
+This pattern is so useful that a generic version that handles an arbitrary number of Handlers
+is included as part of this library called FailoverHandler.
+
+Logging Expensive Operations
+
+Sometimes, you want to log values that are extremely expensive to compute, but you don't want to pay
+the price of computing them if you haven't turned up your logging level to a high level of detail.
+
+This package provides a simple type to annotate a logging operation that you want to be evaluated
+lazily, just when it is about to be logged, so that it would not be evaluated if an upstream Handler
+filters it out. Just wrap any function which takes no arguments with the log.Lazy type. For example:
+
+ func factorRSAKey() (factors []int) {
+ // return the factors of a very large number
+ }
+
+ log.Debug("factors", log.Lazy{factorRSAKey})
+
+If this message is not logged for any reason (like logging at the Error level), then
+factorRSAKey is never evaluated.
+
+Dynamic context values
+
+The same log.Lazy mechanism can be used to attach context to a logger which you want to be
+evaluated when the message is logged, but not when the logger is created. For example, let's imagine
+a game where you have Player objects:
+
+ type Player struct {
+ name string
+ alive bool
+ log.Logger
+ }
+
+You always want to log a player's name and whether they're alive or dead, so when you create the player
+object, you might do:
+
+ p := &Player{name: name, alive: true}
+ p.Logger = log.New("name", p.name, "alive", p.alive)
+
+Only now, even after a player has died, the logger will still report they are alive because the logging
+context is evaluated when the logger was created. By using the Lazy wrapper, we can defer the evaluation
+of whether the player is alive or not to each log message, so that the log records will reflect the player's
+current state no matter when the log message is written:
+
+ p := &Player{name: name, alive: true}
+ isAlive := func() bool { return p.alive }
+ player.Logger = log.New("name", p.name, "alive", log.Lazy{isAlive})
+
+Terminal Format
+
+If log15 detects that stdout is a terminal, it will configure the default
+handler for it (which is log.StdoutHandler) to use TerminalFormat. This format
+logs records nicely for your terminal, including color-coded output based
+on log level.
+
+Error Handling
+
+Because log15 allows you to step around the type system, there are a few ways you can specify
+invalid arguments to the logging functions. You could, for example, wrap something that is not
+a zero-argument function with log.Lazy or pass a context key that is not a string. Since logging libraries
+are typically the mechanism by which errors are reported, it would be onerous for the logging functions
+to return errors. Instead, log15 handles errors by making these guarantees to you:
+
+- Any log record containing an error will still be printed with the error explained to you as part of the log record.
+
+- Any log record containing an error will include the context key LOG15_ERROR, enabling you to easily
+(and if you like, automatically) detect if any of your logging calls are passing bad values.
+
+Understanding this, you might wonder why the Handler interface can return an error value in its Log method. Handlers
+are encouraged to return errors only if they fail to write their log records out to an external source like if the
+syslog daemon is not responding. This allows the construction of useful handlers which cope with those failures
+like the FailoverHandler.
+
+Library Use
+
+log15 is intended to be useful for library authors as a way to provide configurable logging to
+users of their library. Best practice for use in a library is to always disable all output for your logger
+by default and to provide a public Logger instance that consumers of your library can configure. Like so:
+
+ package yourlib
+
+ import "github.com/inconshreveable/log15"
+
+ var Log = log.New()
+
+ func init() {
+ Log.SetHandler(log.DiscardHandler())
+ }
+
+Users of your library may then enable it if they like:
+
+ import "github.com/inconshreveable/log15"
+ import "example.com/yourlib"
+
+ func main() {
+ handler := // custom handler setup
+ yourlib.Log.SetHandler(handler)
+ }
+
+Best practices attaching logger context
+
+The ability to attach context to a logger is a powerful one. Where should you do it and why?
+I favor embedding a Logger directly into any persistent object in my application and adding
+unique, tracing context keys to it. For instance, imagine I am writing a web browser:
+
+ type Tab struct {
+ url string
+ render *RenderingContext
+ // ...
+
+ Logger
+ }
+
+ func NewTab(url string) *Tab {
+ return &Tab {
+ // ...
+ url: url,
+
+ Logger: log.New("url", url),
+ }
+ }
+
+When a new tab is created, I assign a logger to it with the url of
+the tab as context so it can easily be traced through the logs.
+Now, whenever we perform any operation with the tab, we'll log with its
+embedded logger and it will include the tab title automatically:
+
+ tab.Debug("moved position", "idx", tab.idx)
+
+There's only one problem. What if the tab url changes? We could
+use log.Lazy to make sure the current url is always written, but that
+would mean that we couldn't trace a tab's full lifetime through our
+logs after the user navigates to a new URL.
+
+Instead, think about what values to attach to your loggers the
+same way you think about what to use as a key in a SQL database schema.
+If it's possible to use a natural key that is unique for the lifetime of the
+object, do so. But otherwise, log15's ext package has a handy RandId
+function to let you generate what you might call "surrogate keys"
+They're just random hex identifiers to use for tracing. Back to our
+Tab example, we would prefer to set up our Logger like so:
+
+ import logext "github.com/inconshreveable/log15/ext"
+
+ t := &Tab {
+ // ...
+ url: url,
+ }
+
+ t.Logger = log.New("id", logext.RandId(8), "url", log.Lazy{t.getUrl})
+ return t
+
+Now we'll have a unique traceable identifier even across loading new urls, but
+we'll still be able to see the tab's current url in the log messages.
+
+Must
+
+For all Handler functions which can return an error, there is a version of that
+function which will return no error but panics on failure. They are all available
+on the Must object. For example:
+
+ log.Must.FileHandler("/path", log.JsonFormat)
+ log.Must.NetHandler("tcp", ":1234", log.JsonFormat)
+
+Inspiration and Credit
+
+All of the following excellent projects inspired the design of this library:
+
+code.google.com/p/log4go
+
+github.com/op/go-logging
+
+github.com/technoweenie/grohl
+
+github.com/Sirupsen/logrus
+
+github.com/kr/logfmt
+
+github.com/spacemonkeygo/spacelog
+
+golang's stdlib, notably io and net/http
+
+The Name
+
+https://xkcd.com/927/
+
+*/
+package log15
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/format.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/format.go
new file mode 100644
index 00000000000..3468f3048f3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/format.go
@@ -0,0 +1,257 @@
+package log15
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ timeFormat = "2006-01-02T15:04:05-0700"
+ termTimeFormat = "01-02|15:04:05"
+ floatFormat = 'f'
+ termMsgJust = 40
+)
+
+type Format interface {
+ Format(r *Record) []byte
+}
+
+// FormatFunc returns a new Format object which uses
+// the given function to perform record formatting.
+func FormatFunc(f func(*Record) []byte) Format {
+ return formatFunc(f)
+}
+
+type formatFunc func(*Record) []byte
+
+func (f formatFunc) Format(r *Record) []byte {
+ return f(r)
+}
+
+// TerminalFormat formats log records optimized for human readability on
+// a terminal with color-coded level output and terser human friendly timestamp.
+// This format should only be used for interactive programs or while developing.
+//
+// [TIME] [LEVEL] MESSAGE key=value key=value ...
+//
+// Example:
+//
+// [May 16 20:58:45] [DBUG] remove route ns=haproxy addr=127.0.0.1:50002
+//
+func TerminalFormat() Format {
+ return FormatFunc(func(r *Record) []byte {
+ var color = 0
+ switch r.Lvl {
+ case LvlCrit:
+ color = 35
+ case LvlError:
+ color = 31
+ case LvlWarn:
+ color = 33
+ case LvlInfo:
+ color = 32
+ case LvlDebug:
+ color = 36
+ }
+
+ b := &bytes.Buffer{}
+ lvl := strings.ToUpper(r.Lvl.String())
+ if color > 0 {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %s ", color, lvl, r.Time.Format(termTimeFormat), r.Msg)
+ } else {
+ fmt.Fprintf(b, "[%s] [%s] %s ", lvl, r.Time.Format(termTimeFormat), r.Msg)
+ }
+
+ // try to justify the log output for short messages
+ if len(r.Ctx) > 0 && len(r.Msg) < termMsgJust {
+ b.Write(bytes.Repeat([]byte{' '}, termMsgJust-len(r.Msg)))
+ }
+
+ // print the keys logfmt style
+ logfmt(b, r.Ctx, color)
+ return b.Bytes()
+ })
+}
+
+// LogfmtFormat prints records in logfmt format, an easy machine-parseable but human-readable
+// format for key/value pairs.
+//
+// For more details see: http://godoc.org/github.com/kr/logfmt
+//
+func LogfmtFormat() Format {
+ return FormatFunc(func(r *Record) []byte {
+ common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg}
+ buf := &bytes.Buffer{}
+ logfmt(buf, append(common, r.Ctx...), 0)
+ return buf.Bytes()
+ })
+}
+
+func logfmt(buf *bytes.Buffer, ctx []interface{}, color int) {
+ for i := 0; i < len(ctx); i += 2 {
+ if i != 0 {
+ buf.WriteByte(' ')
+ }
+
+ k, ok := ctx[i].(string)
+ v := formatLogfmtValue(ctx[i+1])
+ if !ok {
+ k, v = errorKey, formatLogfmtValue(k)
+ }
+
+ // XXX: we should probably check that all of your key bytes aren't invalid
+ if color > 0 {
+ fmt.Fprintf(buf, "\x1b[%dm%s\x1b[0m=%s", color, k, v)
+ } else {
+ fmt.Fprintf(buf, "%s=%s", k, v)
+ }
+ }
+
+ buf.WriteByte('\n')
+}
+
+// JsonFormat formats log records as JSON objects separated by newlines.
+// It is the equivalent of JsonFormatEx(false, true).
+func JsonFormat() Format {
+ return JsonFormatEx(false, true)
+}
+
+// JsonFormatEx formats log records as JSON objects. If pretty is true,
+// records will be pretty-printed. If lineSeparated is true, records
+// will be logged with a new line between each record.
+func JsonFormatEx(pretty, lineSeparated bool) Format {
+ jsonMarshal := json.Marshal
+ if pretty {
+ jsonMarshal = func(v interface{}) ([]byte, error) {
+ return json.MarshalIndent(v, "", " ")
+ }
+ }
+
+ return FormatFunc(func(r *Record) []byte {
+ props := make(map[string]interface{})
+
+ props[r.KeyNames.Time] = r.Time
+ props[r.KeyNames.Lvl] = r.Lvl.String()
+ props[r.KeyNames.Msg] = r.Msg
+
+ for i := 0; i < len(r.Ctx); i += 2 {
+ k, ok := r.Ctx[i].(string)
+ if !ok {
+ props[errorKey] = fmt.Sprintf("%+v is not a string key", r.Ctx[i])
+ }
+ props[k] = formatJsonValue(r.Ctx[i+1])
+ }
+
+ b, err := jsonMarshal(props)
+ if err != nil {
+ b, _ = jsonMarshal(map[string]string{
+ errorKey: err.Error(),
+ })
+ return b
+ }
+
+ if lineSeparated {
+ b = append(b, '\n')
+ }
+
+ return b
+ })
+}
+
+func formatShared(value interface{}) (result interface{}) {
+ defer func() {
+ if err := recover(); err != nil {
+ if v := reflect.ValueOf(value); v.Kind() == reflect.Ptr && v.IsNil() {
+ result = "nil"
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ switch v := value.(type) {
+ case time.Time:
+ return v.Format(timeFormat)
+
+ case error:
+ return v.Error()
+
+ case fmt.Stringer:
+ return v.String()
+
+ default:
+ return v
+ }
+}
+
+func formatJsonValue(value interface{}) interface{} {
+ value = formatShared(value)
+ switch value.(type) {
+ case int, int8, int16, int32, int64, float32, float64, uint, uint8, uint16, uint32, uint64, string:
+ return value
+ default:
+ return fmt.Sprintf("%+v", value)
+ }
+}
+
+// formatValue formats a value for serialization
+func formatLogfmtValue(value interface{}) string {
+ if value == nil {
+ return "nil"
+ }
+
+ value = formatShared(value)
+ switch v := value.(type) {
+ case bool:
+ return strconv.FormatBool(v)
+ case float32:
+ return strconv.FormatFloat(float64(v), floatFormat, 3, 64)
+ case float64:
+ return strconv.FormatFloat(v, floatFormat, 3, 64)
+ case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
+ return fmt.Sprintf("%d", value)
+ case string:
+ return escapeString(v)
+ default:
+ return escapeString(fmt.Sprintf("%+v", value))
+ }
+}
+
+func escapeString(s string) string {
+ needQuotes := false
+ e := bytes.Buffer{}
+ e.WriteByte('"')
+ for _, r := range s {
+ if r <= ' ' || r == '=' || r == '"' {
+ needQuotes = true
+ }
+
+ switch r {
+ case '\\', '"':
+ e.WriteByte('\\')
+ e.WriteByte(byte(r))
+ case '\n':
+ e.WriteByte('\\')
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('\\')
+ e.WriteByte('r')
+ case '\t':
+ e.WriteByte('\\')
+ e.WriteByte('t')
+ default:
+ e.WriteRune(r)
+ }
+ }
+ e.WriteByte('"')
+ start, stop := 0, e.Len()
+ if !needQuotes {
+ start, stop = 1, stop-1
+ }
+ return string(e.Bytes()[start:stop])
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/handler.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/handler.go
new file mode 100644
index 00000000000..43205608cc1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/handler.go
@@ -0,0 +1,356 @@
+package log15
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "reflect"
+ "sync"
+
+ "github.com/go-stack/stack"
+)
+
+// A Logger prints its log records by writing to a Handler.
+// The Handler interface defines where and how log records are written.
+// Handlers are composable, providing you great flexibility in combining
+// them to achieve the logging structure that suits your applications.
+type Handler interface {
+ Log(r *Record) error
+}
+
+// FuncHandler returns a Handler that logs records with the given
+// function.
+func FuncHandler(fn func(r *Record) error) Handler {
+ return funcHandler(fn)
+}
+
+type funcHandler func(r *Record) error
+
+func (h funcHandler) Log(r *Record) error {
+ return h(r)
+}
+
+// StreamHandler writes log records to an io.Writer
+// with the given format. StreamHandler can be used
+// to easily begin writing log records to other
+// outputs.
+//
+// StreamHandler wraps itself with LazyHandler and SyncHandler
+// to evaluate Lazy objects and perform safe concurrent writes.
+func StreamHandler(wr io.Writer, fmtr Format) Handler {
+ h := FuncHandler(func(r *Record) error {
+ _, err := wr.Write(fmtr.Format(r))
+ return err
+ })
+ return LazyHandler(SyncHandler(h))
+}
+
+// SyncHandler can be wrapped around a handler to guarantee that
+// only a single Log operation can proceed at a time. It's necessary
+// for thread-safe concurrent writes.
+func SyncHandler(h Handler) Handler {
+ var mu sync.Mutex
+ return FuncHandler(func(r *Record) error {
+ defer mu.Unlock()
+ mu.Lock()
+ return h.Log(r)
+ })
+}
+
+// FileHandler returns a handler which writes log records to the give file
+// using the given format. If the path
+// already exists, FileHandler will append to the given file. If it does not,
+// FileHandler will create the file with mode 0644.
+func FileHandler(path string, fmtr Format) (Handler, error) {
+ f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
+ if err != nil {
+ return nil, err
+ }
+ return closingHandler{f, StreamHandler(f, fmtr)}, nil
+}
+
+// NetHandler opens a socket to the given address and writes records
+// over the connection.
+func NetHandler(network, addr string, fmtr Format) (Handler, error) {
+ conn, err := net.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+
+ return closingHandler{conn, StreamHandler(conn, fmtr)}, nil
+}
+
+// XXX: closingHandler is essentially unused at the moment
+// it's meant for a future time when the Handler interface supports
+// a possible Close() operation
+type closingHandler struct {
+ io.WriteCloser
+ Handler
+}
+
+func (h *closingHandler) Close() error {
+ return h.WriteCloser.Close()
+}
+
+// CallerFileHandler returns a Handler that adds the line number and file of
+// the calling function to the context with key "caller".
+func CallerFileHandler(h Handler) Handler {
+ return FuncHandler(func(r *Record) error {
+ r.Ctx = append(r.Ctx, "caller", fmt.Sprint(r.Call))
+ return h.Log(r)
+ })
+}
+
+// CallerFuncHandler returns a Handler that adds the calling function name to
+// the context with key "fn".
+func CallerFuncHandler(h Handler) Handler {
+ return FuncHandler(func(r *Record) error {
+ r.Ctx = append(r.Ctx, "fn", fmt.Sprintf("%+n", r.Call))
+ return h.Log(r)
+ })
+}
+
+// CallerStackHandler returns a Handler that adds a stack trace to the context
+// with key "stack". The stack trace is formatted as a space separated list of
+// call sites inside matching []'s. The most recent call site is listed first.
+// Each call site is formatted according to format. See the documentation of
+// package github.com/go-stack/stack for the list of supported formats.
+func CallerStackHandler(format string, h Handler) Handler {
+ return FuncHandler(func(r *Record) error {
+ s := stack.Trace().TrimBelow(r.Call).TrimRuntime()
+ if len(s) > 0 {
+ r.Ctx = append(r.Ctx, "stack", fmt.Sprintf(format, s))
+ }
+ return h.Log(r)
+ })
+}
+
+// FilterHandler returns a Handler that only writes records to the
+// wrapped Handler if the given function evaluates true. For example,
+// to only log records where the 'err' key is not nil:
+//
+// logger.SetHandler(FilterHandler(func(r *Record) bool {
+// for i := 0; i < len(r.Ctx); i += 2 {
+// if r.Ctx[i] == "err" {
+// return r.Ctx[i+1] != nil
+// }
+// }
+// return false
+// }, h))
+//
+func FilterHandler(fn func(r *Record) bool, h Handler) Handler {
+ return FuncHandler(func(r *Record) error {
+ if fn(r) {
+ return h.Log(r)
+ }
+ return nil
+ })
+}
+
+// MatchFilterHandler returns a Handler that only writes records
+// to the wrapped Handler if the given key in the logged
+// context matches the value. For example, to only log records
+// from your ui package:
+//
+// log.MatchFilterHandler("pkg", "app/ui", log.StdoutHandler)
+//
+func MatchFilterHandler(key string, value interface{}, h Handler) Handler {
+ return FilterHandler(func(r *Record) (pass bool) {
+ switch key {
+ case r.KeyNames.Lvl:
+ return r.Lvl == value
+ case r.KeyNames.Time:
+ return r.Time == value
+ case r.KeyNames.Msg:
+ return r.Msg == value
+ }
+
+ for i := 0; i < len(r.Ctx); i += 2 {
+ if r.Ctx[i] == key {
+ return r.Ctx[i+1] == value
+ }
+ }
+ return false
+ }, h)
+}
+
+// LvlFilterHandler returns a Handler that only writes
+// records which are less than the given verbosity
+// level to the wrapped Handler. For example, to only
+// log Error/Crit records:
+//
+// log.LvlFilterHandler(log.Error, log.StdoutHandler)
+//
+func LvlFilterHandler(maxLvl Lvl, h Handler) Handler {
+ return FilterHandler(func(r *Record) (pass bool) {
+ return r.Lvl <= maxLvl
+ }, h)
+}
+
+// A MultiHandler dispatches any write to each of its handlers.
+// This is useful for writing different types of log information
+// to different locations. For example, to log to a file and
+// standard error:
+//
+// log.MultiHandler(
+// log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
+// log.StderrHandler)
+//
+func MultiHandler(hs ...Handler) Handler {
+ return FuncHandler(func(r *Record) error {
+ for _, h := range hs {
+ // what to do about failures?
+ h.Log(r)
+ }
+ return nil
+ })
+}
+
+// A FailoverHandler writes all log records to the first handler
+// specified, but will failover and write to the second handler if
+// the first handler has failed, and so on for all handlers specified.
+// For example you might want to log to a network socket, but failover
+// to writing to a file if the network fails, and then to
+// standard out if the file write fails:
+//
+// log.FailoverHandler(
+// log.Must.NetHandler("tcp", ":9090", log.JsonFormat()),
+// log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
+// log.StdoutHandler)
+//
+// All writes that do not go to the first handler will add context with keys of
+// the form "failover_err_{idx}" which explain the error encountered while
+// trying to write to the handlers before them in the list.
+func FailoverHandler(hs ...Handler) Handler {
+ return FuncHandler(func(r *Record) error {
+ var err error
+ for i, h := range hs {
+ err = h.Log(r)
+ if err == nil {
+ return nil
+ } else {
+ r.Ctx = append(r.Ctx, fmt.Sprintf("failover_err_%d", i), err)
+ }
+ }
+
+ return err
+ })
+}
+
+// ChannelHandler writes all records to the given channel.
+// It blocks if the channel is full. Useful for async processing
+// of log messages, it's used by BufferedHandler.
+func ChannelHandler(recs chan<- *Record) Handler {
+ return FuncHandler(func(r *Record) error {
+ recs <- r
+ return nil
+ })
+}
+
+// BufferedHandler writes all records to a buffered
+// channel of the given size which flushes into the wrapped
+// handler whenever it is available for writing. Since these
+// writes happen asynchronously, all writes to a BufferedHandler
+// never return an error and any errors from the wrapped handler are ignored.
+func BufferedHandler(bufSize int, h Handler) Handler {
+ recs := make(chan *Record, bufSize)
+ go func() {
+ for m := range recs {
+ _ = h.Log(m)
+ }
+ }()
+ return ChannelHandler(recs)
+}
+
+// LazyHandler writes all values to the wrapped handler after evaluating
+// any lazy functions in the record's context. It is already wrapped
+// around StreamHandler and SyslogHandler in this library, you'll only need
+// it if you write your own Handler.
+func LazyHandler(h Handler) Handler {
+ return FuncHandler(func(r *Record) error {
+ // go through the values (odd indices) and reassign
+ // the values of any lazy fn to the result of its execution
+ hadErr := false
+ for i := 1; i < len(r.Ctx); i += 2 {
+ lz, ok := r.Ctx[i].(Lazy)
+ if ok {
+ v, err := evaluateLazy(lz)
+ if err != nil {
+ hadErr = true
+ r.Ctx[i] = err
+ } else {
+ if cs, ok := v.(stack.CallStack); ok {
+ v = cs.TrimBelow(r.Call).TrimRuntime()
+ }
+ r.Ctx[i] = v
+ }
+ }
+ }
+
+ if hadErr {
+ r.Ctx = append(r.Ctx, errorKey, "bad lazy")
+ }
+
+ return h.Log(r)
+ })
+}
+
+func evaluateLazy(lz Lazy) (interface{}, error) {
+ t := reflect.TypeOf(lz.Fn)
+
+ if t.Kind() != reflect.Func {
+ return nil, fmt.Errorf("INVALID_LAZY, not func: %+v", lz.Fn)
+ }
+
+ if t.NumIn() > 0 {
+ return nil, fmt.Errorf("INVALID_LAZY, func takes args: %+v", lz.Fn)
+ }
+
+ if t.NumOut() == 0 {
+ return nil, fmt.Errorf("INVALID_LAZY, no func return val: %+v", lz.Fn)
+ }
+
+ value := reflect.ValueOf(lz.Fn)
+ results := value.Call([]reflect.Value{})
+ if len(results) == 1 {
+ return results[0].Interface(), nil
+ } else {
+ values := make([]interface{}, len(results))
+ for i, v := range results {
+ values[i] = v.Interface()
+ }
+ return values, nil
+ }
+}
+
+// DiscardHandler reports success for all writes but does nothing.
+// It is useful for dynamically disabling logging at runtime via
+// a Logger's SetHandler method.
+func DiscardHandler() Handler {
+ return FuncHandler(func(r *Record) error {
+ return nil
+ })
+}
+
+// The Must object provides the following Handler creation functions
+// which instead of returning an error parameter only return a Handler
+// and panic on failure: FileHandler, NetHandler, SyslogHandler, SyslogNetHandler
+var Must muster
+
+func must(h Handler, err error) Handler {
+ if err != nil {
+ panic(err)
+ }
+ return h
+}
+
+type muster struct{}
+
+func (m muster) FileHandler(path string, fmtr Format) Handler {
+ return must(FileHandler(path, fmtr))
+}
+
+func (m muster) NetHandler(network, addr string, fmtr Format) Handler {
+ return must(NetHandler(network, addr, fmtr))
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go13.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go13.go
new file mode 100644
index 00000000000..f6181746e31
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go13.go
@@ -0,0 +1,26 @@
+// +build !go1.4
+
+package log15
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// swapHandler wraps another handler that may be swapped out
+// dynamically at runtime in a thread-safe fashion.
+type swapHandler struct {
+ handler unsafe.Pointer
+}
+
+func (h *swapHandler) Log(r *Record) error {
+ return h.Get().Log(r)
+}
+
+func (h *swapHandler) Get() Handler {
+ return *(*Handler)(atomic.LoadPointer(&h.handler))
+}
+
+func (h *swapHandler) Swap(newHandler Handler) {
+ atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler))
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go14.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go14.go
new file mode 100644
index 00000000000..6041f2302fb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go14.go
@@ -0,0 +1,23 @@
+// +build go1.4
+
+package log15
+
+import "sync/atomic"
+
+// swapHandler wraps another handler that may be swapped out
+// dynamically at runtime in a thread-safe fashion.
+type swapHandler struct {
+ handler atomic.Value
+}
+
+func (h *swapHandler) Log(r *Record) error {
+ return (*h.handler.Load().(*Handler)).Log(r)
+}
+
+func (h *swapHandler) Swap(newHandler Handler) {
+ h.handler.Store(&newHandler)
+}
+
+func (h *swapHandler) Get() Handler {
+ return *h.handler.Load().(*Handler)
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/logger.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/logger.go
new file mode 100644
index 00000000000..3163653159f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/logger.go
@@ -0,0 +1,208 @@
+package log15
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/go-stack/stack"
+)
+
+const timeKey = "t"
+const lvlKey = "lvl"
+const msgKey = "msg"
+const errorKey = "LOG15_ERROR"
+
+type Lvl int
+
+const (
+ LvlCrit Lvl = iota
+ LvlError
+ LvlWarn
+ LvlInfo
+ LvlDebug
+)
+
+// Returns the name of a Lvl
+func (l Lvl) String() string {
+ switch l {
+ case LvlDebug:
+ return "dbug"
+ case LvlInfo:
+ return "info"
+ case LvlWarn:
+ return "warn"
+ case LvlError:
+ return "eror"
+ case LvlCrit:
+ return "crit"
+ default:
+ panic("bad level")
+ }
+}
+
+// Returns the appropriate Lvl from a string name.
+// Useful for parsing command line args and configuration files.
+func LvlFromString(lvlString string) (Lvl, error) {
+ switch lvlString {
+ case "debug", "dbug":
+ return LvlDebug, nil
+ case "info":
+ return LvlInfo, nil
+ case "warn":
+ return LvlWarn, nil
+ case "error", "eror":
+ return LvlError, nil
+ case "crit":
+ return LvlCrit, nil
+ default:
+ return LvlDebug, fmt.Errorf("Unknown level: %v", lvlString)
+ }
+}
+
+// A Record is what a Logger asks its handler to write
+type Record struct {
+ Time time.Time
+ Lvl Lvl
+ Msg string
+ Ctx []interface{}
+ Call stack.Call
+ KeyNames RecordKeyNames
+}
+
+type RecordKeyNames struct {
+ Time string
+ Msg string
+ Lvl string
+}
+
+// A Logger writes key/value pairs to a Handler
+type Logger interface {
+ // New returns a new Logger that has this logger's context plus the given context
+ New(ctx ...interface{}) Logger
+
+ // GetHandler gets the handler associated with the logger.
+ GetHandler() Handler
+
+ // SetHandler updates the logger to write records to the specified handler.
+ SetHandler(h Handler)
+
+ // Log a message at the given level with context key/value pairs
+ Debug(msg string, ctx ...interface{})
+ Info(msg string, ctx ...interface{})
+ Warn(msg string, ctx ...interface{})
+ Error(msg string, ctx ...interface{})
+ Crit(msg string, ctx ...interface{})
+}
+
+type logger struct {
+ ctx []interface{}
+ h *swapHandler
+}
+
+func (l *logger) write(msg string, lvl Lvl, ctx []interface{}) {
+ l.h.Log(&Record{
+ Time: time.Now(),
+ Lvl: lvl,
+ Msg: msg,
+ Ctx: newContext(l.ctx, ctx),
+ Call: stack.Caller(2),
+ KeyNames: RecordKeyNames{
+ Time: timeKey,
+ Msg: msgKey,
+ Lvl: lvlKey,
+ },
+ })
+}
+
+func (l *logger) New(ctx ...interface{}) Logger {
+ child := &logger{newContext(l.ctx, ctx), new(swapHandler)}
+ child.SetHandler(l.h)
+ return child
+}
+
+func newContext(prefix []interface{}, suffix []interface{}) []interface{} {
+ normalizedSuffix := normalize(suffix)
+ newCtx := make([]interface{}, len(prefix)+len(normalizedSuffix))
+ n := copy(newCtx, prefix)
+ copy(newCtx[n:], normalizedSuffix)
+ return newCtx
+}
+
+func (l *logger) Debug(msg string, ctx ...interface{}) {
+ l.write(msg, LvlDebug, ctx)
+}
+
+func (l *logger) Info(msg string, ctx ...interface{}) {
+ l.write(msg, LvlInfo, ctx)
+}
+
+func (l *logger) Warn(msg string, ctx ...interface{}) {
+ l.write(msg, LvlWarn, ctx)
+}
+
+func (l *logger) Error(msg string, ctx ...interface{}) {
+ l.write(msg, LvlError, ctx)
+}
+
+func (l *logger) Crit(msg string, ctx ...interface{}) {
+ l.write(msg, LvlCrit, ctx)
+}
+
+func (l *logger) GetHandler() Handler {
+ return l.h.Get()
+}
+
+func (l *logger) SetHandler(h Handler) {
+ l.h.Swap(h)
+}
+
+func normalize(ctx []interface{}) []interface{} {
+ // if the caller passed a Ctx object, then expand it
+ if len(ctx) == 1 {
+ if ctxMap, ok := ctx[0].(Ctx); ok {
+ ctx = ctxMap.toArray()
+ }
+ }
+
+ // ctx needs to be even because it's a series of key/value pairs
+ // no one wants to check for errors on logging functions,
+ // so instead of erroring on bad input, we'll just make sure
+ // that things are the right length and users can fix bugs
+ // when they see the output looks wrong
+ if len(ctx)%2 != 0 {
+ ctx = append(ctx, nil, errorKey, "Normalized odd number of arguments by adding nil")
+ }
+
+ return ctx
+}
+
+// Lazy allows you to defer calculation of a logged value that is expensive
+// to compute until it is certain that it must be evaluated with the given filters.
+//
+// Lazy may also be used in conjunction with a Logger's New() function
+// to generate a child logger which always reports the current value of changing
+// state.
+//
+// You may wrap any function which takes no arguments to Lazy. It may return any
+// number of values of any type.
+type Lazy struct {
+ Fn interface{}
+}
+
+// Ctx is a map of key/value pairs to pass as context to a log function
+// Use this only if you really need greater safety around the arguments you pass
+// to the logging functions.
+type Ctx map[string]interface{}
+
+func (c Ctx) toArray() []interface{} {
+ arr := make([]interface{}, len(c)*2)
+
+ i := 0
+ for k, v := range c {
+ arr[i] = k
+ arr[i+1] = v
+ i += 2
+ }
+
+ return arr
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/root.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/root.go
new file mode 100644
index 00000000000..c5118d4090f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/root.go
@@ -0,0 +1,67 @@
+package log15
+
+import (
+ "os"
+
+ "github.com/inconshreveable/log15/term"
+ "github.com/mattn/go-colorable"
+)
+
+var (
+ root *logger
+ StdoutHandler = StreamHandler(os.Stdout, LogfmtFormat())
+ StderrHandler = StreamHandler(os.Stderr, LogfmtFormat())
+)
+
+func init() {
+ if term.IsTty(os.Stdout.Fd()) {
+ StdoutHandler = StreamHandler(colorable.NewColorableStdout(), TerminalFormat())
+ }
+
+ if term.IsTty(os.Stderr.Fd()) {
+ StderrHandler = StreamHandler(colorable.NewColorableStderr(), TerminalFormat())
+ }
+
+ root = &logger{[]interface{}{}, new(swapHandler)}
+ root.SetHandler(StdoutHandler)
+}
+
+// New returns a new logger with the given context.
+// New is a convenient alias for Root().New
+func New(ctx ...interface{}) Logger {
+ return root.New(ctx...)
+}
+
+// Root returns the root logger
+func Root() Logger {
+ return root
+}
+
+// The following functions bypass the exported logger methods (logger.Debug,
+// etc.) to keep the call depth the same for all paths to logger.write so
+// runtime.Caller(2) always refers to the call site in client code.
+
+// Debug is a convenient alias for Root().Debug
+func Debug(msg string, ctx ...interface{}) {
+ root.write(msg, LvlDebug, ctx)
+}
+
+// Info is a convenient alias for Root().Info
+func Info(msg string, ctx ...interface{}) {
+ root.write(msg, LvlInfo, ctx)
+}
+
+// Warn is a convenient alias for Root().Warn
+func Warn(msg string, ctx ...interface{}) {
+ root.write(msg, LvlWarn, ctx)
+}
+
+// Error is a convenient alias for Root().Error
+func Error(msg string, ctx ...interface{}) {
+ root.write(msg, LvlError, ctx)
+}
+
+// Crit is a convenient alias for Root().Crit
+func Crit(msg string, ctx ...interface{}) {
+ root.write(msg, LvlCrit, ctx)
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/syslog.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/syslog.go
new file mode 100644
index 00000000000..5f95f99f1ee
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/syslog.go
@@ -0,0 +1,55 @@
+// +build !windows,!plan9
+
+package log15
+
+import (
+ "log/syslog"
+ "strings"
+)
+
+// SyslogHandler opens a connection to the system syslog daemon by calling
+// syslog.New and writes all records to it.
+func SyslogHandler(priority syslog.Priority, tag string, fmtr Format) (Handler, error) {
+ wr, err := syslog.New(priority, tag)
+ return sharedSyslog(fmtr, wr, err)
+}
+
+// SyslogNetHandler opens a connection to a log daemon over the network and writes
+// all log records to it.
+func SyslogNetHandler(net, addr string, priority syslog.Priority, tag string, fmtr Format) (Handler, error) {
+ wr, err := syslog.Dial(net, addr, priority, tag)
+ return sharedSyslog(fmtr, wr, err)
+}
+
+func sharedSyslog(fmtr Format, sysWr *syslog.Writer, err error) (Handler, error) {
+ if err != nil {
+ return nil, err
+ }
+ h := FuncHandler(func(r *Record) error {
+ var syslogFn = sysWr.Info
+ switch r.Lvl {
+ case LvlCrit:
+ syslogFn = sysWr.Crit
+ case LvlError:
+ syslogFn = sysWr.Err
+ case LvlWarn:
+ syslogFn = sysWr.Warning
+ case LvlInfo:
+ syslogFn = sysWr.Info
+ case LvlDebug:
+ syslogFn = sysWr.Debug
+ }
+
+ s := strings.TrimSpace(string(fmtr.Format(r)))
+ return syslogFn(s)
+ })
+ return LazyHandler(&closingHandler{sysWr, h}), nil
+}
+
+func (m muster) SyslogHandler(priority syslog.Priority, tag string, fmtr Format) Handler {
+ return must(SyslogHandler(priority, tag, fmtr))
+}
+
+func (m muster) SyslogNetHandler(net, addr string, priority syslog.Priority, tag string, fmtr Format) Handler {
+ return must(SyslogNetHandler(net, addr, priority, tag, fmtr))
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/LICENSE b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/LICENSE
new file mode 100644
index 00000000000..f090cb42f37
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_appengine.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_appengine.go
new file mode 100644
index 00000000000..c1b5d2a3b1a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_appengine.go
@@ -0,0 +1,13 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package term
+
+// IsTty always returns false on AppEngine.
+func IsTty(fd uintptr) bool {
+ return false
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_darwin.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_darwin.go
new file mode 100644
index 00000000000..b05de4cb8c8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_darwin.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package term
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_freebsd.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_freebsd.go
new file mode 100644
index 00000000000..cfaceab337a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_freebsd.go
@@ -0,0 +1,18 @@
+package term
+
+import (
+ "syscall"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+// Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_linux.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_linux.go
new file mode 100644
index 00000000000..5290468d698
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_linux.go
@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package term
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go
new file mode 100644
index 00000000000..87df7d5b029
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go
@@ -0,0 +1,20 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux,!appengine darwin freebsd openbsd
+
+package term
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsTty returns true if the given file descriptor is a terminal.
+func IsTty(fd uintptr) bool {
+ var termios Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_openbsd.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_openbsd.go
new file mode 100644
index 00000000000..f9bb9e1c23b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_openbsd.go
@@ -0,0 +1,7 @@
+package term
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_windows.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_windows.go
new file mode 100644
index 00000000000..df3c30c1589
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_windows.go
@@ -0,0 +1,26 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package term
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTty returns true if the given file descriptor is a terminal.
+func IsTty(fd uintptr) bool {
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
diff --git a/Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE b/Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE
new file mode 100644
index 00000000000..63cef79ba6f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2016 Errplane Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md b/Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md
new file mode 100644
index 00000000000..f0794abc112
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md
@@ -0,0 +1,27 @@
+# List
+- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
+- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
+- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
+- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
+- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
+- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
+- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE)
+- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE)
+- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
+- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)
+- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
+- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
+- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
+- github.com/hashicorp/raft-boltdb [MOZILLA PUBLIC LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
+- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt)
+- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE)
+- github.com/kimor79/gollectd [BSD LICENSE](https://github.com/kimor79/gollectd/blob/master/LICENSE)
+- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE)
+- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
+- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
+- glyphicons [LICENSE](http://glyphicons.com/license/)
+- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
+- golang.org/x/tools [BSD LICENSE](https://github.com/golang/tools/blob/master/LICENSE)
+- gopkg.in/fatih/pool.v2 [MIT LICENSE](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
+- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)
+- react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE)
diff --git a/README.md b/README.md
index e678e48a14d..6dbfc5388c2 100644
--- a/README.md
+++ b/README.md
@@ -16,6 +16,7 @@ Graphite, Elasticsearch, OpenTSDB, Prometheus and InfluxDB.
- [What's New in Grafana 2.0](http://docs.grafana.org/guides/whats-new-in-v2/)
- [What's New in Grafana 2.1](http://docs.grafana.org/guides/whats-new-in-v2-1/)
- [What's New in Grafana 2.5](http://docs.grafana.org/guides/whats-new-in-v2-5/)
+- [What's New in Grafana 3.0](http://docs.grafana.org/guides/whats-new-in-v3/)
## Features
### Graphite Target Editor
@@ -78,7 +79,7 @@ the latest master builds [here](http://grafana.org/download/builds)
### Dependencies
- Go 1.5
-- NodeJS v0.12.0
+- NodeJS v4+
- [Godep](https://github.com/tools/godep)
### Get Code
@@ -109,7 +110,7 @@ go run build.go build
### Building frontend assets
-To build less to css for the frontend you will need a recent version of of **node (v0.12.0)**,
+To build less to css for the frontend you will need a recent version of **node (v4+)**,
npm (v2.5.0) and grunt (v0.4.5). Run the following:
```bash
diff --git a/appveyor.yml b/appveyor.yml
index 7d84bafc148..1b6027b5eb6 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -14,7 +14,7 @@ install:
- npm install
- npm install -g grunt-cli
# install gcc (needed for sqlite3)
- - choco install -y mingw
+ - choco install -y --limit-output mingw
- set PATH=C:\tools\mingw64\bin;%PATH%
- echo %PATH%
- echo %GOPATH%
diff --git a/build.go b/build.go
index e1fc3599aa8..4347c486063 100644
--- a/build.go
+++ b/build.go
@@ -132,12 +132,10 @@ func readVersionFromPackageJson() {
if len(parts) > 1 {
linuxPackageVersion = parts[0]
linuxPackageIteration = parts[1]
- if linuxPackageIteration != "" {
- // add timestamp to iteration
- linuxPackageIteration = fmt.Sprintf("%s%v", linuxPackageIteration, time.Now().Unix())
- }
- log.Println(fmt.Sprintf("Iteration %v", linuxPackageIteration))
}
+
+ // add timestamp to iteration
+ linuxPackageIteration = fmt.Sprintf("%d%s", time.Now().Unix(), linuxPackageIteration)
}
type linuxPackageOptions struct {
diff --git a/circle.yml b/circle.yml
index 02f9f91e103..ee19b50ee46 100644
--- a/circle.yml
+++ b/circle.yml
@@ -1,6 +1,6 @@
machine:
node:
- version: 4.0
+ version: 5.11.1
environment:
GOPATH: "/home/ubuntu/.go_workspace"
ORG_PATH: "github.com/grafana"
diff --git a/conf/defaults.ini b/conf/defaults.ini
index f78287619a3..5233fe89722 100644
--- a/conf/defaults.ini
+++ b/conf/defaults.ini
@@ -6,6 +6,9 @@
# possible values : production, development
app_mode = production
+# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
+instance_name = ${HOSTNAME}
+
#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
@@ -143,7 +146,7 @@ cookie_remember_name = grafana_remember
# disable gravatar profile images
disable_gravatar = false
-# data source proxy whitelist (ip_or_domain:port seperated by spaces)
+# data source proxy whitelist (ip_or_domain:port separated by spaces)
data_source_proxy_whitelist =
[snapshots]
@@ -172,6 +175,12 @@ verify_email_enabled = false
# Background text for the user field on the login page
login_hint = email or username
+# Default UI theme ("dark" or "light")
+default_theme = dark
+
+# Allow users to sign in using username and password
+allow_user_pass_login = true
+
#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access
@@ -242,24 +251,26 @@ templates_pattern = emails/*.html
#################################### Logging ##########################
[log]
# Either "console", "file", "syslog". Default is console and file
-# Use comma to separate multiple modes, e.g. "console, file"
+# Use space to separate multiple modes, e.g. "console file"
mode = console, file
-# Buffer length of channel, keep it as it is if you don't know what it is.
-buffer_len = 10000
-
-# Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info"
-level = Info
+# Either "debug", "info", "warn", "error", "critical", default is "info"
+level = info
# For "console" mode only
[log.console]
level =
-# Set formatting to "false" to disable color formatting of console logs
-formatting = false
+
+# log line format, valid options are text, console and json
+format = console
# For "file" mode only
[log.file]
level =
+
+# log line format, valid options are text, console and json
+format = text
+
# This enables automated log rotate(switch of following options), default is true
log_rotate = true
@@ -267,7 +278,7 @@ log_rotate = true
max_lines = 1000000
# Max size shift of single file, default is 28 means 1 << 28, 256MB
-max_lines_shift = 28
+max_size_shift = 28
# Segment log daily, default is true
daily_rotate = true
@@ -277,6 +288,10 @@ max_days = 7
[log.syslog]
level =
+
+# log line format, valid options are text, console and json
+format = text
+
# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
network =
address =
@@ -287,7 +302,8 @@ facility =
# Syslog tag. By default, the process' argv[0] is used.
tag =
-#################################### AMPQ Event Publisher ##########################
+
+#################################### AMQP Event Publisher ##########################
[event_publisher]
enabled = false
rabbitmq_url = amqp://localhost/
@@ -332,3 +348,17 @@ global_api_key = -1
# global limit on number of logged in users.
global_session = -1
+
+#################################### Internal Grafana Metrics ##########################
+# Metrics available at HTTP API Url /api/metrics
+[metrics]
+enabled = true
+interval_seconds = 60
+
+# Send internal Grafana metrics to graphite
+; [metrics.graphite]
+; address = localhost:2003
+; prefix = prod.grafana.%(instance_name)s.
+
+[grafana_net]
+url = https://grafana.net
diff --git a/conf/sample.ini b/conf/sample.ini
index 6a26589d40d..6abc8ba416d 100644
--- a/conf/sample.ini
+++ b/conf/sample.ini
@@ -6,6 +6,9 @@
# possible values : production, development
; app_mode = production
+# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
+; instance_name = ${HOSTNAME}
+
#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
@@ -39,8 +42,9 @@
# Prevents DNS rebinding attacks
;enforce_domain = false
-# The full public facing url
-;root_url = %(protocol)s://%(domain)s:%(http_port)s/
+# The full public facing url you use in browser, used for redirects and emails
+# If you use reverse proxy and sub path specify full url (with sub path)
+;root_url = http://localhost:3000
# Log web requests
;router_logging = false
@@ -129,7 +133,7 @@ check_for_updates = true
# disable gravatar profile images
;disable_gravatar = false
-# data source proxy whitelist (ip_or_domain:port seperated by spaces)
+# data source proxy whitelist (ip_or_domain:port separated by spaces)
;data_source_proxy_whitelist =
[snapshots]
@@ -155,6 +159,9 @@ check_for_updates = true
# Background text for the user field on the login page
;login_hint = email or username
+# Default UI theme ("dark" or "light")
+;default_theme = dark
+
#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access
@@ -224,22 +231,26 @@ check_for_updates = true
#################################### Logging ##########################
[log]
# Either "console", "file", "syslog". Default is console and file
-# Use comma to separate multiple modes, e.g. "console, file"
+# Use space to separate multiple modes, e.g. "console file"
;mode = console, file
-# Buffer length of channel, keep it as it is if you don't know what it is.
-;buffer_len = 10000
-
-# Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info"
-;level = Info
+# Either "trace", "debug", "info", "warn", "error", "critical", default is "info"
+;level = info
# For "console" mode only
[log.console]
;level =
+# log line format, valid options are text, console and json
+;format = console
+
# For "file" mode only
[log.file]
;level =
+
+# log line format, valid options are text, console and json
+;format = text
+
# This enables automated log rotate(switch of following options), default is true
;log_rotate = true
@@ -247,7 +258,7 @@ check_for_updates = true
;max_lines = 1000000
# Max size shift of single file, default is 28 means 1 << 28, 256MB
-;max_lines_shift = 28
+;max_size_shift = 28
# Segment log daily, default is true
;daily_rotate = true
@@ -255,7 +266,24 @@ check_for_updates = true
# Expired days of log file(delete after max days), default is 7
;max_days = 7
-#################################### AMPQ Event Publisher ##########################
+[log.syslog]
+;level =
+
+# log line format, valid options are text, console and json
+;format = text
+
+# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
+;network =
+;address =
+
+# Syslog facility. user, daemon and local0 through local7 are valid.
+;facility =
+
+# Syslog tag. By default, the process' argv[0] is used.
+;tag =
+
+
+#################################### AMQP Event Publisher ##########################
[event_publisher]
;enabled = false
;rabbitmq_url = amqp://localhost/
@@ -266,5 +294,21 @@ check_for_updates = true
;enabled = false
;path = /var/lib/grafana/dashboards
+#################################### Internal Grafana Metrics ##########################
+# Metrics available at HTTP API Url /api/metrics
+[metrics]
+# Disable / Enable internal metrics
+;enabled = true
+# Publish interval
+;interval_seconds = 10
+# Send internal metrics to Graphite
+; [metrics.graphite]
+; address = localhost:2003
+; prefix = prod.grafana.%(instance_name)s.
+
+#################################### Grafana.net Integration ##########################
+# Url used to import dashboards directly from Grafana.net
+[grafana_net]
+url = https://grafana.net
diff --git a/docker/blocks/collectd/Dockerfile b/docker/blocks/collectd/Dockerfile
new file mode 100644
index 00000000000..a08b1f9c1b2
--- /dev/null
+++ b/docker/blocks/collectd/Dockerfile
@@ -0,0 +1,16 @@
+FROM ubuntu:xenial
+
+ENV DEBIAN_FRONTEND noninteractive
+
+RUN apt-get -y update
+RUN apt-get -y install collectd curl python-pip
+
+# add a fake mtab for host disk stats
+ADD etc_mtab /etc/mtab
+
+ADD collectd.conf.tpl /etc/collectd/collectd.conf.tpl
+
+RUN pip install envtpl
+ADD start_container /usr/bin/start_container
+RUN chmod +x /usr/bin/start_container
+CMD start_container
diff --git a/docker/blocks/collectd/README.md b/docker/blocks/collectd/README.md
new file mode 100644
index 00000000000..2c1a8cb79fc
--- /dev/null
+++ b/docker/blocks/collectd/README.md
@@ -0,0 +1,37 @@
+collectd-write-graphite
+=======================
+
+Basic collectd-based server monitoring. Sends stats to Graphite.
+
+Collectd metrics:
+
+* CPU used/free/idle/etc
+* Free disk (via mounting hosts '/' into container, eg: -v /:/hostfs:ro)
+* Disk performance
+* Load average
+* Memory used/free/etc
+* Uptime
+* Network interface
+* Swap
+
+Environment variables
+---------------------
+
+* `HOST_NAME`
+ - Will be sent to Graphite
+ - Required
+* `GRAPHITE_HOST`
+ - Graphite IP or hostname
+ - Required
+* `GRAPHITE_PORT`
+ - Graphite port
+ - Optional, defaults to 2003
+* `GRAPHITE_PREFIX`
+ - Graphite prefix
+ - Optional, defaults to collectd.
+* `REPORT_BY_CPU`
+ - Report per-CPU metrics if true, global sum of CPU metrics if false (details: [collectd.conf man page](https://collectd.org/documentation/manpages/collectd.conf.5.shtml#plugin_cpu))
+ - Optional, defaults to false.
+* `COLLECT_INTERVAL`
+ - Collection interval and thus resolution of metrics
+ - Optional, defaults to 10
diff --git a/docker/blocks/collectd/collectd.conf.tpl b/docker/blocks/collectd/collectd.conf.tpl
new file mode 100644
index 00000000000..69b019007fb
--- /dev/null
+++ b/docker/blocks/collectd/collectd.conf.tpl
@@ -0,0 +1,106 @@
+Hostname "{{ HOST_NAME }}"
+
+FQDNLookup false
+Interval {{ COLLECT_INTERVAL | default("10") }}
+Timeout 2
+ReadThreads 5
+
+LoadPlugin cpu
+LoadPlugin df
+LoadPlugin load
+LoadPlugin memory
+LoadPlugin disk
+LoadPlugin interface
+LoadPlugin uptime
+LoadPlugin swap
+LoadPlugin write_graphite
+LoadPlugin processes
+LoadPlugin aggregation
+LoadPlugin match_regex
+# LoadPlugin memcached
+
+
+ # expose host's mounts into container using -v /:/host:ro (location inside container does not matter much)
+ # ignore rootfs; else, the root file-system would appear twice, causing
+ # one of the updates to fail and spam the log
+ FSType rootfs
+ # ignore the usual virtual / temporary file-systems
+ FSType sysfs
+ FSType proc
+ FSType devtmpfs
+ FSType devpts
+ FSType tmpfs
+ FSType fusectl
+ FSType cgroup
+ FSType overlay
+ FSType debugfs
+ FSType pstore
+ FSType securityfs
+ FSType hugetlbfs
+ FSType squashfs
+ FSType mqueue
+ MountPoint "/etc/resolv.conf"
+ MountPoint "/etc/hostname"
+ MountPoint "/etc/hosts"
+ IgnoreSelected true
+ ReportByDevice false
+ ReportReserved true
+ ReportInodes true
+ ValuesAbsolute true
+ ValuesPercentage true
+ ReportInodes true
+
+
+
+ Disk "/^[hs]d[a-z]/"
+ IgnoreSelected false
+
+
+
+
+ Plugin "cpu"
+ Type "cpu"
+ GroupBy "Host"
+ GroupBy "TypeInstance"
+ CalculateAverage true
+
+
+
+
+ Interface "lo"
+ Interface "/^veth.*/"
+ Interface "/^docker.*/"
+ IgnoreSelected true
+
+
+#
+# Host "memcached"
+# Port "11211"
+#
+
+
+
+
+ Plugin "^cpu$"
+ PluginInstance "^[0-9]+$"
+
+
+ Plugin "aggregation"
+
+ Target stop
+
+ Target "write"
+
+
+
+
+ Host "{{ GRAPHITE_HOST }}"
+ Port "{{ GRAPHITE_PORT | default("2003") }}"
+ Prefix "{{ GRAPHITE_PREFIX | default("collectd.") }}"
+ EscapeCharacter "_"
+ SeparateInstances true
+ StoreRates true
+ AlwaysAppendDS false
+
+
+
diff --git a/docker/blocks/collectd/etc_mtab b/docker/blocks/collectd/etc_mtab
new file mode 100644
index 00000000000..749f9789482
--- /dev/null
+++ b/docker/blocks/collectd/etc_mtab
@@ -0,0 +1 @@
+hostfs /.dockerinit ext4 ro,relatime,user_xattr,barrier=1,data=ordered 0 0
diff --git a/docker/blocks/collectd/fig b/docker/blocks/collectd/fig
new file mode 100644
index 00000000000..6c2e7e25893
--- /dev/null
+++ b/docker/blocks/collectd/fig
@@ -0,0 +1,12 @@
+collectd:
+ build: blocks/collectd
+ environment:
+ HOST_NAME: myserver
+ GRAPHITE_HOST: graphite
+ GRAPHITE_PORT: 2003
+ GRAPHITE_PREFIX: collectd.
+ REPORT_BY_CPU: 'false'
+ COLLECT_INTERVAL: 10
+ links:
+ - graphite
+ - memcached
diff --git a/docker/blocks/collectd/start_container b/docker/blocks/collectd/start_container
new file mode 100644
index 00000000000..b01cd0d5ff2
--- /dev/null
+++ b/docker/blocks/collectd/start_container
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+envtpl /etc/collectd/collectd.conf.tpl
+
+collectd -f
diff --git a/docker/blocks/graphite/fig b/docker/blocks/graphite/fig
index 84da45341e1..60acb8c1131 100644
--- a/docker/blocks/graphite/fig
+++ b/docker/blocks/graphite/fig
@@ -8,3 +8,10 @@ graphite:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
+fake-graphite-data:
+ image: grafana/fake-data-gen
+ net: bridge
+ environment:
+ FD_DATASOURCE: graphite
+ FD_PORT: 2003
+
diff --git a/docker/blocks/influxdb/fig b/docker/blocks/influxdb/fig
index c537a74b003..bdb4a274634 100644
--- a/docker/blocks/influxdb/fig
+++ b/docker/blocks/influxdb/fig
@@ -4,3 +4,11 @@ influxdb:
- "2004:2004"
- "8083:8083"
- "8086:8086"
+
+fake-influxdb-data:
+ image: grafana/fake-data-gen
+ net: bridge
+ environment:
+ FD_DATASOURCE: influxdb
+ FD_PORT: 8086
+
diff --git a/docker/blocks/memcached/fig b/docker/blocks/memcached/fig
new file mode 100644
index 00000000000..a0da9df2bc2
--- /dev/null
+++ b/docker/blocks/memcached/fig
@@ -0,0 +1,5 @@
+memcached:
+ image: memcached:latest
+ ports:
+ - "11211:11211"
+
diff --git a/docker/blocks/opentsdb/fig b/docker/blocks/opentsdb/fig
index 34bbf4b854c..c346475e9a3 100644
--- a/docker/blocks/opentsdb/fig
+++ b/docker/blocks/opentsdb/fig
@@ -2,4 +2,10 @@ opentsdb:
image: opower/opentsdb:latest
ports:
- "4242:4242"
-
+
+fake-opentsdb-data:
+ image: grafana/fake-data-gen
+ net: bridge
+ environment:
+ FD_DATASOURCE: opentsdb
+
diff --git a/docker/blocks/prometheus/fig b/docker/blocks/prometheus/fig
index 0880902c9fd..b4979918149 100644
--- a/docker/blocks/prometheus/fig
+++ b/docker/blocks/prometheus/fig
@@ -1,6 +1,22 @@
prometheus:
build: blocks/prometheus
+ net: bridge
ports:
- "9090:9090"
volumes:
- /var/docker/prometheus:/prometheus-data
+
+node_exporter:
+ image: prom/node-exporter
+ net: bridge
+ ports:
+ - "9100:9100"
+
+fake-prometheus-data:
+ image: grafana/fake-data-gen
+ net: bridge
+ ports:
+ - "9091:9091"
+ environment:
+ FD_DATASOURCE: prom
+
diff --git a/docker/blocks/prometheus/prometheus.yml b/docker/blocks/prometheus/prometheus.yml
index 5c853622af3..f3e8c8c3469 100644
--- a/docker/blocks/prometheus/prometheus.yml
+++ b/docker/blocks/prometheus/prometheus.yml
@@ -23,4 +23,4 @@ scrape_configs:
# scheme defaults to 'http'.
target_groups:
- - targets: ['localhost:9090', '172.17.0.1:9091']
+ - targets: ['localhost:9090', '172.17.0.1:9091', '172.17.0.1:9100', '172.17.0.1:9150']
diff --git a/docs/README.md b/docs/README.md
index 36c636fcc72..65bd5714615 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,7 +1,15 @@
-To build the docs locally, you need to have docker installed. The docs are built using a custom [docker](https://www.docker.com/)
-image and [mkdocs](http://www.mkdocs.org/).
+# Building The Docs
-Build the `grafana/docs-base:latest` image:
+To build the docs locally, you need to have docker installed. The
+docs are built using a custom [docker](https://www.docker.com/) image
+and the [mkdocs](http://www.mkdocs.org/) tool.
+
+**Prepare the Docker Image**:
+
+Build the `grafana/docs-base:latest` image. Run these commands in the
+same directory this file is in. **Note** that you may require ``sudo``
+when running ``make docs-build`` depending on how your system's docker
+service is configured):
```
$ git clone https://github.com/grafana/docs-base
@@ -9,10 +17,45 @@ $ cd docs-base
$ make docs-build
```
-To build the docs:
+**Build the Documentation**:
+
+Now that the docker image has been prepared we can build the
+docs. Switch your working directory back to the directory this file
+(README.md) is in and run (possibly with ``sudo``):
+
```
-$ cd docs
$ make docs
```
+This command will not return control of the shell to the user. Instead
+the command is now running a new docker container built from the image
+we created in the previous step.
+
Open [localhost:8180](http://localhost:8180) to view the docs.
+
+**Note** that after running ``make docs`` you may notice a message
+like this in the console output
+
+> Running at: http://0.0.0.0:8000/
+
+This is misleading. That is **not** the port the documentation is
+served from. You must browse to port **8180** to view the new
+documentation.
+
+
+# Adding a New Page
+
+Adding a new page requires updating the ``mkdocs.yml`` file which is
+located in this directory.
+
+For example, if you are adding documentation for a new HTTP API called
+``preferences`` you would:
+
+1. Create the file ``docs/sources/http_api/preferences.md``
+1. Add a reference to it in ``docs/sources/http_api/overview.md``
+1. Update the list under the **pages** key in the ``docs/mkdocs.yml`` file with a reference to your new page:
+
+
+```yaml
+- ['http_api/preferences.md', 'API', 'Preferences API']
+```
diff --git a/docs/VERSION b/docs/VERSION
index 4a36342fcab..fd2a01863fd 100644
--- a/docs/VERSION
+++ b/docs/VERSION
@@ -1 +1 @@
-3.0.0
+3.1.0
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index ff88133dfdd..e4a528dcdbd 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -45,6 +45,7 @@ pages:
- ['guides/basic_concepts.md', 'User Guides', 'Basic Concepts']
- ['guides/gettingstarted.md', 'User Guides', 'Getting Started']
+- ['guides/whats-new-in-v3-1.md', 'User Guides', "What's New in Grafana v3.1"]
- ['guides/whats-new-in-v3.md', 'User Guides', "What's New in Grafana v3.0"]
- ['guides/whats-new-in-v2-6.md', 'User Guides', "What's New in Grafana v2.6"]
- ['guides/whats-new-in-v2-5.md', 'User Guides', "What's New in Grafana v2.5"]
@@ -84,6 +85,7 @@ pages:
- ['http_api/user.md', 'API', 'User API']
- ['http_api/admin.md', 'API', 'Admin API']
- ['http_api/snapshot.md', 'API', 'Snapshot API']
+- ['http_api/preferences.md', 'API', 'Preferences API']
- ['http_api/other.md', 'API', 'Other API']
- ['plugins/index.md', 'Plugins', 'Overview']
diff --git a/docs/sources/datasources/cloudwatch.md b/docs/sources/datasources/cloudwatch.md
index c69d3579784..92f4367d9ae 100644
--- a/docs/sources/datasources/cloudwatch.md
+++ b/docs/sources/datasources/cloudwatch.md
@@ -26,6 +26,8 @@ Name | The data source name, important that this is the same as in Grafana v1.x
Default | Default data source means that it will be pre-selected for new panels.
Credentials profile name | Specify the name of the profile to use (if you use `~/aws/credentials` file), leave blank for default. This option was introduced in Grafana 2.5.1
Default Region | Used in query editor to set region (can be changed on per query basis)
+Custom Metrics namespace | Specify the CloudWatch namespace of Custom metrics
+Assume Role Arn | Specify the ARN of the role to assume
## Authentication
@@ -95,8 +97,8 @@ Example `ec2_instance_attribute()` query
## Cost
-It's worth to mention that Amazon will charge you for CloudWatch API usage. CloudWatch costs
-$0.01 per 1,000 GetMetricStatistics or ListMetrics requests. For each query Grafana will
+Amazon provides 1 million CloudWatch API requests each month at no additional charge. Past this,
+it costs $0.01 per 1,000 GetMetricStatistics or ListMetrics requests. For each query Grafana will
issue a GetMetricStatistics request and every time you pick a dimension in the query editor
Grafana will issue a ListMetrics request.
diff --git a/docs/sources/datasources/elasticsearch.md b/docs/sources/datasources/elasticsearch.md
index 314a0f4870b..e669d760985 100644
--- a/docs/sources/datasources/elasticsearch.md
+++ b/docs/sources/datasources/elasticsearch.md
@@ -26,7 +26,7 @@ Name | Description
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
Default | Default data source means that it will be pre-selected for new panels.
Url | The http protocol, ip and port of you elasticsearch server.
-Access | Proxy = access via Grafana backend, Direct = access directory from browser.
+Access | Proxy = access via Grafana backend, Direct = access directly from browser.
Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the Data Source to the browser.
diff --git a/docs/sources/datasources/graphite.md b/docs/sources/datasources/graphite.md
index af53d0bf60c..feb896c1c02 100644
--- a/docs/sources/datasources/graphite.md
+++ b/docs/sources/datasources/graphite.md
@@ -26,7 +26,7 @@ Name | Description
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
Default | Default data source means that it will be pre-selected for new panels.
Url | The http protocol, ip and port of your graphite-web or graphite-api install.
-Access | Proxy = access via Grafana backend, Direct = access directory from browser.
+Access | Proxy = access via Grafana backend, Direct = access directly from browser.
Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross Origin Site Resource) issues, as well as eliminate the need to disseminate authentication details to the Data Source to the browser.
diff --git a/docs/sources/datasources/kairosdb.md b/docs/sources/datasources/kairosdb.md
index 4430b427250..2a2adf94acd 100644
--- a/docs/sources/datasources/kairosdb.md
+++ b/docs/sources/datasources/kairosdb.md
@@ -25,7 +25,7 @@ Name | Description
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
Default | Default data source means that it will be pre-selected for new panels.
Url | The http protocol, ip and port of your kairosdb server (default port is usually 8080)
-Access | Proxy = access via Grafana backend, Direct = access directory from browser.
+Access | Proxy = access via Grafana backend, Direct = access directly from browser.
## Query editor
Open a graph in edit mode by click the title.
diff --git a/docs/sources/datasources/opentsdb.md b/docs/sources/datasources/opentsdb.md
index 28d90c19b00..b3ca5b8ea8f 100644
--- a/docs/sources/datasources/opentsdb.md
+++ b/docs/sources/datasources/opentsdb.md
@@ -7,10 +7,10 @@ page_keywords: grafana, opentsdb, documentation
# OpenTSDB Guide
The newest release of Grafana adds additional functionality when using an OpenTSDB Data source.
-
+
-1. Open the side menu by clicking the the Grafana icon in the top header.
-2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
+1. Open the side menu by clicking the Grafana icon in the top header.
+2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
> NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization.
@@ -22,7 +22,7 @@ Name | Description
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
Default | Default data source means that it will be pre-selected for new panels.
Url | The http protocol, ip and port of you opentsdb server (default port is usually 4242)
-Access | Proxy = access via Grafana backend, Direct = access directory from browser.
+Access | Proxy = access via Grafana backend, Direct = access directly from browser.
Version | Version = opentsdb version, either <=2.1 or 2.2
Resolution | Metrics from opentsdb may have datapoints with either second or millisecond resolution.
@@ -51,6 +51,13 @@ When using OpenTSDB with a template variable of `query` type you can use followi
If you do not see template variables being populated in `Preview of values` section, you need to enable `tsd.core.meta.enable_realtime_ts` in the OpenTSDB server settings. Also, to populate metadata of the existing time series data in OpenTSDB, you need to run `tsdb uid metasync` on the OpenTSDB server.
+### Nested Templating
+
+One template variable can be used to filter tag values for another template variable. Very importantly, the order of the parameters matters in the tag_values function. First parameter is the metric name, second parameter is the tag key for which you need to find tag values, and after that all other dependent template variables. Some examples are mentioned below to make nested template queries work successfully.
+
+ tag_values(cpu, hostname, env=$env) // return tag values for cpu metric, selected env tag value and tag key hostname
+    tag_values(cpu, hostname, env=$env, region=$region) // return tag values for cpu metric, selected env tag value, selected region tag value and tag key hostname
+
> Note: This is required for the OpenTSDB `lookup` api to work.
For details on opentsdb metric queries checkout the official [OpenTSDB documentation](http://opentsdb.net/docs/build/html/index.html)
diff --git a/docs/sources/datasources/prometheus.md b/docs/sources/datasources/prometheus.md
index 9ad435270c4..0e981c89823 100644
--- a/docs/sources/datasources/prometheus.md
+++ b/docs/sources/datasources/prometheus.md
@@ -23,7 +23,7 @@ Name | Description
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
Default | Default data source means that it will be pre-selected for new panels.
Url | The http protocol, ip and port of you Prometheus server (default port is usually 9090)
-Access | Proxy = access via Grafana backend, Direct = access directory from browser.
+Access | Proxy = access via Grafana backend, Direct = access directly from browser.
Basic Auth | Enable basic authentication to the Prometheus datasource.
User | Name of your Prometheus user
Password | Database user's password
diff --git a/docs/sources/guides/whats-new-in-v3-1.md b/docs/sources/guides/whats-new-in-v3-1.md
new file mode 100644
index 00000000000..9613cc1682c
--- /dev/null
+++ b/docs/sources/guides/whats-new-in-v3-1.md
@@ -0,0 +1,68 @@
+---
+page_title: What's New in Grafana v3.1
+page_description: What's new in Grafana v3.1
+page_keywords: grafana, new, changes, features, documentation
+---
+
+# What's New in Grafana v3.1
+
+## Dashboard Export & Import
+
+The export feature is now accessed from the share menu.
+
+
+
+Dashboards exported from Grafana 3.1 are now more portable and easier for others to import than before.
+The export process extracts information data source types used by panels and adds these to a new `inputs`
+section in the dashboard json. So when you or another person tries to import the dashboard they will be asked to
+select data source and optional metric prefix options.
+
+
+
+The above screenshot shows the new import modal that gives you 3 options for how to import a dashboard.
+One notable new addition here is the ability to import directly from Dashboards shared on [Grafana.net](https://grafana.net).
+
+The next step in the import process:
+
+
+
+Here you can change the name of the dashboard and also pick what data sources you want the dashboard to use. The above screenshot
+shows a CollectD dashboard for Graphite that requires a metric prefix be specified.
+
+## Discover Dashboards
+
+On [Grafana.net](https://grafana.net) you can now browse & search for dashboards. We have already added a few but
+more are being uploaded every day. To import a dashboard just copy the dashboard url and head back to Grafana,
+then Dashboard Search -> Import -> Paste Grafana.net Dashboard URL.
+
+
+
+## Constant template variables
+
+We added a new template variable named constant that makes it easier to share and export dashboards that have custom prefixes.
+
+## Dashboard Urls
+Having current time range and template variable value always sync with the URL makes it possible to always copy your current
+Grafana url to share with a colleague without having to use the Share modal.
+
+## Internal metrics
+
+Do you want metrics about viewing metrics? Of course you do! In this release we added support for sending metrics about Grafana to graphite.
+You can configure interval and server in the config file.
+
+## Logging
+
+Switched logging framework to log15 to enable key-value pair logging and filtering based on different log levels.
+It's now possible to configure different log levels for different modules.
+
+### Breaking changes
+- **Logging** format has been changed to improve log filtering.
+- **Graphite PNG** Graphite PNG support dropped from Graph panel (use Grafana native PNG instead).
+- **Migration** No longer possible to migrate dashboards from 1.x (Stored in ES or Influx 0.8).
+
+## CHANGELOG
+
+For a detailed list and link to github issues for everything included
+in the 3.1 release please view the
+[CHANGELOG.md](https://github.com/grafana/grafana/blob/master/CHANGELOG.md)
+file.
diff --git a/docs/sources/guides/whats-new-in-v3.md b/docs/sources/guides/whats-new-in-v3.md
index eb1030f8f31..051691e0384 100644
--- a/docs/sources/guides/whats-new-in-v3.md
+++ b/docs/sources/guides/whats-new-in-v3.md
@@ -39,12 +39,13 @@ entire experience right within Grafana.
-A preview of [Grafana.net](http://grafana.net) is launching along with this release. We
-think it’s the perfect compliment to Grafana.
+[Grafana.net](https://grafana.net) offers a central repository where the community can come together to discover, create and
+share plugins (data sources, panels, apps) and dashboards.
-Grafana.net currently offers a central repository where the community
-can come together to discover and share plugins (Data Sources, Panels,
-Apps) and Dashboards for Grafana 3.0 and above.
+We are also working on a hosted Graphite-compatible data source that will be optimized for use with Grafana.
+It’ll be easy to combine your existing data source(s) with this OpenSaaS option. Finally, Grafana.net can
+also be a hub to manage all your Grafana instances. You’ll be able to monitor their health and availability,
+perform dashboard backups, and more.
We are also working on a hosted Graphite-compatible Data Source that
will be optimized for use with Grafana. It’ll be easy to combine your
@@ -65,7 +66,6 @@ Grafana 3.0 comes with a new command line tool called grafana-cli. You
can easily install plugins from Grafana.net with it. For
example:
-
```
grafana-cli install grafana-pie-chart-panel
```
@@ -188,6 +188,33 @@ you can still install manually from [Grafana.net](http://grafana.net)
* KairosDB: This data source has also no longer shipped with Grafana,
you can install it manually from [Grafana.net](http://grafana.net)
+## Plugin showcase
+
+Discovering and installing plugins is very quick and easy with Grafana 3.0 and [Grafana.net](https://grafana.net). Here
+are a couple that I encourage you to try!
+
+#### [Clock Panel](https://grafana.net/plugins/grafana-clock-panel)
+Supports both current time and count down mode.
+
+
+#### [Pie Chart Panel](https://grafana.net/plugins/grafana-piechart-panel)
+A simple pie chart panel is now available as an external plugin.
+
+
+#### [WorldPing App](https://grafana.net/plugins/raintank-worldping-app)
+This is a full-blown Grafana App that adds new panels, data sources and pages to give
+feature rich global performance monitoring directly from your on-prem Grafana.
+
+
+
+#### [Zabbix App](https://grafana.net/plugins/alexanderzobnin-zabbix-app)
+This app contains the already very popular Zabbix data source plugin, 2 dashboards and a triggers panel. It is
+created and maintained by [Alexander Zobnin](https://github.com/alexanderzobnin/grafana-zabbix).
+
+
+
+Checkout the full list of plugins on [Grafana.net](https://grafana.net/plugins)
+
## CHANGELOG
For a detailed list and link to github issues for everything included
diff --git a/docs/sources/http_api/overview.md b/docs/sources/http_api/overview.md
index 8e7e2d60ad3..7f5a3ecfac8 100644
--- a/docs/sources/http_api/overview.md
+++ b/docs/sources/http_api/overview.md
@@ -18,4 +18,5 @@ dashboards, creating users and updating data sources.
* [User API](/http_api/user/)
* [Admin API](/http_api/admin/)
* [Snapshot API](/http_api/snapshot/)
+* [Preferences API](/http_api/preferences/)
* [Other API](/http_api/other/)
diff --git a/docs/sources/http_api/preferences.md b/docs/sources/http_api/preferences.md
new file mode 100644
index 00000000000..6bb00ed8132
--- /dev/null
+++ b/docs/sources/http_api/preferences.md
@@ -0,0 +1,100 @@
+---
+page_title: Preferences API
+page_description: Grafana Preferences API Reference
+page_keywords: grafana, preferences, http, api, documentation
+---
+
+# User and Org Preferences API
+
+Keys:
+
+- **theme** - One of: ``light``, ``dark``, or an empty string for the default theme
+- **homeDashboardId** - The numerical ``:id`` of a favorited dashboard, default: ``0``
+- **timezone** - One of: ``utc``, ``browser``, or an empty string for the default
+
+Omitting a key will cause the current value to be replaced with the
+system default value.
+
+## Get Current User Prefs
+
+`GET /api/user/preferences`
+
+**Example Request**:
+
+ GET /api/user/preferences HTTP/1.1
+ Accept: application/json
+ Content-Type: application/json
+ Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
+
+**Example Response**:
+
+ HTTP/1.1 200
+ Content-Type: application/json
+
+ {"theme":"","homeDashboardId":0,"timezone":""}
+
+## Update Current User Prefs
+
+`PUT /api/user/preferences`
+
+**Example Request**:
+
+ PUT /api/user/preferences HTTP/1.1
+ Accept: application/json
+ Content-Type: application/json
+ Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
+
+ {
+ "theme": "",
+ "homeDashboardId":0,
+ "timezone":"utc"
+ }
+
+**Example Response**:
+
+ HTTP/1.1 200
+ Content-Type: text/plain; charset=utf-8
+
+ {"message":"Preferences updated"}
+
+## Get Current Org Prefs
+
+`GET /api/org/preferences`
+
+**Example Request**:
+
+ GET /api/org/preferences HTTP/1.1
+ Accept: application/json
+ Content-Type: application/json
+ Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
+
+**Example Response**:
+
+ HTTP/1.1 200
+ Content-Type: application/json
+
+ {"theme":"","homeDashboardId":0,"timezone":""}
+
+## Update Current Org Prefs
+
+`PUT /api/org/preferences`
+
+**Example Request**:
+
+ PUT /api/org/preferences HTTP/1.1
+ Accept: application/json
+ Content-Type: application/json
+ Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
+
+ {
+ "theme": "",
+ "homeDashboardId":0,
+ "timezone":"utc"
+ }
+
+**Example Response**:
+
+ HTTP/1.1 200
+ Content-Type: text/plain; charset=utf-8
+
+ {"message":"Preferences updated"}
diff --git a/docs/sources/installation/configuration.md b/docs/sources/installation/configuration.md
index e7d1d68523a..0c2bd7366e7 100644
--- a/docs/sources/installation/configuration.md
+++ b/docs/sources/installation/configuration.md
@@ -44,6 +44,12 @@ Then you can override them using:
+## instance_name
+Set the name of the grafana-server instance. Used in logging and internal metrics and in
+clustering info. Defaults to: `${HOSTNAME}`, which will be replaced with
+environment variable `HOSTNAME`, if that is empty or does not exist Grafana will try to use
+system calls to get the machine name.
+
## [paths]
### data
@@ -226,7 +232,7 @@ organization to be created for that new user.
The role new users will be assigned for the main organization (if the
above setting is set to true). Defaults to `Viewer`, other valid
-options are `Admin` and `Editor`.
+options are `Admin`, `Editor` and `Read-Only Editor`.
@@ -439,3 +445,35 @@ Grafana backend index those json dashboards which will make them appear in regul
### path
The full path to a directory containing your json dashboards.
+
+## [log]
+
+### mode
+Either "console", "file", "syslog". Default is console and file
+Use space to separate multiple modes, e.g. "console file"
+
+### level
+Either "debug", "info", "warn", "error", "critical", default is "info"
+
+### filter
+optional settings to set different levels for specific loggers.
+Ex `filters = sqlstore:debug`
+
+## [metrics]
+
+### enabled
+Enable metrics reporting. Defaults to true. Available via HTTP API `/api/metrics`.
+
+### interval_seconds
+
+Flush/Write interval when sending metrics to external TSDB. Defaults to 60s.
+
+## [metrics.graphite]
+Include this section if you want to send internal Grafana metrics to Graphite.
+
+### address
+Format: `<hostname or ip>:port`
+
+### prefix
+Graphite metric prefix. Defaults to `prod.grafana.%(instance_name)s.`
+
diff --git a/docs/sources/installation/debian.md b/docs/sources/installation/debian.md
index cac6a7c92b8..92f11764aa4 100644
--- a/docs/sources/installation/debian.md
+++ b/docs/sources/installation/debian.md
@@ -10,32 +10,32 @@ page_keywords: grafana, installation, debian, ubuntu, guide
Description | Download
------------ | -------------
-Stable .deb for Debian-based Linux | [grafana_2.6.0_amd64.deb](https://grafanarel.s3.amazonaws.com/builds/grafana_2.6.0_amd64.deb)
-Beta .deb for Debian-based Linux | [grafana_3.0.0-beta71462173753_amd64.deb](https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.0-beta71462173753_amd64.deb)
+Stable .deb for Debian-based Linux | [3.0.4](https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.4-1464167696_amd64.deb)
+Beta .deb for Debian-based Linux | [3.1.0-beta1](https://grafanarel.s3.amazonaws.com/builds/grafana_3.1.0-1466666977beta1_amd64.deb)
## Install Stable
- $ wget https://grafanarel.s3.amazonaws.com/builds/grafana_2.6.0_amd64.deb
+ $ wget https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.4-1464167696_amd64.deb
$ sudo apt-get install -y adduser libfontconfig
- $ sudo dpkg -i grafana_2.6.0_amd64.deb
+ $ sudo dpkg -i grafana_3.0.4-1464167696_amd64.deb
-## Install 3.0 Beta
+## Install 3.1 beta
- $ wget https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.0-beta71462173753_amd64.deb
+ $ wget https://grafanarel.s3.amazonaws.com/builds/grafana_3.1.0-1466666977beta1_amd64.deb
$ sudo apt-get install -y adduser libfontconfig
- $ sudo dpkg -i grafana_3.0.0-beta71462173753_amd64.deb
+ $ sudo dpkg -i grafana_3.1.0-1466666977beta1_amd64.deb
## APT Repository
Add the following line to your `/etc/apt/sources.list` file.
- deb https://packagecloud.io/grafana/stable/debian/ wheezy main
+ deb https://packagecloud.io/grafana/stable/debian/ jessie main
Use the above line even if you are on Ubuntu or another Debian version.
There is also a testing repository if you want beta or release
candidates.
- deb https://packagecloud.io/grafana/testing/debian/ wheezy main
+ deb https://packagecloud.io/grafana/testing/debian/ jessie main
Then add the [Package Cloud](https://packagecloud.io/grafana) key. This
allows you to install signed packages.
diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md
index 7246033b640..e65c2823666 100644
--- a/docs/sources/installation/mac.md
+++ b/docs/sources/installation/mac.md
@@ -6,8 +6,33 @@ page_keywords: grafana, installation, mac, osx, guide
# Installing on Mac
-There is currently no binary build for Mac, but Grafana will happily build on Mac. Read the [build from
-source](/project/building_from_source) page for instructions on how to
-build it yourself.
+Installation can be done using [homebrew](http://brew.sh/)
+
+Install latest stable:
+
+```
+brew install grafana/grafana/grafana
+```
+
+To start grafana look at the command printed after the homebrew install completes.
+
+You can also add grafana as a tap.
+
+```
+brew tap grafana/grafana
+brew install grafana
+```
+
+Install latest unstable from master:
+
+```
+brew install --HEAD grafana/grafana/grafana
+```
+
+To upgrade use the reinstall command
+
+```
+brew reinstall --HEAD grafana/grafana/grafana
+```
diff --git a/docs/sources/installation/rpm.md b/docs/sources/installation/rpm.md
index 744cafe93db..0e423eb0af4 100644
--- a/docs/sources/installation/rpm.md
+++ b/docs/sources/installation/rpm.md
@@ -10,43 +10,42 @@ page_keywords: grafana, installation, centos, fedora, opensuse, redhat, guide
Description | Download
------------ | -------------
-Stable .RPM for CentOS / Fedora / OpenSuse / Redhat Linux | [grafana-2.6.0-1.x86_64.rpm](https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm)
-Beta .RPM for CentOS / Fedor / OpenSuse / Redhat Linux | [grafana-3.0.0-beta71462173753.x86_64.rpm](https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.0-beta71462173753.x86_64.rpm)
+Stable .RPM for CentOS / Fedora / OpenSuse / Redhat Linux | [3.0.4 (x86-64 rpm)](https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.4-1464167696.x86_64.rpm)
+Beta .RPM for CentOS / Fedora / OpenSuse / Redhat Linux | [3.1.0-beta1 (x86-64 rpm)](https://grafanarel.s3.amazonaws.com/builds/grafana-3.1.0-1466666977beta1.x86_64.rpm)
-## Install Stable Release from package file
+## Install Latest Stable
You can install Grafana using Yum directly.
- $ sudo yum install https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm
+ $ sudo yum install https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.4-1464167696.x86_64.rpm
Or install manually using `rpm`.
#### On CentOS / Fedora / Redhat:
$ sudo yum install initscripts fontconfig
- $ sudo rpm -Uvh grafana-2.6.0-1.x86_64.rpm
+ $ sudo rpm -Uvh grafana-3.0.4-1464167696.x86_64.rpm
#### On OpenSuse:
- $ sudo rpm -i --nodeps grafana-2.6.0-1.x86_64.rpm
+ $ sudo rpm -i --nodeps grafana-3.0.4-1464167696.x86_64.rpm
-## Install Beta Release from package file
+## Install 3.1 Beta
You can install Grafana using Yum directly.
- $ sudo yum install https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.0-beta71462173753.x86_64.rpm
+ $ sudo yum install https://grafanarel.s3.amazonaws.com/builds/grafana-3.1.0-1466666977beta1.x86_64.rpm
Or install manually using `rpm`.
#### On CentOS / Fedora / Redhat:
$ sudo yum install initscripts fontconfig
- $ sudo rpm -Uvh grafana-3.0.0-beta71462173753.x86_64.rpm
+ $ sudo rpm -Uvh https://grafanarel.s3.amazonaws.com/builds/grafana-3.1.0-1466666977beta1.x86_64.rpm
#### On OpenSuse:
- $ sudo rpm -i --nodeps grafana-3.0.0-beta71462173753.x86_64.rpm
-
+ $ sudo rpm -i --nodeps https://grafanarel.s3.amazonaws.com/builds/grafana-3.1.0-1466666977beta1.x86_64.rpm
## Install via YUM Repository
diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md
index 1d6c5fc76cd..858f8cfea2d 100644
--- a/docs/sources/installation/windows.md
+++ b/docs/sources/installation/windows.md
@@ -10,7 +10,7 @@ page_keywords: grafana, installation, windows guide
Description | Download
------------ | -------------
-Stable Zip package for Windows | [grafana.2.6.0.windows-x64.zip](https://grafanarel.s3.amazonaws.com/winbuilds/dist/grafana-2.5.0.windows-x64.zip)
+Stable Zip package for Windows | [grafana.3.0.4.windows-x64.zip](https://grafanarel.s3.amazonaws.com/winbuilds/dist/grafana-3.0.4.windows-x64.zip)
## Configure
diff --git a/docs/sources/reference/annotations.md b/docs/sources/reference/annotations.md
index 51852abcdf2..668a163b672 100644
--- a/docs/sources/reference/annotations.md
+++ b/docs/sources/reference/annotations.md
@@ -40,3 +40,10 @@ as the name for the fields that should be used for the annotation title, tags an
For InfluxDB you need to enter a query like in the above screenshot. You need to have the ```where $timeFilter``` part.
If you only select one column you will not need to enter anything in the column mapping fields.
+## Prometheus Annotations
+
+
+Prometheus supports two ways to query annotations.
+
+- A regular metric query
+- A Prometheus query for pending and firing alerts (for details see [Inspecting alerts during runtime](https://prometheus.io/docs/alerting/rules/#inspecting-alerts-during-runtime))
diff --git a/docs/sources/reference/dashboard.md b/docs/sources/reference/dashboard.md
index 93adf5cd789..831dbe3abdc 100644
--- a/docs/sources/reference/dashboard.md
+++ b/docs/sources/reference/dashboard.md
@@ -26,7 +26,6 @@ When a user creates a new dashboard, a new dashboard JSON object is initialized
{
"id": null,
"title": "New dashboard",
- "originalTitle": "New dashboard",
"tags": [],
"style": "dark",
"timezone": "browser",
@@ -59,7 +58,6 @@ Each field in the dashboard JSON is explained below with its usage:
| ---- | ----- |
| **id** | unique dashboard id, an integer |
| **title** | current title of dashboard |
-| **originalTitle** | title of dashboard when saved for the first time |
| **tags** | tags associated with dashboard, an array of strings |
| **style** | theme of dashboard, i.e. `dark` or `light` |
| **timezone** | timezone of dashboard, i.e. `utc` or `browser` |
diff --git a/docs/sources/reference/export_import.md b/docs/sources/reference/export_import.md
index e83c68401d4..0e830db959f 100644
--- a/docs/sources/reference/export_import.md
+++ b/docs/sources/reference/export_import.md
@@ -8,78 +8,97 @@ page_keywords: grafana, export, import, documentation
## Exporting a dashboard
-Dashboards are exported in Grafana JSON format, and contain everything you need (layout, variables, styles, data sources, queries, etc)to import the dashboard at a later time.
+Dashboards are exported in Grafana JSON format, and contain everything you need (layout, variables, styles, data sources, queries, etc.) to import the dashboard at a later time.
-#### Export to file
+The export feature is accessed from the share menu.
-To export a dashboard, locate the settings menu within the desired dashboard and click the gear icon. The export option will always be available, and will open a browser save-as dialog window.
+
-
+### Making a dashboard portable
-#### Copy JSON
+If you want to export a dashboard for others to use then it could be a good idea to
+add template variables for things like a metric prefix (use a constant variable) and server name.
-The raw JSON may be accessed directly from within the interface and copy/pasted into an editor of your choice to be saved later. To view this JSON, locate the settings menu within the desired dashboard and click the gear icon. The View JSON option will always be available, and will open the raw JSON in a text area. To copy the entire JSON file, click into the text area, the select all `CTRL`+`A` (PC, Linux) or `⌘`+`A` (Mac).
-
-
+A template variable of the type `Constant` will automatically be hidden in
+the dashboard, and will also be added as a required input when the dashboard is imported.
## Importing a dashboard
-Grafana 2.0 now has integrated dashboard storage engine that can be configured to use an internal sqlite3 database, MySQL, or Postgres. This eliminates the need to use Elasticsearch for dashboard storage for Graphite users. Grafana 2.0 does not support storing dashboards in InfluxDB.
+To import a dashboard open dashboard search and then hit the import button.
-The import view can be found at the Dashboard Picker dropdown, next to the New Dashboard and Playlist buttons.
+
-
+From here you can upload a dashboard json file, paste a [Grafana.net](https://grafana.net) dashboard
+url or paste dashboard json text directly into the text area.
+
-#### Import from a file
+In step 2 of the import process Grafana will let you change the name of the dashboard, pick what
+data source you want the dashboard to use and specify any metric prefixes (if the dashboard uses any).
-To import a dashboard through a local JSON file, click the 'Choose file' button in the Import from File section. Note that JSON is not linted or validated prior during upload, so we recommend validating locally if you're editing. In a pinch, you can use http://jsonlint.com/, and if you are editing dashboard JSON frequently, there are linter plugins for popular text editors.
+## Discover dashboards on Grafana.net
+Find dashboards for common server applications at [Grafana.net/dashboards](https://grafana.net/dashboards).
-#### Importing dashboards from Elasticsearch
+
-Start by going to the `Data Sources` view (via the side menu), and make sure your Elasticsearch data source is added. Specify the Elasticsearch index name where your existing Grafana v1.x dashboards are stored (the default is `grafana-dash`).
+## Import & Sharing with Grafana 2.x or 3.0
-
+Dashboards on Grafana.net use a new feature in Grafana 3.1 that allows the import process
+to update each panel so that they are using a data source of your choosing. If you are running a
+Grafana version older than 3.1 then you might need to do some manual steps either
+before or after import in order for the dashboard to work properly.
-#### Importing dashboards from InfluxDB
+Dashboards exported from Grafana 3.1+ have a new json section `__inputs`
+that define what data sources and metric prefixes the dashboard uses.
-Start by going to the `Data Sources` view (via the side menu), and make sure your InfluxDB data source is added. Specify the database name where your Grafana v1.x dashboards are stored, the default is `grafana`.
+Example:
+```json
+{
+ "__inputs": [
+ {
+ "name": "DS_GRAPHITE",
+ "label": "graphite",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "graphite",
+ "pluginName": "Graphite"
+ },
+ {
+ "name": "VAR_PREFIX",
+ "type": "constant",
+ "label": "prefix",
+ "value": "collectd",
+ "description": ""
+ }
+ ],
+}
-### Import view
+```
-In the Import view you find the section `Migrate dashboards`. Pick the data source you added (from Elasticsearch or InfluxDB), and click the `Import` button.
+These are then referenced in the dashboard panels like this:
-
+```json
+{
+ "rows": [
+ {
+ "panels": [
+ {
+ "type": "graph",
+ "datasource": "${DS_GRAPHITE}",
+ }
+ ]
+ }
+ ]
+}
+```
-Your dashboards should be automatically imported into the Grafana 2.0 back-end. Dashboards will no longer be stored in your previous Elasticsearch or InfluxDB databases.
+These inputs and their usage in data source properties are automatically added during export in Grafana 3.1.
+If you run an older version of Grafana and want to share a dashboard on Grafana.net you need to manually
+add the inputs and templatize the datasource properties like above.
+If you want to import a dashboard from Grafana.net into an older version of Grafana then you can either import
+it as usual and then update the data source option in the metrics tab so that the panel is using the correct
+data source. Another alternative is to open the json file in a text editor and update the data source properties
+to a value that matches the name of your data source.
-## Troubleshooting
-
-### Template variables could not be initialized.
-
-When importing a dashboard, keep an eye out for template variables in your JSON that may not exist in your instance of Grafana. For example,
-
- "templating": {
- "list": [
- {
- "allFormat": "glob",
- "current": {
- "tags": [],
- "text": "google_com + monkey_id_au",
- "value": [
- "google_com",
- "monkey_id_au"
- ]
- },
- "datasource": null,
-
-To resolve this, remove any unnecessary JSON that may be specific to the instance you are exporting from. In this case, we can remove the entire "current" section entirely, and Grafana will populate default.
-
- "templating": {
- "list": [
- {
- "allFormat": "glob",
- "datasource": null,
-
\ No newline at end of file
diff --git a/docs/sources/versions.html_fragment b/docs/sources/versions.html_fragment
index 0d62ee1e461..df6dbd4db7f 100644
--- a/docs/sources/versions.html_fragment
+++ b/docs/sources/versions.html_fragment
@@ -1,3 +1,4 @@
+
diff --git a/public/app/features/dashboard/dashnav/dashnav.ts b/public/app/features/dashboard/dashnav/dashnav.ts
index 99b113b88c5..62f598e1cbd 100644
--- a/public/app/features/dashboard/dashnav/dashnav.ts
+++ b/public/app/features/dashboard/dashnav/dashnav.ts
@@ -4,15 +4,16 @@ import _ from 'lodash';
import moment from 'moment';
import angular from 'angular';
+import {DashboardExporter} from '../export/exporter';
+
export class DashNavCtrl {
/** @ngInject */
- constructor($scope, $rootScope, alertSrv, $location, playlistSrv, backendSrv, $timeout) {
+ constructor($scope, $rootScope, alertSrv, $location, playlistSrv, backendSrv, $timeout, datasourceSrv) {
$scope.init = function() {
$scope.onAppEvent('save-dashboard', $scope.saveDashboard);
$scope.onAppEvent('delete-dashboard', $scope.deleteDashboard);
- $scope.onAppEvent('export-dashboard', $scope.snapshot);
$scope.onAppEvent('quick-snapshot', $scope.quickSnapshot);
$scope.showSettingsMenu = $scope.dashboardMeta.canEdit || $scope.contextSrv.isEditor;
@@ -168,11 +169,11 @@ export class DashNavCtrl {
});
};
- $scope.exportDashboard = function() {
+ $scope.viewJson = function() {
var clone = $scope.dashboard.getSaveModelClone();
- var blob = new Blob([angular.toJson(clone, true)], { type: "application/json;charset=utf-8" });
- var wnd: any = window;
- wnd.saveAs(blob, $scope.dashboard.title + '-' + new Date().getTime() + '.json');
+ var html = angular.toJson(clone, true);
+ var uri = "data:application/json," + encodeURIComponent(html);
+ var newWindow = window.open(uri);
};
$scope.snapshot = function() {
@@ -180,7 +181,6 @@ export class DashNavCtrl {
$rootScope.$broadcast('refresh');
$timeout(function() {
- $scope.exportDashboard();
$scope.dashboard.snapshot = false;
$scope.appEvent('dashboard-snapshot-cleanup');
}, 1000);
diff --git a/public/app/features/dashboard/dynamicDashboardSrv.js b/public/app/features/dashboard/dynamicDashboardSrv.js
index 9e369733f45..f131b47b557 100644
--- a/public/app/features/dashboard/dynamicDashboardSrv.js
+++ b/public/app/features/dashboard/dynamicDashboardSrv.js
@@ -52,6 +52,8 @@ function (angular, _) {
else if (panel.repeatPanelId && panel.repeatIteration !== this.iteration) {
row.panels = _.without(row.panels, panel);
j = j - 1;
+ } else if (row.repeat || row.repeatRowId) {
+ continue;
} else if (!_.isEmpty(panel.scopedVars) && panel.repeatIteration !== this.iteration) {
panel.scopedVars = {};
}
@@ -118,7 +120,6 @@ function (angular, _) {
panel = copy.panels[i];
panel.scopedVars = {};
panel.scopedVars[variable.name] = option;
- panel.repeatIteration = this.iteration;
}
}, this);
};
diff --git a/public/app/features/dashboard/dynamic_dashboard_srv.ts b/public/app/features/dashboard/dynamic_dashboard_srv.ts
new file mode 100644
index 00000000000..6b58f448085
--- /dev/null
+++ b/public/app/features/dashboard/dynamic_dashboard_srv.ts
@@ -0,0 +1,188 @@
+///
+
+import config from 'app/core/config';
+import angular from 'angular';
+import _ from 'lodash';
+
+import coreModule from 'app/core/core_module';
+
+export class DynamicDashboardSrv {
+ iteration: number;
+ dashboard: any;
+
+ constructor() {
+ this.iteration = new Date().getTime();
+ }
+
+ init(dashboard) {
+ if (dashboard.snapshot) { return; }
+ this.process(dashboard, {});
+ }
+
+ update(dashboard) {
+ if (dashboard.snapshot) { return; }
+
+ this.iteration = this.iteration + 1;
+ this.process(dashboard, {});
+ }
+
+ process(dashboard, options) {
+ if (dashboard.templating.list.length === 0) { return; }
+ this.dashboard = dashboard;
+
+ var cleanUpOnly = options.cleanUpOnly;
+
+ var i, j, row, panel;
+ for (i = 0; i < this.dashboard.rows.length; i++) {
+ row = this.dashboard.rows[i];
+ // handle row repeats
+ if (row.repeat) {
+ if (!cleanUpOnly) {
+ this.repeatRow(row, i);
+ }
+ } else if (row.repeatRowId && row.repeatIteration !== this.iteration) {
+ // clean up old left overs
+ this.dashboard.rows.splice(i, 1);
+ i = i - 1;
+ continue;
+ }
+
+ // repeat panels
+ for (j = 0; j < row.panels.length; j++) {
+ panel = row.panels[j];
+ if (panel.repeat) {
+ if (!cleanUpOnly) {
+ this.repeatPanel(panel, row);
+ }
+ } else if (panel.repeatPanelId && panel.repeatIteration !== this.iteration) {
+ // clean up old left overs
+ row.panels = _.without(row.panels, panel);
+ j = j - 1;
+ } else if (!_.isEmpty(panel.scopedVars) && panel.repeatIteration !== this.iteration) {
+ panel.scopedVars = {};
+ }
+ }
+ }
+ }
+
+ // returns a new row clone or reuses a clone from previous iteration
+ getRowClone(sourceRow, repeatIndex, sourceRowIndex) {
+ if (repeatIndex === 0) {
+ return sourceRow;
+ }
+
+ var i, panel, row, copy;
+ var sourceRowId = sourceRowIndex + 1;
+
+ // look for row to reuse
+ for (i = 0; i < this.dashboard.rows.length; i++) {
+ row = this.dashboard.rows[i];
+ if (row.repeatRowId === sourceRowId && row.repeatIteration !== this.iteration) {
+ copy = row;
+ break;
+ }
+ }
+
+ if (!copy) {
+ copy = angular.copy(sourceRow);
+ this.dashboard.rows.splice(sourceRowIndex + repeatIndex, 0, copy);
+
+ // set new panel ids
+ for (i = 0; i < copy.panels.length; i++) {
+ panel = copy.panels[i];
+ panel.id = this.dashboard.getNextPanelId();
+ }
+ }
+
+ copy.repeat = null;
+ copy.repeatRowId = sourceRowId;
+ copy.repeatIteration = this.iteration;
+ return copy;
+ }
+
+ // returns a new row clone or reuses a clone from previous iteration
+ repeatRow(row, rowIndex) {
+ var variables = this.dashboard.templating.list;
+ var variable = _.findWhere(variables, {name: row.repeat});
+ if (!variable) {
+ return;
+ }
+
+ var selected, copy, i, panel;
+ if (variable.current.text === 'All') {
+ selected = variable.options.slice(1, variable.options.length);
+ } else {
+ selected = _.filter(variable.options, {selected: true});
+ }
+
+ _.each(selected, (option, index) => {
+ copy = this.getRowClone(row, index, rowIndex);
+ copy.scopedVars = {};
+ copy.scopedVars[variable.name] = option;
+
+ for (i = 0; i < copy.panels.length; i++) {
+ panel = copy.panels[i];
+ panel.scopedVars = {};
+ panel.scopedVars[variable.name] = option;
+ panel.repeatIteration = this.iteration;
+ }
+ });
+ }
+
+ getPanelClone(sourcePanel, row, index) {
+ // if first clone return source
+ if (index === 0) {
+ return sourcePanel;
+ }
+
+ var i, tmpId, panel, clone;
+
+ // first try finding an existing clone to use
+ for (i = 0; i < row.panels.length; i++) {
+ panel = row.panels[i];
+ if (panel.repeatIteration !== this.iteration && panel.repeatPanelId === sourcePanel.id) {
+ clone = panel;
+ break;
+ }
+ }
+
+ if (!clone) {
+ clone = { id: this.dashboard.getNextPanelId() };
+ row.panels.push(clone);
+ }
+
+ // save id
+ tmpId = clone.id;
+ // copy properties from source
+ angular.copy(sourcePanel, clone);
+ // restore id
+ clone.id = tmpId;
+ clone.repeatIteration = this.iteration;
+ clone.repeatPanelId = sourcePanel.id;
+ clone.repeat = null;
+ return clone;
+ }
+
+ repeatPanel(panel, row) {
+ var variables = this.dashboard.templating.list;
+ var variable = _.findWhere(variables, {name: panel.repeat});
+ if (!variable) { return; }
+
+ var selected;
+ if (variable.current.text === 'All') {
+ selected = variable.options.slice(1, variable.options.length);
+ } else {
+ selected = _.filter(variable.options, {selected: true});
+ }
+
+ _.each(selected, (option, index) => {
+ var copy = this.getPanelClone(panel, row, index);
+ copy.span = Math.max(12 / selected.length, panel.minSpan);
+ copy.scopedVars = copy.scopedVars || {};
+ copy.scopedVars[variable.name] = option;
+ });
+ }
+}
+
+coreModule.service('dynamicDashboardSrv', DynamicDashboardSrv);
+
diff --git a/public/app/features/dashboard/export/export_modal.html b/public/app/features/dashboard/export/export_modal.html
new file mode 100644
index 00000000000..e890e405020
--- /dev/null
+++ b/public/app/features/dashboard/export/export_modal.html
@@ -0,0 +1,29 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ Export the dashboard to a JSON file. The exporter will templatize the
+ dashboard's data sources to make it easy for others to import and reuse.
+ You can share dashboards on Grafana.net
+