This commit is contained in:
Leandro Piccilli 2016-07-07 00:58:26 +02:00
commit 0ebf469f5f
336 changed files with 20735 additions and 3728 deletions

3
.floo Normal file

@@ -0,0 +1,3 @@
{
"url": "https://floobits.com/raintank/grafana"
}

12
.flooignore Normal file

@@ -0,0 +1,12 @@
#*
*.o
*.pyc
*.pyo
*~
extern/
node_modules/
tmp/
data/
vendor/
public_gen/
dist/


@@ -1,20 +1,17 @@
Thank you for helping us make Grafana even better!
* **I'm submitting a ...**
- [ ] Bug report
- [ ] Feature request
- [ ] Question / Support request: **Please do not** open a GitHub issue. [Support Options](http://grafana.org/support/)
To help us respond to your issues faster, please make sure to add as much information as possible.
If this issue is about a plugin, please open the issue in that repository.
Start your issue's title with [Feature Request] / [Bug] / [Question], or no tag if you're unsure. Also, please be aware that GitHub now supports uploading of screenshots; look at the bottom of this input field.
Please include some basic information:
- What grafana version are you using?
Please include this information:
- What Grafana version are you using?
- What datasource are you using?
- What OS are you running grafana on?
- What did you do?
- What was the expected result?
- What happenend instead?
- What happened instead?
If you question/bug relates to a metric query / unexpected data visualization, please include:
**IMPORTANT** If it relates to metric data viz:
- An image or text representation of your metric query
- The raw query and response from your data source (check this in chrome dev tools network tab)
- The raw query and response for the network request (check this in chrome dev tools network tab, here you can see metric requests and other request, please include the request body and request response)


@@ -1,5 +1,73 @@
# 3.0.0 stable (unreleased)
# 4.0-pre (unreleased)
### Enhancements
* **Login**: Adds option to disable username/password logins, closes [#4674](https://github.com/grafana/grafana/issues/4674)
* **SingleStat**: Add series name as option in singlestat panel, closes [#4740](https://github.com/grafana/grafana/issues/4740)
* **Localization**: Week start day now depends on the browser locale setting, closes [#3003](https://github.com/grafana/grafana/issues/3003)
* **Templating**: Update panel repeats for variables that change on time refresh, closes [#5021](https://github.com/grafana/grafana/issues/5021)
# 3.1.0 stable (unreleased)
### Bugfixes
* **User Alert Notices**: Backend error alert popups did not show properly, fixes [#5435](https://github.com/grafana/grafana/issues/5435)
# 3.1.0-beta1 (2016-06-23)
### Enhancements
* **Dashboard Export/Import**: Dashboard export now templatizes data sources and constant variables; users pick these on import, closes [#5084](https://github.com/grafana/grafana/issues/5084)
* **Dashboard Url**: Time range changes now update the URL, closes [#458](https://github.com/grafana/grafana/issues/458)
* **Dashboard Url**: Template variable changes now update the URL, closes [#5002](https://github.com/grafana/grafana/issues/5002)
* **Singlestat**: Add support for range to text mappings, closes [#1319](https://github.com/grafana/grafana/issues/1319)
* **Graph**: Adds sort order options for graph tooltip, closes [#1189](https://github.com/grafana/grafana/issues/1189)
* **Theme**: Add default theme to config file [#5011](https://github.com/grafana/grafana/pull/5011)
* **Page Footer**: Added page footer with links to docs, shows Grafana version and info if new version is available, closes [#4889](https://github.com/grafana/grafana/pull/4889)
* **InfluxDB**: Add spread function, closes [#5211](https://github.com/grafana/grafana/issues/5211)
* **Scripts**: Use restart instead of start for deb package script, closes [#5282](https://github.com/grafana/grafana/pull/5282)
* **Logging**: Moved to a structured logging library, with component-specific level filters set via the config file, closes [#4590](https://github.com/grafana/grafana/issues/4590)
* **OpenTSDB**: Support nested template variables in tag_values function, closes [#4398](https://github.com/grafana/grafana/issues/4398)
* **Datasource**: Pending data source requests are cancelled before new ones are issued (Graphite & Prometheus), closes [#5321](https://github.com/grafana/grafana/issues/5321)
### Breaking changes
* **Logging**: Changed the default log output format (now structured into a message plus key/value pairs, with the logger key acting as the component). You can also switch to JSON log output via the config file.
* **Graphite**: The Graph panel no longer has a Graphite PNG option, closes [#5367](https://github.com/grafana/grafana/issues/5367)
### Bug fixes
* **PNG rendering**: Fixed phantomjs rendering and y-axis label rotation, fixes [#5220](https://github.com/grafana/grafana/issues/5220)
* **CLI**: The cli tool now supports reading plugin.json from dist/plugin.json, fixes [#5410](https://github.com/grafana/grafana/issues/5410)
# 3.0.4 Patch release (2016-05-25)
* **Panel**: Fixed blank dashboard issue when switching to other dashboard while in fullscreen edit mode, fixes [#5163](https://github.com/grafana/grafana/pull/5163)
* **Templating**: Fixed issue with nested multi-select variables cascading and updating child variable selection state, fixes [#4861](https://github.com/grafana/grafana/pull/4861)
* **Templating**: Fixed issue with using templated data source in another template variable query, fixes [#5165](https://github.com/grafana/grafana/pull/5165)
* **Singlestat gauge**: Fixed issue with gauge render position, fixes [#5143](https://github.com/grafana/grafana/pull/5143)
* **Home dashboard**: Fixes broken home dashboard api, fixes [#5167](https://github.com/grafana/grafana/issues/5167)
# 3.0.3 Patch release (2016-05-23)
* **Annotations**: Annotations can now use a template variable as data source, closes [#5054](https://github.com/grafana/grafana/issues/5054)
* **Time picker**: Fixed issue with timepicker and UTC when reading time from URL, fixes [#5078](https://github.com/grafana/grafana/issues/5078)
* **CloudWatch**: Support for multiple accounts via AssumeRole, closes [#3522](https://github.com/grafana/grafana/issues/3522)
* **Singlestat**: Fixed alignment and minimum height issue, fixes [#5113](https://github.com/grafana/grafana/issues/5113), fixes [#4679](https://github.com/grafana/grafana/issues/4679)
* **Share modal**: Fixed link when running Grafana under a sub URL, fixes [#5109](https://github.com/grafana/grafana/issues/5109)
* **Prometheus**: Fixed bug in query editor that caused it not to load when reloading page, fixes [#5107](https://github.com/grafana/grafana/issues/5107)
* **Elasticsearch**: Fixed bug when template variable query returns numeric values, fixes [#5097](https://github.com/grafana/grafana/issues/5097), fixes [#5088](https://github.com/grafana/grafana/issues/5088)
* **Logging**: Fixed issue with reading logging level value, fixes [#5079](https://github.com/grafana/grafana/issues/5079)
* **Docs**: Added docs for org & user preferences HTTP API, closes [#5069](https://github.com/grafana/grafana/issues/5069)
* **Plugin list panel**: Now shows correct enable state for apps when not enabled, fixes [#5068](https://github.com/grafana/grafana/issues/5068)
* **Elasticsearch**: Templating & Annotation queries that use template variables are now formatted correctly, fixes [#5135](https://github.com/grafana/grafana/issues/5135)
# 3.0.2 Patch release (2016-05-16)
* **Templating**: Fixed issue mixing row repeat and panel repeats, fixes [#4988](https://github.com/grafana/grafana/issues/4988)
* **Templating**: Fixed issue detecting dependencies in nested variables, fixes [#4987](https://github.com/grafana/grafana/issues/4987), fixes [#4986](https://github.com/grafana/grafana/issues/4986)
* **Graph**: Fixed broken PNG rendering in graph panel, fixes [#5025](https://github.com/grafana/grafana/issues/5025)
* **Graph**: Fixed broken x-axis on graph panel, fixes [#5024](https://github.com/grafana/grafana/issues/5024)
* **Influxdb**: Fixes crash when hiding a middle series, fixes [#5005](https://github.com/grafana/grafana/issues/5005)
# 3.0.1 Stable (2016-05-11)
### Bug fixes
* **Templating**: Fixed issue with new data source variable not persisting current selected value, fixes [#4934](https://github.com/grafana/grafana/issues/4934)
# 3.0.0-beta7 (2016-05-02)

21
Godeps/Godeps.json generated

@@ -1,6 +1,7 @@
{
"ImportPath": "github.com/grafana/grafana",
"GoVersion": "go1.5.1",
"GodepVersion": "v60",
"Packages": [
"./pkg/..."
],
@@ -124,6 +125,11 @@
"Comment": "v1.0.0",
"Rev": "abb928e07c4108683d6b4d0b6ca08fe6bc0eee5f"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/sts",
"Comment": "v1.0.0",
"Rev": "abb928e07c4108683d6b4d0b6ca08fe6bc0eee5f"
},
{
"ImportPath": "github.com/bmizerany/assert",
"Comment": "release.r60-6-ge17e998",
@@ -199,6 +205,11 @@
"Comment": "v1.2-171-g267b128",
"Rev": "267b128680c46286b9ca13475c3cca5de8f79bd7"
},
{
"ImportPath": "github.com/go-stack/stack",
"Comment": "v1.5.2",
"Rev": "100eb0c0a9c5b306ca2fb4f165df21d80ada4b82"
},
{
"ImportPath": "github.com/go-xorm/core",
"Comment": "v0.4.4-7-g9e608f7",
@@ -221,6 +232,16 @@
"ImportPath": "github.com/hashicorp/go-version",
"Rev": "7e3c02b30806fa5779d3bdfc152ce4c6f40e7b38"
},
{
"ImportPath": "github.com/inconshreveable/log15",
"Comment": "v2.3-61-g20bca5a",
"Rev": "20bca5a7a57282e241fac83ec9ea42538027f1c1"
},
{
"ImportPath": "github.com/inconshreveable/log15/term",
"Comment": "v2.3-61-g20bca5a",
"Rev": "20bca5a7a57282e241fac83ec9ea42538027f1c1"
},
{
"ImportPath": "github.com/jmespath/go-jmespath",
"Comment": "0.2.2",

File diff suppressed because it is too large


@@ -0,0 +1,12 @@
package sts
import "github.com/aws/aws-sdk-go/aws/request"
func init() {
initRequest = func(r *request.Request) {
switch r.Operation.Name {
case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity:
r.Handlers.Sign.Clear() // these operations are unsigned
}
}
}


@@ -0,0 +1,39 @@
package sts_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/awstesting/unit"
"github.com/aws/aws-sdk-go/service/sts"
)
var svc = sts.New(unit.Session, &aws.Config{
Region: aws.String("mock-region"),
})
func TestUnsignedRequest_AssumeRoleWithSAML(t *testing.T) {
req, _ := svc.AssumeRoleWithSAMLRequest(&sts.AssumeRoleWithSAMLInput{
PrincipalArn: aws.String("ARN01234567890123456789"),
RoleArn: aws.String("ARN01234567890123456789"),
SAMLAssertion: aws.String("ASSERT"),
})
err := req.Sign()
assert.NoError(t, err)
assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization"))
}
func TestUnsignedRequest_AssumeRoleWithWebIdentity(t *testing.T) {
req, _ := svc.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
RoleArn: aws.String("ARN01234567890123456789"),
RoleSessionName: aws.String("SESSION"),
WebIdentityToken: aws.String("TOKEN"),
})
err := req.Sign()
assert.NoError(t, err)
assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization"))
}


@@ -0,0 +1,149 @@
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package sts_test
import (
"bytes"
"fmt"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sts"
)
var _ time.Duration
var _ bytes.Buffer
func ExampleSTS_AssumeRole() {
svc := sts.New(session.New())
params := &sts.AssumeRoleInput{
RoleArn: aws.String("arnType"), // Required
RoleSessionName: aws.String("roleSessionNameType"), // Required
DurationSeconds: aws.Int64(1),
ExternalId: aws.String("externalIdType"),
Policy: aws.String("sessionPolicyDocumentType"),
SerialNumber: aws.String("serialNumberType"),
TokenCode: aws.String("tokenCodeType"),
}
resp, err := svc.AssumeRole(params)
if err != nil {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
return
}
// Pretty-print the response data.
fmt.Println(resp)
}
func ExampleSTS_AssumeRoleWithSAML() {
svc := sts.New(session.New())
params := &sts.AssumeRoleWithSAMLInput{
PrincipalArn: aws.String("arnType"), // Required
RoleArn: aws.String("arnType"), // Required
SAMLAssertion: aws.String("SAMLAssertionType"), // Required
DurationSeconds: aws.Int64(1),
Policy: aws.String("sessionPolicyDocumentType"),
}
resp, err := svc.AssumeRoleWithSAML(params)
if err != nil {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
return
}
// Pretty-print the response data.
fmt.Println(resp)
}
func ExampleSTS_AssumeRoleWithWebIdentity() {
svc := sts.New(session.New())
params := &sts.AssumeRoleWithWebIdentityInput{
RoleArn: aws.String("arnType"), // Required
RoleSessionName: aws.String("roleSessionNameType"), // Required
WebIdentityToken: aws.String("clientTokenType"), // Required
DurationSeconds: aws.Int64(1),
Policy: aws.String("sessionPolicyDocumentType"),
ProviderId: aws.String("urlType"),
}
resp, err := svc.AssumeRoleWithWebIdentity(params)
if err != nil {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
return
}
// Pretty-print the response data.
fmt.Println(resp)
}
func ExampleSTS_DecodeAuthorizationMessage() {
svc := sts.New(session.New())
params := &sts.DecodeAuthorizationMessageInput{
EncodedMessage: aws.String("encodedMessageType"), // Required
}
resp, err := svc.DecodeAuthorizationMessage(params)
if err != nil {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
return
}
// Pretty-print the response data.
fmt.Println(resp)
}
func ExampleSTS_GetFederationToken() {
svc := sts.New(session.New())
params := &sts.GetFederationTokenInput{
Name: aws.String("userNameType"), // Required
DurationSeconds: aws.Int64(1),
Policy: aws.String("sessionPolicyDocumentType"),
}
resp, err := svc.GetFederationToken(params)
if err != nil {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
return
}
// Pretty-print the response data.
fmt.Println(resp)
}
func ExampleSTS_GetSessionToken() {
svc := sts.New(session.New())
params := &sts.GetSessionTokenInput{
DurationSeconds: aws.Int64(1),
SerialNumber: aws.String("serialNumberType"),
TokenCode: aws.String("tokenCodeType"),
}
resp, err := svc.GetSessionToken(params)
if err != nil {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
return
}
// Pretty-print the response data.
fmt.Println(resp)
}


@@ -0,0 +1,130 @@
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package sts
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/query"
"github.com/aws/aws-sdk-go/private/signer/v4"
)
// The AWS Security Token Service (STS) is a web service that enables you to
// request temporary, limited-privilege credentials for AWS Identity and Access
// Management (IAM) users or for users that you authenticate (federated users).
// This guide provides descriptions of the STS API. For more detailed information
// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
//
// As an alternative to using the API, you can use one of the AWS SDKs, which
// consist of libraries and sample code for various programming languages and
// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
// way to create programmatic access to STS. For example, the SDKs take care
// of cryptographically signing requests, managing errors, and retrying requests
// automatically. For information about the AWS SDKs, including how to download
// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
// For information about setting up signatures and authorization through the
// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
// in the AWS General Reference. For general information about the Query API, go
// to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
// in Using IAM. For information about using security tokens
// with other AWS products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
// in Using IAM.
//
// If you're new to AWS and need additional technical information about a specific
// AWS product, you can find the product's technical documentation at
// http://aws.amazon.com/documentation/.
//
// Endpoints
//
// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
// that maps to the US East (N. Virginia) region. Additional regions are available,
// but must first be activated in the AWS Management Console before you can
// use a different region's endpoint. For more information about activating
// a region for STS see Activating STS in a New Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in Using IAM.
//
// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
// in the AWS General Reference.
//
// Recording API requests
//
// STS supports AWS CloudTrail, which is a service that records AWS calls for
// your AWS account and delivers log files to an Amazon S3 bucket. By using
// information collected by CloudTrail, you can determine what requests were
// successfully made to STS, who made the request, when it was made, and so
// on. To learn more about CloudTrail, including how to turn it on and find
// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
// The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
type STS struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// A ServiceName is the name of the service the client will make API calls to.
const ServiceName = "sts"
// New creates a new instance of the STS client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a STS client from just a session.
// svc := sts.New(mySession)
//
// // Create a STS client with additional configuration
// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
c := p.ClientConfig(ServiceName, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *STS {
svc := &STS{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "2011-06-15",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBack(v4.Sign)
svc.Handlers.Build.PushBack(query.Build)
svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for a STS operation and runs any
// custom request initialization.
func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}
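The constructor above assembles the client from composable handler lists (Sign, Build, Unmarshal, plus the custom initClient/initRequest hooks). A minimal sketch, not part of this diff, of how a caller could hook that pipeline; the logging handler is illustrative only, and the final call performs a real STS request:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.New())
	// Observe every outgoing operation by prepending a handler to the
	// Send phase, alongside the Sign/Build/Unmarshal handlers wired above.
	svc.Handlers.Send.PushFront(func(r *request.Request) {
		fmt.Println("calling STS operation:", r.Operation.Name)
	})
	svc.GetSessionToken(&sts.GetSessionTokenInput{}) // performs a real request
}
```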


@@ -0,0 +1,38 @@
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
// Package stsiface provides an interface for the AWS Security Token Service.
package stsiface
import (
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/sts"
)
// STSAPI is the interface type for sts.STS.
type STSAPI interface {
AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)
AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)
AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)
AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)
AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)
DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput)
GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error)
GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput)
GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error)
}
var _ STSAPI = (*sts.STS)(nil)
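Because `var _ STSAPI = (*sts.STS)(nil)` guarantees the real client satisfies the interface, code written against STSAPI can take a test double. A minimal sketch with hypothetical names (`getToken`, `mockSTS` are not part of the SDK):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/sts"
	"github.com/aws/aws-sdk-go/service/sts/stsiface"
)

// getToken depends on the interface rather than the concrete client,
// so tests can substitute a mock.
func getToken(api stsiface.STSAPI) (*sts.GetSessionTokenOutput, error) {
	return api.GetSessionToken(&sts.GetSessionTokenInput{})
}

// mockSTS embeds STSAPI so only the stubbed method needs implementing;
// calling any other method would panic on the nil embedded interface.
type mockSTS struct {
	stsiface.STSAPI
}

func (m *mockSTS) GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) {
	return &sts.GetSessionTokenOutput{}, nil
}

func main() {
	out, err := getToken(&mockSTS{})
	fmt.Println(out, err)
}
```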


@@ -0,0 +1,16 @@
language: go
sudo: false
go:
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip
before_install:
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
script:
- goveralls -service=travis-ci


@@ -0,0 +1,13 @@
Copyright 2014 Chris Hines
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,38 @@
[![GoDoc](https://godoc.org/github.com/go-stack/stack?status.svg)](https://godoc.org/github.com/go-stack/stack)
[![Go Report Card](https://goreportcard.com/badge/go-stack/stack)](https://goreportcard.com/report/go-stack/stack)
[![TravisCI](https://travis-ci.org/go-stack/stack.svg?branch=master)](https://travis-ci.org/go-stack/stack)
[![Coverage Status](https://coveralls.io/repos/github/go-stack/stack/badge.svg?branch=master)](https://coveralls.io/github/go-stack/stack?branch=master)
# stack
Package stack implements utilities to capture, manipulate, and format call
stacks. It provides a simpler API than package runtime.
The implementation takes care of the minutia and special cases of interpreting
the program counter (pc) values returned by runtime.Callers.
## Versioning
Package stack publishes releases via [semver](http://semver.org/) compatible Git
tags prefixed with a single 'v'. The master branch always contains the latest
release. The develop branch contains unreleased commits.
## Formatting
Package stack's types implement fmt.Formatter, which provides a simple and
flexible way to declaratively configure formatting when used with logging or
error tracking packages.
```go
func DoTheThing() {
c := stack.Caller(0)
log.Print(c) // "source.go:10"
log.Printf("%+v", c) // "pkg/path/source.go:10"
log.Printf("%n", c) // "DoTheThing"
s := stack.Trace().TrimRuntime()
log.Print(s) // "[source.go:15 caller.go:42 main.go:14]"
}
```
See the docs for all of the supported formatting options.


@@ -0,0 +1,349 @@
// Package stack implements utilities to capture, manipulate, and format call
// stacks. It provides a simpler API than package runtime.
//
// The implementation takes care of the minutia and special cases of
// interpreting the program counter (pc) values returned by runtime.Callers.
//
// Package stack's types implement fmt.Formatter, which provides a simple and
// flexible way to declaratively configure formatting when used with logging
// or error tracking packages.
package stack
import (
"bytes"
"errors"
"fmt"
"io"
"runtime"
"strconv"
"strings"
)
// Call records a single function invocation from a goroutine stack.
type Call struct {
fn *runtime.Func
pc uintptr
}
// Caller returns a Call from the stack of the current goroutine. The argument
// skip is the number of stack frames to ascend, with 0 identifying the
// calling function.
func Caller(skip int) Call {
var pcs [2]uintptr
n := runtime.Callers(skip+1, pcs[:])
var c Call
if n < 2 {
return c
}
c.pc = pcs[1]
if runtime.FuncForPC(pcs[0]) != sigpanic {
c.pc--
}
c.fn = runtime.FuncForPC(c.pc)
return c
}
// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", c).
func (c Call) String() string {
return fmt.Sprint(c)
}
// MarshalText implements encoding.TextMarshaler. It formats the Call the same
// as fmt.Sprintf("%v", c).
func (c Call) MarshalText() ([]byte, error) {
if c.fn == nil {
return nil, ErrNoFunc
}
buf := bytes.Buffer{}
fmt.Fprint(&buf, c)
return buf.Bytes(), nil
}
// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely
// cause is a Call with the zero value.
var ErrNoFunc = errors.New("no call stack information")
// Format implements fmt.Formatter with support for the following verbs.
//
// %s source file
// %d line number
// %n function name
// %v equivalent to %s:%d
//
// It accepts the '+' and '#' flags for most of the verbs as follows.
//
// %+s path of source file relative to the compile time GOPATH
// %#s full path of source file
// %+n import path qualified function name
// %+v equivalent to %+s:%d
// %#v equivalent to %#s:%d
func (c Call) Format(s fmt.State, verb rune) {
if c.fn == nil {
fmt.Fprintf(s, "%%!%c(NOFUNC)", verb)
return
}
switch verb {
case 's', 'v':
file, line := c.fn.FileLine(c.pc)
switch {
case s.Flag('#'):
// done
case s.Flag('+'):
file = file[pkgIndex(file, c.fn.Name()):]
default:
const sep = "/"
if i := strings.LastIndex(file, sep); i != -1 {
file = file[i+len(sep):]
}
}
io.WriteString(s, file)
if verb == 'v' {
buf := [7]byte{':'}
s.Write(strconv.AppendInt(buf[:1], int64(line), 10))
}
case 'd':
_, line := c.fn.FileLine(c.pc)
buf := [6]byte{}
s.Write(strconv.AppendInt(buf[:0], int64(line), 10))
case 'n':
name := c.fn.Name()
if !s.Flag('+') {
const pathSep = "/"
if i := strings.LastIndex(name, pathSep); i != -1 {
name = name[i+len(pathSep):]
}
const pkgSep = "."
if i := strings.Index(name, pkgSep); i != -1 {
name = name[i+len(pkgSep):]
}
}
io.WriteString(s, name)
}
}
// PC returns the program counter for this call frame; multiple frames may
// have the same PC value.
func (c Call) PC() uintptr {
return c.pc
}
// name returns the import path qualified name of the function containing the
// call.
func (c Call) name() string {
if c.fn == nil {
return "???"
}
return c.fn.Name()
}
func (c Call) file() string {
if c.fn == nil {
return "???"
}
file, _ := c.fn.FileLine(c.pc)
return file
}
func (c Call) line() int {
if c.fn == nil {
return 0
}
_, line := c.fn.FileLine(c.pc)
return line
}
// CallStack records a sequence of function invocations from a goroutine
// stack.
type CallStack []Call
// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", cs).
func (cs CallStack) String() string {
return fmt.Sprint(cs)
}
var (
openBracketBytes = []byte("[")
closeBracketBytes = []byte("]")
spaceBytes = []byte(" ")
)
// MarshalText implements encoding.TextMarshaler. It formats the CallStack the
// same as fmt.Sprintf("%v", cs).
func (cs CallStack) MarshalText() ([]byte, error) {
buf := bytes.Buffer{}
buf.Write(openBracketBytes)
for i, pc := range cs {
if pc.fn == nil {
return nil, ErrNoFunc
}
if i > 0 {
buf.Write(spaceBytes)
}
fmt.Fprint(&buf, pc)
}
buf.Write(closeBracketBytes)
return buf.Bytes(), nil
}
// Format implements fmt.Formatter by printing the CallStack as square brackets
// ([, ]) surrounding a space separated list of Calls each formatted with the
// supplied verb and options.
func (cs CallStack) Format(s fmt.State, verb rune) {
s.Write(openBracketBytes)
for i, pc := range cs {
if i > 0 {
s.Write(spaceBytes)
}
pc.Format(s, verb)
}
s.Write(closeBracketBytes)
}
// findSigpanic intentionally executes faulting code to generate a stack trace
// containing an entry for runtime.sigpanic.
func findSigpanic() *runtime.Func {
var fn *runtime.Func
var p *int
func() int {
defer func() {
if p := recover(); p != nil {
var pcs [512]uintptr
n := runtime.Callers(2, pcs[:])
for _, pc := range pcs[:n] {
f := runtime.FuncForPC(pc)
if f.Name() == "runtime.sigpanic" {
fn = f
break
}
}
}
}()
// intentional nil pointer dereference to trigger sigpanic
return *p
}()
return fn
}
var sigpanic = findSigpanic()
// Trace returns a CallStack for the current goroutine with element 0
// identifying the calling function.
func Trace() CallStack {
var pcs [512]uintptr
n := runtime.Callers(2, pcs[:])
cs := make([]Call, n)
for i, pc := range pcs[:n] {
pcFix := pc
if i > 0 && cs[i-1].fn != sigpanic {
pcFix--
}
cs[i] = Call{
fn: runtime.FuncForPC(pcFix),
pc: pcFix,
}
}
return cs
}
// TrimBelow returns a slice of the CallStack with all entries below c
// removed.
func (cs CallStack) TrimBelow(c Call) CallStack {
for len(cs) > 0 && cs[0].pc != c.pc {
cs = cs[1:]
}
return cs
}
// TrimAbove returns a slice of the CallStack with all entries above c
// removed.
func (cs CallStack) TrimAbove(c Call) CallStack {
for len(cs) > 0 && cs[len(cs)-1].pc != c.pc {
cs = cs[:len(cs)-1]
}
return cs
}
// pkgIndex returns the index that results in file[index:] being the path of
// file relative to the compile time GOPATH, and file[:index] being the
// $GOPATH/src/ portion of file. funcName must be the name of a function in
// file as returned by runtime.Func.Name.
func pkgIndex(file, funcName string) int {
// As of Go 1.6.2 there is no direct way to know the compile time GOPATH
// at runtime, but we can infer the number of path segments in the GOPATH.
// We note that runtime.Func.Name() returns the function name qualified by
// the import path, which does not include the GOPATH. Thus we can trim
// segments from the beginning of the file path until the number of path
// separators remaining is one more than the number of path separators in
// the function name. For example, given:
//
// GOPATH /home/user
// file /home/user/src/pkg/sub/file.go
// fn.Name() pkg/sub.Type.Method
//
// We want to produce:
//
// file[:idx] == /home/user/src/
// file[idx:] == pkg/sub/file.go
//
// From this we can easily see that fn.Name() has one less path separator
// than our desired result for file[idx:]. We count separators from the
// end of the file path until we find two more than in the function name
// and then move one character forward to preserve the initial path
// segment without a leading separator.
const sep = "/"
i := len(file)
for n := strings.Count(funcName, sep) + 2; n > 0; n-- {
i = strings.LastIndex(file[:i], sep)
if i == -1 {
i = -len(sep)
break
}
}
// get back to 0 or trim the leading separator
return i + len(sep)
}
var runtimePath string
func init() {
var pcs [1]uintptr
runtime.Callers(0, pcs[:])
fn := runtime.FuncForPC(pcs[0])
file, _ := fn.FileLine(pcs[0])
idx := pkgIndex(file, fn.Name())
runtimePath = file[:idx]
if runtime.GOOS == "windows" {
runtimePath = strings.ToLower(runtimePath)
}
}
func inGoroot(c Call) bool {
file := c.file()
if len(file) == 0 || file[0] == '?' {
return true
}
if runtime.GOOS == "windows" {
file = strings.ToLower(file)
}
return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go")
}
// TrimRuntime returns a slice of the CallStack with the topmost entries from
// the go runtime removed. It considers any calls originating from unknown
// files, files under GOROOT, or _testmain.go as part of the runtime.
func (cs CallStack) TrimRuntime() CallStack {
for len(cs) > 0 && inGoroot(cs[len(cs)-1]) {
cs = cs[:len(cs)-1]
}
return cs
}
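A minimal usage sketch, not part of the package, combining Caller, Trace, TrimBelow, and TrimRuntime: `report` prints the stack of whatever called it, hiding its own frame and the runtime's frames.

```go
package main

import (
	"fmt"

	"github.com/go-stack/stack"
)

// report prints the stack of its caller.
func report() {
	caller := stack.Caller(1) // frame of report's caller
	// Drop report's own frame (below the caller) and the runtime frames above.
	trace := stack.Trace().TrimBelow(caller).TrimRuntime()
	fmt.Printf("%+v\n", trace) // e.g. [pkg/path/main.go:18]
}

func main() {
	report()
}
```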


@@ -0,0 +1,10 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip


@@ -0,0 +1,11 @@
Contributors to log15:
- Aaron L
- Alan Shreve
- Chris Hines
- Ciaran Downey
- Dmitry Chestnykh
- Evan Shaw
- Péter Szilágyi
- Trevor Gattis
- Vincent Vanackere


@@ -0,0 +1,13 @@
Copyright 2014 Alan Shreve
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,70 @@
![obligatory xkcd](http://imgs.xkcd.com/comics/standards.png)
# log15 [![godoc reference](https://godoc.org/github.com/inconshreveable/log15?status.png)](https://godoc.org/github.com/inconshreveable/log15) [![Build Status](https://travis-ci.org/inconshreveable/log15.svg?branch=master)](https://travis-ci.org/inconshreveable/log15)
Package log15 provides an opinionated, simple toolkit for best-practice logging in Go (golang) that is both human and machine readable. It is modeled after the Go standard library's [`io`](http://golang.org/pkg/io/) and [`net/http`](http://golang.org/pkg/net/http/) packages and is an alternative to the standard library's [`log`](http://golang.org/pkg/log/) package.
## Features
- A simple, easy-to-understand API
- Promotes structured logging by encouraging use of key/value pairs
- Child loggers which inherit and add their own private context
- Lazy evaluation of expensive operations
- Simple Handler interface allowing for construction of flexible, custom logging configurations with a tiny API.
- Color terminal support
- Built-in support for logging to files, streams, syslog, and the network
- Support for forking records to multiple handlers, buffering records for output, failing over from failed handler writes, + more
## Versioning
The API of the master branch of log15 should always be considered unstable. If you want to rely on a stable API,
you must vendor the library.
## Importing
```go
import log "github.com/inconshreveable/log15"
```
## Examples
```go
// all loggers can have key/value context
srvlog := log.New("module", "app/server")
// all log messages can have key/value context
srvlog.Warn("abnormal conn rate", "rate", curRate, "low", lowRate, "high", highRate)
// child loggers with inherited context
connlog := srvlog.New("raddr", c.RemoteAddr())
connlog.Info("connection open")
// lazy evaluation
connlog.Debug("ping remote", "latency", log.Lazy{pingRemote})
// flexible configuration
srvlog.SetHandler(log.MultiHandler(
log.StreamHandler(os.Stderr, log.LogfmtFormat()),
log.LvlFilterHandler(
log.LvlError,
log.Must.FileHandler("errors.json", log.JsonFormat()))))
```
## Breaking API Changes
The following commits broke API stability. This reference is intended to help you understand the consequences of updating to a newer version
of log15.
- 57a084d014d4150152b19e4e531399a7145d1540 - Added a `Get()` method to the `Logger` interface to retrieve the current handler
- 93404652ee366648fa622b64d1e2b67d75a3094a - `Record` field `Call` changed to `stack.Call` with switch to `github.com/go-stack/stack`
- a5e7613673c73281f58e15a87d2cf0cf111e8152 - Restored `syslog.Priority` argument to the `SyslogXxx` handler constructors
## FAQ
### The varargs style is brittle and error prone! Can I have type safety please?
Yes. Use `log.Ctx`:
```go
srvlog := log.New(log.Ctx{"module": "app/server"})
srvlog.Warn("abnormal conn rate", log.Ctx{"rate": curRate, "low": lowRate, "high": highRate})
```
## License
Apache


@@ -0,0 +1,333 @@
/*
Package log15 provides an opinionated, simple toolkit for best-practice logging that is
both human and machine readable. It is modeled after the standard library's io and net/http
packages.
This package requires you to log only key/value pairs. Keys must be strings. Values may be
any type that you like. The default output format is logfmt, but you may also choose to use
JSON instead if that suits you. Here's how you log:
log.Info("page accessed", "path", r.URL.Path, "user_id", user.id)
This will output a line that looks like:
lvl=info t=2014-05-02T16:07:23-0700 msg="page accessed" path=/org/71/profile user_id=9
Getting Started
To get started, you'll want to import the library:
import log "github.com/inconshreveable/log15"
Now you're ready to start logging:
func main() {
log.Info("Program starting", "args", os.Args)
}
Convention
Because recording a human-meaningful message is common and good practice, the first argument to every
logging method is the value to the *implicit* key 'msg'.
Additionally, the level you choose for a message will be automatically added with the key 'lvl', and so
will the current timestamp with key 't'.
You may supply any additional context as a set of key/value pairs to the logging function. log15 allows
you to favor terseness, ordering, and speed over safety. This is a reasonable tradeoff for
logging functions. You don't need to explicitly state keys/values; log15 understands that they alternate
in the variadic argument list:
log.Warn("size out of bounds", "low", lowBound, "high", highBound, "val", val)
If you really do favor your type-safety, you may choose to pass a log.Ctx instead:
log.Warn("size out of bounds", log.Ctx{"low": lowBound, "high": highBound, "val": val})
Context loggers
Frequently, you want to add context to a logger so that you can track actions associated with it. An http
request is a good example. You can easily create new loggers that have context that is automatically included
with each log line:
requestlogger := log.New("path", r.URL.Path)
// later
requestlogger.Debug("db txn commit", "duration", txnTimer.Finish())
This will output a log line that includes the path context that is attached to the logger:
lvl=dbug t=2014-05-02T16:07:23-0700 path=/repo/12/add_hook msg="db txn commit" duration=0.12
Handlers
The Handler interface defines where log lines are printed to and how they are formatted. Handler is a
single interface that is inspired by net/http's handler interface:
type Handler interface {
Log(r *Record) error
}
Handlers can filter records, format them, or dispatch to multiple other Handlers.
This package implements a number of Handlers for common logging patterns that are
easily composed to create flexible, custom logging structures.
Here's an example handler that prints logfmt output to Stdout:
handler := log.StreamHandler(os.Stdout, log.LogfmtFormat())
Here's an example handler that defers to two other handlers. One handler only prints records
from the rpc package in logfmt to standard out. The other prints records at Error level
or above in JSON formatted output to the file /var/log/service.json
handler := log.MultiHandler(
log.LvlFilterHandler(log.LvlError, log.Must.FileHandler("/var/log/service.json", log.JsonFormat())),
log.MatchFilterHandler("pkg", "app/rpc", log.StdoutHandler),
)
Logging File Names and Line Numbers
This package implements three Handlers that add debugging information to the
context, CallerFileHandler, CallerFuncHandler and CallerStackHandler. Here's
an example that adds the source file and line number of each logging call to
the context.
h := log.CallerFileHandler(log.StdoutHandler)
log.Root().SetHandler(h)
...
log.Error("open file", "err", err)
This will output a line that looks like:
lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" caller=data.go:42
Here's an example that logs the call stack rather than just the call site.
h := log.CallerStackHandler("%+v", log.StdoutHandler)
log.Root().SetHandler(h)
...
log.Error("open file", "err", err)
This will output a line that looks like:
lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" stack="[pkg/data.go:42 pkg/cmd/main.go]"
The "%+v" format instructs the handler to include the path of the source file
relative to the compile time GOPATH. The github.com/go-stack/stack package
documents the full list of formatting verbs and modifiers available.
Custom Handlers
The Handler interface is so simple that it's also trivial to write your own. Let's create an
example handler which tries to write to one handler, but if that fails it falls back to
writing to another handler and includes the error that it encountered when trying to write
to the primary. This might be useful when trying to log over a network socket, but if that
fails you want to log those records to a file on disk.
type BackupHandler struct {
Primary Handler
Secondary Handler
}
func (h *BackupHandler) Log (r *Record) error {
err := h.Primary.Log(r)
if err != nil {
r.Ctx = append(r.Ctx, "primary_err", err)
return h.Secondary.Log(r)
}
return nil
}
This pattern is so useful that a generic version handling an arbitrary number of Handlers,
called FailoverHandler, is included as part of this library.
Logging Expensive Operations
Sometimes, you want to log values that are extremely expensive to compute, but you don't want to pay
the price of computing them if you haven't turned up your logging level to a high level of detail.
This package provides a simple type to annotate a logging operation that you want to be evaluated
lazily, just when it is about to be logged, so that it would not be evaluated if an upstream Handler
filters it out. Just wrap any function which takes no arguments with the log.Lazy type. For example:
func factorRSAKey() (factors []int) {
// return the factors of a very large number
}
log.Debug("factors", log.Lazy{factorRSAKey})
If this message is not logged for any reason (like logging at the Error level), then
factorRSAKey is never evaluated.
Dynamic context values
The same log.Lazy mechanism can be used to attach context to a logger which you want to be
evaluated when the message is logged, but not when the logger is created. For example, let's imagine
a game where you have Player objects:
type Player struct {
name string
alive bool
log.Logger
}
You always want to log a player's name and whether they're alive or dead, so when you create the player
object, you might do:
p := &Player{name: name, alive: true}
p.Logger = log.New("name", p.name, "alive", p.alive)
But now, even after a player has died, the logger will still report they are alive because the logging
context was evaluated when the logger was created. By using the Lazy wrapper, we can defer the evaluation
of whether the player is alive or not to each log message, so that the log records will reflect the player's
current state no matter when the log message is written:
p := &Player{name: name, alive: true}
isAlive := func() bool { return p.alive }
player.Logger = log.New("name", p.name, "alive", log.Lazy{isAlive})
Terminal Format
If log15 detects that stdout is a terminal, it will configure the default
handler for it (which is log.StdoutHandler) to use TerminalFormat. This format
logs records nicely for your terminal, including color-coded output based
on log level.
Error Handling
Because log15 allows you to step around the type system, there are a few ways you can specify
invalid arguments to the logging functions. You could, for example, wrap something that is not
a zero-argument function with log.Lazy or pass a context key that is not a string. Since logging libraries
are typically the mechanism by which errors are reported, it would be onerous for the logging functions
to return errors. Instead, log15 handles errors by making these guarantees to you:
- Any log record containing an error will still be printed with the error explained to you as part of the log record.
- Any log record containing an error will include the context key LOG15_ERROR, enabling you to easily
(and if you like, automatically) detect if any of your logging calls are passing bad values.
Understanding this, you might wonder why the Handler interface can return an error value in its Log method. Handlers
are encouraged to return errors only if they fail to write their log records out to an external source like if the
syslog daemon is not responding. This allows the construction of useful handlers which cope with those failures
like the FailoverHandler.
Library Use
log15 is intended to be useful for library authors as a way to provide configurable logging to
users of their library. Best practice for use in a library is to always disable all output for your logger
by default and to provide a public Logger instance that consumers of your library can configure. Like so:
package yourlib
import "github.com/inconshreveable/log15"
var Log = log.New()
func init() {
Log.SetHandler(log.DiscardHandler())
}
Users of your library may then enable it if they like:
import "github.com/inconshreveable/log15"
import "example.com/yourlib"
func main() {
handler := // custom handler setup
yourlib.Log.SetHandler(handler)
}
Best practices attaching logger context
The ability to attach context to a logger is a powerful one. Where should you do it and why?
I favor embedding a Logger directly into any persistent object in my application and adding
unique, tracing context keys to it. For instance, imagine I am writing a web browser:
type Tab struct {
url string
render *RenderingContext
// ...
Logger
}
func NewTab(url string) *Tab {
return &Tab {
// ...
url: url,
Logger: log.New("url", url),
}
}
When a new tab is created, I assign a logger to it with the url of
the tab as context so it can easily be traced through the logs.
Now, whenever we perform any operation with the tab, we'll log with its
embedded logger and it will include the tab title automatically:
tab.Debug("moved position", "idx", tab.idx)
There's only one problem. What if the tab url changes? We could
use log.Lazy to make sure the current url is always written, but that
would mean that we couldn't trace a tab's full lifetime through our
logs after the user navigates to a new URL.
Instead, think about what values to attach to your loggers the
same way you think about what to use as a key in a SQL database schema.
If it's possible to use a natural key that is unique for the lifetime of the
object, do so. But otherwise, log15's ext package has a handy RandId
function to let you generate what you might call "surrogate keys".
They're just random hex identifiers to use for tracing. Back to our
Tab example, we would prefer to set up our Logger like so:
import logext "github.com/inconshreveable/log15/ext"
t := &Tab {
// ...
url: url,
}
t.Logger = log.New("id", logext.RandId(8), "url", log.Lazy{t.getUrl})
return t
Now we'll have a unique traceable identifier even across loading new urls, but
we'll still be able to see the tab's current url in the log messages.
Must
For all Handler functions which can return an error, there is a version of that
function which will return no error but panics on failure. They are all available
on the Must object. For example:
log.Must.FileHandler("/path", log.JsonFormat())
log.Must.NetHandler("tcp", ":1234", log.JsonFormat())
Inspiration and Credit
All of the following excellent projects inspired the design of this library:
code.google.com/p/log4go
github.com/op/go-logging
github.com/technoweenie/grohl
github.com/Sirupsen/logrus
github.com/kr/logfmt
github.com/spacemonkeygo/spacelog
golang's stdlib, notably io and net/http
The Name
https://xkcd.com/927/
*/
package log15
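A minimal sketch pulling the doc's pieces together, with illustrative file and key names: logfmt to stderr for everything, errors duplicated to a JSON file, and every record annotated with its call site.

```go
package main

import (
	"os"

	log "github.com/inconshreveable/log15"
)

func main() {
	h := log.MultiHandler(
		log.StreamHandler(os.Stderr, log.LogfmtFormat()),
		log.LvlFilterHandler(log.LvlError,
			log.Must.FileHandler("errors.json", log.JsonFormat())),
	)
	// Annotate every record with caller=file:line, then install globally.
	log.Root().SetHandler(log.CallerFileHandler(h))

	log.Info("service started", "pid", os.Getpid())
}
```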


@@ -0,0 +1,257 @@
package log15
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strconv"
"strings"
"time"
)
const (
timeFormat = "2006-01-02T15:04:05-0700"
termTimeFormat = "01-02|15:04:05"
floatFormat = 'f'
termMsgJust = 40
)
type Format interface {
Format(r *Record) []byte
}
// FormatFunc returns a new Format object which uses
// the given function to perform record formatting.
func FormatFunc(f func(*Record) []byte) Format {
return formatFunc(f)
}
type formatFunc func(*Record) []byte
func (f formatFunc) Format(r *Record) []byte {
return f(r)
}
// TerminalFormat formats log records optimized for human readability on
// a terminal with color-coded level output and terser human friendly timestamp.
// This format should only be used for interactive programs or while developing.
//
// [TIME] [LEVEL] MESSAGE key=value key=value ...
//
// Example:
//
// [May 16 20:58:45] [DBUG] remove route ns=haproxy addr=127.0.0.1:50002
//
func TerminalFormat() Format {
return FormatFunc(func(r *Record) []byte {
var color = 0
switch r.Lvl {
case LvlCrit:
color = 35
case LvlError:
color = 31
case LvlWarn:
color = 33
case LvlInfo:
color = 32
case LvlDebug:
color = 36
}
b := &bytes.Buffer{}
lvl := strings.ToUpper(r.Lvl.String())
if color > 0 {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %s ", color, lvl, r.Time.Format(termTimeFormat), r.Msg)
} else {
fmt.Fprintf(b, "[%s] [%s] %s ", lvl, r.Time.Format(termTimeFormat), r.Msg)
}
// try to justify the log output for short messages
if len(r.Ctx) > 0 && len(r.Msg) < termMsgJust {
b.Write(bytes.Repeat([]byte{' '}, termMsgJust-len(r.Msg)))
}
// print the keys logfmt style
logfmt(b, r.Ctx, color)
return b.Bytes()
})
}
// LogfmtFormat prints records in logfmt format, an easy machine-parseable but human-readable
// format for key/value pairs.
//
// For more details see: http://godoc.org/github.com/kr/logfmt
//
func LogfmtFormat() Format {
return FormatFunc(func(r *Record) []byte {
common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg}
buf := &bytes.Buffer{}
logfmt(buf, append(common, r.Ctx...), 0)
return buf.Bytes()
})
}
func logfmt(buf *bytes.Buffer, ctx []interface{}, color int) {
for i := 0; i < len(ctx); i += 2 {
if i != 0 {
buf.WriteByte(' ')
}
k, ok := ctx[i].(string)
v := formatLogfmtValue(ctx[i+1])
if !ok {
k, v = errorKey, formatLogfmtValue(k)
}
// XXX: we should probably check that all of your key bytes aren't invalid
if color > 0 {
fmt.Fprintf(buf, "\x1b[%dm%s\x1b[0m=%s", color, k, v)
} else {
fmt.Fprintf(buf, "%s=%s", k, v)
}
}
buf.WriteByte('\n')
}
// JsonFormat formats log records as JSON objects separated by newlines.
// It is the equivalent of JsonFormatEx(false, true).
func JsonFormat() Format {
return JsonFormatEx(false, true)
}
// JsonFormatEx formats log records as JSON objects. If pretty is true,
// records will be pretty-printed. If lineSeparated is true, records
// will be logged with a new line between each record.
func JsonFormatEx(pretty, lineSeparated bool) Format {
jsonMarshal := json.Marshal
if pretty {
jsonMarshal = func(v interface{}) ([]byte, error) {
return json.MarshalIndent(v, "", " ")
}
}
return FormatFunc(func(r *Record) []byte {
props := make(map[string]interface{})
props[r.KeyNames.Time] = r.Time
props[r.KeyNames.Lvl] = r.Lvl.String()
props[r.KeyNames.Msg] = r.Msg
for i := 0; i < len(r.Ctx); i += 2 {
k, ok := r.Ctx[i].(string)
if !ok {
props[errorKey] = fmt.Sprintf("%+v is not a string key", r.Ctx[i])
}
props[k] = formatJsonValue(r.Ctx[i+1])
}
b, err := jsonMarshal(props)
if err != nil {
b, _ = jsonMarshal(map[string]string{
errorKey: err.Error(),
})
return b
}
if lineSeparated {
b = append(b, '\n')
}
return b
})
}
func formatShared(value interface{}) (result interface{}) {
defer func() {
if err := recover(); err != nil {
if v := reflect.ValueOf(value); v.Kind() == reflect.Ptr && v.IsNil() {
result = "nil"
} else {
panic(err)
}
}
}()
switch v := value.(type) {
case time.Time:
return v.Format(timeFormat)
case error:
return v.Error()
case fmt.Stringer:
return v.String()
default:
return v
}
}
func formatJsonValue(value interface{}) interface{} {
value = formatShared(value)
switch value.(type) {
case int, int8, int16, int32, int64, float32, float64, uint, uint8, uint16, uint32, uint64, string:
return value
default:
return fmt.Sprintf("%+v", value)
}
}
// formatValue formats a value for serialization
func formatLogfmtValue(value interface{}) string {
if value == nil {
return "nil"
}
value = formatShared(value)
switch v := value.(type) {
case bool:
return strconv.FormatBool(v)
case float32:
return strconv.FormatFloat(float64(v), floatFormat, 3, 64)
case float64:
return strconv.FormatFloat(v, floatFormat, 3, 64)
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
return fmt.Sprintf("%d", value)
case string:
return escapeString(v)
default:
return escapeString(fmt.Sprintf("%+v", value))
}
}
func escapeString(s string) string {
needQuotes := false
e := bytes.Buffer{}
e.WriteByte('"')
for _, r := range s {
if r <= ' ' || r == '=' || r == '"' {
needQuotes = true
}
switch r {
case '\\', '"':
e.WriteByte('\\')
e.WriteByte(byte(r))
case '\n':
e.WriteByte('\\')
e.WriteByte('n')
case '\r':
e.WriteByte('\\')
e.WriteByte('r')
case '\t':
e.WriteByte('\\')
e.WriteByte('t')
default:
e.WriteRune(r)
}
}
e.WriteByte('"')
start, stop := 0, e.Len()
if !needQuotes {
start, stop = 1, stop-1
}
return string(e.Bytes()[start:stop])
}
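A short sketch, assuming only the exported API above, that switches a logger from the default logfmt output to pretty-printed, line-separated JSON; the module name and context keys are arbitrary:

```go
package main

import (
	"os"
	"time"

	log "github.com/inconshreveable/log15"
)

func main() {
	l := log.New("module", "demo")
	// Pretty-printed JSON, one record per line.
	l.SetHandler(log.StreamHandler(os.Stdout, log.JsonFormatEx(true, true)))
	l.Info("request served", "status", 200, "dur", 12*time.Millisecond)
}
```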


@@ -0,0 +1,356 @@
package log15
import (
"fmt"
"io"
"net"
"os"
"reflect"
"sync"
"github.com/go-stack/stack"
)
// A Logger prints its log records by writing to a Handler.
// The Handler interface defines where and how log records are written.
// Handlers are composable, providing you great flexibility in combining
// them to achieve the logging structure that suits your applications.
type Handler interface {
Log(r *Record) error
}
// FuncHandler returns a Handler that logs records with the given
// function.
func FuncHandler(fn func(r *Record) error) Handler {
return funcHandler(fn)
}
type funcHandler func(r *Record) error
func (h funcHandler) Log(r *Record) error {
return h(r)
}
// StreamHandler writes log records to an io.Writer
// with the given format. StreamHandler can be used
// to easily begin writing log records to other
// outputs.
//
// StreamHandler wraps itself with LazyHandler and SyncHandler
// to evaluate Lazy objects and perform safe concurrent writes.
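//
// For example, to write logfmt records to standard error (usage sketch):
//
// h := log.StreamHandler(os.Stderr, log.LogfmtFormat())
//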
func StreamHandler(wr io.Writer, fmtr Format) Handler {
h := FuncHandler(func(r *Record) error {
_, err := wr.Write(fmtr.Format(r))
return err
})
return LazyHandler(SyncHandler(h))
}
// SyncHandler can be wrapped around a handler to guarantee that
// only a single Log operation can proceed at a time. It's necessary
// for thread-safe concurrent writes.
func SyncHandler(h Handler) Handler {
var mu sync.Mutex
return FuncHandler(func(r *Record) error {
defer mu.Unlock()
mu.Lock()
return h.Log(r)
})
}
// FileHandler returns a handler which writes log records to the given file
// using the given format. If the path
// already exists, FileHandler will append to the given file. If it does not,
// FileHandler will create the file with mode 0644.
func FileHandler(path string, fmtr Format) (Handler, error) {
f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
return nil, err
}
return closingHandler{f, StreamHandler(f, fmtr)}, nil
}
// NetHandler opens a socket to the given address and writes records
// over the connection.
func NetHandler(network, addr string, fmtr Format) (Handler, error) {
conn, err := net.Dial(network, addr)
if err != nil {
return nil, err
}
return closingHandler{conn, StreamHandler(conn, fmtr)}, nil
}
// XXX: closingHandler is essentially unused at the moment
// it's meant for a future time when the Handler interface supports
// a possible Close() operation
type closingHandler struct {
io.WriteCloser
Handler
}
func (h *closingHandler) Close() error {
return h.WriteCloser.Close()
}
// CallerFileHandler returns a Handler that adds the line number and file of
// the calling function to the context with key "caller".
func CallerFileHandler(h Handler) Handler {
return FuncHandler(func(r *Record) error {
r.Ctx = append(r.Ctx, "caller", fmt.Sprint(r.Call))
return h.Log(r)
})
}
// CallerFuncHandler returns a Handler that adds the calling function name to
// the context with key "fn".
func CallerFuncHandler(h Handler) Handler {
return FuncHandler(func(r *Record) error {
r.Ctx = append(r.Ctx, "fn", fmt.Sprintf("%+n", r.Call))
return h.Log(r)
})
}
// CallerStackHandler returns a Handler that adds a stack trace to the context
// with key "stack". The stack trace is formatted as a space separated list of
// call sites inside matching []'s. The most recent call site is listed first.
// Each call site is formatted according to format. See the documentation of
// package github.com/go-stack/stack for the list of supported formats.
func CallerStackHandler(format string, h Handler) Handler {
return FuncHandler(func(r *Record) error {
s := stack.Trace().TrimBelow(r.Call).TrimRuntime()
if len(s) > 0 {
r.Ctx = append(r.Ctx, "stack", fmt.Sprintf(format, s))
}
return h.Log(r)
})
}
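
A short sketch combining the caller handlers, under the same import assumption as above; each record picks up `caller` and `fn` keys on its way to the writer:

```go
package main

import (
	"os"

	log "github.com/inconshreveable/log15"
)

func main() {
	// Records flow through CallerFileHandler and CallerFuncHandler
	// before being formatted, gaining "caller" and "fn" context keys.
	log.Root().SetHandler(
		log.CallerFuncHandler(
			log.CallerFileHandler(
				log.StreamHandler(os.Stderr, log.LogfmtFormat()))))
	log.Debug("cache miss", "key", "user:42")
}
```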
// FilterHandler returns a Handler that only writes records to the
// wrapped Handler if the given function evaluates true. For example,
// to only log records where the 'err' key is not nil:
//
// logger.SetHandler(FilterHandler(func(r *Record) bool {
// for i := 0; i < len(r.Ctx); i += 2 {
// if r.Ctx[i] == "err" {
// return r.Ctx[i+1] != nil
// }
// }
// return false
// }, h))
//
func FilterHandler(fn func(r *Record) bool, h Handler) Handler {
return FuncHandler(func(r *Record) error {
if fn(r) {
return h.Log(r)
}
return nil
})
}
// MatchFilterHandler returns a Handler that only writes records
// to the wrapped Handler if the given key in the logged
// context matches the value. For example, to only log records
// from your ui package:
//
// log.MatchFilterHandler("pkg", "app/ui", log.StdoutHandler)
//
func MatchFilterHandler(key string, value interface{}, h Handler) Handler {
return FilterHandler(func(r *Record) (pass bool) {
switch key {
case r.KeyNames.Lvl:
return r.Lvl == value
case r.KeyNames.Time:
return r.Time == value
case r.KeyNames.Msg:
return r.Msg == value
}
for i := 0; i < len(r.Ctx); i += 2 {
if r.Ctx[i] == key {
return r.Ctx[i+1] == value
}
}
return false
}, h)
}
// LvlFilterHandler returns a Handler that only writes
// records at or below the given verbosity
// level to the wrapped Handler. For example, to only
// log Error/Crit records:
//
// log.LvlFilterHandler(log.Error, log.StdoutHandler)
//
func LvlFilterHandler(maxLvl Lvl, h Handler) Handler {
return FilterHandler(func(r *Record) (pass bool) {
return r.Lvl <= maxLvl
}, h)
}
// A MultiHandler dispatches any write to each of its handlers.
// This is useful for writing different types of log information
// to different locations. For example, to log to a file and
// standard error:
//
// log.MultiHandler(
// log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
// log.StderrHandler)
//
func MultiHandler(hs ...Handler) Handler {
return FuncHandler(func(r *Record) error {
for _, h := range hs {
// what to do about failures?
h.Log(r)
}
return nil
})
}
// A FailoverHandler writes all log records to the first handler
// specified, but will failover and write to the second handler if
// the first handler has failed, and so on for all handlers specified.
// For example you might want to log to a network socket, but failover
// to writing to a file if the network fails, and then to
// standard out if the file write fails:
//
// log.FailoverHandler(
// log.Must.NetHandler("tcp", ":9090", log.JsonFormat()),
// log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
// log.StdoutHandler)
//
// All writes that do not go to the first handler will add context with keys of
// the form "failover_err_{idx}" which explain the error encountered while
// trying to write to the handlers before them in the list.
func FailoverHandler(hs ...Handler) Handler {
return FuncHandler(func(r *Record) error {
var err error
for i, h := range hs {
err = h.Log(r)
if err == nil {
return nil
} else {
r.Ctx = append(r.Ctx, fmt.Sprintf("failover_err_%d", i), err)
}
}
return err
})
}
// ChannelHandler writes all records to the given channel.
// It blocks if the channel is full. Useful for async processing
// of log messages, it's used by BufferedHandler.
func ChannelHandler(recs chan<- *Record) Handler {
return FuncHandler(func(r *Record) error {
recs <- r
return nil
})
}
// BufferedHandler writes all records to a buffered
// channel of the given size which flushes into the wrapped
// handler whenever it is available for writing. Since these
// writes happen asynchronously, writes to a BufferedHandler
// never return an error and any errors from the wrapped handler are ignored.
func BufferedHandler(bufSize int, h Handler) Handler {
recs := make(chan *Record, bufSize)
go func() {
for m := range recs {
_ = h.Log(m)
}
}()
return ChannelHandler(recs)
}
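
A minimal sketch of BufferedHandler in use (same import assumption as above; the sleep exists only to let the drain goroutine flush before the process exits):

```go
package main

import (
	"os"
	"time"

	log "github.com/inconshreveable/log15"
)

func main() {
	// Up to 1024 records are queued; a background goroutine drains
	// them into the wrapped handler and any write errors are dropped.
	log.Root().SetHandler(
		log.BufferedHandler(1024, log.StreamHandler(os.Stdout, log.LogfmtFormat())))
	for i := 0; i < 5; i++ {
		log.Info("tick", "i", i)
	}
	// Flushing is asynchronous, so give the drain goroutine a moment,
	// otherwise trailing records may be lost on exit.
	time.Sleep(100 * time.Millisecond)
}
```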
// LazyHandler writes all values to the wrapped handler after evaluating
// any lazy functions in the record's context. It is already wrapped
// around StreamHandler and SyslogHandler in this library; you'll only need
// it if you write your own Handler.
func LazyHandler(h Handler) Handler {
return FuncHandler(func(r *Record) error {
// go through the values (odd indices) and reassign
// the values of any lazy fn to the result of its execution
hadErr := false
for i := 1; i < len(r.Ctx); i += 2 {
lz, ok := r.Ctx[i].(Lazy)
if ok {
v, err := evaluateLazy(lz)
if err != nil {
hadErr = true
r.Ctx[i] = err
} else {
if cs, ok := v.(stack.CallStack); ok {
v = cs.TrimBelow(r.Call).TrimRuntime()
}
r.Ctx[i] = v
}
}
}
if hadErr {
r.Ctx = append(r.Ctx, errorKey, "bad lazy")
}
return h.Log(r)
})
}
func evaluateLazy(lz Lazy) (interface{}, error) {
t := reflect.TypeOf(lz.Fn)
if t.Kind() != reflect.Func {
return nil, fmt.Errorf("INVALID_LAZY, not func: %+v", lz.Fn)
}
if t.NumIn() > 0 {
return nil, fmt.Errorf("INVALID_LAZY, func takes args: %+v", lz.Fn)
}
if t.NumOut() == 0 {
return nil, fmt.Errorf("INVALID_LAZY, no func return val: %+v", lz.Fn)
}
value := reflect.ValueOf(lz.Fn)
results := value.Call([]reflect.Value{})
if len(results) == 1 {
return results[0].Interface(), nil
} else {
values := make([]interface{}, len(results))
for i, v := range results {
values[i] = v.Interface()
}
return values, nil
}
}
// DiscardHandler reports success for all writes but does nothing.
// It is useful for dynamically disabling logging at runtime via
// a Logger's SetHandler method.
func DiscardHandler() Handler {
return FuncHandler(func(r *Record) error {
return nil
})
}
// The Must object provides the following Handler creation functions
// which instead of returning an error parameter only return a Handler
// and panic on failure: FileHandler, NetHandler, SyslogHandler, SyslogNetHandler
var Must muster
func must(h Handler, err error) Handler {
if err != nil {
panic(err)
}
return h
}
type muster struct{}
func (m muster) FileHandler(path string, fmtr Format) Handler {
return must(FileHandler(path, fmtr))
}
func (m muster) NetHandler(network, addr string, fmtr Format) Handler {
return must(NetHandler(network, addr, fmtr))
}

View File

@ -0,0 +1,26 @@
// +build !go1.4
package log15
import (
"sync/atomic"
"unsafe"
)
// swapHandler wraps another handler that may be swapped out
// dynamically at runtime in a thread-safe fashion.
type swapHandler struct {
handler unsafe.Pointer
}
func (h *swapHandler) Log(r *Record) error {
return h.Get().Log(r)
}
func (h *swapHandler) Get() Handler {
return *(*Handler)(atomic.LoadPointer(&h.handler))
}
func (h *swapHandler) Swap(newHandler Handler) {
atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler))
}

View File

@ -0,0 +1,23 @@
// +build go1.4
package log15
import "sync/atomic"
// swapHandler wraps another handler that may be swapped out
// dynamically at runtime in a thread-safe fashion.
type swapHandler struct {
handler atomic.Value
}
func (h *swapHandler) Log(r *Record) error {
return (*h.handler.Load().(*Handler)).Log(r)
}
func (h *swapHandler) Swap(newHandler Handler) {
h.handler.Store(&newHandler)
}
func (h *swapHandler) Get() Handler {
return *h.handler.Load().(*Handler)
}

View File

@ -0,0 +1,208 @@
package log15
import (
"fmt"
"time"
"github.com/go-stack/stack"
)
const timeKey = "t"
const lvlKey = "lvl"
const msgKey = "msg"
const errorKey = "LOG15_ERROR"
type Lvl int
const (
LvlCrit Lvl = iota
LvlError
LvlWarn
LvlInfo
LvlDebug
)
// Returns the name of a Lvl
func (l Lvl) String() string {
switch l {
case LvlDebug:
return "dbug"
case LvlInfo:
return "info"
case LvlWarn:
return "warn"
case LvlError:
return "eror"
case LvlCrit:
return "crit"
default:
panic("bad level")
}
}
// Returns the appropriate Lvl from a string name.
// Useful for parsing command line args and configuration files.
func LvlFromString(lvlString string) (Lvl, error) {
switch lvlString {
case "debug", "dbug":
return LvlDebug, nil
case "info":
return LvlInfo, nil
case "warn":
return LvlWarn, nil
case "error", "eror":
return LvlError, nil
case "crit":
return LvlCrit, nil
default:
return LvlDebug, fmt.Errorf("Unknown level: %v", lvlString)
}
}
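
A sketch of how LvlFromString pairs with LvlFilterHandler to wire a config value into filtering (import path assumed as before):

```go
package main

import log "github.com/inconshreveable/log15"

func main() {
	// "warn" stands in for a value read from a config file or flag.
	lvl, err := log.LvlFromString("warn")
	if err != nil {
		panic(err)
	}
	// Records more verbose than the configured level are dropped.
	log.Root().SetHandler(log.LvlFilterHandler(lvl, log.StdoutHandler))
	log.Debug("filtered out")
	log.Error("written")
}
```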
// A Record is what a Logger asks its handler to write
type Record struct {
Time time.Time
Lvl Lvl
Msg string
Ctx []interface{}
Call stack.Call
KeyNames RecordKeyNames
}
type RecordKeyNames struct {
Time string
Msg string
Lvl string
}
// A Logger writes key/value pairs to a Handler
type Logger interface {
// New returns a new Logger that has this logger's context plus the given context
New(ctx ...interface{}) Logger
// GetHandler gets the handler associated with the logger.
GetHandler() Handler
// SetHandler updates the logger to write records to the specified handler.
SetHandler(h Handler)
// Log a message at the given level with context key/value pairs
Debug(msg string, ctx ...interface{})
Info(msg string, ctx ...interface{})
Warn(msg string, ctx ...interface{})
Error(msg string, ctx ...interface{})
Crit(msg string, ctx ...interface{})
}
type logger struct {
ctx []interface{}
h *swapHandler
}
func (l *logger) write(msg string, lvl Lvl, ctx []interface{}) {
l.h.Log(&Record{
Time: time.Now(),
Lvl: lvl,
Msg: msg,
Ctx: newContext(l.ctx, ctx),
Call: stack.Caller(2),
KeyNames: RecordKeyNames{
Time: timeKey,
Msg: msgKey,
Lvl: lvlKey,
},
})
}
func (l *logger) New(ctx ...interface{}) Logger {
child := &logger{newContext(l.ctx, ctx), new(swapHandler)}
child.SetHandler(l.h)
return child
}
func newContext(prefix []interface{}, suffix []interface{}) []interface{} {
normalizedSuffix := normalize(suffix)
newCtx := make([]interface{}, len(prefix)+len(normalizedSuffix))
n := copy(newCtx, prefix)
copy(newCtx[n:], normalizedSuffix)
return newCtx
}
func (l *logger) Debug(msg string, ctx ...interface{}) {
l.write(msg, LvlDebug, ctx)
}
func (l *logger) Info(msg string, ctx ...interface{}) {
l.write(msg, LvlInfo, ctx)
}
func (l *logger) Warn(msg string, ctx ...interface{}) {
l.write(msg, LvlWarn, ctx)
}
func (l *logger) Error(msg string, ctx ...interface{}) {
l.write(msg, LvlError, ctx)
}
func (l *logger) Crit(msg string, ctx ...interface{}) {
l.write(msg, LvlCrit, ctx)
}
func (l *logger) GetHandler() Handler {
return l.h.Get()
}
func (l *logger) SetHandler(h Handler) {
l.h.Swap(h)
}
func normalize(ctx []interface{}) []interface{} {
// if the caller passed a Ctx object, then expand it
if len(ctx) == 1 {
if ctxMap, ok := ctx[0].(Ctx); ok {
ctx = ctxMap.toArray()
}
}
// ctx needs to be even because it's a series of key/value pairs
// no one wants to check for errors on logging functions,
// so instead of erroring on bad input, we'll just make sure
// that things are the right length and users can fix bugs
// when they see the output looks wrong
if len(ctx)%2 != 0 {
ctx = append(ctx, nil, errorKey, "Normalized odd number of arguments by adding nil")
}
return ctx
}
// Lazy allows you to defer calculation of a logged value that is expensive
// to compute until it is certain that it must be evaluated with the given filters.
//
// Lazy may also be used in conjunction with a Logger's New() function
// to generate a child logger which always reports the current value of changing
// state.
//
// You may wrap any function which takes no arguments to Lazy. It may return any
// number of values of any type.
type Lazy struct {
Fn interface{}
}
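
To make the Lazy mechanics concrete, a small sketch (assuming the `github.com/inconshreveable/log15` import path); the wrapped function only runs when a handler actually writes the record:

```go
package main

import (
	"runtime"

	log "github.com/inconshreveable/log15"
)

func main() {
	log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StdoutHandler))
	// The Debug record is filtered out before LazyHandler runs, so this
	// (potentially expensive) function is never invoked.
	log.Debug("stats", "goroutines", log.Lazy{Fn: func() int {
		return runtime.NumGoroutine()
	}})
	// The Info record passes the filter, so the function is evaluated
	// and its result replaces the Lazy value in the context.
	log.Info("stats", "goroutines", log.Lazy{Fn: func() int {
		return runtime.NumGoroutine()
	}})
}
```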
// Ctx is a map of key/value pairs to pass as context to a log function
// Use this only if you really need greater safety around the arguments you pass
// to the logging functions.
type Ctx map[string]interface{}
func (c Ctx) toArray() []interface{} {
arr := make([]interface{}, len(c)*2)
i := 0
for k, v := range c {
arr[i] = k
arr[i+1] = v
i += 2
}
return arr
}
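
A brief illustration of Ctx versus plain variadic pairs (same import assumption):

```go
package main

import log "github.com/inconshreveable/log15"

func main() {
	// These two calls carry the same context; the Ctx map is expanded
	// into key/value pairs by normalize() (map iteration order is not
	// guaranteed, so the key order in the output may differ).
	log.Info("user updated", log.Ctx{"id": 42, "name": "alice"})
	log.Info("user updated", "id", 42, "name", "alice")
}
```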

View File

@ -0,0 +1,67 @@
package log15
import (
"os"
"github.com/inconshreveable/log15/term"
"github.com/mattn/go-colorable"
)
var (
root *logger
StdoutHandler = StreamHandler(os.Stdout, LogfmtFormat())
StderrHandler = StreamHandler(os.Stderr, LogfmtFormat())
)
func init() {
if term.IsTty(os.Stdout.Fd()) {
StdoutHandler = StreamHandler(colorable.NewColorableStdout(), TerminalFormat())
}
if term.IsTty(os.Stderr.Fd()) {
StderrHandler = StreamHandler(colorable.NewColorableStderr(), TerminalFormat())
}
root = &logger{[]interface{}{}, new(swapHandler)}
root.SetHandler(StdoutHandler)
}
// New returns a new logger with the given context.
// New is a convenient alias for Root().New
func New(ctx ...interface{}) Logger {
return root.New(ctx...)
}
// Root returns the root logger
func Root() Logger {
return root
}
// The following functions bypass the exported logger methods (logger.Debug,
// etc.) to keep the call depth the same for all paths to logger.write so
// runtime.Caller(2) always refers to the call site in client code.
// Debug is a convenient alias for Root().Debug
func Debug(msg string, ctx ...interface{}) {
root.write(msg, LvlDebug, ctx)
}
// Info is a convenient alias for Root().Info
func Info(msg string, ctx ...interface{}) {
root.write(msg, LvlInfo, ctx)
}
// Warn is a convenient alias for Root().Warn
func Warn(msg string, ctx ...interface{}) {
root.write(msg, LvlWarn, ctx)
}
// Error is a convenient alias for Root().Error
func Error(msg string, ctx ...interface{}) {
root.write(msg, LvlError, ctx)
}
// Crit is a convenient alias for Root().Crit
func Crit(msg string, ctx ...interface{}) {
root.write(msg, LvlCrit, ctx)
}
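
Putting the root-logger helpers together, a minimal usage sketch (assuming the `github.com/inconshreveable/log15` import path):

```go
package main

import log "github.com/inconshreveable/log15"

func main() {
	// Package-level helpers write through the root logger, which
	// defaults to StdoutHandler (colored when stdout is a terminal).
	log.Info("starting up", "version", "3.1.0")

	// Child loggers inherit the root handler and prepend their
	// context to every record they emit.
	reqLog := log.New("reqId", "abc123")
	reqLog.Warn("slow query", "ms", 250)
}
```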

View File

@ -0,0 +1,55 @@
// +build !windows,!plan9
package log15
import (
"log/syslog"
"strings"
)
// SyslogHandler opens a connection to the system syslog daemon by calling
// syslog.New and writes all records to it.
func SyslogHandler(priority syslog.Priority, tag string, fmtr Format) (Handler, error) {
wr, err := syslog.New(priority, tag)
return sharedSyslog(fmtr, wr, err)
}
// SyslogNetHandler opens a connection to a log daemon over the network and writes
// all log records to it.
func SyslogNetHandler(net, addr string, priority syslog.Priority, tag string, fmtr Format) (Handler, error) {
wr, err := syslog.Dial(net, addr, priority, tag)
return sharedSyslog(fmtr, wr, err)
}
func sharedSyslog(fmtr Format, sysWr *syslog.Writer, err error) (Handler, error) {
if err != nil {
return nil, err
}
h := FuncHandler(func(r *Record) error {
var syslogFn = sysWr.Info
switch r.Lvl {
case LvlCrit:
syslogFn = sysWr.Crit
case LvlError:
syslogFn = sysWr.Err
case LvlWarn:
syslogFn = sysWr.Warning
case LvlInfo:
syslogFn = sysWr.Info
case LvlDebug:
syslogFn = sysWr.Debug
}
s := strings.TrimSpace(string(fmtr.Format(r)))
return syslogFn(s)
})
return LazyHandler(&closingHandler{sysWr, h}), nil
}
func (m muster) SyslogHandler(priority syslog.Priority, tag string, fmtr Format) Handler {
return must(SyslogHandler(priority, tag, fmtr))
}
func (m muster) SyslogNetHandler(net, addr string, priority syslog.Priority, tag string, fmtr Format) Handler {
return must(SyslogNetHandler(net, addr, priority, tag, fmtr))
}
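
A hedged sketch of the syslog handlers above (unix-only, per the build constraint; import path assumed as before):

```go
package main

import (
	"log/syslog"

	log "github.com/inconshreveable/log15"
)

func main() {
	// Must.SyslogHandler panics instead of returning an error; record
	// levels are mapped onto syslog severities by sharedSyslog above.
	h := log.Must.SyslogHandler(syslog.LOG_USER, "myapp", log.LogfmtFormat())
	log.Root().SetHandler(h)
	log.Error("disk full", "path", "/var")
}
```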

View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Simon Eskildsen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -0,0 +1,13 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appengine
package term
// IsTty always returns false on AppEngine.
func IsTty(fd uintptr) bool {
return false
}

View File

@ -0,0 +1,12 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package term
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios

View File

@ -0,0 +1,18 @@
package term
import (
"syscall"
)
const ioctlReadTermios = syscall.TIOCGETA
// Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]uint8
Ispeed uint32
Ospeed uint32
}

View File

@ -0,0 +1,14 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
package term
import "syscall"
const ioctlReadTermios = syscall.TCGETS
type Termios syscall.Termios

View File

@ -0,0 +1,20 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux,!appengine darwin freebsd openbsd
package term
import (
"syscall"
"unsafe"
)
// IsTty returns true if the given file descriptor is a terminal.
func IsTty(fd uintptr) bool {
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}

View File

@ -0,0 +1,7 @@
package term
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios

View File

@ -0,0 +1,26 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package term
import (
"syscall"
"unsafe"
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
// IsTty returns true if the given file descriptor is a terminal.
func IsTty(fd uintptr) bool {
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}

View File

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013-2016 Errplane Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -0,0 +1,27 @@
# List
- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE)
- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE)
- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MOZILLA PUBLIC LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt)
- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE)
- github.com/kimor79/gollectd [BSD LICENSE](https://github.com/kimor79/gollectd/blob/master/LICENSE)
- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE)
- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
- glyphicons [LICENSE](http://glyphicons.com/license/)
- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
- golang.org/x/tools [BSD LICENSE](https://github.com/golang/tools/blob/master/LICENSE)
- gopkg.in/fatih/pool.v2 [MIT LICENSE](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)
- react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE)

View File

@ -16,6 +16,7 @@ Graphite, Elasticsearch, OpenTSDB, Prometheus and InfluxDB.
- [What's New in Grafana 2.0](http://docs.grafana.org/guides/whats-new-in-v2/)
- [What's New in Grafana 2.1](http://docs.grafana.org/guides/whats-new-in-v2-1/)
- [What's New in Grafana 2.5](http://docs.grafana.org/guides/whats-new-in-v2-5/)
- [What's New in Grafana 3.0](http://docs.grafana.org/guides/whats-new-in-v3/)
## Features
### Graphite Target Editor
@ -78,7 +79,7 @@ the latest master builds [here](http://grafana.org/download/builds)
### Dependencies
- Go 1.5
- NodeJS v0.12.0
- NodeJS v4+
- [Godep](https://github.com/tools/godep)
### Get Code
@ -109,7 +110,7 @@ go run build.go build
### Building frontend assets
To build less to css for the frontend you will need a recent version of of **node (v0.12.0)**,
To build less to css for the frontend you will need a recent version of **node (v4+)**,
npm (v2.5.0) and grunt (v0.4.5). Run the following:
```bash

View File

@ -14,7 +14,7 @@ install:
- npm install
- npm install -g grunt-cli
# install gcc (needed for sqlite3)
- choco install -y mingw
- choco install -y --limit-output mingw
- set PATH=C:\tools\mingw64\bin;%PATH%
- echo %PATH%
- echo %GOPATH%

View File

@ -132,12 +132,10 @@ func readVersionFromPackageJson() {
if len(parts) > 1 {
linuxPackageVersion = parts[0]
linuxPackageIteration = parts[1]
if linuxPackageIteration != "" {
}
// add timestamp to iteration
linuxPackageIteration = fmt.Sprintf("%s%v", linuxPackageIteration, time.Now().Unix())
}
log.Println(fmt.Sprintf("Iteration %v", linuxPackageIteration))
}
linuxPackageIteration = fmt.Sprintf("%d%s", time.Now().Unix(), linuxPackageIteration)
}
type linuxPackageOptions struct {

View File

@ -1,6 +1,6 @@
machine:
node:
version: 4.0
version: 5.11.1
environment:
GOPATH: "/home/ubuntu/.go_workspace"
ORG_PATH: "github.com/grafana"

View File

@ -6,6 +6,9 @@
# possible values : production, development
app_mode = production
# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
instance_name = ${HOSTNAME}
#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
@ -143,7 +146,7 @@ cookie_remember_name = grafana_remember
# disable gravatar profile images
disable_gravatar = false
# data source proxy whitelist (ip_or_domain:port seperated by spaces)
# data source proxy whitelist (ip_or_domain:port separated by spaces)
data_source_proxy_whitelist =
[snapshots]
@ -172,6 +175,12 @@ verify_email_enabled = false
# Background text for the user field on the login page
login_hint = email or username
# Default UI theme ("dark" or "light")
default_theme = dark
# Allow users to sign in using username and password
allow_user_pass_login = true
#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access
@ -242,24 +251,26 @@ templates_pattern = emails/*.html
#################################### Logging ##########################
[log]
# Either "console", "file", "syslog". Default is console and file
# Use comma to separate multiple modes, e.g. "console, file"
# Use space to separate multiple modes, e.g. "console file"
mode = console, file
# Buffer length of channel, keep it as it is if you don't know what it is.
buffer_len = 10000
# Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info"
level = Info
# Either "debug", "info", "warn", "error", "critical", default is "info"
level = info
# For "console" mode only
[log.console]
level =
# Set formatting to "false" to disable color formatting of console logs
formatting = false
# log line format, valid options are text, console and json
format = console
# For "file" mode only
[log.file]
level =
# log line format, valid options are text, console and json
format = text
# This enables automated log rotate(switch of following options), default is true
log_rotate = true
@ -267,7 +278,7 @@ log_rotate = true
max_lines = 1000000
# Max size shift of single file, default is 28 means 1 << 28, 256MB
max_lines_shift = 28
max_size_shift = 28
# Segment log daily, default is true
daily_rotate = true
@ -277,6 +288,10 @@ max_days = 7
[log.syslog]
level =
# log line format, valid options are text, console and json
format = text
# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
network =
address =
@ -287,7 +302,8 @@ facility =
# Syslog tag. By default, the process' argv[0] is used.
tag =
#################################### AMPQ Event Publisher ##########################
#################################### AMQP Event Publisher ##########################
[event_publisher]
enabled = false
rabbitmq_url = amqp://localhost/
@ -332,3 +348,17 @@ global_api_key = -1
# global limit on number of logged in users.
global_session = -1
#################################### Internal Grafana Metrics ##########################
# Metrics available at HTTP API Url /api/metrics
[metrics]
enabled = true
interval_seconds = 60
# Send internal Grafana metrics to graphite
; [metrics.graphite]
; address = localhost:2003
; prefix = prod.grafana.%(instance_name)s.
[grafana_net]
url = https://grafana.net

View File

@ -6,6 +6,9 @@
# possible values : production, development
; app_mode = production
# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
; instance_name = ${HOSTNAME}
#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
@ -39,8 +42,9 @@
# Prevents DNS rebinding attacks
;enforce_domain = false
# The full public facing url
;root_url = %(protocol)s://%(domain)s:%(http_port)s/
# The full public facing url you use in browser, used for redirects and emails
# If you use a reverse proxy and sub path, specify the full url (with sub path)
;root_url = http://localhost:3000
# Log web requests
;router_logging = false
@ -129,7 +133,7 @@ check_for_updates = true
# disable gravatar profile images
;disable_gravatar = false
# data source proxy whitelist (ip_or_domain:port seperated by spaces)
# data source proxy whitelist (ip_or_domain:port separated by spaces)
;data_source_proxy_whitelist =
[snapshots]
@ -155,6 +159,9 @@ check_for_updates = true
# Background text for the user field on the login page
;login_hint = email or username
# Default UI theme ("dark" or "light")
;default_theme = dark
#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access
@ -224,22 +231,26 @@ check_for_updates = true
#################################### Logging ##########################
[log]
# Either "console", "file", "syslog". Default is console and file
# Use comma to separate multiple modes, e.g. "console, file"
# Use space to separate multiple modes, e.g. "console file"
;mode = console, file
# Buffer length of channel, keep it as it is if you don't know what it is.
;buffer_len = 10000
# Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info"
;level = Info
# Either "trace", "debug", "info", "warn", "error", "critical", default is "info"
;level = info
# For "console" mode only
[log.console]
;level =
# log line format, valid options are text, console and json
;format = console
# For "file" mode only
[log.file]
;level =
# log line format, valid options are text, console and json
;format = text
# This enables automated log rotate(switch of following options), default is true
;log_rotate = true
@ -247,7 +258,7 @@ check_for_updates = true
;max_lines = 1000000
# Max size shift of single file, default is 28 means 1 << 28, 256MB
;max_lines_shift = 28
;max_size_shift = 28
# Segment log daily, default is true
;daily_rotate = true
@ -255,7 +266,24 @@ check_for_updates = true
# Expired days of log file(delete after max days), default is 7
;max_days = 7
#################################### AMPQ Event Publisher ##########################
[log.syslog]
;level =
# log line format, valid options are text, console and json
;format = text
# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
;network =
;address =
# Syslog facility. user, daemon and local0 through local7 are valid.
;facility =
# Syslog tag. By default, the process' argv[0] is used.
;tag =
#################################### AMQP Event Publisher ##########################
[event_publisher]
;enabled = false
;rabbitmq_url = amqp://localhost/
@ -266,5 +294,21 @@ check_for_updates = true
;enabled = false
;path = /var/lib/grafana/dashboards
#################################### Internal Grafana Metrics ##########################
# Metrics available at HTTP API Url /api/metrics
[metrics]
# Disable / Enable internal metrics
;enabled = true
# Publish interval
;interval_seconds = 10
# Send internal metrics to Graphite
; [metrics.graphite]
; address = localhost:2003
; prefix = prod.grafana.%(instance_name)s.
#################################### Grafana.net ##########################
# Url used to import dashboards directly from Grafana.net
[grafana_net]
url = https://grafana.net

View File

@ -0,0 +1,16 @@
FROM ubuntu:xenial
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get -y update
RUN apt-get -y install collectd curl python-pip
# add a fake mtab for host disk stats
ADD etc_mtab /etc/mtab
ADD collectd.conf.tpl /etc/collectd/collectd.conf.tpl
RUN pip install envtpl
ADD start_container /usr/bin/start_container
RUN chmod +x /usr/bin/start_container
CMD start_container

View File

@ -0,0 +1,37 @@
collectd-write-graphite
=======================
Basic collectd-based server monitoring. Sends stats to Graphite.
Collectd metrics:
* CPU used/free/idle/etc
* Free disk (via mounting hosts '/' into container, eg: -v /:/hostfs:ro)
* Disk performance
* Load average
* Memory used/free/etc
* Uptime
* Network interface
* Swap
Environment variables
---------------------
* `HOST_NAME`
- Will be sent to Graphite
- Required
* `GRAPHITE_HOST`
- Graphite IP or hostname
- Required
* `GRAPHITE_PORT`
- Graphite port
- Optional, defaults to 2003
* `GRAPHITE_PREFIX`
- Graphite prefix
- Optional, defaults to collectd.
* `REPORT_BY_CPU`
- Report per-CPU metrics if true, global sum of CPU metrics if false (details: [collectd.conf man page](https://collectd.org/documentation/manpages/collectd.conf.5.shtml#plugin_cpu))
- Optional, defaults to false.
* `COLLECT_INTERVAL`
- Collection interval and thus resolution of metrics
- Optional, defaults to 10

View File

@ -0,0 +1,106 @@
Hostname "{{ HOST_NAME }}"
FQDNLookup false
Interval {{ COLLECT_INTERVAL | default("10") }}
Timeout 2
ReadThreads 5
LoadPlugin cpu
LoadPlugin df
LoadPlugin load
LoadPlugin memory
LoadPlugin disk
LoadPlugin interface
LoadPlugin uptime
LoadPlugin swap
LoadPlugin write_graphite
LoadPlugin processes
LoadPlugin aggregation
LoadPlugin match_regex
# LoadPlugin memcached
<Plugin df>
# expose host's mounts into container using -v /:/host:ro (location inside container does not matter much)
# ignore rootfs; else, the root file-system would appear twice, causing
# one of the updates to fail and spam the log
FSType rootfs
# ignore the usual virtual / temporary file-systems
FSType sysfs
FSType proc
FSType devtmpfs
FSType devpts
FSType tmpfs
FSType fusectl
FSType cgroup
FSType overlay
FSType debugfs
FSType pstore
FSType securityfs
FSType hugetlbfs
FSType squashfs
FSType mqueue
MountPoint "/etc/resolv.conf"
MountPoint "/etc/hostname"
MountPoint "/etc/hosts"
IgnoreSelected true
ReportByDevice false
ReportReserved true
ReportInodes true
ValuesAbsolute true
ValuesPercentage true
ReportInodes true
</Plugin>
<Plugin "disk">
Disk "/^[hs]d[a-z]/"
IgnoreSelected false
</Plugin>
<Plugin "aggregation">
<Aggregation>
Plugin "cpu"
Type "cpu"
GroupBy "Host"
GroupBy "TypeInstance"
CalculateAverage true
</Aggregation>
</Plugin>
<Plugin interface>
Interface "lo"
Interface "/^veth.*/"
Interface "/^docker.*/"
IgnoreSelected true
</Plugin>
# <Plugin "memcached">
# Host "memcached"
# Port "11211"
# </Plugin>
<Chain "PostCache">
<Rule>
<Match regex>
Plugin "^cpu$"
PluginInstance "^[0-9]+$"
</Match>
<Target write>
Plugin "aggregation"
</Target>
Target stop
</Rule>
Target "write"
</Chain>
<Plugin "write_graphite">
<Carbon>
Host "{{ GRAPHITE_HOST }}"
Port "{{ GRAPHITE_PORT | default("2003") }}"
Prefix "{{ GRAPHITE_PREFIX | default("collectd.") }}"
EscapeCharacter "_"
SeparateInstances true
StoreRates true
AlwaysAppendDS false
</Carbon>
</Plugin>

View File

@ -0,0 +1 @@
hostfs /.dockerinit ext4 ro,relatime,user_xattr,barrier=1,data=ordered 0 0

View File

@ -0,0 +1,12 @@
collectd:
build: blocks/collectd
environment:
HOST_NAME: myserver
GRAPHITE_HOST: graphite
GRAPHITE_PORT: 2003
GRAPHITE_PREFIX: collectd.
REPORT_BY_CPU: 'false'
COLLECT_INTERVAL: 10
links:
- graphite
- memcached

View File

@ -0,0 +1,5 @@
#!/bin/bash
envtpl /etc/collectd/collectd.conf.tpl
collectd -f

View File

@ -8,3 +8,10 @@ graphite:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
fake-graphite-data:
image: grafana/fake-data-gen
net: bridge
environment:
FD_DATASOURCE: graphite
FD_PORT: 2003

View File

@ -4,3 +4,11 @@ influxdb:
- "2004:2004"
- "8083:8083"
- "8086:8086"
fake-influxdb-data:
image: grafana/fake-data-gen
net: bridge
environment:
FD_DATASOURCE: influxdb
FD_PORT: 8086

View File

@ -0,0 +1,5 @@
memcached:
image: memcached:latest
ports:
- "11211:11211"

View File

@ -3,3 +3,9 @@ opentsdb:
ports:
- "4242:4242"
fake-opentsdb-data:
image: grafana/fake-data-gen
net: bridge
environment:
FD_DATASOURCE: opentsdb

View File

@ -1,6 +1,22 @@
prometheus:
build: blocks/prometheus
net: bridge
ports:
- "9090:9090"
volumes:
- /var/docker/prometheus:/prometheus-data
node_exporter:
image: prom/node-exporter
net: bridge
ports:
- "9100:9100"
fake-prometheus-data:
image: grafana/fake-data-gen
net: bridge
ports:
- "9091:9091"
environment:
FD_DATASOURCE: prom

View File

@ -23,4 +23,4 @@ scrape_configs:
# scheme defaults to 'http'.
target_groups:
- targets: ['localhost:9090', '172.17.0.1:9091']
- targets: ['localhost:9090', '172.17.0.1:9091', '172.17.0.1:9100', '172.17.0.1:9150']

View File

@ -1,7 +1,15 @@
To build the docs locally, you need to have docker installed. The docs are built using a custom [docker](https://www.docker.com/)
image and [mkdocs](http://www.mkdocs.org/).
# Building The Docs
Build the `grafana/docs-base:latest` image:
To build the docs locally, you need to have docker installed. The
docs are built using a custom [docker](https://www.docker.com/) image
and the [mkdocs](http://www.mkdocs.org/) tool.
**Prepare the Docker Image**:
Build the `grafana/docs-base:latest` image. Run these commands in the
same directory this file is in. **Note** that you may require ``sudo``
when running ``make docs-build``, depending on how your system's docker
service is configured.
```
$ git clone https://github.com/grafana/docs-base
@ -9,10 +17,45 @@ $ cd docs-base
$ make docs-build
```
To build the docs:
**Build the Documentation**:
Now that the docker image has been prepared we can build the
docs. Switch your working directory back to the directory this file
(README.md) is in and run (possibly with ``sudo``):
```
$ cd docs
$ make docs
```
This command will not return control of the shell to the user. Instead
the command is now running a new docker container built from the image
we created in the previous step.
Open [localhost:8180](http://localhost:8180) to view the docs.
**Note** that after running ``make docs`` you may notice a message
like this in the console output:
> Running at: http://0.0.0.0:8000/
This is misleading. That is **not** the port the documentation is
served from. You must browse to port **8180** to view the new
documentation.
# Adding a New Page
Adding a new page requires updating the ``mkdocs.yml`` file which is
located in this directory.
For example, if you are adding documentation for a new HTTP API called
``preferences`` you would:
1. Create the file ``docs/sources/http_api/preferences.md``
1. Add a reference to it in ``docs/sources/http_api/overview.md``
1. Update the list under the **pages** key in the ``docs/mkdocs.yml`` file with a reference to your new page:
```yaml
- ['http_api/preferences.md', 'API', 'Preferences API']
```

View File

@ -1 +1 @@
3.0.0
3.1.0

View File

@ -45,6 +45,7 @@ pages:
- ['guides/basic_concepts.md', 'User Guides', 'Basic Concepts']
- ['guides/gettingstarted.md', 'User Guides', 'Getting Started']
- ['guides/whats-new-in-v3-1.md', 'User Guides', "What's New in Grafana v3.1"]
- ['guides/whats-new-in-v3.md', 'User Guides', "What's New in Grafana v3.0"]
- ['guides/whats-new-in-v2-6.md', 'User Guides', "What's New in Grafana v2.6"]
- ['guides/whats-new-in-v2-5.md', 'User Guides', "What's New in Grafana v2.5"]
@ -84,6 +85,7 @@ pages:
- ['http_api/user.md', 'API', 'User API']
- ['http_api/admin.md', 'API', 'Admin API']
- ['http_api/snapshot.md', 'API', 'Snapshot API']
- ['http_api/preferences.md', 'API', 'Preferences API']
- ['http_api/other.md', 'API', 'Other API']
- ['plugins/index.md', 'Plugins', 'Overview']

View File

@ -26,6 +26,8 @@ Name | The data source name, important that this is the same as in Grafana v1.x
Default | Default data source means that it will be pre-selected for new panels.
Credentials profile name | Specify the name of the profile to use (if you use the `~/.aws/credentials` file), leave blank for default. This option was introduced in Grafana 2.5.1
Default Region | Used in query editor to set region (can be changed on per query basis)
Custom Metrics namespace | Specify the CloudWatch namespace of Custom metrics
Assume Role Arn | Specify the ARN of the role to assume
## Authentication
@ -95,8 +97,8 @@ Example `ec2_instance_attribute()` query
## Cost
It's worth to mention that Amazon will charge you for CloudWatch API usage. CloudWatch costs
$0.01 per 1,000 GetMetricStatistics or ListMetrics requests. For each query Grafana will
Amazon provides 1 million CloudWatch API requests each month at no additional charge. Past this,
it costs $0.01 per 1,000 GetMetricStatistics or ListMetrics requests. For each query Grafana will
issue a GetMetricStatistics request and every time you pick a dimension in the query editor
Grafana will issue a ListMetrics request.

View File

@ -26,7 +26,7 @@ Name | Description
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
Default | Default data source means that it will be pre-selected for new panels.
Url | The http protocol, ip and port of your elasticsearch server.
Access | Proxy = access via Grafana backend, Direct = access directory from browser.
Access | Proxy = access via Grafana backend, Direct = access directly from browser.
Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross-Origin Resource Sharing) issues, as well as eliminate the need to expose the Data Source's authentication details to the browser.

View File

@ -26,7 +26,7 @@ Name | Description
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
Default | Default data source means that it will be pre-selected for new panels.
Url | The http protocol, ip and port of your graphite-web or graphite-api install.
Access | Proxy = access via Grafana backend, Direct = access directory from browser.
Access | Proxy = access via Grafana backend, Direct = access directly from browser.
Proxy access means that the Grafana backend will proxy all requests from the browser, and send them on to the Data Source. This is useful because it can eliminate CORS (Cross-Origin Resource Sharing) issues, as well as eliminate the need to expose the Data Source's authentication details to the browser.

View File

@ -25,7 +25,7 @@ Name | Description
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
Default | Default data source means that it will be pre-selected for new panels.
Url | The http protocol, ip and port of your kairosdb server (default port is usually 8080)
Access | Proxy = access via Grafana backend, Direct = access directory from browser.
Access | Proxy = access via Grafana backend, Direct = access directly from browser.
## Query editor
Open a graph in edit mode by clicking the title.

View File

@ -7,7 +7,7 @@ page_keywords: grafana, opentsdb, documentation
# OpenTSDB Guide
The newest release of Grafana adds additional functionality when using an OpenTSDB Data source.
![](/img/v2/add_OpenTSDB.jpg)
![](/img/v2/add_OpenTSDB.png)
1. Open the side menu by clicking the Grafana icon in the top header.
2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
@ -22,7 +22,7 @@ Name | Description
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
Default | Default data source means that it will be pre-selected for new panels.
Url | The http protocol, ip and port of your opentsdb server (default port is usually 4242)
Access | Proxy = access via Grafana backend, Direct = access directory from browser.
Access | Proxy = access via Grafana backend, Direct = access directly from browser.
Version | Version = opentsdb version, either <=2.1 or 2.2
Resolution | Metrics from opentsdb may have datapoints with either second or millisecond resolution.
@ -51,6 +51,13 @@ When using OpenTSDB with a template variable of `query` type you can use followi
If you do not see template variables being populated in `Preview of values` section, you need to enable `tsd.core.meta.enable_realtime_ts` in the OpenTSDB server settings. Also, to populate metadata of the existing time series data in OpenTSDB, you need to run `tsdb uid metasync` on the OpenTSDB server.
### Nested Templating
One template variable can be used to filter tag values for another template variable. Very importantly, the order of the parameters matters in the tag_values function. The first parameter is the metric name, the second parameter is the tag key for which you need to find tag values, and after that all other dependent template variables. Some examples are mentioned below to make nested template queries work successfully.
tag_values(cpu, hostname, env=$env) // return tag values for cpu metric, selected env tag value and tag key hostname
tag_values(cpu, hostname, env=$env, region=$region) // return tag values for cpu metric, selected env tag value, selected region tag value and tag key hostname
> Note: This is required for the OpenTSDB `lookup` api to work.
For details on opentsdb metric queries check out the official [OpenTSDB documentation](http://opentsdb.net/docs/build/html/index.html)

View File

@ -23,7 +23,7 @@ Name | Description
Name | The data source name, important that this is the same as in Grafana v1.x if you plan to import old dashboards.
Default | Default data source means that it will be pre-selected for new panels.
Url | The http protocol, ip and port of your Prometheus server (default port is usually 9090)
Access | Proxy = access via Grafana backend, Direct = access directory from browser.
Access | Proxy = access via Grafana backend, Direct = access directly from browser.
Basic Auth | Enable basic authentication to the Prometheus datasource.
User | Name of your Prometheus user
Password | Database user's password

View File

@ -0,0 +1,68 @@
---
page_title: What's New in Grafana v3.1
page_description: What's new in Grafana v3.1
page_keywords: grafana, new, changes, features, documentation
---
# What's New in Grafana v3.1
## Dashboard Export & Import
The export feature is now accessed from the share menu.
<img src="/img/v31/export_menu.png">
Dashboards exported from Grafana 3.1 are now more portable and easier for others to import than before.
The export process extracts information about the data source types used by panels and adds these to a new `inputs`
section in the dashboard json. So when you or another person tries to import the dashboard they will be asked to
select data source and optional metric prefix options.
<img src="/img/v31/import_step1.png">
The above screenshot shows the new import modal that gives you 3 options for how to import a dashboard.
One notable new addition here is the ability to import directly from Dashboards shared on [Grafana.net](https://grafana.net).
The next step in the import process:
<img src="/img/v31/import_step2.png">
Here you can change the name of the dashboard and also pick what data sources you want the dashboard to use. The above screenshot
shows a CollectD dashboard for Graphite that requires a metric prefix be specified.
## Discover Dashboards
On [Grafana.net](https://grafana.net) you can now browse & search for dashboards. We have already added a few but
more are being uploaded every day. To import a dashboard just copy the dashboard url and head back to Grafana,
then Dashboard Search -> Import -> Paste Grafana.net Dashboard URL.
<img src="/img/v31/gnet_dashboards_list.png">
## Constant template variables
We added a new template variable type named constant that makes it easier to share and export dashboards that have custom prefixes.
## Dashboard Urls
Having the current time range and template variable values always in sync with the URL makes it possible to copy your current
Grafana url to share with a colleague without having to use the Share modal.
## Internal metrics
Do you want metrics about viewing metrics? Of course you do! In this release we added support for sending metrics about Grafana to graphite.
You can configure interval and server in the config file.
## Logging
Switched logging framework to log15 to enable key/value logging and filtering based on different log levels.
It's now possible to configure different log levels for different modules.
### Breaking changes
- **Logging** format has been changed to improve log filtering.
- **Graphite PNG** Graphite PNG support dropped from Graph panel (use Grafana native PNG instead).
- **Migration** No longer possible to migrate dashboards from 1.x (Stored in ES or Influx 0.8).
## CHANGELOG
For a detailed list and link to github issues for everything included
in the 3.1 release please view the
[CHANGELOG.md](https://github.com/grafana/grafana/blob/master/CHANGELOG.md)
file.

View File

@ -39,12 +39,13 @@ entire experience right within Grafana.
<img src="/img/v3/grafana_net_tour.png">
A preview of [Grafana.net](http://grafana.net) is launching along with this release. We
think it's the perfect complement to Grafana.
[Grafana.net](https://grafana.net) offers a central repository where the community can come together to discover, create and
share plugins (data sources, panels, apps) and dashboards.
Grafana.net currently offers a central repository where the community
can come together to discover and share plugins (Data Sources, Panels,
Apps) and Dashboards for Grafana 3.0 and above.
We are also working on a hosted Graphite-compatible data source that will be optimized for use with Grafana.
It'll be easy to combine your existing data source(s) with this OpenSaaS option. Finally, Grafana.net can
also be a hub to manage all your Grafana instances. You'll be able to monitor their health and availability,
perform dashboard backups, and more.
We are also working on a hosted Graphite-compatible Data Source that
will be optimized for use with Grafana. It'll be easy to combine your
@ -65,7 +66,6 @@ Grafana 3.0 comes with a new command line tool called grafana-cli. You
can easily install plugins from Grafana.net with it. For
example:
```
grafana-cli install grafana-pie-chart-panel
```
@ -188,6 +188,33 @@ you can still install manually from [Grafana.net](http://grafana.net)
* KairosDB: This data source has also no longer shipped with Grafana,
you can install it manually from [Grafana.net](http://grafana.net)
## Plugin showcase
Discovering and installing plugins is very quick and easy with Grafana 3.0 and [Grafana.net](https://grafana.net). Here
are a couple that I encourage you to try!
#### [Clock Panel](https://grafana.net/plugins/grafana-clock-panel)
Supports both current time and countdown modes.
<img src="/img/v3/clock_panel.png">
#### [Pie Chart Panel](https://grafana.net/plugins/grafana-piechart-panel)
A simple pie chart panel is now available as an external plugin.
<img src="/img/v3/pie_chart_panel.png">
#### [WorldPing App](https://grafana.net/plugins/raintank-worldping-app)
This is a full-blown Grafana App that adds new panels, data sources and pages to give
feature-rich global performance monitoring directly from your on-prem Grafana.
<img src="/img/v3/wP-Screenshot-dash-web.png">
#### [Zabbix App](https://grafana.net/plugins/alexanderzobnin-zabbix-app)
This app contains the already very popular Zabbix data source plugin, 2 dashboards and a triggers panel. It is
created and maintained by [Alexander Zobnin](https://github.com/alexanderzobnin/grafana-zabbix).
<img src="/img/v3/zabbix_app.png">
Checkout the full list of plugins on [Grafana.net](https://grafana.net/plugins)
## CHANGELOG
For a detailed list and link to github issues for everything included

View File

@ -18,4 +18,5 @@ dashboards, creating users and updating data sources.
* [User API](/http_api/user/)
* [Admin API](/http_api/admin/)
* [Snapshot API](/http_api/snapshot/)
* [Preferences API](/http_api/preferences/)
* [Other API](/http_api/other/)

View File

@ -0,0 +1,100 @@
---
page_title: Preferences API
page_description: Grafana Preferences API Reference
page_keywords: grafana, preferences, http, api, documentation
---
# User and Org Preferences API
Keys:
- **theme** - One of: ``light``, ``dark``, or an empty string for the default theme
- **homeDashboardId** - The numerical ``:id`` of a favorited dashboard, default: ``0``
- **timezone** - One of: ``utc``, ``browser``, or an empty string for the default
Omitting a key will cause the current value to be replaced with the
system default value.
## Get Current User Prefs
`GET /api/user/preferences`
**Example Request**:
GET /api/user/preferences HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
**Example Response**:
HTTP/1.1 200
Content-Type: application/json
{"theme":"","homeDashboardId":0,"timezone":""}
## Update Current User Prefs
`PUT /api/user/preferences`
**Example Request**:
PUT /api/user/preferences HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
{
"theme": "",
"homeDashboardId":0,
"timezone":"utc"
}
**Example Response**:
HTTP/1.1 200
Content-Type: text/plain; charset=utf-8
{"message":"Preferences updated"}
## Get Current Org Prefs
`GET /api/org/preferences`
**Example Request**:
GET /api/org/preferences HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
**Example Response**:
HTTP/1.1 200
Content-Type: application/json
{"theme":"","homeDashboardId":0,"timezone":""}
## Update Current Org Prefs
`PUT /api/org/preferences`
**Example Request**:
PUT /api/org/preferences HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
{
"theme": "",
"homeDashboardId":0,
"timezone":"utc"
}
**Example Response**:
HTTP/1.1 200
Content-Type: text/plain; charset=utf-8
{"message":"Preferences updated"}

View File

@ -44,6 +44,12 @@ Then you can override them using:
<hr />
## instance_name
Set the name of the grafana-server instance. Used in logging, internal metrics, and
clustering info. Defaults to `${HOSTNAME}`, which will be replaced with the
`HOSTNAME` environment variable; if that is empty or does not exist, Grafana will try to use
system calls to get the machine name.
## [paths]
### data
@ -226,7 +232,7 @@ organization to be created for that new user.
The role new users will be assigned for the main organization (if the
above setting is set to true). Defaults to `Viewer`, other valid
options are `Admin` and `Editor`.
options are `Admin`, `Editor` and `Read-Only Editor`.
<hr>
@ -439,3 +445,35 @@ Grafana backend index those json dashboards which will make them appear in regul
### path
The full path to a directory containing your json dashboards.
## [log]
### mode
Either "console", "file", "syslog". Default is console and file
Use space to separate multiple modes, e.g. "console file"
### level
Either "debug", "info", "warn", "error", "critical", default is "info"
### filter
Optional settings to set different levels for specific loggers.
Ex `filters = sqlstore:debug`
## [metrics]
### enabled
Enable metrics reporting. Defaults to `true`. Available via HTTP API `/api/metrics`.
### interval_seconds
Flush/Write interval when sending metrics to external TSDB. Defaults to 60s.
## [metrics.graphite]
Include this section if you want to send internal Grafana metrics to Graphite.
### address
Format `<Hostname or ip>`:port
### prefix
Graphite metric prefix. Defaults to `prod.grafana.%(instance_name)s.`
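
Putting the settings above together, a sketch of a config that ships internal metrics to a local Graphite instance (the address and prefix are illustrative):

```ini
[metrics]
enabled = true
interval_seconds = 60

[metrics.graphite]
address = localhost:2003
prefix = prod.grafana.%(instance_name)s.
```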

View File

@ -10,32 +10,32 @@ page_keywords: grafana, installation, debian, ubuntu, guide
Description | Download
------------ | -------------
Stable .deb for Debian-based Linux | [grafana_2.6.0_amd64.deb](https://grafanarel.s3.amazonaws.com/builds/grafana_2.6.0_amd64.deb)
Beta .deb for Debian-based Linux | [grafana_3.0.0-beta71462173753_amd64.deb](https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.0-beta71462173753_amd64.deb)
Stable .deb for Debian-based Linux | [3.0.4](https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.4-1464167696_amd64.deb)
Beta .deb for Debian-based Linux | [3.1.0-beta1](https://grafanarel.s3.amazonaws.com/builds/grafana_3.1.0-1466666977beta1_amd64.deb)
## Install Stable
$ wget https://grafanarel.s3.amazonaws.com/builds/grafana_2.6.0_amd64.deb
$ wget https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.4-1464167696_amd64.deb
$ sudo apt-get install -y adduser libfontconfig
$ sudo dpkg -i grafana_2.6.0_amd64.deb
$ sudo dpkg -i grafana_3.0.4-1464167696_amd64.deb
## Install 3.0 Beta
## Install 3.1 beta
$ wget https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.0-beta71462173753_amd64.deb
$ wget https://grafanarel.s3.amazonaws.com/builds/grafana_3.1.0-1466666977beta1_amd64.deb
$ sudo apt-get install -y adduser libfontconfig
$ sudo dpkg -i grafana_3.0.0-beta71462173753_amd64.deb
$ sudo dpkg -i grafana_3.1.0-1466666977beta1_amd64.deb
## APT Repository
Add the following line to your `/etc/apt/sources.list` file.
deb https://packagecloud.io/grafana/stable/debian/ wheezy main
deb https://packagecloud.io/grafana/stable/debian/ jessie main
Use the above line even if you are on Ubuntu or another Debian version.
There is also a testing repository if you want beta or release
candidates.
deb https://packagecloud.io/grafana/testing/debian/ wheezy main
deb https://packagecloud.io/grafana/testing/debian/ jessie main
Then add the [Package Cloud](https://packagecloud.io/grafana) key. This
allows you to install signed packages.

View File

@ -6,8 +6,33 @@ page_keywords: grafana, installation, mac, osx, guide
# Installing on Mac
There is currently no binary build for Mac, but Grafana will happily build on Mac. Read the [build from
source](/project/building_from_source) page for instructions on how to
build it yourself.
Installation can be done using [Homebrew](http://brew.sh/):
Install latest stable:
```
brew install grafana/grafana/grafana
```
To start Grafana, look at the command printed after the Homebrew install completes.
You can also add grafana/grafana as a tap:
```
brew tap grafana/grafana
brew install grafana
```
Install latest unstable from master:
```
brew install --HEAD grafana/grafana/grafana
```
To upgrade, use the reinstall command:
```
brew reinstall --HEAD grafana/grafana/grafana
```

View File

@ -10,43 +10,42 @@ page_keywords: grafana, installation, centos, fedora, opensuse, redhat, guide
Description | Download
------------ | -------------
Stable .RPM for CentOS / Fedora / OpenSuse / Redhat Linux | [grafana-2.6.0-1.x86_64.rpm](https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm)
Beta .RPM for CentOS / Fedora / OpenSuse / Redhat Linux | [grafana-3.0.0-beta71462173753.x86_64.rpm](https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.0-beta71462173753.x86_64.rpm)
Stable .RPM for CentOS / Fedora / OpenSuse / Redhat Linux | [3.0.4 (x86-64 rpm)](https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.4-1464167696.x86_64.rpm)
Beta .RPM for CentOS / Fedora / OpenSuse / Redhat Linux | [3.1.0-beta1 (x86-64 rpm)](https://grafanarel.s3.amazonaws.com/builds/grafana-3.1.0-1466666977beta1.x86_64.rpm)
## Install Stable Release from package file
## Install Latest Stable
You can install Grafana using Yum directly.
$ sudo yum install https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm
$ sudo yum install https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.4-1464167696.x86_64.rpm
Or install manually using `rpm`.
#### On CentOS / Fedora / Redhat:
$ sudo yum install initscripts fontconfig
$ sudo rpm -Uvh grafana-2.6.0-1.x86_64.rpm
$ sudo rpm -Uvh grafana-3.0.4-1464167696.x86_64.rpm
#### On OpenSuse:
$ sudo rpm -i --nodeps grafana-2.6.0-1.x86_64.rpm
$ sudo rpm -i --nodeps grafana-3.0.4-1464167696.x86_64.rpm
## Install Beta Release from package file
## Install 3.1 Beta
You can install Grafana using Yum directly.
$ sudo yum install https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.0-beta71462173753.x86_64.rpm
$ sudo yum install https://grafanarel.s3.amazonaws.com/builds/grafana-3.1.0-1466666977beta1.x86_64.rpm
Or install manually using `rpm`.
#### On CentOS / Fedora / Redhat:
$ sudo yum install initscripts fontconfig
$ sudo rpm -Uvh grafana-3.0.0-beta71462173753.x86_64.rpm
$ sudo rpm -Uvh https://grafanarel.s3.amazonaws.com/builds/grafana-3.1.0-1466666977beta1.x86_64.rpm
#### On OpenSuse:
$ sudo rpm -i --nodeps grafana-3.0.0-beta71462173753.x86_64.rpm
$ sudo rpm -i --nodeps https://grafanarel.s3.amazonaws.com/builds/grafana-3.1.0-1466666977beta1.x86_64.rpm
## Install via YUM Repository

View File

@ -10,7 +10,7 @@ page_keywords: grafana, installation, windows guide
Description | Download
------------ | -------------
Stable Zip package for Windows | [grafana.2.6.0.windows-x64.zip](https://grafanarel.s3.amazonaws.com/winbuilds/dist/grafana-2.5.0.windows-x64.zip)
Stable Zip package for Windows | [grafana.3.0.4.windows-x64.zip](https://grafanarel.s3.amazonaws.com/winbuilds/dist/grafana-3.0.4.windows-x64.zip)
## Configure

View File

@ -40,3 +40,10 @@ as the name for the fields that should be used for the annotation title, tags an
For InfluxDB you need to enter a query like the one in the screenshot above. It must include the ```where $timeFilter``` part.
If you only select one column you will not need to enter anything in the column mapping fields.
## Prometheus Annotations
![](/img/v3/annotations_prom.png)
Prometheus supports two ways to query annotations.
- A regular metric query
- A Prometheus query for pending and firing alerts (for details see [Inspecting alerts during runtime](https://prometheus.io/docs/alerting/rules/#inspecting-alerts-during-runtime))

View File

@ -26,7 +26,6 @@ When a user creates a new dashboard, a new dashboard JSON object is initialized
{
"id": null,
"title": "New dashboard",
"originalTitle": "New dashboard",
"tags": [],
"style": "dark",
"timezone": "browser",
@ -59,7 +58,6 @@ Each field in the dashboard JSON is explained below with its usage:
| ---- | ----- |
| **id** | unique dashboard id, an integer |
| **title** | current title of dashboard |
| **originalTitle** | title of dashboard when saved for the first time |
| **tags** | tags associated with dashboard, an array of strings |
| **style** | theme of dashboard, i.e. `dark` or `light` |
| **timezone** | timezone of dashboard, i.e. `utc` or `browser` |

View File

@ -10,76 +10,95 @@ page_keywords: grafana, export, import, documentation
Dashboards are exported in Grafana JSON format, and contain everything you need (layout, variables, styles, data sources, queries, etc.) to import the dashboard at a later time.
#### Export to file
The export feature is accessed from the share menu.
To export a dashboard, locate the settings menu within the desired dashboard and click the gear icon. The export option will always be available, and will open a browser save-as dialog window.
<img src="/img/v31/export_menu.png">
<img class="no-shadow" src="/img/v2/export.gif">
### Making a dashboard portable
#### Copy JSON
If you want to export a dashboard for others to use, it can be a good idea to
add template variables for things like a metric prefix (use a constant variable) and server name.
The raw JSON may be accessed directly from within the interface and copy/pasted into an editor of your choice to be saved later. To view this JSON, locate the settings menu within the desired dashboard and click the gear icon. The View JSON option will always be available, and will open the raw JSON in a text area. To copy the entire JSON file, click into the text area, then select all with `CTRL`+`A` (PC, Linux) or `⌘`+`A` (Mac).
<img class="no-shadow" src="/img/v2/export-2.gif">
A template variable of the type `Constant` will automatically be hidden in
the dashboard, and will also be added as a required input when the dashboard is imported.
## Importing a dashboard
Grafana 2.0 now has an integrated dashboard storage engine that can be configured to use an internal sqlite3 database, MySQL, or Postgres. This eliminates the need to use Elasticsearch for dashboard storage for Graphite users. Grafana 2.0 does not support storing dashboards in InfluxDB.
To import a dashboard open dashboard search and then hit the import button.
The import view can be found at the Dashboard Picker dropdown, next to the New Dashboard and Playlist buttons.
<img src="/img/v31/import_step1.png">
<img class="no-shadow" src="/img/v2/import.gif">
From here you can upload a dashboard json file, paste a [Grafana.net](https://grafana.net) dashboard
url or paste dashboard json text directly into the text area.
<img src="/img/v31/import_step2.png">
#### Import from a file
In step 2 of the import process Grafana will let you change the name of the dashboard, pick what
data source you want the dashboard to use and specify any metric prefixes (if the dashboard uses any).
To import a dashboard through a local JSON file, click the 'Choose file' button in the Import from File section. Note that JSON is not linted or validated during upload, so we recommend validating locally if you're editing. In a pinch, you can use http://jsonlint.com/, and if you edit dashboard JSON frequently, there are linter plugins for popular text editors.
## Discover dashboards on Grafana.net
Find dashboards for common server applications at [Grafana.net/dashboards](https://grafana.net/dashboards).
#### Importing dashboards from Elasticsearch
<img src="/img/v31/gnet_dashboards_list.png">
Start by going to the `Data Sources` view (via the side menu), and make sure your Elasticsearch data source is added. Specify the Elasticsearch index name where your existing Grafana v1.x dashboards are stored (the default is `grafana-dash`).
## Import & Sharing with Grafana 2.x or 3.0
![](/img/v2/datasource_edit_elastic.jpg)
Dashboards on Grafana.net use a new feature in Grafana 3.1 that allows the import process
to update each panel so that it uses a data source of your choosing. If you are running a
Grafana version older than 3.1, you might need to do some manual steps either
before or after import in order for the dashboard to work properly.
#### Importing dashboards from InfluxDB
Dashboards exported from Grafana 3.1+ have a new json section `__inputs`
that defines what data sources and metric prefixes the dashboard uses.
Start by going to the `Data Sources` view (via the side menu), and make sure your InfluxDB data source is added. Specify the database name where your Grafana v1.x dashboards are stored, the default is `grafana`.
### Import view
In the Import view you find the section `Migrate dashboards`. Pick the data source you added (from Elasticsearch or InfluxDB), and click the `Import` button.
![](/img/v2/migrate_dashboards.jpg)
Your dashboards should be automatically imported into the Grafana 2.0 back-end. Dashboards will no longer be stored in your previous Elasticsearch or InfluxDB databases.
## Troubleshooting
### Template variables could not be initialized.
When importing a dashboard, keep an eye out for template variables in your JSON that may not exist in your instance of Grafana. For example,
"templating": {
"list": [
Example:
```json
{
"__inputs": [
{
"allFormat": "glob",
"current": {
"tags": [],
"text": "google_com + monkey_id_au",
"value": [
"google_com",
"monkey_id_au"
]
"name": "DS_GRAPHITE",
"label": "graphite",
"description": "",
"type": "datasource",
"pluginId": "graphite",
"pluginName": "Graphite"
},
"datasource": null,
To resolve this, remove any unnecessary JSON that may be specific to the instance you are exporting from. In this case, we can remove the entire "current" section entirely, and Grafana will populate default.
"templating": {
"list": [
{
"allFormat": "glob",
"datasource": null,
"name": "VAR_PREFIX",
"type": "constant",
"label": "prefix",
"value": "collectd",
"description": ""
}
],
}
```
These are then referenced in the dashboard panels like this:
```json
{
  "rows": [
    {
      "panels": [
        {
          "type": "graph",
          "datasource": "${DS_GRAPHITE}"
        }
      ]
    }
  ]
}
```
These inputs and their usage in data source properties are automatically added during export in Grafana 3.1.
If you run an older version of Grafana and want to share a dashboard on Grafana.net, you need to manually
add the inputs and templatize the data source properties as above.
If you want to import a dashboard from Grafana.net into an older version of Grafana, you can either import
it as usual and then update the data source option in the metrics tab so that each panel uses the correct
data source, or open the json file in a text editor and update the data source properties
to a value that matches the name of your data source.

View File

@ -1,3 +1,4 @@
<li><a class='version' href='/v3.1'>Version v3.1</a></li>
<li><a class='version' href='/v3.0'>Version v3.0</a></li>
<li><a class='version' href='/v2.6'>Version v2.6</a></li>
<li><a class='version' href='/v2.5'>Version v2.5</a></li>

View File

@ -26,7 +26,7 @@ module.exports = function(config) {
browsers: ['PhantomJS'],
captureTimeout: 20000,
singleRun: true,
autoWatchBatchDelay: 10000,
autoWatchBatchDelay: 1000,
browserNoActivityTimeout: 60000,
});

View File

@ -1,4 +1,4 @@
{
"stable": "2.6.0",
"testing": "3.0.0-beta7"
"stable": "3.0.4",
"testing": "3.1.0-beta1"
}

View File

@ -4,7 +4,7 @@
"company": "Coding Instinct AB"
},
"name": "grafana",
"version": "3.0.0-beta7",
"version": "4.0.0-pre1",
"repository": {
"type": "git",
"url": "http://github.com/grafana/grafana.git"
@ -13,14 +13,14 @@
"zone.js": "^0.6.6",
"autoprefixer": "^6.3.3",
"es6-promise": "^3.0.2",
"es6-shim": "^0.35.0",
"es6-shim": "^0.35.1",
"expect.js": "~0.2.0",
"glob": "~3.2.7",
"grunt": "~0.4.0",
"grunt-angular-templates": "^0.5.5",
"grunt-cli": "~0.1.13",
"grunt-contrib-clean": "~0.7.0",
"grunt-contrib-compress": "~0.14.0",
"grunt-contrib-compress": "^1.3.0",
"grunt-contrib-concat": "^0.5.1",
"grunt-contrib-copy": "~0.8.2",
"grunt-contrib-cssmin": "~0.14.0",
@ -50,14 +50,14 @@
"karma-phantomjs-launcher": "1.0.0",
"load-grunt-tasks": "3.4.0",
"mocha": "2.3.4",
"phantomjs-prebuilt": "^2.1.3",
"phantomjs-prebuilt": "^2.1.7",
"reflect-metadata": "0.1.2",
"rxjs": "5.0.0-beta.4",
"sass-lint": "^1.6.0",
"systemjs": "0.19.24"
},
"engines": {
"node": "0.4.x",
"node": "4.x",
"npm": "2.14.x"
},
"scripts": {

View File

@ -7,12 +7,12 @@ set -e
startGrafana() {
if [ -x /bin/systemctl ]; then
/bin/systemctl daemon-reload
/bin/systemctl start grafana-server
/bin/systemctl restart grafana-server
elif [ -x "/etc/init.d/grafana-server" ]; then
if [ -x "`which invoke-rc.d 2>/dev/null`" ]; then
invoke-rc.d grafana-server start || true
invoke-rc.d grafana-server restart || true
else
/etc/init.d/grafana-server start || true
/etc/init.d/grafana-server restart || true
fi
fi
}

View File

@ -1,22 +1,20 @@
#! /usr/bin/env bash
deb_ver=3.0.0-beta51460725904
rpm_ver=3.0.0-beta51460725904
deb_ver=3.1.0-1466666977beta1
rpm_ver=3.1.0-1466666977beta1
#rpm_ver=3.0.0-1
# wget https://grafanarel.s3.amazonaws.com/builds/grafana_${deb_ver}_amd64.deb
#wget https://grafanarel.s3.amazonaws.com/builds/grafana_${deb_ver}_amd64.deb
# package_cloud push grafana/stable/debian/jessie grafana_${deb_ver}_amd64.deb
# package_cloud push grafana/stable/debian/wheezy grafana_${deb_ver}_amd64.deb
#package_cloud push grafana/stable/debian/jessie grafana_${deb_ver}_amd64.deb
#package_cloud push grafana/stable/debian/wheezy grafana_${deb_ver}_amd64.deb
package_cloud push grafana/testing/debian/jessie grafana_${deb_ver}_amd64.deb
package_cloud push grafana/testing/debian/wheezy grafana_${deb_ver}_amd64.deb
#package_cloud push grafana/testing/debian/jessie grafana_${deb_ver}_amd64.deb
#package_cloud push grafana/testing/debian/wheezy grafana_${deb_ver}_amd64.deb
# wget https://grafanarel.s3.amazonaws.com/builds/grafana-${rpm_ver}.x86_64.rpm
#wget https://grafanarel.s3.amazonaws.com/builds/grafana-${rpm_ver}.x86_64.rpm
#package_cloud push grafana/testing/el/6 grafana-${rpm_ver}.x86_64.rpm
package_cloud push grafana/testing/el/6 grafana-${rpm_ver}.x86_64.rpm
package_cloud push grafana/testing/el/7 grafana-${rpm_ver}.x86_64.rpm
# package_cloud push grafana/stable/el/7 grafana-${version}-1.x86_64.rpm
# package_cloud push grafana/stable/el/6 grafana-${version}-1.x86_64.rpm
# package_cloud push grafana/stable/el/7 grafana-${rpm_ver}.x86_64.rpm
# package_cloud push grafana/stable/el/6 grafana-${rpm_ver}.x86_64.rpm

View File

@ -34,6 +34,7 @@ func Register(r *macaron.Macaron) {
r.Get("/org/", reqSignedIn, Index)
r.Get("/org/new", reqSignedIn, Index)
r.Get("/datasources/", reqSignedIn, Index)
r.Get("/datasources/new", reqSignedIn, Index)
r.Get("/datasources/edit/*", reqSignedIn, Index)
r.Get("/org/users/", reqSignedIn, Index)
r.Get("/org/apikeys/", reqSignedIn, Index)
@ -55,6 +56,8 @@ func Register(r *macaron.Macaron) {
r.Get("/dashboard/*", reqSignedIn, Index)
r.Get("/dashboard-solo/*", reqSignedIn, Index)
r.Get("/import/dashboard", reqSignedIn, Index)
r.Get("/dashboards/*", reqSignedIn, Index)
r.Get("/playlists/", reqSignedIn, Index)
r.Get("/playlists/*", reqSignedIn, Index)
@ -115,6 +118,7 @@ func Register(r *macaron.Macaron) {
r.Get("/:id", wrap(GetUserById))
r.Get("/:id/orgs", wrap(GetUserOrgList))
r.Put("/:id", bind(m.UpdateUserCommand{}), wrap(UpdateUser))
r.Post("/:id/using/:orgId", wrap(UpdateUserActiveOrg))
}, reqGrafanaAdmin)
// org information available to all users.
@ -209,7 +213,7 @@ func Register(r *macaron.Macaron) {
r.Combo("/db/:slug").Get(GetDashboard).Delete(DeleteDashboard)
r.Post("/db", reqEditorRole, bind(m.SaveDashboardCommand{}), PostDashboard)
r.Get("/file/:file", GetDashboardFromJsonFile)
r.Get("/home", GetHomeDashboard)
r.Get("/home", wrap(GetHomeDashboard))
r.Get("/tags", GetDashboardTags)
r.Post("/import", bind(dtos.ImportDashboardCommand{}), wrap(ImportDashboard))
})
@ -234,7 +238,13 @@ func Register(r *macaron.Macaron) {
r.Get("/search/", Search)
// metrics
r.Get("/metrics/test", GetTestMetrics)
r.Get("/metrics/test", wrap(GetTestMetrics))
// metrics
r.Get("/metrics", wrap(GetInternalMetrics))
// error test
r.Get("/metrics/error", wrap(GenerateError))
}, reqSignedIn)

View File

@ -30,7 +30,7 @@ func InitAppPluginRoutes(r *macaron.Macaron) {
}
handlers = append(handlers, AppPluginRoute(route, plugin.Id))
r.Route(url, route.Method, handlers...)
log.Info("Plugins: Adding proxy route %s", url)
log.Debug("Plugins: Adding proxy route %s", url)
}
}
}

View File

@ -4,6 +4,8 @@ import (
"encoding/json"
"errors"
"io/ioutil"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
@ -14,6 +16,8 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/middleware"
m "github.com/grafana/grafana/pkg/models"
)
@ -44,31 +48,97 @@ func init() {
}
}
var awsCredentials map[string]*credentials.Credentials = make(map[string]*credentials.Credentials)
type cache struct {
credential *credentials.Credentials
expiration *time.Time
}
func getCredentials(profile string) *credentials.Credentials {
if _, ok := awsCredentials[profile]; ok {
return awsCredentials[profile]
var awsCredentialCache map[string]cache = make(map[string]cache)
var credentialCacheLock sync.RWMutex
func getCredentials(profile string, region string, assumeRoleArn string) *credentials.Credentials {
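// Credentials are cached per profile and assumed role, and reused until they expire.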
cacheKey := profile + ":" + assumeRoleArn
credentialCacheLock.RLock()
if _, ok := awsCredentialCache[cacheKey]; ok {
if awsCredentialCache[cacheKey].expiration != nil &&
(*awsCredentialCache[cacheKey].expiration).After(time.Now().UTC()) {
result := awsCredentialCache[cacheKey].credential
credentialCacheLock.RUnlock()
return result
}
}
credentialCacheLock.RUnlock()
accessKeyId := ""
secretAccessKey := ""
sessionToken := ""
var expiration *time.Time
expiration = nil
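// Temporary credentials are fetched via STS AssumeRole only when the datasource is configured with an IAM role ARN.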
if strings.Index(assumeRoleArn, "arn:aws:iam:") == 0 {
params := &sts.AssumeRoleInput{
RoleArn: aws.String(assumeRoleArn),
RoleSessionName: aws.String("GrafanaSession"),
DurationSeconds: aws.Int64(900),
}
stsSess := session.New()
stsCreds := credentials.NewChainCredentials(
[]credentials.Provider{
&credentials.EnvProvider{},
&credentials.SharedCredentialsProvider{Filename: "", Profile: profile},
&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(stsSess), ExpiryWindow: 5 * time.Minute},
})
stsConfig := &aws.Config{
Region: aws.String(region),
Credentials: stsCreds,
}
svc := sts.New(session.New(stsConfig), stsConfig)
resp, err := svc.AssumeRole(params)
if err != nil {
// log and continue; fall back to the default credential chain below
log.Error(3, "CloudWatch: Failed to assume role", err)
}
if resp.Credentials != nil {
accessKeyId = *resp.Credentials.AccessKeyId
secretAccessKey = *resp.Credentials.SecretAccessKey
sessionToken = *resp.Credentials.SessionToken
expiration = resp.Credentials.Expiration
}
}
sess := session.New()
creds := credentials.NewChainCredentials(
[]credentials.Provider{
&credentials.StaticProvider{Value: credentials.Value{
AccessKeyID: accessKeyId,
SecretAccessKey: secretAccessKey,
SessionToken: sessionToken,
}},
&credentials.EnvProvider{},
&credentials.SharedCredentialsProvider{Filename: "", Profile: profile},
&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess), ExpiryWindow: 5 * time.Minute},
})
awsCredentials[profile] = creds
credentialCacheLock.Lock()
awsCredentialCache[cacheKey] = cache{
credential: creds,
expiration: expiration,
}
credentialCacheLock.Unlock()
return creds
}
func handleGetMetricStatistics(req *cwRequest, c *middleware.Context) {
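// getAwsConfig builds the per-request AWS config in one place, so every handler below shares the same credential cache.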
func getAwsConfig(req *cwRequest) *aws.Config {
assumeRoleArn := req.DataSource.JsonData.Get("assumeRoleArn").MustString()
cfg := &aws.Config{
Region: aws.String(req.Region),
Credentials: getCredentials(req.DataSource.Database),
Credentials: getCredentials(req.DataSource.Database, req.Region, assumeRoleArn),
}
return cfg
}
func handleGetMetricStatistics(req *cwRequest, c *middleware.Context) {
cfg := getAwsConfig(req)
svc := cloudwatch.New(session.New(cfg), cfg)
reqParam := &struct {
@ -104,11 +174,7 @@ func handleGetMetricStatistics(req *cwRequest, c *middleware.Context) {
}
func handleListMetrics(req *cwRequest, c *middleware.Context) {
cfg := &aws.Config{
Region: aws.String(req.Region),
Credentials: getCredentials(req.DataSource.Database),
}
cfg := getAwsConfig(req)
svc := cloudwatch.New(session.New(cfg), cfg)
reqParam := &struct {
@ -144,11 +210,7 @@ func handleListMetrics(req *cwRequest, c *middleware.Context) {
}
func handleDescribeAlarms(req *cwRequest, c *middleware.Context) {
cfg := &aws.Config{
Region: aws.String(req.Region),
Credentials: getCredentials(req.DataSource.Database),
}
cfg := getAwsConfig(req)
svc := cloudwatch.New(session.New(cfg), cfg)
reqParam := &struct {
@ -187,11 +249,7 @@ func handleDescribeAlarms(req *cwRequest, c *middleware.Context) {
}
func handleDescribeAlarmsForMetric(req *cwRequest, c *middleware.Context) {
cfg := &aws.Config{
Region: aws.String(req.Region),
Credentials: getCredentials(req.DataSource.Database),
}
cfg := getAwsConfig(req)
svc := cloudwatch.New(session.New(cfg), cfg)
reqParam := &struct {
@ -227,11 +285,7 @@ func handleDescribeAlarmsForMetric(req *cwRequest, c *middleware.Context) {
}
func handleDescribeAlarmHistory(req *cwRequest, c *middleware.Context) {
cfg := &aws.Config{
Region: aws.String(req.Region),
Credentials: getCredentials(req.DataSource.Database),
}
cfg := getAwsConfig(req)
svc := cloudwatch.New(session.New(cfg), cfg)
reqParam := &struct {
@ -263,11 +317,7 @@ func handleDescribeAlarmHistory(req *cwRequest, c *middleware.Context) {
}
func handleDescribeInstances(req *cwRequest, c *middleware.Context) {
cfg := &aws.Config{
Region: aws.String(req.Region),
Credentials: getCredentials(req.DataSource.Database),
}
cfg := getAwsConfig(req)
svc := ec2.New(session.New(cfg), cfg)
reqParam := &struct {

View File

@ -32,8 +32,13 @@ func init() {
"AWS/Billing": {"EstimatedCharges"},
"AWS/CloudFront": {"Requests", "BytesDownloaded", "BytesUploaded", "TotalErrorRate", "4xxErrorRate", "5xxErrorRate"},
"AWS/CloudSearch": {"SuccessfulRequests", "SearchableDocuments", "IndexUtilization", "Partitions"},
"AWS/DynamoDB": {"ConditionalCheckFailedRequests", "ConsumedReadCapacityUnits", "ConsumedWriteCapacityUnits", "OnlineIndexConsumedWriteCapacity", "OnlineIndexPercentageProgress", "OnlineIndexThrottleEvents", "ProvisionedReadCapacityUnits", "ProvisionedWriteCapacityUnits", "ReadThrottleEvents", "ReturnedItemCount", "SuccessfulRequestLatency", "SystemErrors", "ThrottledRequests", "UserErrors", "WriteThrottleEvents"},
"AWS/ECS": {"CPUUtilization", "MemoryUtilization"},
"AWS/DynamoDB": {"ConditionalCheckFailedRequests", "ConsumedReadCapacityUnits", "ConsumedWriteCapacityUnits", "OnlineIndexConsumedWriteCapacity", "OnlineIndexPercentageProgress", "OnlineIndexThrottleEvents", "ProvisionedReadCapacityUnits", "ProvisionedWriteCapacityUnits", "ReadThrottleEvents", "ReturnedBytes", "ReturnedItemCount", "ReturnedRecordsCount", "SuccessfulRequestLatency", "SystemErrors", "ThrottledRequests", "UserErrors", "WriteThrottleEvents"},
"AWS/EBS": {"VolumeReadBytes", "VolumeWriteBytes", "VolumeReadOps", "VolumeWriteOps", "VolumeTotalReadTime", "VolumeTotalWriteTime", "VolumeIdleTime", "VolumeQueueLength", "VolumeThroughputPercentage", "VolumeConsumedReadWriteOps", "BurstBalance"},
"AWS/EC2": {"CPUCreditUsage", "CPUCreditBalance", "CPUUtilization", "DiskReadOps", "DiskWriteOps", "DiskReadBytes", "DiskWriteBytes", "NetworkIn", "NetworkOut", "NetworkPacketsIn", "NetworkPacketsOut", "StatusCheckFailed", "StatusCheckFailed_Instance", "StatusCheckFailed_System"},
"AWS/EC2Spot": {"AvailableInstancePoolsCount", "BidsSubmittedForCapacity", "EligibleInstancePoolCount", "FulfilledCapacity", "MaxPercentCapacityAllocation", "PendingCapacity", "PercentCapacityAllocation", "TargetCapacity", "TerminatingCapacity"},
"AWS/ECS": {"CPUReservation", "MemoryReservation", "CPUUtilization", "MemoryUtilization"},
"AWS/EFS": {"BurstCreditBalance", "ClientConnections", "DataReadIOBytes", "DataWriteIOBytes", "MetadataIOBytes", "TotalIOBytes", "PermittedThroughput", "PercentIOLimit"},
"AWS/ELB": {"HealthyHostCount", "UnHealthyHostCount", "RequestCount", "Latency", "HTTPCode_ELB_4XX", "HTTPCode_ELB_5XX", "HTTPCode_Backend_2XX", "HTTPCode_Backend_3XX", "HTTPCode_Backend_4XX", "HTTPCode_Backend_5XX", "BackendConnectionErrors", "SurgeQueueLength", "SpilloverCount"},
"AWS/ElastiCache": {
"CPUUtilization", "FreeableMemory", "NetworkBytesIn", "NetworkBytesOut", "SwapUsage",
"BytesUsedForCacheItems", "BytesReadIntoMemcached", "BytesWrittenOutFromMemcached", "CasBadval", "CasHits", "CasMisses", "CmdFlush", "CmdGet", "CmdSet", "CurrConnections", "CurrItems", "DecrHits", "DecrMisses", "DeleteHits", "DeleteMisses", "Evictions", "GetHits", "GetMisses", "IncrHits", "IncrMisses", "Reclaimed",
@ -42,9 +47,15 @@ func init() {
"BytesUsedForCache", "CacheHits", "CacheMisses", "CurrConnections", "Evictions", "HyperLogLogBasedCmds", "NewConnections", "Reclaimed", "ReplicationBytes", "ReplicationLag", "SaveInProgress",
"CurrItems", "GetTypeCmds", "HashBasedCmds", "KeyBasedCmds", "ListBasedCmds", "SetBasedCmds", "SetTypeCmds", "SortedSetBasedCmds", "StringBasedCmds",
},
"AWS/EBS": {"VolumeReadBytes", "VolumeWriteBytes", "VolumeReadOps", "VolumeWriteOps", "VolumeTotalReadTime", "VolumeTotalWriteTime", "VolumeIdleTime", "VolumeQueueLength", "VolumeThroughputPercentage", "VolumeConsumedReadWriteOps"},
"AWS/EC2": {"CPUCreditUsage", "CPUCreditBalance", "CPUUtilization", "DiskReadOps", "DiskWriteOps", "DiskReadBytes", "DiskWriteBytes", "NetworkIn", "NetworkOut", "StatusCheckFailed", "StatusCheckFailed_Instance", "StatusCheckFailed_System"},
"AWS/ELB": {"HealthyHostCount", "UnHealthyHostCount", "RequestCount", "Latency", "HTTPCode_ELB_4XX", "HTTPCode_ELB_5XX", "HTTPCode_Backend_2XX", "HTTPCode_Backend_3XX", "HTTPCode_Backend_4XX", "HTTPCode_Backend_5XX", "BackendConnectionErrors", "SurgeQueueLength", "SpilloverCount"},
"AWS/ElasticBeanstalk": {
"EnvironmentHealth",
"ApplicationLatencyP10", "ApplicationLatencyP50", "ApplicationLatencyP75", "ApplicationLatencyP85", "ApplicationLatencyP90", "ApplicationLatencyP95", "ApplicationLatencyP99", "ApplicationLatencyP99.9",
"ApplicationRequests2xx", "ApplicationRequests3xx", "ApplicationRequests4xx", "ApplicationRequests5xx", "ApplicationRequestsTotal",
"CPUIdle", "CPUIowait", "CPUIrq", "CPUNice", "CPUSoftirq", "CPUSystem", "CPUUser",
"InstanceHealth", "InstancesDegraded", "InstancesInfo", "InstancesNoData", "InstancesOk", "InstancesPending", "InstancesSevere", "InstancesUnknown", "InstancesWarning",
"LoadAverage1min", "LoadAverage5min",
"RootFilesystemUtil",
},
"AWS/ElasticMapReduce": {"IsIdle", "JobsRunning", "JobsFailed",
"MapTasksRunning", "MapTasksRemaining", "MapSlotsOpen", "RemainingMapTasksPerSlot", "ReduceTasksRunning", "ReduceTasksRemaining", "ReduceSlotsOpen",
"CoreNodesRunning", "CoreNodesPending", "LiveDataNodes", "TaskNodesRunning", "TaskNodesPending", "LiveTaskTrackers",
@ -64,13 +75,13 @@ func init() {
"AWS/Redshift": {"CPUUtilization", "DatabaseConnections", "HealthStatus", "MaintenanceMode", "NetworkReceiveThroughput", "NetworkTransmitThroughput", "PercentageDiskSpaceUsed", "ReadIOPS", "ReadLatency", "ReadThroughput", "WriteIOPS", "WriteLatency", "WriteThroughput"},
"AWS/RDS": {"BinLogDiskUsage", "CPUUtilization", "CPUCreditUsage", "CPUCreditBalance", "DatabaseConnections", "DiskQueueDepth", "FreeableMemory", "FreeStorageSpace", "ReplicaLag", "SwapUsage", "ReadIOPS", "WriteIOPS", "ReadLatency", "WriteLatency", "ReadThroughput", "WriteThroughput", "NetworkReceiveThroughput", "NetworkTransmitThroughput"},
"AWS/Route53": {"HealthCheckStatus", "HealthCheckPercentageHealthy", "ConnectionTime", "SSLHandshakeTime", "TimeToFirstByte"},
"AWS/S3": {"BucketSizeBytes", "NumberOfObjects"},
"AWS/SNS": {"NumberOfMessagesPublished", "PublishSize", "NumberOfNotificationsDelivered", "NumberOfNotificationsFailed"},
"AWS/SQS": {"NumberOfMessagesSent", "SentMessageSize", "NumberOfMessagesReceived", "NumberOfEmptyReceives", "NumberOfMessagesDeleted", "ApproximateNumberOfMessagesDelayed", "ApproximateNumberOfMessagesVisible", "ApproximateNumberOfMessagesNotVisible"},
"AWS/S3": {"BucketSizeBytes", "NumberOfObjects"},
"AWS/SWF": {"DecisionTaskScheduleToStartTime", "DecisionTaskStartToCloseTime", "DecisionTasksCompleted", "StartedDecisionTasksTimedOutOnClose", "WorkflowStartToCloseTime", "WorkflowsCanceled", "WorkflowsCompleted", "WorkflowsContinuedAsNew", "WorkflowsFailed", "WorkflowsTerminated", "WorkflowsTimedOut",
"ActivityTaskScheduleToCloseTime", "ActivityTaskScheduleToStartTime", "ActivityTaskStartToCloseTime", "ActivityTasksCanceled", "ActivityTasksCompleted", "ActivityTasksFailed", "ScheduledActivityTasksTimedOutOnClose", "ScheduledActivityTasksTimedOutOnStart", "StartedActivityTasksTimedOutOnClose", "StartedActivityTasksTimedOutOnHeartbeat"},
"AWS/StorageGateway": {"CacheHitPercent", "CachePercentUsed", "CachePercentDirty", "CloudBytesDownloaded", "CloudDownloadLatency", "CloudBytesUploaded", "UploadBufferFree", "UploadBufferPercentUsed", "UploadBufferUsed", "QueuedWrites", "ReadBytes", "ReadTime", "TotalCacheSize", "WriteBytes", "WriteTime", "TimeSinceLastRecoveryPoint", "WorkingStorageFree", "WorkingStoragePercentUsed", "WorkingStorageUsed",
"CacheHitPercent", "CachePercentUsed", "CachePercentDirty", "ReadBytes", "ReadTime", "WriteBytes", "WriteTime", "QueuedWrites"},
"AWS/SWF": {"DecisionTaskScheduleToStartTime", "DecisionTaskStartToCloseTime", "DecisionTasksCompleted", "StartedDecisionTasksTimedOutOnClose", "WorkflowStartToCloseTime", "WorkflowsCanceled", "WorkflowsCompleted", "WorkflowsContinuedAsNew", "WorkflowsFailed", "WorkflowsTerminated", "WorkflowsTimedOut",
"ActivityTaskScheduleToCloseTime", "ActivityTaskScheduleToStartTime", "ActivityTaskStartToCloseTime", "ActivityTasksCanceled", "ActivityTasksCompleted", "ActivityTasksFailed", "ScheduledActivityTasksTimedOutOnClose", "ScheduledActivityTasksTimedOutOnStart", "StartedActivityTasksTimedOutOnClose", "StartedActivityTasksTimedOutOnHeartbeat"},
"AWS/WAF": {"AllowedRequests", "BlockedRequests", "CountedRequests"},
"AWS/WorkSpaces": {"Available", "Unhealthy", "ConnectionAttempt", "ConnectionSuccess", "ConnectionFailure", "SessionLaunchTime", "InSessionLatency", "SessionDisconnect"},
}
@ -79,28 +90,31 @@ func init() {
"AWS/Billing": {"ServiceName", "LinkedAccount", "Currency"},
"AWS/CloudFront": {"DistributionId", "Region"},
"AWS/CloudSearch": {},
"AWS/DynamoDB": {"TableName", "GlobalSecondaryIndexName", "Operation"},
"AWS/ECS": {"ClusterName", "ServiceName"},
"AWS/ElastiCache": {"CacheClusterId", "CacheNodeId"},
"AWS/DynamoDB": {"TableName", "GlobalSecondaryIndexName", "Operation", "StreamLabel"},
"AWS/EBS": {"VolumeId"},
"AWS/EC2": {"AutoScalingGroupName", "ImageId", "InstanceId", "InstanceType"},
"AWS/EC2Spot": {"AvailabilityZone", "FleetRequestId", "InstanceType"},
"AWS/ECS": {"ClusterName", "ServiceName"},
"AWS/EFS": {"FileSystemId"},
"AWS/ELB": {"LoadBalancerName", "AvailabilityZone"},
"AWS/ElastiCache": {"CacheClusterId", "CacheNodeId"},
"AWS/ElasticBeanstalk": {"EnvironmentName", "InstanceId"},
"AWS/ElasticMapReduce": {"ClusterId", "JobFlowId", "JobId"},
"AWS/ES": {"ClientId", "DomainName"},
"AWS/Events": {"RuleName"},
"AWS/Kinesis": {"StreamName", "ShardID"},
"AWS/Lambda": {"FunctionName"},
"AWS/Lambda": {"FunctionName", "Resource", "Version", "Alias"},
"AWS/Logs": {"LogGroupName", "DestinationType", "FilterName"},
"AWS/ML": {"MLModelId", "RequestMode"},
"AWS/OpsWorks": {"StackId", "LayerId", "InstanceId"},
"AWS/Redshift": {"NodeID", "ClusterIdentifier"},
"AWS/RDS": {"DBInstanceIdentifier", "DatabaseClass", "EngineName"},
"AWS/RDS": {"DBInstanceIdentifier", "DBClusterIdentifier", "DatabaseClass", "EngineName"},
"AWS/Route53": {"HealthCheckId"},
"AWS/S3": {"BucketName", "StorageType"},
"AWS/SNS": {"Application", "Platform", "TopicName"},
"AWS/SQS": {"QueueName"},
"AWS/S3": {"BucketName", "StorageType"},
"AWS/SWF": {"Domain", "WorkflowTypeName", "WorkflowTypeVersion", "ActivityTypeName", "ActivityTypeVersion"},
"AWS/StorageGateway": {"GatewayId", "GatewayName", "VolumeId"},
"AWS/SWF": {"Domain", "WorkflowTypeName", "WorkflowTypeVersion", "ActivityTypeName", "ActivityTypeVersion"},
"AWS/WAF": {"Rule", "WebACL"},
"AWS/WorkSpaces": {"DirectoryId", "WorkspaceId"},
}
@ -166,7 +180,8 @@ func handleGetMetrics(req *cwRequest, c *middleware.Context) {
}
} else {
var err error
if namespaceMetrics, err = getMetricsForCustomMetrics(req.Region, reqParam.Parameters.Namespace, req.DataSource.Database, getAllMetrics); err != nil {
assumeRoleArn := req.DataSource.JsonData.Get("assumeRoleArn").MustString()
if namespaceMetrics, err = getMetricsForCustomMetrics(req.Region, reqParam.Parameters.Namespace, req.DataSource.Database, assumeRoleArn, getAllMetrics); err != nil {
c.JsonApiErr(500, "Unable to call AWS API", err)
return
}
@ -199,7 +214,8 @@ func handleGetDimensions(req *cwRequest, c *middleware.Context) {
}
} else {
var err error
if dimensionValues, err = getDimensionsForCustomMetrics(req.Region, reqParam.Parameters.Namespace, req.DataSource.Database, getAllMetrics); err != nil {
assumeRoleArn := req.DataSource.JsonData.Get("assumeRoleArn").MustString()
if dimensionValues, err = getDimensionsForCustomMetrics(req.Region, reqParam.Parameters.Namespace, req.DataSource.Database, assumeRoleArn, getAllMetrics); err != nil {
c.JsonApiErr(500, "Unable to call AWS API", err)
return
}
@ -214,10 +230,10 @@ func handleGetDimensions(req *cwRequest, c *middleware.Context) {
c.JSON(200, result)
}
func getAllMetrics(region string, namespace string, database string) (cloudwatch.ListMetricsOutput, error) {
func getAllMetrics(region string, namespace string, database string, assumeRoleArn string) (cloudwatch.ListMetricsOutput, error) {
cfg := &aws.Config{
Region: aws.String(region),
Credentials: getCredentials(database),
Credentials: getCredentials(database, region, assumeRoleArn),
}
svc := cloudwatch.New(session.New(cfg), cfg)
@ -244,8 +260,8 @@ func getAllMetrics(region string, namespace string, database string) (cloudwatch
var metricsCacheLock sync.Mutex
func getMetricsForCustomMetrics(region string, namespace string, database string, getAllMetrics func(string, string, string) (cloudwatch.ListMetricsOutput, error)) ([]string, error) {
result, err := getAllMetrics(region, namespace, database)
func getMetricsForCustomMetrics(region string, namespace string, database string, assumeRoleArn string, getAllMetrics func(string, string, string, string) (cloudwatch.ListMetricsOutput, error)) ([]string, error) {
result, err := getAllMetrics(region, namespace, database, assumeRoleArn)
if err != nil {
return []string{}, err
}
@ -282,8 +298,8 @@ func getMetricsForCustomMetrics(region string, namespace string, database string
var dimensionsCacheLock sync.Mutex
func getDimensionsForCustomMetrics(region string, namespace string, database string, getAllMetrics func(string, string, string) (cloudwatch.ListMetricsOutput, error)) ([]string, error) {
result, err := getAllMetrics(region, namespace, database)
func getDimensionsForCustomMetrics(region string, namespace string, database string, assumeRoleArn string, getAllMetrics func(string, string, string, string) (cloudwatch.ListMetricsOutput, error)) ([]string, error) {
result, err := getAllMetrics(region, namespace, database, assumeRoleArn)
if err != nil {
return []string{}, err
}

View File

@ -14,7 +14,8 @@ func TestCloudWatchMetrics(t *testing.T) {
region := "us-east-1"
namespace := "Foo"
database := "default"
f := func(region string, namespace string, database string) (cloudwatch.ListMetricsOutput, error) {
assumeRoleArn := ""
f := func(region string, namespace string, database string, assumeRoleArn string) (cloudwatch.ListMetricsOutput, error) {
return cloudwatch.ListMetricsOutput{
Metrics: []*cloudwatch.Metric{
{
@ -28,7 +29,7 @@ func TestCloudWatchMetrics(t *testing.T) {
},
}, nil
}
metrics, _ := getMetricsForCustomMetrics(region, namespace, database, f)
metrics, _ := getMetricsForCustomMetrics(region, namespace, database, assumeRoleArn, f)
Convey("Should contain Test_MetricName", func() {
So(metrics, ShouldContain, "Test_MetricName")
@ -39,7 +40,8 @@ func TestCloudWatchMetrics(t *testing.T) {
region := "us-east-1"
namespace := "Foo"
database := "default"
f := func(region string, namespace string, database string) (cloudwatch.ListMetricsOutput, error) {
assumeRoleArn := ""
f := func(region string, namespace string, database string, assumeRoleArn string) (cloudwatch.ListMetricsOutput, error) {
return cloudwatch.ListMetricsOutput{
Metrics: []*cloudwatch.Metric{
{
@ -53,7 +55,7 @@ func TestCloudWatchMetrics(t *testing.T) {
},
}, nil
}
dimensionKeys, _ := getDimensionsForCustomMetrics(region, namespace, database, f)
dimensionKeys, _ := getDimensionsForCustomMetrics(region, namespace, database, assumeRoleArn, f)
Convey("Should contain Test_DimensionName", func() {
So(dimensionKeys, ShouldContain, "Test_DimensionName")

View File

@ -4,7 +4,6 @@ import (
"encoding/json"
"net/http"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/metrics"
"github.com/grafana/grafana/pkg/middleware"
"github.com/grafana/grafana/pkg/setting"
@ -12,18 +11,24 @@ import (
)
var (
NotFound = ApiError(404, "Not found", nil)
ServerError = ApiError(500, "Server error", nil)
NotFound = func() Response {
return ApiError(404, "Not found", nil)
}
ServerError = func(err error) Response {
return ApiError(500, "Server error", err)
}
)
type Response interface {
WriteTo(out http.ResponseWriter)
WriteTo(ctx *middleware.Context)
}
type NormalResponse struct {
status int
body []byte
header http.Header
errMessage string
err error
}
func wrap(action interface{}) macaron.Handler {
@ -34,20 +39,24 @@ func wrap(action interface{}) macaron.Handler {
if err == nil && val != nil && len(val) > 0 {
res = val[0].Interface().(Response)
} else {
res = ServerError
res = ServerError(err)
}
res.WriteTo(c.Resp)
res.WriteTo(c)
}
}
func (r *NormalResponse) WriteTo(out http.ResponseWriter) {
header := out.Header()
func (r *NormalResponse) WriteTo(ctx *middleware.Context) {
if r.err != nil {
ctx.Logger.Error(r.errMessage, "error", r.err)
}
header := ctx.Resp.Header()
for k, v := range r.header {
header[k] = v
}
out.WriteHeader(r.status)
out.Write(r.body)
ctx.Resp.WriteHeader(r.status)
ctx.Resp.Write(r.body)
}
func (r *NormalResponse) Cache(ttl string) *NormalResponse {
@ -60,7 +69,6 @@ func (r *NormalResponse) Header(key, value string) *NormalResponse {
}
// functions to create responses
func Empty(status int) *NormalResponse {
return Respond(status, nil)
}
@ -76,29 +84,35 @@ func ApiSuccess(message string) *NormalResponse {
}
func ApiError(status int, message string, err error) *NormalResponse {
resp := make(map[string]interface{})
if err != nil {
log.Error(4, "%s: %v", message, err)
if setting.Env != setting.PROD {
resp["error"] = err.Error()
}
}
data := make(map[string]interface{})
switch status {
case 404:
metrics.M_Api_Status_404.Inc(1)
resp["message"] = "Not Found"
data["message"] = "Not Found"
case 500:
metrics.M_Api_Status_500.Inc(1)
resp["message"] = "Internal Server Error"
data["message"] = "Internal Server Error"
}
if message != "" {
resp["message"] = message
data["message"] = message
}
return Json(status, resp)
if err != nil {
if setting.Env != setting.PROD {
data["error"] = err.Error()
}
}
resp := Json(status, data)
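// Attach the error to the response so WriteTo can log it with the request context.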
if err != nil {
resp.errMessage = message
resp.err = err
}
return resp
}
func Respond(status int, body interface{}) *NormalResponse {
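Taken together, handlers converted in this commit return a `Response` value instead of writing to the context themselves, and `wrap` adapts them to macaron handlers. A minimal sketch of the pattern; `GetThing` and `loadThing` are hypothetical names, not part of this commit:
```go
// Hypothetical handler using the Response pattern above.
func GetThing(c *middleware.Context) Response {
	thing, err := loadThing(c.ParamsInt64(":id")) // loadThing stands in for a bus query
	if err != nil {
		// ApiError records the error on the response; WriteTo logs it server-side
		// and only exposes err.Error() outside production.
		return ApiError(500, "Failed to load thing", err)
	}
	return Json(200, thing)
}

// Registered through the adapter, as in api.go:
//   r.Get("/things/:id", wrap(GetThing))
```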

View File

@ -8,6 +8,7 @@ import (
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/metrics"
"github.com/grafana/grafana/pkg/middleware"
m "github.com/grafana/grafana/pkg/models"
@ -30,8 +31,6 @@ func isDashboardStarredByUser(c *middleware.Context, dashId int64) (bool, error)
}
func GetDashboard(c *middleware.Context) {
metrics.M_Api_Dashboard_Get.Inc(1)
slug := strings.ToLower(c.Params(":slug"))
query := m.GetDashboardQuery{Slug: slug, OrgId: c.OrgId}
@ -75,6 +74,7 @@ func GetDashboard(c *middleware.Context) {
},
}
c.TimeRequest(metrics.M_Api_Dashboard_Get)
c.JSON(200, dto)
}
@ -149,8 +149,7 @@ func PostDashboard(c *middleware.Context, cmd m.SaveDashboardCommand) {
return
}
metrics.M_Api_Dashboard_Post.Inc(1)
c.TimeRequest(metrics.M_Api_Dashboard_Save)
c.JSON(200, util.DynMap{"status": "success", "slug": cmd.Result.Slug, "version": cmd.Result.Version})
}
@ -158,30 +157,27 @@ func canEditDashboard(role m.RoleType) bool {
return role == m.ROLE_ADMIN || role == m.ROLE_EDITOR || role == m.ROLE_READ_ONLY_EDITOR
}
func GetHomeDashboard(c *middleware.Context) {
func GetHomeDashboard(c *middleware.Context) Response {
prefsQuery := m.GetPreferencesWithDefaultsQuery{OrgId: c.OrgId, UserId: c.UserId}
if err := bus.Dispatch(&prefsQuery); err != nil {
c.JsonApiErr(500, "Failed to get preferences", err)
return ApiError(500, "Failed to get preferences", err)
}
if prefsQuery.Result.HomeDashboardId != 0 {
slugQuery := m.GetDashboardSlugByIdQuery{Id: prefsQuery.Result.HomeDashboardId}
err := bus.Dispatch(&slugQuery)
if err != nil {
c.JsonApiErr(500, "Failed to get slug from database", err)
return
}
if err == nil {
dashRedirect := dtos.DashboardRedirect{RedirectUri: "db/" + slugQuery.Result}
c.JSON(200, &dashRedirect)
return
return Json(200, &dashRedirect)
} else {
log.Warn("Failed to get slug from database, %s", err.Error())
}
}
filePath := path.Join(setting.StaticRootPath, "dashboards/home.json")
file, err := os.Open(filePath)
if err != nil {
c.JsonApiErr(500, "Failed to load home dashboard", err)
return
return ApiError(500, "Failed to load home dashboard", err)
}
dash := dtos.DashboardFullWithMeta{}
@ -189,11 +185,10 @@ func GetHomeDashboard(c *middleware.Context) {
dash.Meta.CanEdit = canEditDashboard(c.OrgRole)
jsonParser := json.NewDecoder(file)
if err := jsonParser.Decode(&dash.Dashboard); err != nil {
c.JsonApiErr(500, "Failed to load home dashboard", err)
return
return ApiError(500, "Failed to load home dashboard", err)
}
c.JSON(200, &dash)
return Json(200, &dash)
}
func GetDashboardFromJsonFile(c *middleware.Context) {

View File

@ -10,6 +10,7 @@ import (
"github.com/grafana/grafana/pkg/api/cloudwatch"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/metrics"
"github.com/grafana/grafana/pkg/middleware"
m "github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/setting"
@ -55,6 +56,13 @@ func NewReverseProxy(ds *m.DataSource, proxyPath string, targetUrl *url.URL) *ht
req.Header.Add("Authorization", util.GetBasicAuthHeader(ds.BasicAuthUser, ds.BasicAuthPassword))
}
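// A datasource-scoped X-DS-Authorization header, if present, replaces any Authorization header set above.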
dsAuth := req.Header.Get("X-DS-Authorization")
if len(dsAuth) > 0 {
req.Header.Del("X-DS-Authorization")
req.Header.Del("Authorization")
req.Header.Add("Authorization", dsAuth)
}
// clear cookie headers
req.Header.Del("Cookie")
req.Header.Del("Set-Cookie")
@ -73,7 +81,10 @@ func getDatasource(id int64, orgId int64) (*m.DataSource, error) {
}
func ProxyDataSourceRequest(c *middleware.Context) {
c.TimeRequest(metrics.M_DataSource_ProxyReq_Timer)
ds, err := getDatasource(c.ParamsInt64(":id"), c.OrgId)
if err != nil {
c.JsonApiErr(500, "Unable to load datasource meta data", err)
return

View File

@ -8,6 +8,10 @@ type IndexViewData struct {
GoogleAnalyticsId string
GoogleTagManagerId string
MainNavLinks []*NavLink
BuildVersion string
BuildCommit string
NewGrafanaVersionExists bool
NewGrafanaVersion string
}
type PluginCss struct {

View File

@ -34,6 +34,7 @@ type CurrentUser struct {
IsGrafanaAdmin bool `json:"isGrafanaAdmin"`
GravatarUrl string `json:"gravatarUrl"`
Timezone string `json:"timezone"`
Locale string `json:"locale"`
}
type DashboardMeta struct {

View File

@ -1,6 +1,9 @@
package dtos
import "github.com/grafana/grafana/pkg/plugins"
import (
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/plugins"
)
type PluginSetting struct {
Name string `json:"name"`
@ -50,5 +53,6 @@ type ImportDashboardCommand struct {
PluginId string `json:"pluginId"`
Path string `json:"path"`
Overwrite bool `json:"overwrite"`
Dashboard *simplejson.Json `json:"dashboard"`
Inputs []plugins.ImportDashboardInput `json:"inputs"`
}

View File

@ -142,6 +142,7 @@ func getFrontendSettingsMap(c *middleware.Context) (map[string]interface{}, erro
"buildstamp": setting.BuildStamp,
"latestVersion": plugins.GrafanaLatestVersion,
"hasUpdate": plugins.GrafanaHasUpdate,
"env": setting.Env,
},
}

View File

@ -5,9 +5,11 @@ import (
"net"
"net/http"
"net/http/httputil"
"net/url"
"time"
"github.com/grafana/grafana/pkg/middleware"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util"
)
@ -22,12 +24,14 @@ var gNetProxyTransport = &http.Transport{
}
func ReverseProxyGnetReq(proxyPath string) *httputil.ReverseProxy {
director := func(req *http.Request) {
req.URL.Scheme = "https"
req.URL.Host = "grafana.net"
req.Host = "grafana.net"
url, _ := url.Parse(setting.GrafanaNetUrl)
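// Derive scheme, host and path from the configurable GrafanaNetUrl instead of hardcoding grafana.net.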
req.URL.Path = util.JoinUrlFragments("https://grafana.net/api", proxyPath)
director := func(req *http.Request) {
req.URL.Scheme = url.Scheme
req.URL.Host = url.Host
req.Host = url.Host
req.URL.Path = util.JoinUrlFragments(url.Path+"/api", proxyPath)
// clear cookie headers
req.Header.Del("Cookie")

View File

@ -1,6 +1,8 @@
package api
import (
"strings"
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/middleware"
@ -21,6 +23,15 @@ func setIndexViewData(c *middleware.Context) (*dtos.IndexViewData, error) {
}
prefs := prefsQuery.Result
// Read locale from the Accept-Language header, e.g. "sv-SE,sv;q=0.9" yields "sv-SE"
acceptLang := c.Req.Header.Get("Accept-Language")
locale := "en-US"
if len(acceptLang) > 0 {
parts := strings.Split(acceptLang, ",")
locale = parts[0]
}
var data = dtos.IndexViewData{
User: &dtos.CurrentUser{
Id: c.UserId,
@ -35,12 +46,17 @@ func setIndexViewData(c *middleware.Context) (*dtos.IndexViewData, error) {
IsGrafanaAdmin: c.IsGrafanaAdmin,
LightTheme: prefs.Theme == "light",
Timezone: prefs.Timezone,
Locale: locale,
},
Settings: settings,
AppUrl: setting.AppUrl,
AppSubUrl: setting.AppSubUrl,
GoogleAnalyticsId: setting.GoogleAnalyticsId,
GoogleTagManagerId: setting.GoogleTagManagerId,
BuildVersion: setting.BuildVersion,
BuildCommit: setting.BuildCommit,
NewGrafanaVersion: plugins.GrafanaLatestVersion,
NewGrafanaVersionExists: plugins.GrafanaHasUpdate,
}
if setting.DisableGravatar {
@ -65,7 +81,7 @@ func setIndexViewData(c *middleware.Context) (*dtos.IndexViewData, error) {
if c.OrgRole == m.ROLE_ADMIN || c.OrgRole == m.ROLE_EDITOR {
dashboardChildNavs = append(dashboardChildNavs, &dtos.NavLink{Divider: true})
dashboardChildNavs = append(dashboardChildNavs, &dtos.NavLink{Text: "New", Icon: "fa fa-plus", Url: setting.AppSubUrl + "/dashboard/new"})
dashboardChildNavs = append(dashboardChildNavs, &dtos.NavLink{Text: "Import", Icon: "fa fa-download", Url: setting.AppSubUrl + "/import/dashboard"})
dashboardChildNavs = append(dashboardChildNavs, &dtos.NavLink{Text: "Import", Icon: "fa fa-download", Url: setting.AppSubUrl + "/dashboard/new/?editview=import"})
}
data.MainNavLinks = append(data.MainNavLinks, &dtos.NavLink{

View File

@ -29,6 +29,7 @@ func LoginView(c *middleware.Context) {
viewData.Settings["githubAuthEnabled"] = setting.OAuthService.GitHub
viewData.Settings["disableUserSignUp"] = !setting.AllowUserSignUp
viewData.Settings["loginHint"] = setting.LoginHint
viewData.Settings["allowUserPassLogin"] = setting.AllowUserPassLogin
if !tryLoginUsingRememberCookie(c) {
c.HTML(200, VIEW_INDEX, viewData)

View File

@ -1,13 +1,18 @@
package api
import (
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/middleware"
"encoding/json"
"math/rand"
"net/http"
"strconv"
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/metrics"
"github.com/grafana/grafana/pkg/middleware"
"github.com/grafana/grafana/pkg/util"
)
func GetTestMetrics(c *middleware.Context) {
func GetTestMetrics(c *middleware.Context) Response {
from := c.QueryInt64("from")
to := c.QueryInt64("to")
maxDataPoints := c.QueryInt64("maxDataPoints")
@ -32,5 +37,59 @@ func GetTestMetrics(c *middleware.Context) {
result.Data[seriesIndex].DataPoints = points
}
c.JSON(200, &result)
return Json(200, &result)
}
func GetInternalMetrics(c *middleware.Context) Response {
if metrics.UseNilMetrics {
return Json(200, util.DynMap{"message": "Metrics disabled"})
}
snapshots := metrics.MetricStats.GetSnapshots()
resp := make(map[string]interface{})
for _, m := range snapshots {
metricName := m.Name() + m.StringifyTags()
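// Counters report a plain count; timers report count, min/max/mean/stddev and selected percentiles.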
switch metric := m.(type) {
case metrics.Counter:
resp[metricName] = map[string]interface{}{
"count": metric.Count(),
}
case metrics.Timer:
percentiles := metric.Percentiles([]float64{0.25, 0.75, 0.90, 0.99})
resp[metricName] = map[string]interface{}{
"count": metric.Count(),
"min": metric.Min(),
"max": metric.Max(),
"mean": metric.Mean(),
"std": metric.StdDev(),
"p25": percentiles[0],
"p75": percentiles[1],
"p90": percentiles[2],
"p99": percentiles[3],
}
}
}
var b []byte
var err error
if b, err = json.MarshalIndent(resp, "", " "); err != nil {
return ApiError(500, "body json marshal", err)
}
return &NormalResponse{
body: b,
status: 200,
header: http.Header{
"Content-Type": []string{"application/json"},
},
}
}
// Generates an index out of range error
func GenerateError(c *middleware.Context) Response {
var array []string
return Json(200, array[20])
}

View File

@ -88,7 +88,7 @@ func NewApiPluginProxy(ctx *middleware.Context, proxyPath string, route *plugins
}
for key, value := range headers {
log.Info("setting key %v value %v", key, value[0])
log.Trace("setting key %v value %v", key, value[0])
req.Header.Set(key, value[0])
}
}

View File

@ -168,10 +168,11 @@ func ImportDashboard(c *middleware.Context, apiCmd dtos.ImportDashboardCommand)
Path: apiCmd.Path,
Inputs: apiCmd.Inputs,
Overwrite: apiCmd.Overwrite,
Dashboard: apiCmd.Dashboard,
}
if err := bus.Dispatch(&cmd); err != nil {
return ApiError(500, "Failed to install dashboard", err)
return ApiError(500, "Failed to import dashboard", err)
}
return Json(200, cmd.Result)

Some files were not shown because too many files have changed in this diff.